xref: /dragonfly/sys/dev/drm/i915/intel_dp.c (revision 9317c2d0)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27 
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41 
42 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
43 
44 /* Compliance test status bits  */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK	0
46 #define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49 
50 struct dp_link_dpll {
51 	int clock;
52 	struct dpll dpll;
53 };
54 
55 static const struct dp_link_dpll gen4_dpll[] = {
56 	{ 162000,
57 		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
58 	{ 270000,
59 		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
60 };
61 
62 static const struct dp_link_dpll pch_dpll[] = {
63 	{ 162000,
64 		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
65 	{ 270000,
66 		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
67 };
68 
69 static const struct dp_link_dpll vlv_dpll[] = {
70 	{ 162000,
71 		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
72 	{ 270000,
73 		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
74 };
75 
/*
 * CHV supports eDP 1.4, which allows more link rates.
 * Only the fixed link rates are provided below; variable rates are excluded.
 */
80 static const struct dp_link_dpll chv_dpll[] = {
81 	/*
82 	 * CHV requires to program fractional division for m2.
83 	 * m2 is stored in fixed point format using formula below
84 	 * (m2_int << 22) | m2_fraction
85 	 */
86 	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
87 		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
89 		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
91 		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92 };
93 
94 static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
95 				  324000, 432000, 540000 };
96 static const int skl_rates[] = { 162000, 216000, 270000,
97 				  324000, 432000, 540000 };
98 static const int default_rates[] = { 162000, 270000, 540000 };
99 
100 /**
101  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102  * @intel_dp: DP struct
103  *
104  * If a CPU or PCH DP output is attached to an eDP panel, this function
105  * will return true, and false otherwise.
106  */
107 static bool is_edp(struct intel_dp *intel_dp)
108 {
109 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110 
111 	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
112 }
113 
114 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
115 {
116 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117 
118 	return intel_dig_port->base.base.dev;
119 }
120 
121 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
122 {
123 	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
124 }
125 
126 static void intel_dp_link_down(struct intel_dp *intel_dp);
127 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
128 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
129 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
130 static void vlv_steal_power_sequencer(struct drm_device *dev,
131 				      enum i915_pipe pipe);
132 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
133 
134 static int
135 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
136 {
137 	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
138 
139 	switch (max_link_bw) {
140 	case DP_LINK_BW_1_62:
141 	case DP_LINK_BW_2_7:
142 	case DP_LINK_BW_5_4:
143 		break;
144 	default:
145 		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
146 		     max_link_bw);
147 		max_link_bw = DP_LINK_BW_1_62;
148 		break;
149 	}
150 	return max_link_bw;
151 }
152 
153 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
154 {
155 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
156 	u8 source_max, sink_max;
157 
158 	source_max = intel_dig_port->max_lanes;
159 	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
160 
161 	return min(source_max, sink_max);
162 }
163 
164 /*
165  * The units on the numbers in the next two are... bizarre.  Examples will
166  * make it clearer; this one parallels an example in the eDP spec.
167  *
168  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
169  *
170  *     270000 * 1 * 8 / 10 == 216000
171  *
172  * The actual data capacity of that configuration is 2.16Gbit/s, so the
173  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
174  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
175  * 119000.  At 18bpp that's 2142000 kilobits per second.
176  *
177  * Thus the strange-looking division by 10 in intel_dp_link_required, to
178  * get the result in decakilobits instead of kilobits.
179  */
180 
/*
 * Required link data rate for @pixel_clock kHz at @bpp bits per pixel,
 * in decakilobits (hence the round-up division by 10; see the unit
 * discussion above).
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int data_kbps = pixel_clock * bpp;

	return (data_kbps + 9) / 10;
}
186 
/*
 * Maximum payload rate of a link, in decakilobits: the raw symbol rate
 * times 8/10 to account for 8b/10b channel coding overhead.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int link_rate = max_link_clock * max_lanes;

	return link_rate * 8 / 10;
}
192 
/*
 * Maximum dotclock this connection can carry: the source's own limit,
 * further capped by a DP-to-VGA downstream facing port's reported pixel
 * clock limit (from DPCD) when one is present.
 */
static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	/* Type of the first downstream facing port. */
	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	/* 0 means the branch device does not report a limit. */
	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}
215 
216 static int
217 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
218 {
219 	if (intel_dp->num_sink_rates) {
220 		*sink_rates = intel_dp->sink_rates;
221 		return intel_dp->num_sink_rates;
222 	}
223 
224 	*sink_rates = default_rates;
225 
226 	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
227 }
228 
/*
 * Report the link rates this platform can drive; *source_rates points
 * at a sorted, ascending rate table and the return value is the number
 * of usable entries.
 */
static int
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	int size;

	if (IS_BROXTON(dev_priv)) {
		*source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		*source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else {
		*source_rates = default_rates;
		size = ARRAY_SIZE(default_rates);
	}

	/* This depends on the fact that 5.4 is last value in the array */
	if (!intel_dp_source_supports_hbr2(intel_dp))
		size--;

	return size;
}
253 
254 static int intersect_rates(const int *source_rates, int source_len,
255 			   const int *sink_rates, int sink_len,
256 			   int *common_rates)
257 {
258 	int i = 0, j = 0, k = 0;
259 
260 	while (i < source_len && j < sink_len) {
261 		if (source_rates[i] == sink_rates[j]) {
262 			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
263 				return k;
264 			common_rates[k] = source_rates[i];
265 			++k;
266 			++i;
267 			++j;
268 		} else if (source_rates[i] < sink_rates[j]) {
269 			++i;
270 		} else {
271 			++j;
272 		}
273 	}
274 	return k;
275 }
276 
/*
 * Fill @common_rates with the link rates supported by both source and
 * sink; returns the number of entries written.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *snk_rates, *src_rates;
	int snk_len = intel_dp_sink_rates(intel_dp, &snk_rates);
	int src_len = intel_dp_source_rates(intel_dp, &src_rates);

	return intersect_rates(src_rates, src_len, snk_rates, snk_len,
			       common_rates);
}
290 
/*
 * Validate @mode against link bandwidth, source/sink dotclock limits
 * and, for eDP, the panel's fixed mode dimensions.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	/* eDP modes can't be larger than the panel's fixed mode. */
	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		/* The panel always scans out with the fixed mode's clock. */
		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* Bandwidth check uses the minimum of 18bpp (6bpc). */
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate || target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
331 
/*
 * Pack up to 4 bytes MSB-first into a 32-bit AUX channel data word;
 * src[0] lands in bits 31:24.  Extra input bytes beyond 4 are ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i, n = src_bytes > 4 ? 4 : src_bytes;

	for (i = 0; i < n; i++)
		v |= (uint32_t)src[i] << (24 - i * 8);

	return v;
}
343 
/*
 * Inverse of intel_dp_pack_aux(): scatter up to 4 bytes of a 32-bit AUX
 * data word into @dst, MSB first (bits 31:24 become dst[0]).
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
352 
353 static void
354 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
355 				    struct intel_dp *intel_dp);
356 static void
357 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
358 					      struct intel_dp *intel_dp,
359 					      bool force_disable_vdd);
360 static void
361 intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
362 
/*
 * Acquire the panel power sequencer lock.  Takes an AUX power domain
 * reference first and only then pps_mutex — the reverse would deadlock
 * (see the comment in intel_power_sequencer_reset()).  Pair with
 * pps_unlock().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
380 
/*
 * Release the panel power sequencer lock taken by pps_lock(): drop
 * pps_mutex first, then put the AUX power domain reference (reverse of
 * the acquisition order).
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
394 
395 static void
396 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
397 {
398 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
399 	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
400 	enum i915_pipe pipe = intel_dp->pps_pipe;
401 	bool pll_enabled, release_cl_override = false;
402 	enum dpio_phy phy = DPIO_PHY(pipe);
403 	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
404 	uint32_t DP;
405 
406 	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
407 		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
408 		 pipe_name(pipe), port_name(intel_dig_port->port)))
409 		return;
410 
411 	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
412 		      pipe_name(pipe), port_name(intel_dig_port->port));
413 
414 	/* Preserve the BIOS-computed detected bit. This is
415 	 * supposed to be read-only.
416 	 */
417 	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
418 	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
419 	DP |= DP_PORT_WIDTH(1);
420 	DP |= DP_LINK_TRAIN_PAT_1;
421 
422 	if (IS_CHERRYVIEW(dev_priv))
423 		DP |= DP_PIPE_SELECT_CHV(pipe);
424 	else if (pipe == PIPE_B)
425 		DP |= DP_PIPEB_SELECT;
426 
427 	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
428 
429 	/*
430 	 * The DPLL for the pipe must be enabled for this to work.
431 	 * So enable temporarily it if it's not already enabled.
432 	 */
433 	if (!pll_enabled) {
434 		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
435 			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
436 
437 		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
438 				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
439 			DRM_ERROR("Failed to force on pll for pipe %c!\n",
440 				  pipe_name(pipe));
441 			return;
442 		}
443 	}
444 
445 	/*
446 	 * Similar magic as in intel_dp_enable_port().
447 	 * We _must_ do this port enable + disable trick
448 	 * to make this power seqeuencer lock onto the port.
449 	 * Otherwise even VDD force bit won't work.
450 	 */
451 	I915_WRITE(intel_dp->output_reg, DP);
452 	POSTING_READ(intel_dp->output_reg);
453 
454 	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
455 	POSTING_READ(intel_dp->output_reg);
456 
457 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
458 	POSTING_READ(intel_dp->output_reg);
459 
460 	if (!pll_enabled) {
461 		vlv_force_pll_off(dev_priv, pipe);
462 
463 		if (release_cl_override)
464 			chv_phy_powergate_ch(dev_priv, phy, ch, false);
465 	}
466 }
467 
/*
 * Return the pipe whose power sequencer serves this eDP port, assigning
 * (and kicking) a currently unused one if no pipe is bound yet.
 * Caller must hold pps_mutex.
 */
static enum i915_pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	/* Candidate pipes: only A and B have power sequencers. */
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum i915_pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
530 
/*
 * Return the PPS instance index to use for this eDP port on BXT,
 * re-programming the PPS registers first if a reset invalidated them.
 * Caller must hold pps_mutex.
 */
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	/*
	 * TODO: BXT has 2 PPS instances. The correct port->PPS instance
	 * mapping needs to be retrieved from VBT, for now just hard-code to
	 * use instance #0 always.
	 */
	if (!intel_dp->pps_reset)
		return 0;

	/* A power well cycle wiped the PPS registers; redo them once. */
	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);

	return 0;
}
561 
562 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
563 			       enum i915_pipe pipe);
564 
/* vlv_pipe_check: does this pipe's PPS report panel power on? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum i915_pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}
570 
/* vlv_pipe_check: does this pipe's PPS have the VDD force bit set? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum i915_pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}
576 
/* vlv_pipe_check: accept any pipe (the port-select match alone suffices). */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum i915_pipe pipe)
{
	return true;
}
582 
583 static enum i915_pipe
584 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
585 		     enum port port,
586 		     vlv_pipe_check pipe_check)
587 {
588 	enum i915_pipe pipe;
589 
590 	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
591 		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
592 			PANEL_PORT_SELECT_MASK;
593 
594 		if (port_sel != PANEL_PORT_SELECT_VLV(port))
595 			continue;
596 
597 		if (!pipe_check(dev_priv, pipe))
598 			continue;
599 
600 		return pipe;
601 	}
602 
603 	return INVALID_PIPE;
604 }
605 
/*
 * At init, bind this eDP port to the PPS pipe the BIOS left it on,
 * preferring (in order) a pipe with panel power on, then one with VDD
 * forced on, then any pipe selecting this port.  If none matches, defer
 * to vlv_power_sequencer_pipe() at first use.  Caller holds pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
}
642 
/*
 * Invalidate the cached PPS state of every eDP encoder after the
 * hardware state was lost (e.g. a power well cycle): BXT marks its
 * fixed PPS instance for re-programming, VLV/CHV drop the pipe
 * association entirely.  VLV/CHV/BXT only.
 */
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_BROXTON(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		if (IS_BROXTON(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}
675 
/*
 * Register offsets for one panel power sequencer (PPS) instance.
 * pp_div is absent on BXT and is left zero-initialized there (see
 * intel_pps_get_registers()).
 */
struct pps_registers {
	i915_reg_t pp_ctrl;	/* PP_CONTROL: panel power control */
	i915_reg_t pp_stat;	/* PP_STATUS: panel power state */
	i915_reg_t pp_on;	/* PP_ON_DELAYS: power-up delays + port select */
	i915_reg_t pp_off;	/* PP_OFF_DELAYS: power-down delays */
	i915_reg_t pp_div;	/* PP_DIVISOR: reference divider + cycle delay */
};
683 
/*
 * Fill @regs with the PPS register offsets for the instance serving
 * @intel_dp: BXT uses bxt_power_sequencer_idx(), VLV/CHV the per-pipe
 * instance from vlv_power_sequencer_pipe(), everything else instance 0.
 * BXT has no PP_DIVISOR, so pp_div stays zeroed there.
 */
static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
				    struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_BROXTON(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);
	if (!IS_BROXTON(dev_priv))
		regs->pp_div = PP_DIVISOR(pps_idx);
}
704 
705 static i915_reg_t
706 _pp_ctrl_reg(struct intel_dp *intel_dp)
707 {
708 	struct pps_registers regs;
709 
710 	intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
711 				&regs);
712 
713 	return regs.pp_ctrl;
714 }
715 
716 static i915_reg_t
717 _pp_stat_reg(struct intel_dp *intel_dp)
718 {
719 	struct pps_registers regs;
720 
721 	intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
722 				&regs);
723 
724 	return regs.pp_stat;
725 }
726 
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * NOTE(review): the SYS_RESTART check is compiled out in this
	 * DragonFly port, so the handler fires for any notifier event —
	 * confirm that is intentional.
	 */
#if 0
	if (!is_edp(intel_dp) || code != SYS_RESTART)
#endif
	if (!is_edp(intel_dp))
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = PP_CONTROL(pipe);
		pp_div_reg  = PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		/* Wait out the full power cycle (T12) before reboot proceeds. */
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
765 
/*
 * Return true if the PPS reports panel power on.  Caller must hold
 * pps_mutex.  On VLV/CHV an encoder without an assigned PPS pipe
 * cannot have panel power.
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
779 
/*
 * Return true if the VDD force bit is currently set for this panel.
 * Caller must hold pps_mutex.  On VLV/CHV an encoder without an
 * assigned PPS pipe cannot have VDD forced on.
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
793 
/*
 * Sanity check before AUX transfers on eDP: warn (and dump the PPS
 * registers) if the panel has neither power nor VDD enabled, since AUX
 * communication is then expected to fail.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
810 
/*
 * Wait for the in-flight AUX transfer on this channel to finish and
 * return the final AUX_CH_CTL status.  Uses the AUX-done interrupt when
 * available, otherwise polls; either way a 10ms software timeout logs
 * an error, since the hardware should have flagged its own timeout
 * well before that.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* Completion condition: SEND_BUSY deasserted; latches status as a side effect. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
834 
835 static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
836 {
837 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
838 	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
839 
840 	if (index)
841 		return 0;
842 
843 	/*
844 	 * The clock divider is based off the hrawclk, and would like to run at
845 	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
846 	 */
847 	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
848 }
849 
850 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
851 {
852 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
853 	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
854 
855 	if (index)
856 		return 0;
857 
858 	/*
859 	 * The clock divider is based off the cdclk or PCH rawclk, and would
860 	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
861 	 * divide by 2000 and use that
862 	 */
863 	if (intel_dig_port->port == PORT_A)
864 		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
865 	else
866 		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
867 }
868 
869 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
870 {
871 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
872 	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
873 
874 	if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
875 		/* Workaround for non-ULT HSW */
876 		switch (index) {
877 		case 0: return 63;
878 		case 1: return 72;
879 		default: return 0;
880 		}
881 	}
882 
883 	return ilk_get_aux_clock_divider(intel_dp, index);
884 }
885 
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code,
	 * returning a single dummy divider so the retry loop runs once.
	 */
	if (index)
		return 0;

	return 1;
}
895 
/*
 * Build the AUX_CH_CTL value to start a @send_bytes transfer on
 * g4x-style hardware.  Gen6 uses a shorter (3 vs 5) precharge time, and
 * BDW port A needs the longer 600us hardware timeout.
 */
static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	uint32_t precharge, timeout;

	if (IS_GEN6(dev_priv))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv) && intel_dig_port->port == PORT_A)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
926 
/*
 * Build the AUX_CH_CTL value to start a @send_bytes transfer on SKL+:
 * no clock divider field (@unused), fixed 1600us hardware timeout and
 * 32-cycle sync/fast-wake pulse counts.
 */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
942 
/*
 * Perform a single raw AUX channel transaction.
 *
 * Packs @send_bytes from @send into the AUX data registers, starts the
 * transfer, and on completion unpacks the reply into @recv (at most
 * @recv_size bytes).
 *
 * Returns the number of bytes received, or a negative errno:
 * -EBUSY (channel busy or forbidden reply size), -E2BIG (message larger
 * than the 5 data registers), -EIO (receive error), -ETIMEDOUT.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* WARN only once per distinct status value to avoid log spam */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry the whole transfer at each AUX clock divider the platform offers */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		/*
		 * FIXME: This patch was created on top of a series that
		 * organize the retries at drm level. There EBUSY should
		 * also take care for 1ms wait before retrying.
		 * That aux retries re-org is still needed and after that is
		 * merged we remove this sleep from here.
		 */
		usleep_range(1000, 1500);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Drop the wakeup-latency constraint requested at entry */
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
1114 
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux.transfer() hook: translate a drm_dp_aux_msg into the raw
 * AUX wire format (4-byte header plus payload) and push it through
 * intel_dp_aux_ch().  Returns the payload size on success, a negative
 * errno on failure.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* Build the request header: opcode, 20-bit address, length-1 */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* A zero-size write is an address-only transaction */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		/* buffer and size must be both set or both clear */
		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		/* Reply is one status byte followed by the data */
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1189 
1190 static enum port intel_aux_port(struct drm_i915_private *dev_priv,
1191 				enum port port)
1192 {
1193 	const struct ddi_vbt_port_info *info =
1194 		&dev_priv->vbt.ddi_port_info[port];
1195 	enum port aux_port;
1196 
1197 	if (!info->alternate_aux_channel) {
1198 		DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
1199 			      port_name(port), port_name(port));
1200 		return port;
1201 	}
1202 
1203 	switch (info->alternate_aux_channel) {
1204 	case DP_AUX_A:
1205 		aux_port = PORT_A;
1206 		break;
1207 	case DP_AUX_B:
1208 		aux_port = PORT_B;
1209 		break;
1210 	case DP_AUX_C:
1211 		aux_port = PORT_C;
1212 		break;
1213 	case DP_AUX_D:
1214 		aux_port = PORT_D;
1215 		break;
1216 	default:
1217 		MISSING_CASE(info->alternate_aux_channel);
1218 		aux_port = PORT_A;
1219 		break;
1220 	}
1221 
1222 	DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
1223 		      port_name(aux_port), port_name(port));
1224 
1225 	return aux_port;
1226 }
1227 
1228 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1229 				  enum port port)
1230 {
1231 	switch (port) {
1232 	case PORT_B:
1233 	case PORT_C:
1234 	case PORT_D:
1235 		return DP_AUX_CH_CTL(port);
1236 	default:
1237 		MISSING_CASE(port);
1238 		return DP_AUX_CH_CTL(PORT_B);
1239 	}
1240 }
1241 
1242 static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1243 				   enum port port, int index)
1244 {
1245 	switch (port) {
1246 	case PORT_B:
1247 	case PORT_C:
1248 	case PORT_D:
1249 		return DP_AUX_CH_DATA(port, index);
1250 	default:
1251 		MISSING_CASE(port);
1252 		return DP_AUX_CH_DATA(PORT_B, index);
1253 	}
1254 }
1255 
1256 static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1257 				  enum port port)
1258 {
1259 	switch (port) {
1260 	case PORT_A:
1261 		return DP_AUX_CH_CTL(port);
1262 	case PORT_B:
1263 	case PORT_C:
1264 	case PORT_D:
1265 		return PCH_DP_AUX_CH_CTL(port);
1266 	default:
1267 		MISSING_CASE(port);
1268 		return DP_AUX_CH_CTL(PORT_A);
1269 	}
1270 }
1271 
1272 static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1273 				   enum port port, int index)
1274 {
1275 	switch (port) {
1276 	case PORT_A:
1277 		return DP_AUX_CH_DATA(port, index);
1278 	case PORT_B:
1279 	case PORT_C:
1280 	case PORT_D:
1281 		return PCH_DP_AUX_CH_DATA(port, index);
1282 	default:
1283 		MISSING_CASE(port);
1284 		return DP_AUX_CH_DATA(PORT_A, index);
1285 	}
1286 }
1287 
1288 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1289 				  enum port port)
1290 {
1291 	switch (port) {
1292 	case PORT_A:
1293 	case PORT_B:
1294 	case PORT_C:
1295 	case PORT_D:
1296 		return DP_AUX_CH_CTL(port);
1297 	default:
1298 		MISSING_CASE(port);
1299 		return DP_AUX_CH_CTL(PORT_A);
1300 	}
1301 }
1302 
1303 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1304 				   enum port port, int index)
1305 {
1306 	switch (port) {
1307 	case PORT_A:
1308 	case PORT_B:
1309 	case PORT_C:
1310 	case PORT_D:
1311 		return DP_AUX_CH_DATA(port, index);
1312 	default:
1313 		MISSING_CASE(port);
1314 		return DP_AUX_CH_DATA(PORT_A, index);
1315 	}
1316 }
1317 
1318 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1319 				    enum port port)
1320 {
1321 	if (INTEL_INFO(dev_priv)->gen >= 9)
1322 		return skl_aux_ctl_reg(dev_priv, port);
1323 	else if (HAS_PCH_SPLIT(dev_priv))
1324 		return ilk_aux_ctl_reg(dev_priv, port);
1325 	else
1326 		return g4x_aux_ctl_reg(dev_priv, port);
1327 }
1328 
1329 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1330 				     enum port port, int index)
1331 {
1332 	if (INTEL_INFO(dev_priv)->gen >= 9)
1333 		return skl_aux_data_reg(dev_priv, port, index);
1334 	else if (HAS_PCH_SPLIT(dev_priv))
1335 		return ilk_aux_data_reg(dev_priv, port, index);
1336 	else
1337 		return g4x_aux_data_reg(dev_priv, port, index);
1338 }
1339 
/* Cache the AUX CTL/DATA register offsets for this port in intel_dp. */
static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	/* The VBT may remap this port onto a different AUX channel */
	enum port port = intel_aux_port(dev_priv,
					dp_to_dig_port(intel_dp)->port);
	int i;

	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
}
1351 
/* Release what intel_dp_aux_init() allocated (the AUX channel name). */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}
1357 
/*
 * Initialize the drm_dp_aux helper for this port: cached register
 * offsets, channel name and the transfer callback.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;

	intel_aux_reg_init(intel_dp);
	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}
1371 
1372 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1373 {
1374 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1375 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
1376 
1377 	if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
1378 	    IS_BROADWELL(dev_priv) || (INTEL_GEN(dev_priv) >= 9))
1379 		return true;
1380 	else
1381 		return false;
1382 }
1383 
1384 static void
1385 intel_dp_set_clock(struct intel_encoder *encoder,
1386 		   struct intel_crtc_state *pipe_config)
1387 {
1388 	struct drm_device *dev = encoder->base.dev;
1389 	struct drm_i915_private *dev_priv = to_i915(dev);
1390 	const struct dp_link_dpll *divisor = NULL;
1391 	int i, count = 0;
1392 
1393 	if (IS_G4X(dev_priv)) {
1394 		divisor = gen4_dpll;
1395 		count = ARRAY_SIZE(gen4_dpll);
1396 	} else if (HAS_PCH_SPLIT(dev_priv)) {
1397 		divisor = pch_dpll;
1398 		count = ARRAY_SIZE(pch_dpll);
1399 	} else if (IS_CHERRYVIEW(dev_priv)) {
1400 		divisor = chv_dpll;
1401 		count = ARRAY_SIZE(chv_dpll);
1402 	} else if (IS_VALLEYVIEW(dev_priv)) {
1403 		divisor = vlv_dpll;
1404 		count = ARRAY_SIZE(vlv_dpll);
1405 	}
1406 
1407 	if (divisor && count) {
1408 		for (i = 0; i < count; i++) {
1409 			if (pipe_config->port_clock == divisor[i].clock) {
1410 				pipe_config->dpll = divisor[i].dpll;
1411 				pipe_config->clock_set = true;
1412 				break;
1413 			}
1414 		}
1415 	}
1416 }
1417 
/*
 * Format @nelem integers from @array into @str as a comma-separated
 * list ("1, 2, 3"), writing at most @len bytes including the
 * terminating NUL.  On truncation or snprintf failure the output is
 * simply cut short; for len > 0 the buffer is always NUL-terminated.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	/* Nothing to do (and nowhere to write a NUL) for a zero-size buffer */
	if (len == 0)
		return;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		/*
		 * Stop on encoding error (r < 0) or truncation (r >= len).
		 * The explicit r < 0 check avoids relying on the negative
		 * int wrapping to a huge size_t in the comparison.
		 */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1433 
/* Dump the source, sink and common link rate lists to the KMS debug log. */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	/* Skip all the formatting work unless KMS debugging is enabled */
	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(intel_dp, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1456 
1457 bool
1458 __intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
1459 {
1460 	u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
1461 						      DP_SINK_OUI;
1462 
1463 	return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
1464 	       sizeof(*desc);
1465 }
1466 
/*
 * Read and log the branch/sink device descriptor (OUI, device id,
 * HW/SW revisions) from the DPCD into intel_dp->desc.  Returns false
 * when the DPCD read fails.
 */
bool intel_dp_read_desc(struct intel_dp *intel_dp)
{
	struct intel_dp_desc *desc = &intel_dp->desc;
	/* Whether the device claims its OUI fields are meaningful */
	bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
		       DP_OUI_SUPPORT;
	int dev_id_len;

	if (!__intel_dp_read_desc(intel_dp, desc))
		return false;

	/* device_id may not be NUL-terminated, hence strnlen + %*pE */
	dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
	DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
		      drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
		      (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
		      dev_id_len, desc->device_id,
		      desc->hw_rev >> 4, desc->hw_rev & 0xf,
		      desc->sw_major_rev, desc->sw_minor_rev);

	return true;
}
1487 
1488 static int rate_to_index(int find, const int *rates)
1489 {
1490 	int i = 0;
1491 
1492 	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1493 		if (find == rates[i])
1494 			break;
1495 
1496 	return i;
1497 }
1498 
/*
 * Highest link rate supported by both source and sink.  Falls back to
 * 162000 if (unexpectedly) no common rate exists.
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	/* NOTE(review): assumes common rates are sorted ascending — confirm */
	return rates[len - 1];
}
1511 
/* Map @rate to its index in the sink's advertised rate table. */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1516 
1517 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1518 			   uint8_t *link_bw, uint8_t *rate_select)
1519 {
1520 	if (intel_dp->num_sink_rates) {
1521 		*link_bw = 0;
1522 		*rate_select =
1523 			intel_dp_rate_select(intel_dp, port_clock);
1524 	} else {
1525 		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1526 		*rate_select = 0;
1527 	}
1528 }
1529 
1530 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1531 				struct intel_crtc_state *pipe_config)
1532 {
1533 	int bpp, bpc;
1534 
1535 	bpp = pipe_config->pipe_bpp;
1536 	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
1537 
1538 	if (bpc > 0)
1539 		bpp = min(bpp, 3*bpc);
1540 
1541 	return bpp;
1542 }
1543 
1544 bool
1545 intel_dp_compute_config(struct intel_encoder *encoder,
1546 			struct intel_crtc_state *pipe_config,
1547 			struct drm_connector_state *conn_state)
1548 {
1549 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1550 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1551 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1552 	enum port port = dp_to_dig_port(intel_dp)->port;
1553 	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1554 	struct intel_connector *intel_connector = intel_dp->attached_connector;
1555 	int lane_count, clock;
1556 	int min_lane_count = 1;
1557 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
1558 	/* Conveniently, the link BW constants become indices with a shift...*/
1559 	int min_clock = 0;
1560 	int max_clock;
1561 	int bpp, mode_rate;
1562 	int link_avail, link_clock;
1563 	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1564 	int common_len;
1565 	uint8_t link_bw, rate_select;
1566 
1567 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1568 
1569 	/* No common link rates between source and sink */
1570 	WARN_ON(common_len <= 0);
1571 
1572 	max_clock = common_len - 1;
1573 
1574 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
1575 		pipe_config->has_pch_encoder = true;
1576 
1577 	pipe_config->has_drrs = false;
1578 	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1579 
1580 	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1581 		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1582 				       adjusted_mode);
1583 
1584 		if (INTEL_GEN(dev_priv) >= 9) {
1585 			int ret;
1586 			ret = skl_update_scaler_crtc(pipe_config);
1587 			if (ret)
1588 				return ret;
1589 		}
1590 
1591 		if (HAS_GMCH_DISPLAY(dev_priv))
1592 			intel_gmch_panel_fitting(intel_crtc, pipe_config,
1593 						 intel_connector->panel.fitting_mode);
1594 		else
1595 			intel_pch_panel_fitting(intel_crtc, pipe_config,
1596 						intel_connector->panel.fitting_mode);
1597 	}
1598 
1599 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1600 		return false;
1601 
1602 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
1603 		      "max bw %d pixel clock %iKHz\n",
1604 		      max_lane_count, common_rates[max_clock],
1605 		      adjusted_mode->crtc_clock);
1606 
1607 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
1608 	 * bpc in between. */
1609 	bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
1610 	if (is_edp(intel_dp)) {
1611 
1612 		/* Get bpp from vbt only for panels that dont have bpp in edid */
1613 		if (intel_connector->base.display_info.bpc == 0 &&
1614 			(dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
1615 			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1616 				      dev_priv->vbt.edp.bpp);
1617 			bpp = dev_priv->vbt.edp.bpp;
1618 		}
1619 
1620 		/*
1621 		 * Use the maximum clock and number of lanes the eDP panel
1622 		 * advertizes being capable of. The panels are generally
1623 		 * designed to support only a single clock and lane
1624 		 * configuration, and typically these values correspond to the
1625 		 * native resolution of the panel.
1626 		 */
1627 		min_lane_count = max_lane_count;
1628 		min_clock = max_clock;
1629 	}
1630 
1631 	for (; bpp >= 6*3; bpp -= 2*3) {
1632 		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1633 						   bpp);
1634 
1635 		for (clock = min_clock; clock <= max_clock; clock++) {
1636 			for (lane_count = min_lane_count;
1637 				lane_count <= max_lane_count;
1638 				lane_count <<= 1) {
1639 
1640 				link_clock = common_rates[clock];
1641 				link_avail = intel_dp_max_data_rate(link_clock,
1642 								    lane_count);
1643 
1644 				if (mode_rate <= link_avail) {
1645 					goto found;
1646 				}
1647 			}
1648 		}
1649 	}
1650 
1651 	return false;
1652 
1653 found:
1654 	if (intel_dp->color_range_auto) {
1655 		/*
1656 		 * See:
1657 		 * CEA-861-E - 5.1 Default Encoding Parameters
1658 		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1659 		 */
1660 		pipe_config->limited_color_range =
1661 			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1662 	} else {
1663 		pipe_config->limited_color_range =
1664 			intel_dp->limited_color_range;
1665 	}
1666 
1667 	pipe_config->lane_count = lane_count;
1668 
1669 	pipe_config->pipe_bpp = bpp;
1670 	pipe_config->port_clock = common_rates[clock];
1671 
1672 	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1673 			      &link_bw, &rate_select);
1674 
1675 	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1676 		      link_bw, rate_select, pipe_config->lane_count,
1677 		      pipe_config->port_clock, bpp);
1678 	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1679 		      mode_rate, link_avail);
1680 
1681 	intel_link_compute_m_n(bpp, lane_count,
1682 			       adjusted_mode->crtc_clock,
1683 			       pipe_config->port_clock,
1684 			       &pipe_config->dp_m_n);
1685 
1686 	if (intel_connector->panel.downclock_mode != NULL &&
1687 		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1688 			pipe_config->has_drrs = true;
1689 			intel_link_compute_m_n(bpp, lane_count,
1690 				intel_connector->panel.downclock_mode->clock,
1691 				pipe_config->port_clock,
1692 				&pipe_config->dp_m2_n2);
1693 	}
1694 
1695 	/*
1696 	 * DPLL0 VCO may need to be adjusted to get the correct
1697 	 * clock for eDP. This will affect cdclk as well.
1698 	 */
1699 	if (is_edp(intel_dp) &&
1700 	    (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) {
1701 		int vco;
1702 
1703 		switch (pipe_config->port_clock / 2) {
1704 		case 108000:
1705 		case 216000:
1706 			vco = 8640000;
1707 			break;
1708 		default:
1709 			vco = 8100000;
1710 			break;
1711 		}
1712 
1713 		to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
1714 	}
1715 
1716 	if (!HAS_DDI(dev_priv))
1717 		intel_dp_set_clock(encoder, pipe_config);
1718 
1719 	return true;
1720 }
1721 
/* Record the negotiated link parameters for later use (training, HW setup). */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, uint8_t lane_count,
			      bool link_mst)
{
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
	intel_dp->link_mst = link_mst;
}
1730 
/*
 * Compute the DP port register value (cached in intel_dp->DP) for this
 * pipe config, handling the per-platform register layout differences.
 * On CPT the enhanced-framing bit is programmed directly into
 * TRANS_DP_CTL here.
 */
static void intel_dp_prepare(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
				 pipe_config->lane_count,
				 intel_crtc_has_type(pipe_config,
						     INTEL_OUTPUT_DP_MST));

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev_priv) && port == PORT_A) {
		/* IVB CPU eDP: sync polarity and pipe select in the DP reg */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		/* CPT: enhanced framing lives in TRANS_DP_CTL instead */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1815 
1816 #define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1817 #define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1818 
1819 #define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1820 #define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
1821 
1822 #define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1823 #define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1824 
1825 static void intel_pps_verify_state(struct drm_i915_private *dev_priv,
1826 				   struct intel_dp *intel_dp);
1827 
/*
 * Poll the panel power sequencer status register until
 * (status & @mask) == @value, logging an error after a 5s timeout.
 * Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(dev_priv, intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	if (intel_wait_for_register(dev_priv,
				    pp_stat_reg, mask, value,
				    5000))
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));

	DRM_DEBUG_KMS("Wait complete\n");
}
1857 
/* Wait until the power sequencer reports the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1863 
/* Wait until the power sequencer reports the panel powered off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1869 
/*
 * Enforce the panel's minimum power-off duration (t11_t12) before
 * powering it back on, then wait for the sequencer to go idle.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1890 
/* Honour the panel's power-on to backlight-on delay. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1896 
/* Honour the panel's backlight-off settle delay. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1902 
/*
 * Read the current pp_control value, rewriting the unlock-key field to
 * the unlock value when the register is write-protected.  On DDI
 * platforms the register is expected to be unlocked already (hence the
 * WARN).  Caller must hold pps_mutex.
 */

static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	if (WARN_ON(!HAS_DDI(dev_priv) &&
		    (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
1923 
1924 /*
1925  * Must be paired with edp_panel_vdd_off().
1926  * Must hold pps_mutex around the whole on/off sequence.
1927  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1928  */
1929 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1930 {
1931 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1932 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1933 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1934 	struct drm_i915_private *dev_priv = to_i915(dev);
1935 	enum intel_display_power_domain power_domain;
1936 	u32 pp;
1937 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
1938 	bool need_to_disable = !intel_dp->want_panel_vdd;
1939 
1940 	lockdep_assert_held(&dev_priv->pps_mutex);
1941 
1942 	if (!is_edp(intel_dp))
1943 		return false;
1944 
1945 	cancel_delayed_work(&intel_dp->panel_vdd_work);
1946 	intel_dp->want_panel_vdd = true;
1947 
1948 	if (edp_have_panel_vdd(intel_dp))
1949 		return need_to_disable;
1950 
1951 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
1952 	intel_display_power_get(dev_priv, power_domain);
1953 
1954 	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1955 		      port_name(intel_dig_port->port));
1956 
1957 	if (!edp_have_panel_power(intel_dp))
1958 		wait_panel_power_cycle(intel_dp);
1959 
1960 	pp = ironlake_get_pp_control(intel_dp);
1961 	pp |= EDP_FORCE_VDD;
1962 
1963 	pp_stat_reg = _pp_stat_reg(intel_dp);
1964 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1965 
1966 	I915_WRITE(pp_ctrl_reg, pp);
1967 	POSTING_READ(pp_ctrl_reg);
1968 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1969 			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1970 	/*
1971 	 * If the panel wasn't on, delay before accessing aux channel
1972 	 */
1973 	if (!edp_have_panel_power(intel_dp)) {
1974 		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1975 			      port_name(intel_dig_port->port));
1976 		msleep(intel_dp->panel_power_up_delay);
1977 	}
1978 
1979 	return need_to_disable;
1980 }
1981 
1982 /*
1983  * Must be paired with intel_edp_panel_vdd_off() or
1984  * intel_edp_panel_off().
1985  * Nested calls to these functions are not allowed since
1986  * we drop the lock. Caller must use some higher level
1987  * locking to prevent nested calls from other threads.
1988  */
1989 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1990 {
1991 	bool vdd;
1992 
1993 	if (!is_edp(intel_dp))
1994 		return;
1995 
1996 	pps_lock(intel_dp);
1997 	vdd = edp_panel_vdd_on(intel_dp);
1998 	pps_unlock(intel_dp);
1999 
2000 	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2001 	     port_name(dp_to_dig_port(intel_dp)->port));
2002 }
2003 
/*
 * Immediately turn off panel VDD and release the AUX power domain
 * reference taken when VDD was forced on. Caller must hold pps_mutex
 * and must have cleared want_panel_vdd beforehand.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	/* Nothing to do if the hardware isn't forcing VDD right now */
	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* Panel is now fully off: record the time for the power cycle delay */
	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	/* Drop the AUX power domain reference acquired when forcing VDD on */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2044 
2045 static void edp_panel_vdd_work(struct work_struct *__work)
2046 {
2047 	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
2048 						 struct intel_dp, panel_vdd_work);
2049 
2050 	pps_lock(intel_dp);
2051 	if (!intel_dp->want_panel_vdd)
2052 		edp_panel_vdd_off_sync(intel_dp);
2053 	pps_unlock(intel_dp);
2054 }
2055 
2056 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2057 {
2058 	unsigned long delay;
2059 
2060 	/*
2061 	 * Queue the timer to fire a long time from now (relative to the power
2062 	 * down delay) to keep the panel power up across a sequence of
2063 	 * operations.
2064 	 */
2065 	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2066 	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2067 }
2068 
2069 /*
2070  * Must be paired with edp_panel_vdd_on().
2071  * Must hold pps_mutex around the whole on/off sequence.
2072  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2073  */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* Warn if there was no preceding vdd on request to balance */
	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
	     port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

	/*
	 * Either power down VDD immediately, or let the delayed work do it
	 * later so VDD stays up across a sequence of operations.
	 */
	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
2093 
/* Turn on panel power. Caller must hold pps_mutex. */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	/* Honour the panel's minimum power-off time before powering back on */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev_priv)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_GEN5(dev_priv))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Wait until the sequencer reports the panel as powered up */
	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev_priv)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2141 
/* Turn on panel power, taking pps_mutex internally. No-op for non-eDP. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		edp_panel_on(intel_dp);
		pps_unlock(intel_dp);
	}
}
2151 
2152 
/* Turn off panel power (and forced VDD). Caller must hold pps_mutex. */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	/* VDD goes off together with panel power; clear the flag so the
	 * delayed VDD work doesn't touch the hardware again. */
	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Record the off time so the next power-on honours the cycle delay */
	intel_dp->panel_power_off_time = ktime_get_boottime();
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2194 
/* Turn off panel power, taking pps_mutex internally. No-op for non-eDP. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		edp_panel_off(intel_dp);
		pps_unlock(intel_dp);
	}
}
2204 
2205 /* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	/* pps_mutex protects the read-modify-write of the PP control reg */
	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2234 
2235 /* Enable backlight PWM and backlight PP control. */
2236 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2237 {
2238 	if (!is_edp(intel_dp))
2239 		return;
2240 
2241 	DRM_DEBUG_KMS("\n");
2242 
2243 	intel_panel_enable_backlight(intel_dp->attached_connector);
2244 	_intel_edp_backlight_on(intel_dp);
2245 }
2246 
2247 /* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	/* pps_mutex protects the read-modify-write of the PP control reg */
	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/* Record the off time and let the panel settle before re-enabling */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
2273 
2274 /* Disable backlight PP control and backlight PWM. */
2275 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2276 {
2277 	if (!is_edp(intel_dp))
2278 		return;
2279 
2280 	DRM_DEBUG_KMS("\n");
2281 
2282 	_intel_edp_backlight_off(intel_dp);
2283 	intel_panel_disable_backlight(intel_dp->attached_connector);
2284 }
2285 
2286 /*
2287  * Hook for controlling the panel power control backlight through the bl_power
2288  * sysfs attribute. Take care to handle multiple calls.
2289  */
2290 static void intel_edp_backlight_power(struct intel_connector *connector,
2291 				      bool enable)
2292 {
2293 	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2294 	bool is_enabled;
2295 
2296 	pps_lock(intel_dp);
2297 	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2298 	pps_unlock(intel_dp);
2299 
2300 	if (is_enabled == enable)
2301 		return;
2302 
2303 	DRM_DEBUG_KMS("panel power control backlight %s\n",
2304 		      enable ? "enable" : "disable");
2305 
2306 	if (enable)
2307 		_intel_edp_backlight_on(intel_dp);
2308 	else
2309 		_intel_edp_backlight_off(intel_dp);
2310 }
2311 
/* WARN if the DP port enable bit doesn't match the expected state. */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"DP port %c state assertion failure (expected %s, current %s)\n",
			port_name(dig_port->port),
			onoff(state), onoff(cur_state));
}
2323 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2324 
/* WARN if the eDP PLL enable bit in DP_A doesn't match the expected state. */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
2333 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2334 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2335 
/* Enable the eDP PLL on port A. Pipe, port and PLL must all be off. */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      pipe_config->port_clock);

	/* Program the PLL frequency select before enabling the PLL */
	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN5(dev_priv))
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2375 
/* Disable the eDP PLL on port A. Pipe and port must already be off. */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2394 
2395 /* If the sink supports it, try to set the power state appropriately */
2396 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2397 {
2398 	int ret, i;
2399 
2400 	/* Should have a valid DPCD by this point */
2401 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2402 		return;
2403 
2404 	if (mode != DRM_MODE_DPMS_ON) {
2405 		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2406 					 DP_SET_POWER_D3);
2407 	} else {
2408 		/*
2409 		 * When turning on, we need to retry for 1ms to give the sink
2410 		 * time to wake up.
2411 		 */
2412 		for (i = 0; i < 3; i++) {
2413 			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2414 						 DP_SET_POWER_D0);
2415 			if (ret == 1)
2416 				break;
2417 			msleep(1);
2418 		}
2419 	}
2420 
2421 	if (ret != 1)
2422 		DRM_DEBUG_KMS("failed to %s sink power state\n",
2423 			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2424 }
2425 
/*
 * Read back whether the DP port is enabled and, if so, which pipe it is
 * driving. Returns false if the port is off or its power well is down.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum i915_pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;
	bool ret;

	/* Registers are only readable while the power domain is up */
	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	ret = false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		goto out;

	/* Pipe select encoding differs per platform/port */
	if (IS_GEN7(dev_priv) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		enum i915_pipe p;

		/* On CPT the transcoder, not the port, selects the pipe */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				ret = true;

				goto out;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      i915_mmio_reg_offset(intel_dp->output_reg));
	} else if (IS_CHERRYVIEW(dev_priv)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
2478 
/* Read the current DP configuration back from the hardware registers. */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* Sync polarity lives in the transcoder reg on CPT, port reg otherwise */
	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A link rate comes from the eDP PLL frequency select */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}
2558 
/* Disable the DP output: audio, PSR, backlight, sink power, panel power. */
static void intel_disable_dp(struct intel_encoder *encoder,
			     struct intel_crtc_state *old_crtc_state,
			     struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_GEN(dev_priv) < 5)
		intel_dp_link_down(intel_dp);
}
2583 
2584 static void ilk_post_disable_dp(struct intel_encoder *encoder,
2585 				struct intel_crtc_state *old_crtc_state,
2586 				struct drm_connector_state *old_conn_state)
2587 {
2588 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2589 	enum port port = dp_to_dig_port(intel_dp)->port;
2590 
2591 	intel_dp_link_down(intel_dp);
2592 
2593 	/* Only ilk+ has port A */
2594 	if (port == PORT_A)
2595 		ironlake_edp_pll_off(intel_dp);
2596 }
2597 
2598 static void vlv_post_disable_dp(struct intel_encoder *encoder,
2599 				struct intel_crtc_state *old_crtc_state,
2600 				struct drm_connector_state *old_conn_state)
2601 {
2602 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2603 
2604 	intel_dp_link_down(intel_dp);
2605 }
2606 
2607 static void chv_post_disable_dp(struct intel_encoder *encoder,
2608 				struct intel_crtc_state *old_crtc_state,
2609 				struct drm_connector_state *old_conn_state)
2610 {
2611 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2612 	struct drm_device *dev = encoder->base.dev;
2613 	struct drm_i915_private *dev_priv = to_i915(dev);
2614 
2615 	intel_dp_link_down(intel_dp);
2616 
2617 	mutex_lock(&dev_priv->sb_lock);
2618 
2619 	/* Assert data lane reset */
2620 	chv_data_lane_soft_reset(encoder, true);
2621 
2622 	mutex_unlock(&dev_priv->sb_lock);
2623 }
2624 
/*
 * Translate a DPCD training pattern value into the source-side register
 * bits for this platform and update *DP (and DP_TP_CTL on DDI platforms)
 * accordingly. The caller writes *DP to the port register afterwards.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_dig_port->port;

	if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
		DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
			      dp_train_pat & DP_TRAINING_PATTERN_MASK);

	/* DDI platforms program the pattern via DP_TP_CTL, not the port reg */
	if (HAS_DDI(dev_priv)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	/* IVB port A and CPT PCH ports use the _CPT bit layout */
	} else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	/* Everything else (g4x/VLV/CHV) uses the legacy bit layout */
	} else {
		if (IS_CHERRYVIEW(dev_priv))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Only CHV supports TPS3 on this path */
			if (IS_CHERRYVIEW(dev_priv)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2712 
/* Enable the DP port with training pattern 1 pre-programmed. */
static void intel_dp_enable_port(struct intel_dp *intel_dp,
				 struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* enable with pattern 1 (as per spec) */

	intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (old_crtc_state->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2736 
/*
 * Common DP enable path: bring up the port, panel power (eDP), sink
 * power state, and run link training. Audio is enabled last.
 */
static void intel_enable_dp(struct intel_encoder *encoder,
			    struct intel_crtc_state *pipe_config,
			    struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	enum i915_pipe pipe = crtc->pipe;

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	/* VLV/CHV must bind a power sequencer to this pipe/port first */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp, pipe_config);

	/* Power up the panel under a temporary VDD force */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		unsigned int lane_mask = 0x0;

		if (IS_CHERRYVIEW(dev_priv))
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (pipe_config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
	}
}
2784 
2785 static void g4x_enable_dp(struct intel_encoder *encoder,
2786 			  struct intel_crtc_state *pipe_config,
2787 			  struct drm_connector_state *conn_state)
2788 {
2789 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2790 
2791 	intel_enable_dp(encoder, pipe_config, conn_state);
2792 	intel_edp_backlight_on(intel_dp);
2793 }
2794 
2795 static void vlv_enable_dp(struct intel_encoder *encoder,
2796 			  struct intel_crtc_state *pipe_config,
2797 			  struct drm_connector_state *conn_state)
2798 {
2799 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2800 
2801 	intel_edp_backlight_on(intel_dp);
2802 	intel_psr_enable(intel_dp);
2803 }
2804 
2805 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
2806 			      struct intel_crtc_state *pipe_config,
2807 			      struct drm_connector_state *conn_state)
2808 {
2809 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2810 	enum port port = dp_to_dig_port(intel_dp)->port;
2811 
2812 	intel_dp_prepare(encoder, pipe_config);
2813 
2814 	/* Only ilk+ has port A */
2815 	if (port == PORT_A)
2816 		ironlake_edp_pll_on(intel_dp, pipe_config);
2817 }
2818 
/*
 * Logically disconnect the power sequencer currently bound to this port:
 * turn off VDD, clear the sequencer's port select, and mark pps_pipe
 * invalid. Caller must hold pps_mutex.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	enum i915_pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2847 
/*
 * Detach the given pipe's power sequencer from whichever eDP port is
 * currently using it, so the caller can claim it. Caller must hold
 * pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;
		enum port port;

		/* Only eDP ports can own a power sequencer */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		WARN(encoder->base.crtc,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2880 
/*
 * Bind the power sequencer of this port's current pipe to the port,
 * detaching/stealing sequencers as needed, and (re)program its
 * registers. Caller must hold pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* Already bound to the right pipe: nothing to do */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
}
2921 
/* VLV pre-enable: set up the PHY, then enable the port right away. */
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
			      struct intel_crtc_state *pipe_config,
			      struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder);

	intel_enable_dp(encoder, pipe_config, conn_state);
}
2930 
/* VLV pre-PLL-enable: program the port, then prepare the PHY PLL. */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  struct intel_crtc_state *pipe_config,
				  struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	vlv_phy_pre_pll_enable(encoder);
}
2939 
/* CHV pre-enable: set up the PHY, enable the port, release CL2 override. */
static void chv_pre_enable_dp(struct intel_encoder *encoder,
			      struct intel_crtc_state *pipe_config,
			      struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder);

	intel_enable_dp(encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}
2951 
/* CHV pre-PLL-enable: program the port, then prepare the PHY PLL. */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  struct intel_crtc_state *pipe_config,
				  struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	chv_phy_pre_pll_enable(encoder);
}
2960 
/* CHV post-PLL-disable hook: tear down the PHY state after PLL disable. */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
				    struct intel_crtc_state *pipe_config,
				    struct drm_connector_state *conn_state)
{
	chv_phy_post_pll_disable(encoder);
}
2967 
2968 /*
2969  * Fetch AUX CH registers 0x202 - 0x207 which contain
2970  * link status information
2971  */
2972 bool
2973 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2974 {
2975 	return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
2976 				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2977 }
2978 
2979 /* These are source-specific values. */
2980 uint8_t
2981 intel_dp_voltage_max(struct intel_dp *intel_dp)
2982 {
2983 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
2984 	enum port port = dp_to_dig_port(intel_dp)->port;
2985 
2986 	if (IS_BROXTON(dev_priv))
2987 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2988 	else if (INTEL_GEN(dev_priv) >= 9) {
2989 		if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
2990 			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2991 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2992 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2993 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2994 	else if (IS_GEN7(dev_priv) && port == PORT_A)
2995 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2996 	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
2997 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2998 	else
2999 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3000 }
3001 
/*
 * Return the maximum pre-emphasis level the source supports for the
 * given voltage swing. Higher swings allow less pre-emphasis headroom,
 * and the exact mapping is platform-specific.
 */
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_GEN(dev_priv) >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev_priv) && port == PORT_A) {
		/* Gen7 eDP (port A) supports a reduced pre-emphasis range. */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		/* Fallback mapping for all remaining platforms. */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3069 
/*
 * Program the VLV DPIO PHY with de-emphasis/pre-emphasis/transcale values
 * matching the requested training level in intel_dp->train_set[0].
 *
 * The register constants below are hardware tuning values; not every
 * swing/pre-emphasis combination is valid, invalid ones return 0 without
 * touching the PHY. Always returns 0 because the levels are applied
 * directly to the PHY rather than merged into the port register.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);

	return 0;
}
3155 
/*
 * Program the CHV PHY with de-emphasis/margin values derived from the
 * requested training level in intel_dp->train_set[0].
 *
 * The numeric values are hardware tuning constants. The unique
 * transition scale is only enabled for the maximum-swing/no-preemphasis
 * case. Invalid swing/pre-emphasis combinations return 0 without
 * touching the PHY; on success 0 is also returned since the levels are
 * applied to the PHY directly.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	uint8_t train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			uniq_trans_scale = true;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);

	return 0;
}
3238 
3239 static uint32_t
3240 gen4_signal_levels(uint8_t train_set)
3241 {
3242 	uint32_t	signal_levels = 0;
3243 
3244 	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3245 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3246 	default:
3247 		signal_levels |= DP_VOLTAGE_0_4;
3248 		break;
3249 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3250 		signal_levels |= DP_VOLTAGE_0_6;
3251 		break;
3252 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3253 		signal_levels |= DP_VOLTAGE_0_8;
3254 		break;
3255 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3256 		signal_levels |= DP_VOLTAGE_1_2;
3257 		break;
3258 	}
3259 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3260 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3261 	default:
3262 		signal_levels |= DP_PRE_EMPHASIS_0;
3263 		break;
3264 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3265 		signal_levels |= DP_PRE_EMPHASIS_3_5;
3266 		break;
3267 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3268 		signal_levels |= DP_PRE_EMPHASIS_6;
3269 		break;
3270 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3271 		signal_levels |= DP_PRE_EMPHASIS_9_5;
3272 		break;
3273 	}
3274 	return signal_levels;
3275 }
3276 
/* Gen6's DP voltage swing and pre-emphasis control */
/*
 * Only specific swing/pre-emphasis pairs are programmable on SNB;
 * several DPCD levels share one register encoding, and unsupported
 * combinations fall back to the 400/600mV 0dB setting with a debug
 * message.
 */
static uint32_t
gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
3304 
/* Gen7's DP voltage swing and pre-emphasis control */
/*
 * Each supported IVB swing/pre-emphasis pair maps to its own register
 * encoding; combinations the hardware cannot do fall back to 500mV 0dB
 * with a debug message.
 */
static uint32_t
gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
3335 
/*
 * Compute the platform-appropriate signal levels for the current
 * train_set and write them into the DP port register.
 *
 * On VLV/CHV the levels are programmed into the PHY by the helpers
 * themselves and mask stays 0, so no port register bits are replaced
 * and the "signal levels" debug line is skipped.
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev_priv)) {
		signal_levels = ddi_signal_levels(intel_dp);

		/* Broxton programs levels elsewhere; nothing to merge here. */
		if (IS_BROXTON(dev_priv))
			signal_levels = 0;
		else
			mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev_priv) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev_priv) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	/* Replace only the level bits, then flush the write. */
	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3382 
/*
 * Update the cached port register value (intel_dp->DP) with the
 * requested link training pattern and write it out, with a posting
 * read to flush the write.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3396 
3397 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3398 {
3399 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3400 	struct drm_device *dev = intel_dig_port->base.base.dev;
3401 	struct drm_i915_private *dev_priv = to_i915(dev);
3402 	enum port port = intel_dig_port->port;
3403 	uint32_t val;
3404 
3405 	if (!HAS_DDI(dev_priv))
3406 		return;
3407 
3408 	val = I915_READ(DP_TP_CTL(port));
3409 	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3410 	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3411 	I915_WRITE(DP_TP_CTL(port), val);
3412 
3413 	/*
3414 	 * On PORT_A we can have only eDP in SST mode. There the only reason
3415 	 * we need to set idle transmission mode is to work around a HW issue
3416 	 * where we enable the pipe while not in idle link-training mode.
3417 	 * In this case there is requirement to wait for a minimum number of
3418 	 * idle patterns to be sent.
3419 	 */
3420 	if (port == PORT_A)
3421 		return;
3422 
3423 	if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port),
3424 				    DP_TP_STATUS_IDLE_DONE,
3425 				    DP_TP_STATUS_IDLE_DONE,
3426 				    1))
3427 		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3428 }
3429 
/*
 * Take a non-DDI DP link down: switch to the idle training pattern,
 * disable the port, and apply the IBX transcoder-A workaround where
 * needed. The final cached register value is stored back into
 * intel_dp->DP.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t DP = intel_dp->DP;

	/* DDI platforms have their own link-down path. */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	/* Nothing to do if the port is already disabled. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* First drop into the idle training pattern... */
	if ((IS_GEN7(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev_priv))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* ...then disable the port and audio output. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		/* Re-enable underrun reporting after a vblank on pipe A. */
		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;
}
3498 
3499 bool
3500 intel_dp_read_dpcd(struct intel_dp *intel_dp)
3501 {
3502 	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
3503 			     sizeof(intel_dp->dpcd)) < 0)
3504 		return false; /* aux transfer failed */
3505 
3506 	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3507 
3508 	return intel_dp->dpcd[DP_DPCD_REV] != 0;
3509 }
3510 
/*
 * One-time DPCD initialization for eDP sinks: read the base DPCD, sink
 * description, PSR/PSR2 capabilities, the eDP display control block,
 * and any intermediate link rates the panel advertises.
 *
 * Returns false if the base DPCD read fails.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	intel_dp_read_desc(intel_dp);

	/* DPCD 1.1+: record whether AUX handshake can be skipped. */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
		dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
			DP_NO_AUX_HANDSHAKE_LINK_TRAINING;

	/* Check if the panel supports PSR */
	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
			 intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));
	if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
		dev_priv->psr.sink_support = true;
		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
	}

	/* PSR2 is only considered on gen9+ hardware. */
	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
		uint8_t frame_sync_cap;

		dev_priv->psr.sink_support = true;
		drm_dp_dpcd_read(&intel_dp->aux,
				 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
				 &frame_sync_cap, 1);
		dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
		/* PSR2 needs frame sync as well */
		dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
		DRM_DEBUG_KMS("PSR2 %s on sink",
			      dev_priv->psr.psr2_support ? "supported" : "not supported");
	}

	/* Read the eDP Display control capabilities registers */
	if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
			     sizeof(intel_dp->edp_dpcd))
		DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
			      intel_dp->edp_dpcd);

	/* Intermediate frequency support */
	if (intel_dp->edp_dpcd[0] >= 0x03) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				sink_rates, sizeof(sink_rates));

		/* The rate table is zero-terminated. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	return true;
}
3583 
3584 
/*
 * Refresh the sink's DPCD, sink count and (for branch devices) the
 * downstream port info. Returns false on AUX failure or when a DP
 * dongle is present without any attached display.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
			     &intel_dp->sink_count, 1) < 0)
		return false;

	/*
	 * Sink count can change between short pulse hpd hence
	 * a member variable in intel_dp will track any changes
	 * between short pulse interrupts.
	 */
	intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);

	/*
	 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
	 * a dongle is present but no display. Unless we require to know
	 * if a dongle is present or not, we don't need to update
	 * downstream port information. So, an early return here saves
	 * time from performing other operations which are not required.
	 */
	if (!is_edp(intel_dp) && !intel_dp->sink_count)
		return false;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3625 
3626 static bool
3627 intel_dp_can_mst(struct intel_dp *intel_dp)
3628 {
3629 	u8 buf[1];
3630 
3631 	if (!i915.enable_dp_mst)
3632 		return false;
3633 
3634 	if (!intel_dp->can_mst)
3635 		return false;
3636 
3637 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3638 		return false;
3639 
3640 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1) != 1)
3641 		return false;
3642 
3643 	return buf[0] & DP_MST_CAP;
3644 }
3645 
3646 static void
3647 intel_dp_configure_mst(struct intel_dp *intel_dp)
3648 {
3649 	if (!i915.enable_dp_mst)
3650 		return;
3651 
3652 	if (!intel_dp->can_mst)
3653 		return;
3654 
3655 	intel_dp->is_mst = intel_dp_can_mst(intel_dp);
3656 
3657 	if (intel_dp->is_mst)
3658 		DRM_DEBUG_KMS("Sink is MST capable\n");
3659 	else
3660 		DRM_DEBUG_KMS("Sink is not MST capable\n");
3661 
3662 	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
3663 					intel_dp->is_mst);
3664 }
3665 
/*
 * Stop sink CRC calculation: clear DP_TEST_SINK_START and wait (up to
 * 10 vblanks) for the sink's CRC count to drain to zero. IPS is
 * re-enabled on exit regardless of outcome.
 *
 * Returns 0 on success, -EIO on AUX failures, -ETIMEDOUT if the
 * counter never reaches zero.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;
	int count = 0;
	int attempts = 10;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Clear only the start bit, preserving the other TEST_SINK bits. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Wait one vblank per attempt for the CRC count to reach zero. */
	do {
		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		count = buf & DP_TEST_COUNT_MASK;
	} while (--attempts && count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
		ret = -ETIMEDOUT;
	}

 out:
	hsw_enable_ips(intel_crtc);
	return ret;
}
3709 
/*
 * Start sink CRC calculation: verify the sink supports CRC, stop any
 * calculation already in progress, disable IPS (which would perturb the
 * CRC), set DP_TEST_SINK_START, and wait a vblank for it to take
 * effect.
 *
 * Returns 0 on success, -ENOTTY if the sink lacks CRC support, -EIO on
 * AUX failures.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* A previous CRC run must be fully stopped before starting anew. */
	if (buf & DP_TEST_SINK_START) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	hsw_disable_ips(intel_crtc);

	/* Re-enable IPS if we fail to start the CRC calculation. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return 0;
}
3744 
/*
 * Retrieve a 6-byte CRC from the sink into @crc: start the CRC test,
 * wait (up to 6 vblanks) for the sink to report a nonzero CRC count,
 * then read DP_TEST_CRC_R_CR. The test is always stopped before
 * returning.
 *
 * Returns 0 on success, negative errno on failure.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	/* Poll once per vblank until the sink has computed a CRC. */
	do {
		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

	} while (--attempts && count == 0);

	if (attempts == 0) {
		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto stop;
	}

stop:
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
3785 
3786 static bool
3787 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3788 {
3789 	return drm_dp_dpcd_read(&intel_dp->aux,
3790 				       DP_DEVICE_SERVICE_IRQ_VECTOR,
3791 				       sink_irq_vector, 1) == 1;
3792 }
3793 
3794 static bool
3795 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3796 {
3797 	int ret;
3798 
3799 	ret = drm_dp_dpcd_read(&intel_dp->aux,
3800 					     DP_SINK_COUNT_ESI,
3801 					     sink_irq_vector, 14);
3802 	if (ret != 14)
3803 		return false;
3804 
3805 	return true;
3806 }
3807 
3808 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
3809 {
3810 	uint8_t test_result = DP_TEST_ACK;
3811 	return test_result;
3812 }
3813 
3814 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
3815 {
3816 	uint8_t test_result = DP_TEST_NAK;
3817 	return test_result;
3818 }
3819 
3820 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
3821 {
3822 	uint8_t test_result = DP_TEST_NAK;
3823 	struct intel_connector *intel_connector = intel_dp->attached_connector;
3824 	struct drm_connector *connector = &intel_connector->base;
3825 
3826 	if (intel_connector->detect_edid == NULL ||
3827 	    connector->edid_corrupt ||
3828 	    intel_dp->aux.i2c_defer_count > 6) {
3829 		/* Check EDID read for NACKs, DEFERs and corruption
3830 		 * (DP CTS 1.2 Core r1.1)
3831 		 *    4.2.2.4 : Failed EDID read, I2C_NAK
3832 		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
3833 		 *    4.2.2.6 : EDID corruption detected
3834 		 * Use failsafe mode for all cases
3835 		 */
3836 		if (intel_dp->aux.i2c_nack_count > 0 ||
3837 			intel_dp->aux.i2c_defer_count > 0)
3838 			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
3839 				      intel_dp->aux.i2c_nack_count,
3840 				      intel_dp->aux.i2c_defer_count);
3841 		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
3842 	} else {
3843 		struct edid *block = intel_connector->detect_edid;
3844 
3845 		/* We have to write the checksum
3846 		 * of the last block read
3847 		 */
3848 		block += intel_connector->detect_edid->extensions;
3849 
3850 		if (!drm_dp_dpcd_write(&intel_dp->aux,
3851 					DP_TEST_EDID_CHECKSUM,
3852 					&block->checksum,
3853 					1))
3854 			DRM_DEBUG_KMS("Failed to write EDID checksum\n");
3855 
3856 		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
3857 		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
3858 	}
3859 
3860 	/* Set test active flag here so userspace doesn't interrupt things */
3861 	intel_dp->compliance_test_active = 1;
3862 
3863 	return test_result;
3864 }
3865 
3866 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
3867 {
3868 	uint8_t test_result = DP_TEST_NAK;
3869 	return test_result;
3870 }
3871 
/*
 * Service a sink-initiated compliance test: read DP_TEST_REQUEST,
 * dispatch to the matching autotest handler, and write the handler's
 * ACK/NAK back to DP_TEST_RESPONSE. Unknown or unreadable requests are
 * NAKed.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	uint8_t response = DP_TEST_NAK;
	uint8_t rxdata = 0;
	int status = 0;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	switch (rxdata) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
		break;
	}

update_status:
	/* Always report a response, even when the request read failed. */
	status = drm_dp_dpcd_write(&intel_dp->aux,
				   DP_TEST_RESPONSE,
				   &response, 1);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
3917 
/*
 * Service an MST sink interrupt: read the Event Status Indicator (ESI)
 * bytes, retrain the link if channel EQ was lost, and let the MST
 * topology manager handle and ack any events.  Loops (via go_again) as
 * long as the sink keeps reporting fresh ESI data.
 *
 * Returns the MST manager's result (or 0 if nothing was handled), or
 * -EINVAL when not in MST mode or when the sink stopped answering ESI
 * reads, in which case MST mode is torn down and a hotplug event sent.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced events; retry the AUX
				 * write a few times in case it is flaky. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* Re-read: more events may have arrived. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
3974 
/*
 * Retrain the DP link on the encoder's current CRTC, suppressing FIFO
 * underrun reporting (CPU pipe, and PCH transcoder where applicable)
 * for the duration of training since retraining is known to cause
 * spurious underruns.
 */
static void
intel_dp_retrain_link(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	/* Suppress underruns caused by re-training */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	if (crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), false);

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	/* Keep underrun reporting disabled until things are stable */
	intel_wait_for_vblank(dev_priv, crtc->pipe);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
	if (crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), true);
}
3999 
/*
 * Check the sink-reported link status and retrain the link if channel
 * EQ is no longer ok, or unconditionally when a LINK_TRAINING
 * compliance test is pending.  Bails out silently when the encoder has
 * no active CRTC or no recorded lane count.  Caller must hold
 * connection_mutex.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		DRM_ERROR("Failed to get link status\n");
		return;
	}

	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* FIXME: we need to synchronize this sort of stuff with hardware
	 * readout. Currently fast link training doesn't work on boot-up. */
	if (!intel_dp->lane_count)
		return;

	/* if link training is requested we should perform it always */
	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
	    (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);

		intel_dp_retrain_link(intel_dp);
	}
}
4034 
4035 /*
4036  * According to DP spec
4037  * 5.1.2:
4038  *  1. Read DPCD
4039  *  2. Configure link according to Receiver Capabilities
4040  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4041  *  4. Check link status on receipt of hot-plug interrupt
4042  *
4043  * intel_dp_short_pulse -  handles short pulse interrupts
4044  * when full detection is not required.
4045  * Returns %true if short pulse is handled and full detection
4046  * is NOT required and %false otherwise.
4047  */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	u8 sink_irq_vector = 0;
	/* Snapshot before intel_dp_get_dpcd() refreshes sink_count. */
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
	    sink_irq_vector != 0) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		/* Test requests are only serviced from the long-pulse path. */
		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retraining in check_link_status requires the connection mutex. */
	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	intel_dp_check_link_status(intel_dp);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);

	return true;
}
4098 
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Decide connector status from the freshly-read DPCD, taking branch
 * (downstream-port) devices into account: HPD-capable branches report
 * via SINK_COUNT, otherwise we fall back to probing DDC and finally
 * give up with "unknown" for port types that legitimately may carry
 * nothing (VGA / analog / non-EDID).
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	if (is_edp(intel_dp))
		return connector_status_connected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {

		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD < 1.1: only the legacy downstream-port type field. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4149 
4150 static enum drm_connector_status
4151 edp_detect(struct intel_dp *intel_dp)
4152 {
4153 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4154 	enum drm_connector_status status;
4155 
4156 	status = intel_panel_detect(dev);
4157 	if (status == connector_status_unknown)
4158 		status = connector_status_connected;
4159 
4160 	return status;
4161 }
4162 
4163 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4164 				       struct intel_digital_port *port)
4165 {
4166 	u32 bit;
4167 
4168 	switch (port->port) {
4169 	case PORT_A:
4170 		return true;
4171 	case PORT_B:
4172 		bit = SDE_PORTB_HOTPLUG;
4173 		break;
4174 	case PORT_C:
4175 		bit = SDE_PORTC_HOTPLUG;
4176 		break;
4177 	case PORT_D:
4178 		bit = SDE_PORTD_HOTPLUG;
4179 		break;
4180 	default:
4181 		MISSING_CASE(port->port);
4182 		return false;
4183 	}
4184 
4185 	return I915_READ(SDEISR) & bit;
4186 }
4187 
4188 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4189 				       struct intel_digital_port *port)
4190 {
4191 	u32 bit;
4192 
4193 	switch (port->port) {
4194 	case PORT_A:
4195 		return true;
4196 	case PORT_B:
4197 		bit = SDE_PORTB_HOTPLUG_CPT;
4198 		break;
4199 	case PORT_C:
4200 		bit = SDE_PORTC_HOTPLUG_CPT;
4201 		break;
4202 	case PORT_D:
4203 		bit = SDE_PORTD_HOTPLUG_CPT;
4204 		break;
4205 	case PORT_E:
4206 		bit = SDE_PORTE_HOTPLUG_SPT;
4207 		break;
4208 	default:
4209 		MISSING_CASE(port->port);
4210 		return false;
4211 	}
4212 
4213 	return I915_READ(SDEISR) & bit;
4214 }
4215 
4216 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4217 				       struct intel_digital_port *port)
4218 {
4219 	u32 bit;
4220 
4221 	switch (port->port) {
4222 	case PORT_B:
4223 		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4224 		break;
4225 	case PORT_C:
4226 		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4227 		break;
4228 	case PORT_D:
4229 		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4230 		break;
4231 	default:
4232 		MISSING_CASE(port->port);
4233 		return false;
4234 	}
4235 
4236 	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4237 }
4238 
4239 static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4240 					struct intel_digital_port *port)
4241 {
4242 	u32 bit;
4243 
4244 	switch (port->port) {
4245 	case PORT_B:
4246 		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4247 		break;
4248 	case PORT_C:
4249 		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4250 		break;
4251 	case PORT_D:
4252 		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4253 		break;
4254 	default:
4255 		MISSING_CASE(port->port);
4256 		return false;
4257 	}
4258 
4259 	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4260 }
4261 
/*
 * Live hotplug status for Broxton, read from GEN8_DE_PORT_ISR.  The
 * port is derived from the encoder's hpd pin rather than taken from
 * the digital port directly.
 */
static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *intel_dig_port)
{
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum port port;
	u32 bit;

	/* NOTE(review): the return value of intel_hpd_pin_to_port() is
	 * ignored here; if the lookup can fail, 'port' may be used
	 * uninitialized — TODO confirm against its definition. */
	intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
	switch (port) {
	case PORT_A:
		bit = BXT_DE_PORT_HP_DDIA;
		break;
	case PORT_B:
		bit = BXT_DE_PORT_HP_DDIB;
		break;
	case PORT_C:
		bit = BXT_DE_PORT_HP_DDIC;
		break;
	default:
		MISSING_CASE(port);
		return false;
	}

	return I915_READ(GEN8_DE_PORT_ISR) & bit;
}
4287 
4288 /*
4289  * intel_digital_port_connected - is the specified port connected?
4290  * @dev_priv: i915 private structure
4291  * @port: the port to test
4292  *
4293  * Return %true if @port is connected, %false otherwise.
4294  */
4295 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4296 				  struct intel_digital_port *port)
4297 {
4298 	if (HAS_PCH_IBX(dev_priv))
4299 		return ibx_digital_port_connected(dev_priv, port);
4300 	else if (HAS_PCH_SPLIT(dev_priv))
4301 		return cpt_digital_port_connected(dev_priv, port);
4302 	else if (IS_BROXTON(dev_priv))
4303 		return bxt_digital_port_connected(dev_priv, port);
4304 	else if (IS_GM45(dev_priv))
4305 		return gm45_digital_port_connected(dev_priv, port);
4306 	else
4307 		return g4x_digital_port_connected(dev_priv, port);
4308 }
4309 
4310 static struct edid *
4311 intel_dp_get_edid(struct intel_dp *intel_dp)
4312 {
4313 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4314 
4315 	/* use cached edid if we have one */
4316 	if (intel_connector->edid) {
4317 		/* invalid edid */
4318 		if (IS_ERR(intel_connector->edid))
4319 			return NULL;
4320 
4321 		return drm_edid_duplicate(intel_connector->edid);
4322 	} else
4323 		return drm_get_edid(&intel_connector->base,
4324 				    &intel_dp->aux.ddc);
4325 }
4326 
4327 static void
4328 intel_dp_set_edid(struct intel_dp *intel_dp)
4329 {
4330 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4331 	struct edid *edid;
4332 
4333 	intel_dp_unset_edid(intel_dp);
4334 	edid = intel_dp_get_edid(intel_dp);
4335 	intel_connector->detect_edid = edid;
4336 
4337 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4338 		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4339 	else
4340 		intel_dp->has_audio = drm_detect_monitor_audio(edid);
4341 }
4342 
4343 static void
4344 intel_dp_unset_edid(struct intel_dp *intel_dp)
4345 {
4346 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4347 
4348 	kfree(intel_connector->detect_edid);
4349 	intel_connector->detect_edid = NULL;
4350 
4351 	intel_dp->has_audio = false;
4352 }
4353 
/*
 * Full (long-pulse) detection: determine connector status, refresh
 * DPCD-derived state, configure MST, re-read the EDID and service any
 * pending sink interrupts/test requests.  Holds the AUX power domain
 * for the duration.  Sets intel_dp->detect_done so a subsequent
 * intel_dp_detect() can reuse the result.
 */
static enum drm_connector_status
intel_dp_long_pulse(struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	u8 sink_irq_vector = 0;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(to_i915(dev), power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(to_i915(dev),
					      dp_to_dig_port(intel_dp)))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Reset compliance-test state for the next test request. */
		intel_dp->compliance_test_active = 0;
		intel_dp->compliance_test_type = 0;
		intel_dp->compliance_test_data = 0;

		if (intel_dp->is_mst) {
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst,
				      intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DP;

	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

	intel_dp_print_rates(intel_dp);

	intel_dp_read_desc(intel_dp);

	intel_dp_configure_mst(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	} else if (connector->status == connector_status_connected) {
		/*
		 * If display was connected already and is still connected
		 * check links status, there has been known issues of
		 * link loss triggerring long pulse!!!!
		 */
		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
		intel_dp_check_link_status(intel_dp);
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
		goto out;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (is_edp(intel_dp) || intel_connector->detect_edid)
		status = connector_status_connected;
	intel_dp->detect_done = true;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
	    sink_irq_vector != 0) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	intel_display_power_put(to_i915(dev), power_domain);
	return status;
}
4463 
/*
 * Connector .detect() hook.  Reuses the status cached by a preceding
 * long pulse when detect_done is set, otherwise runs the full
 * long-pulse detection.  detect_done is always consumed so the next
 * detect probes afresh.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	enum drm_connector_status status = connector->status;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);

	/* If full detect is not performed yet, do a full detect */
	if (!intel_dp->detect_done)
		status = intel_dp_long_pulse(intel_dp->attached_connector);

	intel_dp->detect_done = false;

	return status;
}
4481 
/*
 * Connector .force() hook: re-read the EDID for a connector whose
 * status is being forced, taking the AUX power domain around the
 * probe.  Does nothing for a connector forced to disconnected.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DP;
}
4507 
4508 static int intel_dp_get_modes(struct drm_connector *connector)
4509 {
4510 	struct intel_connector *intel_connector = to_intel_connector(connector);
4511 	struct edid *edid;
4512 
4513 	edid = intel_connector->detect_edid;
4514 	if (edid) {
4515 		int ret = intel_connector_update_modes(connector, edid);
4516 		if (ret)
4517 			return ret;
4518 	}
4519 
4520 	/* if eDP has no EDID, fall back to fixed mode */
4521 	if (is_edp(intel_attached_dp(connector)) &&
4522 	    intel_connector->panel.fixed_mode) {
4523 		struct drm_display_mode *mode;
4524 
4525 		mode = drm_mode_duplicate(connector->dev,
4526 					  intel_connector->panel.fixed_mode);
4527 		if (mode) {
4528 			drm_mode_probed_add(connector, mode);
4529 			return 1;
4530 		}
4531 	}
4532 
4533 	return 0;
4534 }
4535 
4536 static bool
4537 intel_dp_detect_audio(struct drm_connector *connector)
4538 {
4539 	bool has_audio = false;
4540 	struct edid *edid;
4541 
4542 	edid = to_intel_connector(connector)->detect_edid;
4543 	if (edid)
4544 		has_audio = drm_detect_monitor_audio(edid);
4545 
4546 	return has_audio;
4547 }
4548 
/*
 * Connector .set_property() hook.  Handles the force-audio and
 * broadcast-RGB properties, plus the eDP scaling-mode property, and
 * triggers a modeset restore when a property change affects the
 * current configuration.  Returns 0 on success or a negative errno.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		/* No-op if the override didn't change. */
		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* Only restore the mode if effective audio state changed. */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the restore when nothing effectively changed. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}
		if (HAS_GMCH_DISPLAY(dev_priv) &&
		    val == DRM_MODE_SCALE_CENTER) {
			DRM_DEBUG_KMS("centering not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Re-apply the current mode so the property takes effect. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4641 
/*
 * Connector .late_register() hook: register the core connector state,
 * add the debugfs entries and finally register the AUX channel against
 * the connector's kernel device.  Returns 0 or a negative errno.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	i915_debugfs_connector_add(connector);

	DRM_DEBUG_KMS("registering %s bus for %s\n",
		      intel_dp->aux.name, connector->kdev->kobj.name);

	/* The AUX device node hangs off the connector's kdev. */
	intel_dp->aux.dev = connector->kdev;
	return drm_dp_aux_register(&intel_dp->aux);
}
4660 
4661 static void
4662 intel_dp_connector_unregister(struct drm_connector *connector)
4663 {
4664 	drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
4665 	intel_connector_unregister(connector);
4666 }
4667 
/*
 * Connector .destroy() hook: free the cached EDIDs, tear down the eDP
 * panel state if applicable, and release the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* The cached EDID may be an ERR_PTR marker, which must not be freed. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4686 
/*
 * Encoder .destroy() hook: tear down MST state, make sure eDP VDD is
 * really off (the delayed-off work may still hold it), unregister the
 * reboot notifier, release the AUX channel and free the port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled do to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}

	intel_dp_aux_fini(intel_dp);

	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4714 
/*
 * Suspend hook for eDP encoders: cancel the pending delayed VDD-off
 * work and force VDD off synchronously under the pps lock so the panel
 * is in a known state across suspend.  No-op for non-eDP.
 */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled do to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4731 
/*
 * Adopt a VDD enable left on by the BIOS: take the matching power
 * domain reference our state tracking expects and schedule the normal
 * delayed VDD off.  Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4756 
/*
 * Encoder .reset() hook (boot/resume): re-read the port register on
 * non-DDI platforms, resume an active LSPCON, and for eDP re-init the
 * power sequencer and sanitize any BIOS-left VDD state.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	if (!HAS_DDI(dev_priv))
		intel_dp->DP = I915_READ(intel_dp->output_reg);

	if (IS_GEN9(dev_priv) && lspcon->active)
		lspcon_resume(lspcon);

	/* Only eDP needs the PPS/VDD handling below. */
	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	pps_lock(intel_dp);

	/* Reinit the power sequencer, in case BIOS did something with it. */
	intel_dp_pps_init(encoder->dev, intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4781 
/* Connector vtable shared by all DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4795 
/* Probe helpers: mode enumeration and validation for DP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
};
4800 
/* Encoder vtable for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4805 
/*
 * Hotplug pulse handler for a DP digital port.  Long pulses only flag
 * that a full detect is needed (and are ignored entirely on eDP, to
 * avoid the vdd-off -> long-hpd feedback loop); short pulses are
 * serviced here under the AUX power domain, either via the MST status
 * path or the SST short-pulse path.  Returns IRQ_HANDLED when the
 * event was fully serviced, IRQ_NONE when a full detect should follow.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
		intel_dig_port->base.type = INTEL_OUTPUT_DP;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	if (long_hpd) {
		/* Force the next detect to run the full long-pulse path. */
		intel_dp->detect_done = false;
		return IRQ_NONE;
	}

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (intel_dp->is_mst) {
		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
			/*
			 * If we were in MST mode, and device is not
			 * there, get out of MST mode
			 */
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
			intel_dp->detect_done = false;
			goto put_power;
		}
	}

	if (!intel_dp->is_mst) {
		if (!intel_dp_short_pulse(intel_dp)) {
			/* Short pulse couldn't handle it; full detect needed. */
			intel_dp->detect_done = false;
			goto put_power;
		}
	}

	ret = IRQ_HANDLED;

put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
4874 
4875 /* check the VBT to see whether the eDP is on another port */
4876 bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
4877 {
4878 	/*
4879 	 * eDP not supported on g4x. so bail out early just
4880 	 * for a bit extra safety in case the VBT is bonkers.
4881 	 */
4882 	if (INTEL_GEN(dev_priv) < 5)
4883 		return false;
4884 
4885 	if (port == PORT_A)
4886 		return true;
4887 
4888 	return intel_bios_is_port_edp(dev_priv, port);
4889 }
4890 
/*
 * Attach the standard DP connector properties (force-audio,
 * broadcast-RGB) and, for eDP, the scaling-mode property defaulting to
 * aspect-preserving scaling.
 */
void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
4909 
/*
 * Seed the panel power sequencing timestamps with the current time so
 * that later delay calculations (power on/off, backlight on/off waits)
 * measure from driver initialization rather than from garbage values.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
4916 
/*
 * Read the panel power sequencing delays (T1-T12) back out of the PPS
 * hardware registers into @seq. Values are stored in the hardware's
 * 100usec units (see the spec limits in the sequencer init below).
 */
static void
intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
			   struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	struct pps_registers regs;

	intel_pps_get_registers(dev_priv, intel_dp, &regs);

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(regs.pp_on);
	pp_off = I915_READ(regs.pp_off);
	if (!IS_BROXTON(dev_priv)) {
		/* Write the unlocked control value back, then read the
		 * divider register, which BXT doesn't have (the cycle
		 * delay lives in the control register there instead). */
		I915_WRITE(regs.pp_ctrl, pp_ctl);
		pp_div = I915_READ(regs.pp_div);
	}

	/* Pull timing values out of registers */
	seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		     PANEL_POWER_UP_DELAY_SHIFT;

	seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		  PANEL_LIGHT_ON_DELAY_SHIFT;

	seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		  PANEL_LIGHT_OFF_DELAY_SHIFT;

	seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		   PANEL_POWER_DOWN_DELAY_SHIFT;

	if (IS_BROXTON(dev_priv)) {
		/* BXT stores T11/T12 in the control register in 100ms
		 * units, 1-based; convert to the common 100usec units. */
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			seq->t11_t12 = (tmp - 1) * 1000;
		else
			seq->t11_t12 = 0;
	} else {
		seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}
}
4962 
/* Dump one set of panel power sequencing delays, tagged with @state_name
 * (e.g. "sw", "hw", "cur", "vbt") so mismatching sources can be compared. */
static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}
4970 
4971 static void
4972 intel_pps_verify_state(struct drm_i915_private *dev_priv,
4973 		       struct intel_dp *intel_dp)
4974 {
4975 	struct edp_power_seq hw;
4976 	struct edp_power_seq *sw = &intel_dp->pps_delays;
4977 
4978 	intel_pps_readout_hw_state(dev_priv, intel_dp, &hw);
4979 
4980 	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
4981 	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
4982 		DRM_ERROR("PPS state mismatch\n");
4983 		intel_pps_dump_state("sw", sw);
4984 		intel_pps_dump_state("hw", &hw);
4985 	}
4986 }
4987 
/*
 * Compute the final panel power sequencing delays from the max of the
 * current hardware state and the VBT, falling back to the eDP spec
 * limits when both are unset, and derive the millisecond delays used
 * by the manual waits elsewhere in the driver.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(dev_priv, intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from the hw's 100usec units to milliseconds, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;
}
5059 
/*
 * Program the panel power sequencing delays from intel_dp->pps_delays
 * into the PPS hardware registers, optionally forcing VDD off first,
 * and select the port the sequencer is driving where the hardware
 * still has port-select bits.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = dev_priv->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(dev_priv, intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ironlake_get_pp_control(intel_dp);

		WARN(pp & PANEL_POWER_ON, "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			DRM_DEBUG_KMS("VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		I915_WRITE(regs.pp_ctrl, pp);
	}

	/* Pack the delays (in 100usec hw units) into the on/off registers. */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev_priv)) {
		/* BXT keeps the power cycle delay in the control register,
		 * in 100ms units; round up and preserve the other bits. */
		pp_div = I915_READ(regs.pp_ctrl);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(regs.pp_on, pp_on);
	I915_WRITE(regs.pp_off, pp_off);
	if (IS_BROXTON(dev_priv))
		I915_WRITE(regs.pp_ctrl, pp_div);
	else
		I915_WRITE(regs.pp_div, pp_div);

	/* Read back what actually landed in the registers for the log. */
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(regs.pp_on),
		      I915_READ(regs.pp_off),
		      IS_BROXTON(dev_priv) ?
		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(regs.pp_div));
}
5145 
5146 static void intel_dp_pps_init(struct drm_device *dev,
5147 			      struct intel_dp *intel_dp)
5148 {
5149 	struct drm_i915_private *dev_priv = to_i915(dev);
5150 
5151 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5152 		vlv_initial_power_sequencer_setup(intel_dp);
5153 	} else {
5154 		intel_dp_init_panel_power_sequencer(dev, intel_dp);
5155 		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
5156 	}
5157 }
5158 
5159 /**
5160  * intel_dp_set_drrs_state - program registers for RR switch to take effect
5161  * @dev_priv: i915 device
5162  * @crtc_state: a pointer to the active intel_crtc_state
5163  * @refresh_rate: RR to be programmed
5164  *
5165  * This function gets called when refresh rate (RR) has to be changed from
5166  * one frequency to another. Switches can be between high and low RR
5167  * supported by the panel or to any other RR based on media playback (in
5168  * this case, RR value needs to be passed from user space).
5169  *
5170  * The caller of this function needs to take a lock on dev_priv->drrs.
5171  */
5172 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
5173 				    struct intel_crtc_state *crtc_state,
5174 				    int refresh_rate)
5175 {
5176 	struct intel_encoder *encoder;
5177 	struct intel_digital_port *dig_port = NULL;
5178 	struct intel_dp *intel_dp = dev_priv->drrs.dp;
5179 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
5180 	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5181 
5182 	if (refresh_rate <= 0) {
5183 		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5184 		return;
5185 	}
5186 
5187 	if (intel_dp == NULL) {
5188 		DRM_DEBUG_KMS("DRRS not supported.\n");
5189 		return;
5190 	}
5191 
5192 	/*
5193 	 * FIXME: This needs proper synchronization with psr state for some
5194 	 * platforms that cannot have PSR and DRRS enabled at the same time.
5195 	 */
5196 
5197 	dig_port = dp_to_dig_port(intel_dp);
5198 	encoder = &dig_port->base;
5199 	intel_crtc = to_intel_crtc(encoder->base.crtc);
5200 
5201 	if (!intel_crtc) {
5202 		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5203 		return;
5204 	}
5205 
5206 	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5207 		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5208 		return;
5209 	}
5210 
5211 	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5212 			refresh_rate)
5213 		index = DRRS_LOW_RR;
5214 
5215 	if (index == dev_priv->drrs.refresh_rate_type) {
5216 		DRM_DEBUG_KMS(
5217 			"DRRS requested for previously set RR...ignoring\n");
5218 		return;
5219 	}
5220 
5221 	if (!crtc_state->base.active) {
5222 		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5223 		return;
5224 	}
5225 
5226 	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
5227 		switch (index) {
5228 		case DRRS_HIGH_RR:
5229 			intel_dp_set_m_n(intel_crtc, M1_N1);
5230 			break;
5231 		case DRRS_LOW_RR:
5232 			intel_dp_set_m_n(intel_crtc, M2_N2);
5233 			break;
5234 		case DRRS_MAX_RR:
5235 		default:
5236 			DRM_ERROR("Unsupported refreshrate type\n");
5237 		}
5238 	} else if (INTEL_GEN(dev_priv) > 6) {
5239 		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
5240 		u32 val;
5241 
5242 		val = I915_READ(reg);
5243 		if (index > DRRS_HIGH_RR) {
5244 			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5245 				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5246 			else
5247 				val |= PIPECONF_EDP_RR_MODE_SWITCH;
5248 		} else {
5249 			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5250 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5251 			else
5252 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5253 		}
5254 		I915_WRITE(reg, val);
5255 	}
5256 
5257 	dev_priv->drrs.refresh_rate_type = index;
5258 
5259 	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5260 }
5261 
5262 /**
5263  * intel_edp_drrs_enable - init drrs struct if supported
5264  * @intel_dp: DP struct
5265  * @crtc_state: A pointer to the active crtc state.
5266  *
5267  * Initializes frontbuffer_bits and drrs.dp
5268  */
5269 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
5270 			   struct intel_crtc_state *crtc_state)
5271 {
5272 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5273 	struct drm_i915_private *dev_priv = to_i915(dev);
5274 
5275 	if (!crtc_state->has_drrs) {
5276 		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5277 		return;
5278 	}
5279 
5280 	mutex_lock(&dev_priv->drrs.mutex);
5281 	if (WARN_ON(dev_priv->drrs.dp)) {
5282 		DRM_ERROR("DRRS already enabled\n");
5283 		goto unlock;
5284 	}
5285 
5286 	dev_priv->drrs.busy_frontbuffer_bits = 0;
5287 
5288 	dev_priv->drrs.dp = intel_dp;
5289 
5290 unlock:
5291 	mutex_unlock(&dev_priv->drrs.mutex);
5292 }
5293 
5294 /**
5295  * intel_edp_drrs_disable - Disable DRRS
5296  * @intel_dp: DP struct
5297  * @old_crtc_state: Pointer to old crtc_state.
5298  *
5299  */
5300 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
5301 			    struct intel_crtc_state *old_crtc_state)
5302 {
5303 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5304 	struct drm_i915_private *dev_priv = to_i915(dev);
5305 
5306 	if (!old_crtc_state->has_drrs)
5307 		return;
5308 
5309 	mutex_lock(&dev_priv->drrs.mutex);
5310 	if (!dev_priv->drrs.dp) {
5311 		mutex_unlock(&dev_priv->drrs.mutex);
5312 		return;
5313 	}
5314 
5315 	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5316 		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
5317 			intel_dp->attached_connector->panel.fixed_mode->vrefresh);
5318 
5319 	dev_priv->drrs.dp = NULL;
5320 	mutex_unlock(&dev_priv->drrs.mutex);
5321 
5322 	cancel_delayed_work_sync(&dev_priv->drrs.work);
5323 }
5324 
/*
 * Delayed work that downclocks the panel to its low refresh rate after
 * a period of frontbuffer idleness (scheduled from intel_edp_drrs_flush()).
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	/* DRRS may have been disabled while the work was queued. */
	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		/* Idle long enough: switch to the downclocked refresh rate. */
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			intel_dp->attached_connector->panel.downclock_mode->vrefresh);
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5356 
5357 /**
5358  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5359  * @dev_priv: i915 device
5360  * @frontbuffer_bits: frontbuffer plane tracking bits
5361  *
5362  * This function gets called everytime rendering on the given planes start.
5363  * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5364  *
5365  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5366  */
5367 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
5368 			       unsigned int frontbuffer_bits)
5369 {
5370 	struct drm_crtc *crtc;
5371 	enum i915_pipe pipe;
5372 
5373 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5374 		return;
5375 
5376 	cancel_delayed_work(&dev_priv->drrs.work);
5377 
5378 	mutex_lock(&dev_priv->drrs.mutex);
5379 	if (!dev_priv->drrs.dp) {
5380 		mutex_unlock(&dev_priv->drrs.mutex);
5381 		return;
5382 	}
5383 
5384 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5385 	pipe = to_intel_crtc(crtc)->pipe;
5386 
5387 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5388 	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5389 
5390 	/* invalidate means busy screen hence upclock */
5391 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5392 		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5393 			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
5394 
5395 	mutex_unlock(&dev_priv->drrs.mutex);
5396 }
5397 
5398 /**
5399  * intel_edp_drrs_flush - Restart Idleness DRRS
5400  * @dev_priv: i915 device
5401  * @frontbuffer_bits: frontbuffer plane tracking bits
5402  *
5403  * This function gets called every time rendering on the given planes has
5404  * completed or flip on a crtc is completed. So DRRS should be upclocked
5405  * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5406  * if no other planes are dirty.
5407  *
5408  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5409  */
5410 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
5411 			  unsigned int frontbuffer_bits)
5412 {
5413 	struct drm_crtc *crtc;
5414 	enum i915_pipe pipe;
5415 
5416 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5417 		return;
5418 
5419 	cancel_delayed_work(&dev_priv->drrs.work);
5420 
5421 	mutex_lock(&dev_priv->drrs.mutex);
5422 	if (!dev_priv->drrs.dp) {
5423 		mutex_unlock(&dev_priv->drrs.mutex);
5424 		return;
5425 	}
5426 
5427 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5428 	pipe = to_intel_crtc(crtc)->pipe;
5429 
5430 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5431 	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5432 
5433 	/* flush means busy screen hence upclock */
5434 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5435 		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5436 				dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
5437 
5438 	/*
5439 	 * flush also means no more activity hence schedule downclock, if all
5440 	 * other fbs are quiescent too
5441 	 */
5442 	if (!dev_priv->drrs.busy_frontbuffer_bits)
5443 		schedule_delayed_work(&dev_priv->drrs.work,
5444 				msecs_to_jiffies(1000));
5445 	mutex_unlock(&dev_priv->drrs.mutex);
5446 }
5447 
5448 /**
5449  * DOC: Display Refresh Rate Switching (DRRS)
5450  *
5451  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5453  * dynamically, based on the usage scenario. This feature is applicable
5454  * for internal panels.
5455  *
5456  * Indication that the panel supports DRRS is given by the panel EDID, which
5457  * would list multiple refresh rates for one resolution.
5458  *
5459  * DRRS is of 2 types - static and seamless.
5460  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5461  * (may appear as a blink on screen) and is used in dock-undock scenario.
5462  * Seamless DRRS involves changing RR without any visual effect to the user
5463  * and can be used during normal system usage. This is done by programming
5464  * certain registers.
5465  *
5466  * Support for static/seamless DRRS may be indicated in the VBT based on
5467  * inputs from the panel spec.
5468  *
5469  * DRRS saves power by switching to low RR based on usage scenarios.
5470  *
5471  * The implementation is based on frontbuffer tracking implementation.  When
5472  * there is a disturbance on the screen triggered by user activity or a periodic
5473  * system activity, DRRS is disabled (RR is changed to high RR).  When there is
5474  * no movement on screen, after a timeout of 1 second, a switch to low RR is
5475  * made.
5476  *
5477  * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
5478  * and intel_edp_drrs_flush() are called.
5479  *
5480  * DRRS can be further extended to support other internal panels and also
5481  * the scenario of video playback wherein RR is set based on the rate
5482  * requested by userspace.
5483  */
5484 
5485 /**
5486  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5487  * @intel_connector: eDP connector
5488  * @fixed_mode: preferred mode of panel
5489  *
5490  * This function is  called only once at driver load to initialize basic
5491  * DRRS stuff.
5492  *
5493  * Returns:
5494  * Downclock mode if panel supports it, else return NULL.
5495  * DRRS support is determined by the presence of downclock mode (apart
5496  * from VBT setting).
5497  */
5498 static struct drm_display_mode *
5499 intel_dp_drrs_init(struct intel_connector *intel_connector,
5500 		struct drm_display_mode *fixed_mode)
5501 {
5502 	struct drm_connector *connector = &intel_connector->base;
5503 	struct drm_device *dev = connector->dev;
5504 	struct drm_i915_private *dev_priv = to_i915(dev);
5505 	struct drm_display_mode *downclock_mode = NULL;
5506 
5507 	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5508 	lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE);
5509 
5510 	if (INTEL_GEN(dev_priv) <= 6) {
5511 		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5512 		return NULL;
5513 	}
5514 
5515 	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5516 		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5517 		return NULL;
5518 	}
5519 
5520 	downclock_mode = intel_find_panel_downclock
5521 					(dev, fixed_mode, connector);
5522 
5523 	if (!downclock_mode) {
5524 		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5525 		return NULL;
5526 	}
5527 
5528 	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5529 
5530 	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5531 	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5532 	return downclock_mode;
5533 }
5534 
/*
 * eDP-specific connector setup: initialize the panel power sequencer,
 * cache the DPCD and EDID, pick a fixed (and optionally downclocked)
 * panel mode, and set up the backlight. Returns false if the panel
 * looks like a ghost (no DPCD) or conflicts with LVDS; true otherwise
 * (including for non-eDP, where this is a no-op).
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum i915_pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev)) {
		WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		DRM_INFO("LVDS was detected, not registering eDP\n");

		return false;
	}

	pps_lock(intel_dp);

	intel_dp_init_panel_power_timestamps(intel_dp);
	intel_dp_pps_init(dev, intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID present but yielded no modes: mark invalid. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode) {
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
			connector->display_info.width_mm = fixed_mode->width_mm;
			connector->display_info.height_mm = fixed_mode->height_mm;
		}
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev_priv))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);

	return false;
}
5663 
/*
 * Create and wire up the DRM connector for a DP/eDP digital port:
 * select per-platform AUX vfuncs, register connector and hotplug pin,
 * init MST where supported, and run the eDP-specific init. Returns
 * false (after cleaning up the connector) on failure.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_dig_port->port;
	int type;

	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	if (HAS_DDI(dev_priv))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev_priv, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
		    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_dp_aux_init(intel_dp);

	/* VDD is released from a delayed work so it can stay on briefly
	 * across back-to-back AUX transactions. */
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		/* BXT A-step workaround: port B uses port A's HPD pin. */
		if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}
5798 
/*
 * Top-level entry point for creating a DP/eDP output on @port with
 * output register @output_reg: allocates the digital port and connector,
 * installs the per-platform enable/disable encoder hooks, registers the
 * hotplug handler and finally initializes the connector. Returns false
 * and frees everything on any failure (note the layered error unwind).
 */
bool intel_dp_init(struct drm_device *dev,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			     DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* Platform-specific enable/disable sequencing hooks. */
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_GEN(dev_priv) >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;
	intel_dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	/* CHV: port D is tied to pipe C, ports B/C to pipes A/B. */
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);
	return false;
}
5879 
5880 void intel_dp_mst_suspend(struct drm_device *dev)
5881 {
5882 	struct drm_i915_private *dev_priv = to_i915(dev);
5883 	int i;
5884 
5885 	/* disable MST */
5886 	for (i = 0; i < I915_MAX_PORTS; i++) {
5887 		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5888 
5889 		if (!intel_dig_port || !intel_dig_port->dp.can_mst)
5890 			continue;
5891 
5892 		if (intel_dig_port->dp.is_mst)
5893 			drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5894 	}
5895 }
5896 
5897 void intel_dp_mst_resume(struct drm_device *dev)
5898 {
5899 	struct drm_i915_private *dev_priv = to_i915(dev);
5900 	int i;
5901 
5902 	for (i = 0; i < I915_MAX_PORTS; i++) {
5903 		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5904 		int ret;
5905 
5906 		if (!intel_dig_port || !intel_dig_port->dp.can_mst)
5907 			continue;
5908 
5909 		ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5910 		if (ret)
5911 			intel_dp_check_mst_status(&intel_dig_port->dp);
5912 	}
5913 }
5914