xref: /dragonfly/sys/dev/drm/i915/intel_dp.c (revision 01bedb5a)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27 
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/types.h>
32 #include <linux/notifier.h>
33 #include <linux/reboot.h>
34 #include <asm/byteorder.h>
35 #include <drm/drmP.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_crtc.h>
38 #include <drm/drm_crtc_helper.h>
39 #include <drm/drm_edid.h>
40 #include "intel_drv.h"
41 #include <drm/i915_drm.h>
42 #include "i915_drv.h"
43 
44 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
45 
46 /* Compliance test status bits  */
47 #define INTEL_DP_RESOLUTION_SHIFT_MASK	0
48 #define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49 #define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
50 #define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
51 
52 struct dp_link_dpll {
53 	int clock;
54 	struct dpll dpll;
55 };
56 
57 static const struct dp_link_dpll gen4_dpll[] = {
58 	{ 162000,
59 		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
60 	{ 270000,
61 		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
62 };
63 
64 static const struct dp_link_dpll pch_dpll[] = {
65 	{ 162000,
66 		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
67 	{ 270000,
68 		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
69 };
70 
71 static const struct dp_link_dpll vlv_dpll[] = {
72 	{ 162000,
73 		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
74 	{ 270000,
75 		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
76 };
77 
78 /*
79  * CHV supports eDP 1.4, which has more link rates.
80  * Below we provide only the fixed rates and exclude the variable rates.
81  */
82 static const struct dp_link_dpll chv_dpll[] = {
83 	/*
84 	 * CHV requires programming fractional division for m2.
85 	 * m2 is stored in fixed point format using the formula below:
86 	 * (m2_int << 22) | m2_fraction
87 	 */
88 	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
89 		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
90 	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
91 		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
92 	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
93 		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
94 };
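
/*
 * Worked example of the m2 encoding above: for the 162000 entry,
 * (32 << 22) | 1677722 == 0x8000000 | 0x19999a == 0x819999a, i.e.
 * m2 is roughly 32 + 1677722/2^22 ~= 32.4.  Likewise the 270000 and
 * 540000 entries encode (27 << 22) | 0 == 0x6c00000.
 */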
95 
96 static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
97 				  324000, 432000, 540000 };
98 static const int skl_rates[] = { 162000, 216000, 270000,
99 				  324000, 432000, 540000 };
100 static const int default_rates[] = { 162000, 270000, 540000 };
101 
102 /**
103  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
104  * @intel_dp: DP struct
105  *
106  * If a CPU or PCH DP output is attached to an eDP panel, this function
107  * will return true, and false otherwise.
108  */
109 static bool is_edp(struct intel_dp *intel_dp)
110 {
111 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112 
113 	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
114 }
115 
116 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
117 {
118 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
119 
120 	return intel_dig_port->base.base.dev;
121 }
122 
123 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
124 {
125 	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
126 }
127 
128 static void intel_dp_link_down(struct intel_dp *intel_dp);
129 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
130 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
131 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
132 static void vlv_steal_power_sequencer(struct drm_device *dev,
133 				      enum i915_pipe pipe);
134 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
135 
136 static int
137 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
138 {
139 	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
140 
141 	switch (max_link_bw) {
142 	case DP_LINK_BW_1_62:
143 	case DP_LINK_BW_2_7:
144 	case DP_LINK_BW_5_4:
145 		break;
146 	default:
147 		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
148 		     max_link_bw);
149 		max_link_bw = DP_LINK_BW_1_62;
150 		break;
151 	}
152 	return max_link_bw;
153 }
154 
155 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
156 {
157 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
158 	u8 source_max, sink_max;
159 
160 	source_max = intel_dig_port->max_lanes;
161 	sink_max = intel_dp->max_sink_lane_count;
162 
163 	return min(source_max, sink_max);
164 }
165 
166 int
167 intel_dp_link_required(int pixel_clock, int bpp)
168 {
169 	/* pixel_clock is in kHz, divide bpp by 8 for bit-to-byte conversion */
170 	return DIV_ROUND_UP(pixel_clock * bpp, 8);
171 }
172 
173 int
174 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
175 {
176 	/* max_link_clock is the link symbol clock (LS_Clk) in kHz, not the
177 	 * link rate that is generally expressed in Gbps. Since 8 bits of data
178 	 * are transmitted every LS_Clk per lane, there is no need to account
179 	 * for the channel encoding that is done in the PHY layer here.
180 	 */
181 
182 	return max_link_clock * max_lanes;
183 }
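
/*
 * Worked example for the two helpers above (mode timing values are
 * illustrative): a 1920x1080@60 mode has a 148500 kHz pixel clock, so
 * at 24 bpp intel_dp_link_required(148500, 24) == 148500 * 24 / 8 ==
 * 445500.  Two lanes at HBR (LS_Clk == 270000 kHz) give
 * intel_dp_max_data_rate(270000, 2) == 540000, so the mode fits.
 * Both figures are effectively kB/s, one byte per lane per LS_Clk.
 */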
184 
185 static int
186 intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
187 {
188 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
189 	struct intel_encoder *encoder = &intel_dig_port->base;
190 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
191 	int max_dotclk = dev_priv->max_dotclk_freq;
192 	int ds_max_dotclk;
193 
194 	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
195 
196 	if (type != DP_DS_PORT_TYPE_VGA)
197 		return max_dotclk;
198 
199 	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
200 						    intel_dp->downstream_ports);
201 
202 	if (ds_max_dotclk != 0)
203 		max_dotclk = min(max_dotclk, ds_max_dotclk);
204 
205 	return max_dotclk;
206 }
207 
208 static int
209 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
210 {
211 	if (intel_dp->num_sink_rates) {
212 		*sink_rates = intel_dp->sink_rates;
213 		return intel_dp->num_sink_rates;
214 	}
215 
216 	*sink_rates = default_rates;
217 
218 	return (intel_dp->max_sink_link_bw >> 3) + 1;
219 }
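
/*
 * The (max_sink_link_bw >> 3) + 1 expression above maps the legacy
 * DPCD link bw codes onto prefix lengths of default_rates:
 * DP_LINK_BW_1_62 (0x06) -> 1, DP_LINK_BW_2_7 (0x0a) -> 2 and
 * DP_LINK_BW_5_4 (0x14) -> 3.
 */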
220 
221 static int
222 intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
223 {
224 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
225 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
226 	int size;
227 
228 	if (IS_GEN9_LP(dev_priv)) {
229 		*source_rates = bxt_rates;
230 		size = ARRAY_SIZE(bxt_rates);
231 	} else if (IS_GEN9_BC(dev_priv)) {
232 		*source_rates = skl_rates;
233 		size = ARRAY_SIZE(skl_rates);
234 	} else {
235 		*source_rates = default_rates;
236 		size = ARRAY_SIZE(default_rates);
237 	}
238 
239 	/* This depends on the fact that 5.4 is the last value in the array */
240 	if (!intel_dp_source_supports_hbr2(intel_dp))
241 		size--;
242 
243 	return size;
244 }
245 
246 static int intersect_rates(const int *source_rates, int source_len,
247 			   const int *sink_rates, int sink_len,
248 			   int *common_rates)
249 {
250 	int i = 0, j = 0, k = 0;
251 
252 	while (i < source_len && j < sink_len) {
253 		if (source_rates[i] == sink_rates[j]) {
254 			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
255 				return k;
256 			common_rates[k] = source_rates[i];
257 			++k;
258 			++i;
259 			++j;
260 		} else if (source_rates[i] < sink_rates[j]) {
261 			++i;
262 		} else {
263 			++j;
264 		}
265 	}
266 	return k;
267 }
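
/*
 * Both inputs must be sorted ascending; the merge-style walk above
 * relies on it.  For example, intersecting default_rates with
 * skl_rates yields { 162000, 270000, 540000 } (k == 3), since all
 * three values appear in both arrays.
 */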
268 
269 static int intel_dp_common_rates(struct intel_dp *intel_dp,
270 				 int *common_rates)
271 {
272 	const int *source_rates, *sink_rates;
273 	int source_len, sink_len;
274 
275 	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
276 	source_len = intel_dp_source_rates(intel_dp, &source_rates);
277 
278 	return intersect_rates(source_rates, source_len,
279 			       sink_rates, sink_len,
280 			       common_rates);
281 }
282 
283 static int intel_dp_link_rate_index(struct intel_dp *intel_dp,
284 				    int *common_rates, int link_rate)
285 {
286 	int common_len;
287 	int index;
288 
289 	common_len = intel_dp_common_rates(intel_dp, common_rates);
290 	for (index = 0; index < common_len; index++) {
291 		if (link_rate == common_rates[common_len - index - 1])
292 			return common_len - index - 1;
293 	}
294 
295 	return -1;
296 }
297 
298 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
299 					    int link_rate, uint8_t lane_count)
300 {
301 	int common_rates[DP_MAX_SUPPORTED_RATES];
302 	int link_rate_index;
303 
304 	link_rate_index = intel_dp_link_rate_index(intel_dp,
305 						   common_rates,
306 						   link_rate);
307 	if (link_rate_index > 0) {
308 		intel_dp->max_sink_link_bw = drm_dp_link_rate_to_bw_code(common_rates[link_rate_index - 1]);
309 		intel_dp->max_sink_lane_count = lane_count;
310 	} else if (lane_count > 1) {
311 		intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
312 		intel_dp->max_sink_lane_count = lane_count >> 1;
313 	} else {
314 		DRM_ERROR("Link Training Unsuccessful\n");
315 		return -1;
316 	}
317 
318 	return 0;
319 }
320 
321 static enum drm_mode_status
322 intel_dp_mode_valid(struct drm_connector *connector,
323 		    struct drm_display_mode *mode)
324 {
325 	struct intel_dp *intel_dp = intel_attached_dp(connector);
326 	struct intel_connector *intel_connector = to_intel_connector(connector);
327 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
328 	int target_clock = mode->clock;
329 	int max_rate, mode_rate, max_lanes, max_link_clock;
330 	int max_dotclk;
331 
332 	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
333 
334 	if (is_edp(intel_dp) && fixed_mode) {
335 		if (mode->hdisplay > fixed_mode->hdisplay)
336 			return MODE_PANEL;
337 
338 		if (mode->vdisplay > fixed_mode->vdisplay)
339 			return MODE_PANEL;
340 
341 		target_clock = fixed_mode->clock;
342 	}
343 
344 	max_link_clock = intel_dp_max_link_rate(intel_dp);
345 	max_lanes = intel_dp_max_lane_count(intel_dp);
346 
347 	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
348 	mode_rate = intel_dp_link_required(target_clock, 18);
349 
350 	if (mode_rate > max_rate || target_clock > max_dotclk)
351 		return MODE_CLOCK_HIGH;
352 
353 	if (mode->clock < 10000)
354 		return MODE_CLOCK_LOW;
355 
356 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
357 		return MODE_H_ILLEGAL;
358 
359 	return MODE_OK;
360 }
361 
362 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
363 {
364 	int	i;
365 	uint32_t v = 0;
366 
367 	if (src_bytes > 4)
368 		src_bytes = 4;
369 	for (i = 0; i < src_bytes; i++)
370 		v |= ((uint32_t) src[i]) << ((3-i) * 8);
371 	return v;
372 }
373 
374 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
375 {
376 	int i;
377 	if (dst_bytes > 4)
378 		dst_bytes = 4;
379 	for (i = 0; i < dst_bytes; i++)
380 		dst[i] = src >> ((3-i) * 8);
381 }
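
#if 0
/*
 * Hypothetical round-trip sketch (not built, values illustrative):
 * bytes are packed MSB-first into the 32-bit AUX data word.
 */
uint8_t buf[2] = { 0x12, 0x34 };
uint32_t v = intel_dp_pack_aux(buf, 2);	/* v == 0x12340000 */
/* intel_dp_unpack_aux(v, buf, 2) restores buf == { 0x12, 0x34 } */
#endif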
382 
383 static void
384 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
385 				    struct intel_dp *intel_dp);
386 static void
387 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
388 					      struct intel_dp *intel_dp,
389 					      bool force_disable_vdd);
390 static void
391 intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
392 
393 static void pps_lock(struct intel_dp *intel_dp)
394 {
395 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
396 	struct intel_encoder *encoder = &intel_dig_port->base;
397 	struct drm_device *dev = encoder->base.dev;
398 	struct drm_i915_private *dev_priv = to_i915(dev);
399 
400 	/*
401 	 * See vlv_power_sequencer_reset() for why we need
402 	 * a power domain reference here.
403 	 */
404 	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
405 
406 	mutex_lock(&dev_priv->pps_mutex);
407 }
408 
409 static void pps_unlock(struct intel_dp *intel_dp)
410 {
411 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
412 	struct intel_encoder *encoder = &intel_dig_port->base;
413 	struct drm_device *dev = encoder->base.dev;
414 	struct drm_i915_private *dev_priv = to_i915(dev);
415 
416 	mutex_unlock(&dev_priv->pps_mutex);
417 
418 	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
419 }
420 
421 static void
422 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
423 {
424 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
425 	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
426 	enum i915_pipe pipe = intel_dp->pps_pipe;
427 	bool pll_enabled, release_cl_override = false;
428 	enum dpio_phy phy = DPIO_PHY(pipe);
429 	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
430 	uint32_t DP;
431 
432 	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
433 		 "skipping pipe %c power sequencer kick due to port %c being active\n",
434 		 pipe_name(pipe), port_name(intel_dig_port->port)))
435 		return;
436 
437 	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
438 		      pipe_name(pipe), port_name(intel_dig_port->port));
439 
440 	/* Preserve the BIOS-computed detected bit. This is
441 	 * supposed to be read-only.
442 	 */
443 	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
444 	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
445 	DP |= DP_PORT_WIDTH(1);
446 	DP |= DP_LINK_TRAIN_PAT_1;
447 
448 	if (IS_CHERRYVIEW(dev_priv))
449 		DP |= DP_PIPE_SELECT_CHV(pipe);
450 	else if (pipe == PIPE_B)
451 		DP |= DP_PIPEB_SELECT;
452 
453 	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
454 
455 	/*
456 	 * The DPLL for the pipe must be enabled for this to work.
457 	 * So temporarily enable it if it's not already enabled.
458 	 */
459 	if (!pll_enabled) {
460 		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
461 			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
462 
463 		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
464 				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
465 			DRM_ERROR("Failed to force on pll for pipe %c!\n",
466 				  pipe_name(pipe));
467 			return;
468 		}
469 	}
470 
471 	/*
472 	 * Similar magic as in intel_dp_enable_port().
473 	 * We _must_ do this port enable + disable trick
474 	 * to make this power sequencer lock onto the port.
475 	 * Otherwise even the VDD force bit won't work.
476 	 */
477 	I915_WRITE(intel_dp->output_reg, DP);
478 	POSTING_READ(intel_dp->output_reg);
479 
480 	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
481 	POSTING_READ(intel_dp->output_reg);
482 
483 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
484 	POSTING_READ(intel_dp->output_reg);
485 
486 	if (!pll_enabled) {
487 		vlv_force_pll_off(dev_priv, pipe);
488 
489 		if (release_cl_override)
490 			chv_phy_powergate_ch(dev_priv, phy, ch, false);
491 	}
492 }
493 
494 static enum i915_pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
495 {
496 	struct intel_encoder *encoder;
497 	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
498 
499 	/*
500 	 * We don't have a power sequencer currently.
501 	 * Pick one that's not used by other ports.
502 	 */
503 	for_each_intel_encoder(&dev_priv->drm, encoder) {
504 		struct intel_dp *intel_dp;
505 
506 		if (encoder->type != INTEL_OUTPUT_DP &&
507 		    encoder->type != INTEL_OUTPUT_EDP)
508 			continue;
509 
510 		intel_dp = enc_to_intel_dp(&encoder->base);
511 
512 		if (encoder->type == INTEL_OUTPUT_EDP) {
513 			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
514 				intel_dp->active_pipe != intel_dp->pps_pipe);
515 
516 			if (intel_dp->pps_pipe != INVALID_PIPE)
517 				pipes &= ~(1 << intel_dp->pps_pipe);
518 		} else {
519 			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);
520 
521 			if (intel_dp->active_pipe != INVALID_PIPE)
522 				pipes &= ~(1 << intel_dp->active_pipe);
523 		}
524 	}
525 
526 	if (pipes == 0)
527 		return INVALID_PIPE;
528 
529 	return ffs(pipes) - 1;
530 }
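
/*
 * Example: if an eDP port already holds pipe A's power sequencer, the
 * loop above clears bit 0, leaving pipes == 0b10, so ffs(pipes) - 1
 * == 1 == PIPE_B.  If both bits get cleared, INVALID_PIPE is returned
 * instead.
 */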
531 
532 static enum i915_pipe
533 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
534 {
535 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
536 	struct drm_device *dev = intel_dig_port->base.base.dev;
537 	struct drm_i915_private *dev_priv = to_i915(dev);
538 	enum i915_pipe pipe;
539 
540 	lockdep_assert_held(&dev_priv->pps_mutex);
541 
542 	/* We should never land here with regular DP ports */
543 	WARN_ON(!is_edp(intel_dp));
544 
545 	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
546 		intel_dp->active_pipe != intel_dp->pps_pipe);
547 
548 	if (intel_dp->pps_pipe != INVALID_PIPE)
549 		return intel_dp->pps_pipe;
550 
551 	pipe = vlv_find_free_pps(dev_priv);
552 
553 	/*
554 	 * Didn't find one. This should not happen since there
555 	 * are two power sequencers and up to two eDP ports.
556 	 */
557 	if (WARN_ON(pipe == INVALID_PIPE))
558 		pipe = PIPE_A;
559 
560 	vlv_steal_power_sequencer(dev, pipe);
561 	intel_dp->pps_pipe = pipe;
562 
563 	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
564 		      pipe_name(intel_dp->pps_pipe),
565 		      port_name(intel_dig_port->port));
566 
567 	/* init power sequencer on this pipe and port */
568 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
569 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
570 
571 	/*
572 	 * Even vdd force doesn't work until we've made
573 	 * the power sequencer lock onto the port.
574 	 */
575 	vlv_power_sequencer_kick(intel_dp);
576 
577 	return intel_dp->pps_pipe;
578 }
579 
580 static int
581 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
582 {
583 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
584 	struct drm_device *dev = intel_dig_port->base.base.dev;
585 	struct drm_i915_private *dev_priv = to_i915(dev);
586 
587 	lockdep_assert_held(&dev_priv->pps_mutex);
588 
589 	/* We should never land here with regular DP ports */
590 	WARN_ON(!is_edp(intel_dp));
591 
592 	/*
593 	 * TODO: BXT has 2 PPS instances. The correct port->PPS instance
594 	 * mapping needs to be retrieved from the VBT; for now just hard-code
595 	 * to use instance #0 always.
596 	 */
597 	if (!intel_dp->pps_reset)
598 		return 0;
599 
600 	intel_dp->pps_reset = false;
601 
602 	/*
603 	 * Only the HW needs to be reprogrammed, the SW state is fixed and
604 	 * has been set up during connector init.
605 	 */
606 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
607 
608 	return 0;
609 }
610 
611 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
612 			       enum i915_pipe pipe);
613 
614 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
615 			       enum i915_pipe pipe)
616 {
617 	return I915_READ(PP_STATUS(pipe)) & PP_ON;
618 }
619 
620 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
621 				enum i915_pipe pipe)
622 {
623 	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
624 }
625 
626 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
627 			 enum i915_pipe pipe)
628 {
629 	return true;
630 }
631 
632 static enum i915_pipe
633 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
634 		     enum port port,
635 		     vlv_pipe_check pipe_check)
636 {
637 	enum i915_pipe pipe;
638 
639 	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
640 		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
641 			PANEL_PORT_SELECT_MASK;
642 
643 		if (port_sel != PANEL_PORT_SELECT_VLV(port))
644 			continue;
645 
646 		if (!pipe_check(dev_priv, pipe))
647 			continue;
648 
649 		return pipe;
650 	}
651 
652 	return INVALID_PIPE;
653 }
654 
655 static void
656 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
657 {
658 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
659 	struct drm_device *dev = intel_dig_port->base.base.dev;
660 	struct drm_i915_private *dev_priv = to_i915(dev);
661 	enum port port = intel_dig_port->port;
662 
663 	lockdep_assert_held(&dev_priv->pps_mutex);
664 
665 	/* try to find a pipe with this port selected */
666 	/* first pick one where the panel is on */
667 	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
668 						  vlv_pipe_has_pp_on);
669 	/* didn't find one? pick one where vdd is on */
670 	if (intel_dp->pps_pipe == INVALID_PIPE)
671 		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
672 							  vlv_pipe_has_vdd_on);
673 	/* didn't find one? pick one with just the correct port */
674 	if (intel_dp->pps_pipe == INVALID_PIPE)
675 		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
676 							  vlv_pipe_any);
677 
678 	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
679 	if (intel_dp->pps_pipe == INVALID_PIPE) {
680 		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
681 			      port_name(port));
682 		return;
683 	}
684 
685 	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
686 		      port_name(port), pipe_name(intel_dp->pps_pipe));
687 
688 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
689 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
690 }
691 
692 void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
693 {
694 	struct drm_device *dev = &dev_priv->drm;
695 	struct intel_encoder *encoder;
696 
697 	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
698 		    !IS_GEN9_LP(dev_priv)))
699 		return;
700 
701 	/*
702 	 * We can't grab pps_mutex here due to deadlock with power_domain
703 	 * mutex when power_domain functions are called while holding pps_mutex.
704 	 * That also means that in order to use pps_pipe the code needs to
705 	 * hold both a power domain reference and pps_mutex, and the power domain
706 	 * reference get/put must be done while _not_ holding pps_mutex.
707 	 * pps_{lock,unlock}() do these steps in the correct order, so they
708 	 * should always be used.
709 	 */
710 
711 	for_each_intel_encoder(dev, encoder) {
712 		struct intel_dp *intel_dp;
713 
714 		if (encoder->type != INTEL_OUTPUT_DP &&
715 		    encoder->type != INTEL_OUTPUT_EDP)
716 			continue;
717 
718 		intel_dp = enc_to_intel_dp(&encoder->base);
719 
720 		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
721 
722 		if (encoder->type != INTEL_OUTPUT_EDP)
723 			continue;
724 
725 		if (IS_GEN9_LP(dev_priv))
726 			intel_dp->pps_reset = true;
727 		else
728 			intel_dp->pps_pipe = INVALID_PIPE;
729 	}
730 }
731 
732 struct pps_registers {
733 	i915_reg_t pp_ctrl;
734 	i915_reg_t pp_stat;
735 	i915_reg_t pp_on;
736 	i915_reg_t pp_off;
737 	i915_reg_t pp_div;
738 };
739 
740 static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
741 				    struct intel_dp *intel_dp,
742 				    struct pps_registers *regs)
743 {
744 	int pps_idx = 0;
745 
746 	memset(regs, 0, sizeof(*regs));
747 
748 	if (IS_GEN9_LP(dev_priv))
749 		pps_idx = bxt_power_sequencer_idx(intel_dp);
750 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
751 		pps_idx = vlv_power_sequencer_pipe(intel_dp);
752 
753 	regs->pp_ctrl = PP_CONTROL(pps_idx);
754 	regs->pp_stat = PP_STATUS(pps_idx);
755 	regs->pp_on = PP_ON_DELAYS(pps_idx);
756 	regs->pp_off = PP_OFF_DELAYS(pps_idx);
757 	if (!IS_GEN9_LP(dev_priv))
758 		regs->pp_div = PP_DIVISOR(pps_idx);
759 }
760 
761 static i915_reg_t
762 _pp_ctrl_reg(struct intel_dp *intel_dp)
763 {
764 	struct pps_registers regs;
765 
766 	intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
767 				&regs);
768 
769 	return regs.pp_ctrl;
770 }
771 
772 static i915_reg_t
773 _pp_stat_reg(struct intel_dp *intel_dp)
774 {
775 	struct pps_registers regs;
776 
777 	intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
778 				&regs);
779 
780 	return regs.pp_stat;
781 }
782 
783 /* Reboot notifier handler to shut down panel power to guarantee T12 timing.
784    This function is only applicable when the panel PM state is not to be tracked. */
785 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
786 			      void *unused)
787 {
788 	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
789 						 edp_notifier);
790 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
791 	struct drm_i915_private *dev_priv = to_i915(dev);
792 
793 #if 0
794 	if (!is_edp(intel_dp) || code != SYS_RESTART)
795 #endif
796 	if (!is_edp(intel_dp))
797 		return 0;
798 
799 	pps_lock(intel_dp);
800 
801 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
802 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
803 		i915_reg_t pp_ctrl_reg, pp_div_reg;
804 		u32 pp_div;
805 
806 		pp_ctrl_reg = PP_CONTROL(pipe);
807 		pp_div_reg  = PP_DIVISOR(pipe);
808 		pp_div = I915_READ(pp_div_reg);
809 		pp_div &= PP_REFERENCE_DIVIDER_MASK;
810 
811 		/* Writing 0x1F to PP_DIV_REG sets the max cycle delay */
812 		I915_WRITE(pp_div_reg, pp_div | 0x1F);
813 		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
814 		msleep(intel_dp->panel_power_cycle_delay);
815 	}
816 
817 	pps_unlock(intel_dp);
818 
819 	return 0;
820 }
821 
822 static bool edp_have_panel_power(struct intel_dp *intel_dp)
823 {
824 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
825 	struct drm_i915_private *dev_priv = to_i915(dev);
826 
827 	lockdep_assert_held(&dev_priv->pps_mutex);
828 
829 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
830 	    intel_dp->pps_pipe == INVALID_PIPE)
831 		return false;
832 
833 	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
834 }
835 
836 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
837 {
838 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
839 	struct drm_i915_private *dev_priv = to_i915(dev);
840 
841 	lockdep_assert_held(&dev_priv->pps_mutex);
842 
843 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
844 	    intel_dp->pps_pipe == INVALID_PIPE)
845 		return false;
846 
847 	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
848 }
849 
850 static void
851 intel_dp_check_edp(struct intel_dp *intel_dp)
852 {
853 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
854 	struct drm_i915_private *dev_priv = to_i915(dev);
855 
856 	if (!is_edp(intel_dp))
857 		return;
858 
859 	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
860 		WARN(1, "eDP powered off while attempting aux channel communication.\n");
861 		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
862 			      I915_READ(_pp_stat_reg(intel_dp)),
863 			      I915_READ(_pp_ctrl_reg(intel_dp)));
864 	}
865 }
866 
867 static uint32_t
868 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
869 {
870 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
871 	struct drm_device *dev = intel_dig_port->base.base.dev;
872 	struct drm_i915_private *dev_priv = to_i915(dev);
873 	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
874 	uint32_t status;
875 	bool done;
876 
877 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
878 	if (has_aux_irq)
879 		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
880 					  msecs_to_jiffies_timeout(10));
881 	else
882 		done = wait_for(C, 10) == 0;
883 	if (!done)
884 		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
885 			  has_aux_irq);
886 #undef C
887 
888 	return status;
889 }
890 
891 static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
892 {
893 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
894 	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
895 
896 	if (index)
897 		return 0;
898 
899 	/*
900 	 * The clock divider is based on the hrawclk, and should run at
901 	 * 2MHz.  So take the hrawclk value, divide by 2000, and use that.
902 	 */
903 	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
904 }
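
/*
 * E.g. with a hypothetical 200 MHz hrawclk, rawclk_freq == 200000
 * (kHz), so the divider is DIV_ROUND_CLOSEST(200000, 2000) == 100,
 * giving the desired ~2 MHz AUX clock.
 */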
905 
906 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
907 {
908 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
909 	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
910 
911 	if (index)
912 		return 0;
913 
914 	/*
915 	 * The clock divider is based on the cdclk or PCH rawclk, and should
916 	 * run at 2MHz.  So take the cdclk or PCH rawclk value, divide by
917 	 * 2000, and use that.
918 	 */
919 	if (intel_dig_port->port == PORT_A)
920 		return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
921 	else
922 		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
923 }
924 
925 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
926 {
927 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
928 	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
929 
930 	if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
931 		/* Workaround for non-ULT HSW */
932 		switch (index) {
933 		case 0: return 63;
934 		case 1: return 72;
935 		default: return 0;
936 		}
937 	}
938 
939 	return ilk_get_aux_clock_divider(intel_dp, index);
940 }
941 
942 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
943 {
944 	/*
945 	 * SKL doesn't need us to program the AUX clock divider (the hardware
946 	 * derives the clock from CDCLK automatically). We still implement the
947 	 * get_aux_clock_divider vfunc to plug into the existing code.
948 	 */
949 	return index ? 0 : 1;
950 }
951 
952 static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
953 				     bool has_aux_irq,
954 				     int send_bytes,
955 				     uint32_t aux_clock_divider)
956 {
957 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
958 	struct drm_i915_private *dev_priv =
959 			to_i915(intel_dig_port->base.base.dev);
960 	uint32_t precharge, timeout;
961 
962 	if (IS_GEN6(dev_priv))
963 		precharge = 3;
964 	else
965 		precharge = 5;
966 
967 	if (IS_BROADWELL(dev_priv) && intel_dig_port->port == PORT_A)
968 		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
969 	else
970 		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
971 
972 	return DP_AUX_CH_CTL_SEND_BUSY |
973 	       DP_AUX_CH_CTL_DONE |
974 	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
975 	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
976 	       timeout |
977 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
978 	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
979 	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
980 	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
981 }
982 
983 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
984 				      bool has_aux_irq,
985 				      int send_bytes,
986 				      uint32_t unused)
987 {
988 	return DP_AUX_CH_CTL_SEND_BUSY |
989 	       DP_AUX_CH_CTL_DONE |
990 	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
991 	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
992 	       DP_AUX_CH_CTL_TIME_OUT_1600us |
993 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
994 	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
995 	       DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
996 	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
997 }
998 
999 static int
1000 intel_dp_aux_ch(struct intel_dp *intel_dp,
1001 		const uint8_t *send, int send_bytes,
1002 		uint8_t *recv, int recv_size)
1003 {
1004 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1005 	struct drm_i915_private *dev_priv =
1006 			to_i915(intel_dig_port->base.base.dev);
1007 	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
1008 	uint32_t aux_clock_divider;
1009 	int i, ret, recv_bytes;
1010 	uint32_t status;
1011 	int try, clock = 0;
1012 	bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
1013 	bool vdd;
1014 
1015 	pps_lock(intel_dp);
1016 
1017 	/*
1018 	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
1019 	 * In such cases we want to leave VDD enabled and it's up to the upper
1020 	 * layers to turn it off. But for e.g. i2c-dev access we need to turn
1021 	 * it on/off ourselves.
1022 	 */
1023 	vdd = edp_panel_vdd_on(intel_dp);
1024 
1025 	/* DP AUX is extremely sensitive to irq latency, hence request the
1026 	 * lowest possible wakeup latency to prevent the cpu from going into
1027 	 * deep sleep states.
1028 	 */
1029 	pm_qos_update_request(&dev_priv->pm_qos, 0);
1030 
1031 	intel_dp_check_edp(intel_dp);
1032 
1033 	/* Try to wait for any previous AUX channel activity */
1034 	for (try = 0; try < 3; try++) {
1035 		status = I915_READ_NOTRACE(ch_ctl);
1036 		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
1037 			break;
1038 		msleep(1);
1039 	}
1040 
1041 	if (try == 3) {
1042 		static u32 last_status = -1;
1043 		const u32 status = I915_READ(ch_ctl);
1044 
1045 		if (status != last_status) {
1046 			WARN(1, "dp_aux_ch not started status 0x%08x\n",
1047 			     status);
1048 			last_status = status;
1049 		}
1050 
1051 		ret = -EBUSY;
1052 		goto out;
1053 	}
1054 
1055 	/* Only 5 data registers! */
1056 	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
1057 		ret = -E2BIG;
1058 		goto out;
1059 	}
1060 
1061 	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
1062 		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
1063 							  has_aux_irq,
1064 							  send_bytes,
1065 							  aux_clock_divider);
1066 
1067 		/* Must try at least 3 times according to DP spec */
1068 		for (try = 0; try < 5; try++) {
1069 			/* Load the send data into the aux channel data registers */
1070 			for (i = 0; i < send_bytes; i += 4)
1071 				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
1072 					   intel_dp_pack_aux(send + i,
1073 							     send_bytes - i));
1074 
1075 			/* Send the command and wait for it to complete */
1076 			I915_WRITE(ch_ctl, send_ctl);
1077 
1078 			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
1079 
1080 			/* Clear done status and any errors */
1081 			I915_WRITE(ch_ctl,
1082 				   status |
1083 				   DP_AUX_CH_CTL_DONE |
1084 				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
1085 				   DP_AUX_CH_CTL_RECEIVE_ERROR);
1086 
1087 			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
1088 				continue;
1089 
1090 			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
1091 			 *   400us delay required for errors and timeouts
1092 			 *   Timeout errors from the HW already meet this
1093 			 *   requirement so skip to next iteration
1094 			 */
1095 			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1096 				usleep_range(400, 500);
1097 				continue;
1098 			}
1099 			if (status & DP_AUX_CH_CTL_DONE)
1100 				goto done;
1101 		}
1102 	}
1103 
1104 	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1105 		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
1106 		ret = -EBUSY;
1107 		goto out;
1108 	}
1109 
1110 done:
1111 	/* Check for timeout or receive error.
1112 	 * Timeouts occur when the sink is not connected.
1113 	 */
1114 	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1115 		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
1116 		ret = -EIO;
1117 		goto out;
1118 	}
1119 
1120 	/* Timeouts occur when the device isn't connected, so they're
1121 	 * "normal" -- don't fill the kernel log with these */
1122 	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
1123 		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
1124 		ret = -ETIMEDOUT;
1125 		goto out;
1126 	}
1127 
1128 	/* Unload any bytes sent back from the other side */
1129 	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
1130 		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
1131 
1132 	/*
1133 	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
1134 	 * We have no idea what happened, so we return -EBUSY so the
1135 	 * drm layer takes care of the necessary retries.
1136 	 */
1137 	if (recv_bytes == 0 || recv_bytes > 20) {
1138 		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
1139 			      recv_bytes);
1140 		/*
1141 		 * FIXME: This patch was created on top of a series that
1142 		 * organizes the retries at the drm level. There EBUSY should
1143 		 * also take care of a 1ms wait before retrying.
1144 		 * That aux retry re-org is still needed, and after it is
1145 		 * merged we can remove this sleep from here.
1146 		 */
1147 		usleep_range(1000, 1500);
1148 		ret = -EBUSY;
1149 		goto out;
1150 	}
1151 
1152 	if (recv_bytes > recv_size)
1153 		recv_bytes = recv_size;
1154 
1155 	for (i = 0; i < recv_bytes; i += 4)
1156 		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
1157 				    recv + i, recv_bytes - i);
1158 
1159 	ret = recv_bytes;
1160 out:
1161 	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
1162 
1163 	if (vdd)
1164 		edp_panel_vdd_off(intel_dp, false);
1165 
1166 	pps_unlock(intel_dp);
1167 
1168 	return ret;
1169 }
1170 
1171 #define BARE_ADDRESS_SIZE	3
1172 #define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
1173 static ssize_t
1174 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
1175 {
1176 	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
1177 	uint8_t txbuf[20], rxbuf[20];
1178 	size_t txsize, rxsize;
1179 	int ret;
1180 
1181 	txbuf[0] = (msg->request << 4) |
1182 		((msg->address >> 16) & 0xf);
1183 	txbuf[1] = (msg->address >> 8) & 0xff;
1184 	txbuf[2] = msg->address & 0xff;
1185 	txbuf[3] = msg->size - 1;
1186 
1187 	switch (msg->request & ~DP_AUX_I2C_MOT) {
1188 	case DP_AUX_NATIVE_WRITE:
1189 	case DP_AUX_I2C_WRITE:
1190 	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
1191 		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
1192 		rxsize = 2; /* 0 or 1 data bytes */
1193 
1194 		if (WARN_ON(txsize > 20))
1195 			return -E2BIG;
1196 
1197 		WARN_ON(!msg->buffer != !msg->size);
1198 
1199 		if (msg->buffer)
1200 			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
1201 
1202 		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1203 		if (ret > 0) {
1204 			msg->reply = rxbuf[0] >> 4;
1205 
1206 			if (ret > 1) {
1207 				/* Number of bytes written in a short write. */
1208 				ret = clamp_t(int, rxbuf[1], 0, msg->size);
1209 			} else {
1210 				/* Return payload size. */
1211 				ret = msg->size;
1212 			}
1213 		}
1214 		break;
1215 
1216 	case DP_AUX_NATIVE_READ:
1217 	case DP_AUX_I2C_READ:
1218 		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1219 		rxsize = msg->size + 1;
1220 
1221 		if (WARN_ON(rxsize > 20))
1222 			return -E2BIG;
1223 
1224 		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1225 		if (ret > 0) {
1226 			msg->reply = rxbuf[0] >> 4;
1227 			/*
1228 			 * Assume happy day, and copy the data. The caller is
1229 			 * expected to check msg->reply before touching it.
1230 			 *
1231 			 * Return payload size.
1232 			 */
1233 			ret--;
1234 			memcpy(msg->buffer, rxbuf + 1, ret);
1235 		}
1236 		break;
1237 
1238 	default:
1239 		ret = -EINVAL;
1240 		break;
1241 	}
1242 
1243 	return ret;
1244 }
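
/*
 * Worked example of the header packing at the top of
 * intel_dp_aux_transfer(): a native read of one byte at DPCD address
 * 0x000 (DP_DPCD_REV) yields txbuf[] == { 0x90, 0x00, 0x00, 0x00 }:
 * the DP_AUX_NATIVE_READ request (0x9) in the top nibble of byte 0,
 * the 20-bit address in the remaining nibbles, and msg->size - 1 ==
 * 0 in byte 3.
 */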
1245 
1246 static enum port intel_aux_port(struct drm_i915_private *dev_priv,
1247 				enum port port)
1248 {
1249 	const struct ddi_vbt_port_info *info =
1250 		&dev_priv->vbt.ddi_port_info[port];
1251 	enum port aux_port;
1252 
1253 	if (!info->alternate_aux_channel) {
1254 		DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
1255 			      port_name(port), port_name(port));
1256 		return port;
1257 	}
1258 
1259 	switch (info->alternate_aux_channel) {
1260 	case DP_AUX_A:
1261 		aux_port = PORT_A;
1262 		break;
1263 	case DP_AUX_B:
1264 		aux_port = PORT_B;
1265 		break;
1266 	case DP_AUX_C:
1267 		aux_port = PORT_C;
1268 		break;
1269 	case DP_AUX_D:
1270 		aux_port = PORT_D;
1271 		break;
1272 	default:
1273 		MISSING_CASE(info->alternate_aux_channel);
1274 		aux_port = PORT_A;
1275 		break;
1276 	}
1277 
1278 	DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
1279 		      port_name(aux_port), port_name(port));
1280 
1281 	return aux_port;
1282 }
1283 
1284 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1285 				  enum port port)
1286 {
1287 	switch (port) {
1288 	case PORT_B:
1289 	case PORT_C:
1290 	case PORT_D:
1291 		return DP_AUX_CH_CTL(port);
1292 	default:
1293 		MISSING_CASE(port);
1294 		return DP_AUX_CH_CTL(PORT_B);
1295 	}
1296 }
1297 
1298 static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1299 				   enum port port, int index)
1300 {
1301 	switch (port) {
1302 	case PORT_B:
1303 	case PORT_C:
1304 	case PORT_D:
1305 		return DP_AUX_CH_DATA(port, index);
1306 	default:
1307 		MISSING_CASE(port);
1308 		return DP_AUX_CH_DATA(PORT_B, index);
1309 	}
1310 }
1311 
1312 static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1313 				  enum port port)
1314 {
1315 	switch (port) {
1316 	case PORT_A:
1317 		return DP_AUX_CH_CTL(port);
1318 	case PORT_B:
1319 	case PORT_C:
1320 	case PORT_D:
1321 		return PCH_DP_AUX_CH_CTL(port);
1322 	default:
1323 		MISSING_CASE(port);
1324 		return DP_AUX_CH_CTL(PORT_A);
1325 	}
1326 }
1327 
1328 static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1329 				   enum port port, int index)
1330 {
1331 	switch (port) {
1332 	case PORT_A:
1333 		return DP_AUX_CH_DATA(port, index);
1334 	case PORT_B:
1335 	case PORT_C:
1336 	case PORT_D:
1337 		return PCH_DP_AUX_CH_DATA(port, index);
1338 	default:
1339 		MISSING_CASE(port);
1340 		return DP_AUX_CH_DATA(PORT_A, index);
1341 	}
1342 }
1343 
1344 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1345 				  enum port port)
1346 {
1347 	switch (port) {
1348 	case PORT_A:
1349 	case PORT_B:
1350 	case PORT_C:
1351 	case PORT_D:
1352 		return DP_AUX_CH_CTL(port);
1353 	default:
1354 		MISSING_CASE(port);
1355 		return DP_AUX_CH_CTL(PORT_A);
1356 	}
1357 }
1358 
1359 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1360 				   enum port port, int index)
1361 {
1362 	switch (port) {
1363 	case PORT_A:
1364 	case PORT_B:
1365 	case PORT_C:
1366 	case PORT_D:
1367 		return DP_AUX_CH_DATA(port, index);
1368 	default:
1369 		MISSING_CASE(port);
1370 		return DP_AUX_CH_DATA(PORT_A, index);
1371 	}
1372 }
1373 
1374 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1375 				    enum port port)
1376 {
1377 	if (INTEL_INFO(dev_priv)->gen >= 9)
1378 		return skl_aux_ctl_reg(dev_priv, port);
1379 	else if (HAS_PCH_SPLIT(dev_priv))
1380 		return ilk_aux_ctl_reg(dev_priv, port);
1381 	else
1382 		return g4x_aux_ctl_reg(dev_priv, port);
1383 }
1384 
1385 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1386 				     enum port port, int index)
1387 {
1388 	if (INTEL_INFO(dev_priv)->gen >= 9)
1389 		return skl_aux_data_reg(dev_priv, port, index);
1390 	else if (HAS_PCH_SPLIT(dev_priv))
1391 		return ilk_aux_data_reg(dev_priv, port, index);
1392 	else
1393 		return g4x_aux_data_reg(dev_priv, port, index);
1394 }
1395 
1396 static void intel_aux_reg_init(struct intel_dp *intel_dp)
1397 {
1398 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1399 	enum port port = intel_aux_port(dev_priv,
1400 					dp_to_dig_port(intel_dp)->port);
1401 	int i;
1402 
1403 	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1404 	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1405 		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1406 }
1407 
1408 static void
1409 intel_dp_aux_fini(struct intel_dp *intel_dp)
1410 {
1411 	kfree(intel_dp->aux.name);
1412 }
1413 
1414 static void
1415 intel_dp_aux_init(struct intel_dp *intel_dp)
1416 {
1417 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1418 	enum port port = intel_dig_port->port;
1419 
1420 	intel_aux_reg_init(intel_dp);
1421 	drm_dp_aux_init(&intel_dp->aux);
1422 
1423 	/* Failure to allocate our preferred name is not critical */
1424 	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1425 	intel_dp->aux.transfer = intel_dp_aux_transfer;
1426 }
1427 
1428 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1429 {
1430 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1431 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
1432 
1433 	if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
1434 	    IS_BROADWELL(dev_priv) || (INTEL_GEN(dev_priv) >= 9))
1435 		return true;
1436 	else
1437 		return false;
1438 }
1439 
1440 static void
1441 intel_dp_set_clock(struct intel_encoder *encoder,
1442 		   struct intel_crtc_state *pipe_config)
1443 {
1444 	struct drm_device *dev = encoder->base.dev;
1445 	struct drm_i915_private *dev_priv = to_i915(dev);
1446 	const struct dp_link_dpll *divisor = NULL;
1447 	int i, count = 0;
1448 
1449 	if (IS_G4X(dev_priv)) {
1450 		divisor = gen4_dpll;
1451 		count = ARRAY_SIZE(gen4_dpll);
1452 	} else if (HAS_PCH_SPLIT(dev_priv)) {
1453 		divisor = pch_dpll;
1454 		count = ARRAY_SIZE(pch_dpll);
1455 	} else if (IS_CHERRYVIEW(dev_priv)) {
1456 		divisor = chv_dpll;
1457 		count = ARRAY_SIZE(chv_dpll);
1458 	} else if (IS_VALLEYVIEW(dev_priv)) {
1459 		divisor = vlv_dpll;
1460 		count = ARRAY_SIZE(vlv_dpll);
1461 	}
1462 
1463 	if (divisor && count) {
1464 		for (i = 0; i < count; i++) {
1465 			if (pipe_config->port_clock == divisor[i].clock) {
1466 				pipe_config->dpll = divisor[i].dpll;
1467 				pipe_config->clock_set = true;
1468 				break;
1469 			}
1470 		}
1471 	}
1472 }
1473 
1474 static void snprintf_int_array(char *str, size_t len,
1475 			       const int *array, int nelem)
1476 {
1477 	int i;
1478 
1479 	str[0] = '\0';
1480 
1481 	for (i = 0; i < nelem; i++) {
1482 		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1483 		if (r >= len)
1484 			return;
1485 		str += r;
1486 		len -= r;
1487 	}
1488 }
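
/*
 * E.g. snprintf_int_array(str, sizeof(str), default_rates, 3) writes
 * "162000, 270000, 540000"; output is silently truncated once the
 * buffer fills up.
 */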
1489 
1490 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1491 {
1492 	const int *source_rates, *sink_rates;
1493 	int source_len, sink_len, common_len;
1494 	int common_rates[DP_MAX_SUPPORTED_RATES];
1495 	char str[128]; /* FIXME: too big for stack? */
1496 
1497 	if ((drm_debug & DRM_UT_KMS) == 0)
1498 		return;
1499 
1500 	source_len = intel_dp_source_rates(intel_dp, &source_rates);
1501 	snprintf_int_array(str, sizeof(str), source_rates, source_len);
1502 	DRM_DEBUG_KMS("source rates: %s\n", str);
1503 
1504 	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1505 	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1506 	DRM_DEBUG_KMS("sink rates: %s\n", str);
1507 
1508 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1509 	snprintf_int_array(str, sizeof(str), common_rates, common_len);
1510 	DRM_DEBUG_KMS("common rates: %s\n", str);
1511 }
1512 
1513 static int rate_to_index(int find, const int *rates)
1514 {
1515 	int i = 0;
1516 
1517 	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1518 		if (find == rates[i])
1519 			break;
1520 
1521 	return i;
1522 }
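
/*
 * Note that rate_to_index() returns DP_MAX_SUPPORTED_RATES when the
 * rate is not found, e.g. rate_to_index(270000, default_rates) == 1
 * but rate_to_index(108000, default_rates) == DP_MAX_SUPPORTED_RATES.
 */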
1523 
1524 int
1525 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1526 {
1527 	int rates[DP_MAX_SUPPORTED_RATES] = {};
1528 	int len;
1529 
1530 	len = intel_dp_common_rates(intel_dp, rates);
1531 	if (WARN_ON(len <= 0))
1532 		return 162000;
1533 
1534 	return rates[len - 1];
1535 }
1536 
1537 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1538 {
1539 	return rate_to_index(rate, intel_dp->sink_rates);
1540 }
1541 
1542 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1543 			   uint8_t *link_bw, uint8_t *rate_select)
1544 {
1545 	if (intel_dp->num_sink_rates) {
1546 		*link_bw = 0;
1547 		*rate_select =
1548 			intel_dp_rate_select(intel_dp, port_clock);
1549 	} else {
1550 		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1551 		*rate_select = 0;
1552 	}
1553 }
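
/*
 * Example: for port_clock == 270000 on a sink without a rate table,
 * this sets *link_bw == drm_dp_link_rate_to_bw_code(270000) ==
 * DP_LINK_BW_2_7 (0x0a) and *rate_select == 0.  With a sink-supplied
 * rate table (num_sink_rates != 0), *link_bw is 0 and *rate_select
 * indexes 270000 within sink_rates.
 */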
1554 
1555 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1556 				struct intel_crtc_state *pipe_config)
1557 {
1558 	int bpp, bpc;
1559 
1560 	bpp = pipe_config->pipe_bpp;
1561 	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
1562 
1563 	if (bpc > 0)
1564 		bpp = min(bpp, 3*bpc);
1565 
1566 	/* For DP Compliance we override the computed bpp for the pipe */
1567 	if (intel_dp->compliance.test_data.bpc != 0) {
1568 		pipe_config->pipe_bpp = 3 * intel_dp->compliance.test_data.bpc;
1569 		pipe_config->dither_force_disable = pipe_config->pipe_bpp == 6*3;
1570 		DRM_DEBUG_KMS("Setting pipe_bpp to %d\n",
1571 			      pipe_config->pipe_bpp);
1572 	}
1573 	return bpp;
1574 }
1575 
1576 bool
1577 intel_dp_compute_config(struct intel_encoder *encoder,
1578 			struct intel_crtc_state *pipe_config,
1579 			struct drm_connector_state *conn_state)
1580 {
1581 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1582 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1583 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1584 	enum port port = dp_to_dig_port(intel_dp)->port;
1585 	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1586 	struct intel_connector *intel_connector = intel_dp->attached_connector;
1587 	int lane_count, clock;
1588 	int min_lane_count = 1;
1589 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
1590 	/* Conveniently, the link BW constants become indices with a shift...*/
1591 	int min_clock = 0;
1592 	int max_clock;
1593 	int link_rate_index;
1594 	int bpp, mode_rate;
1595 	int link_avail, link_clock;
1596 	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1597 	int common_len;
1598 	uint8_t link_bw, rate_select;
1599 	bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
1600 					   DP_DPCD_QUIRK_LIMITED_M_N);
1601 
1602 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1603 
1604 	/* No common link rates between source and sink */
1605 	WARN_ON(common_len <= 0);
1606 
1607 	max_clock = common_len - 1;
1608 
1609 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
1610 		pipe_config->has_pch_encoder = true;
1611 
1612 	pipe_config->has_drrs = false;
1613 	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1614 
1615 	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1616 		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1617 				       adjusted_mode);
1618 
1619 		if (INTEL_GEN(dev_priv) >= 9) {
1620 			int ret;
1621 			ret = skl_update_scaler_crtc(pipe_config);
1622 			if (ret)
1623 				return ret;
1624 		}
1625 
1626 		if (HAS_GMCH_DISPLAY(dev_priv))
1627 			intel_gmch_panel_fitting(intel_crtc, pipe_config,
1628 						 intel_connector->panel.fitting_mode);
1629 		else
1630 			intel_pch_panel_fitting(intel_crtc, pipe_config,
1631 						intel_connector->panel.fitting_mode);
1632 	}
1633 
1634 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1635 		return false;
1636 
1637 	/* Use values requested by Compliance Test Request */
1638 	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1639 		link_rate_index = intel_dp_link_rate_index(intel_dp,
1640 							   common_rates,
1641 							   intel_dp->compliance.test_link_rate);
1642 		if (link_rate_index >= 0)
1643 			min_clock = max_clock = link_rate_index;
1644 		min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count;
1645 	}
1646 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
1647 		      "max bw %d pixel clock %iKHz\n",
1648 		      max_lane_count, common_rates[max_clock],
1649 		      adjusted_mode->crtc_clock);
1650 
1651 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
1652 	 * bpc in between. */
1653 	bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
1654 	if (is_edp(intel_dp)) {
1655 
1656 		/* Get bpp from VBT only for panels that don't have bpp in EDID */
1657 		if (intel_connector->base.display_info.bpc == 0 &&
1658 			(dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
1659 			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1660 				      dev_priv->vbt.edp.bpp);
1661 			bpp = dev_priv->vbt.edp.bpp;
1662 		}
1663 
1664 		/*
1665 		 * Use the maximum clock and number of lanes the eDP panel
1666 		 * advertises being capable of. The panels are generally
1667 		 * designed to support only a single clock and lane
1668 		 * configuration, and typically these values correspond to the
1669 		 * native resolution of the panel.
1670 		 */
1671 		min_lane_count = max_lane_count;
1672 		min_clock = max_clock;
1673 	}
1674 
1675 	for (; bpp >= 6*3; bpp -= 2*3) {
1676 		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1677 						   bpp);
1678 
1679 		for (clock = min_clock; clock <= max_clock; clock++) {
1680 			for (lane_count = min_lane_count;
1681 				lane_count <= max_lane_count;
1682 				lane_count <<= 1) {
1683 
1684 				link_clock = common_rates[clock];
1685 				link_avail = intel_dp_max_data_rate(link_clock,
1686 								    lane_count);
1687 
1688 				if (mode_rate <= link_avail) {
1689 					goto found;
1690 				}
1691 			}
1692 		}
1693 	}
1694 
1695 	return false;
1696 
1697 found:
1698 	if (intel_dp->color_range_auto) {
1699 		/*
1700 		 * See:
1701 		 * CEA-861-E - 5.1 Default Encoding Parameters
1702 		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1703 		 */
1704 		pipe_config->limited_color_range =
1705 			bpp != 18 &&
1706 			drm_default_rgb_quant_range(adjusted_mode) ==
1707 			HDMI_QUANTIZATION_RANGE_LIMITED;
1708 	} else {
1709 		pipe_config->limited_color_range =
1710 			intel_dp->limited_color_range;
1711 	}
1712 
1713 	pipe_config->lane_count = lane_count;
1714 
1715 	pipe_config->pipe_bpp = bpp;
1716 	pipe_config->port_clock = common_rates[clock];
1717 
1718 	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1719 			      &link_bw, &rate_select);
1720 
1721 	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1722 		      link_bw, rate_select, pipe_config->lane_count,
1723 		      pipe_config->port_clock, bpp);
1724 	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1725 		      mode_rate, link_avail);
1726 
1727 	intel_link_compute_m_n(bpp, lane_count,
1728 			       adjusted_mode->crtc_clock,
1729 			       pipe_config->port_clock,
1730 			       &pipe_config->dp_m_n,
1731 			       reduce_m_n);
1732 
1733 	if (intel_connector->panel.downclock_mode != NULL &&
1734 		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1735 			pipe_config->has_drrs = true;
1736 			intel_link_compute_m_n(bpp, lane_count,
1737 				intel_connector->panel.downclock_mode->clock,
1738 				pipe_config->port_clock,
1739 				&pipe_config->dp_m2_n2,
1740 				reduce_m_n);
1741 	}
1742 
1743 	/*
1744 	 * DPLL0 VCO may need to be adjusted to get the correct
1745 	 * clock for eDP. This will affect cdclk as well.
1746 	 */
1747 	if (is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
1748 		int vco;
1749 
1750 		switch (pipe_config->port_clock / 2) {
1751 		case 108000:
1752 		case 216000:
1753 			vco = 8640000;
1754 			break;
1755 		default:
1756 			vco = 8100000;
1757 			break;
1758 		}
1759 
1760 		to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco;
1761 	}
1762 
1763 	if (!HAS_DDI(dev_priv))
1764 		intel_dp_set_clock(encoder, pipe_config);
1765 
1766 	return true;
1767 }
1768 
1769 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1770 			      int link_rate, uint8_t lane_count,
1771 			      bool link_mst)
1772 {
1773 	intel_dp->link_rate = link_rate;
1774 	intel_dp->lane_count = lane_count;
1775 	intel_dp->link_mst = link_mst;
1776 }
1777 
1778 static void intel_dp_prepare(struct intel_encoder *encoder,
1779 			     struct intel_crtc_state *pipe_config)
1780 {
1781 	struct drm_device *dev = encoder->base.dev;
1782 	struct drm_i915_private *dev_priv = to_i915(dev);
1783 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1784 	enum port port = dp_to_dig_port(intel_dp)->port;
1785 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1786 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1787 
1788 	intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
1789 				 pipe_config->lane_count,
1790 				 intel_crtc_has_type(pipe_config,
1791 						     INTEL_OUTPUT_DP_MST));
1792 
1793 	/*
1794 	 * There are four kinds of DP registers:
1795 	 *
1796 	 *	IBX PCH
1797 	 *	SNB CPU
1798 	 *	IVB CPU
1799 	 *	CPT PCH
1800 	 *
1801 	 * IBX PCH and CPU are the same for almost everything,
1802 	 * except that the CPU DP PLL is configured in this
1803 	 * register.
1804 	 *
1805 	 * CPT PCH is quite different, having many bits moved
1806 	 * to the TRANS_DP_CTL register instead. That
1807 	 * configuration happens (oddly) in ironlake_pch_enable().
1808 	 */
1809 
1810 	/* Preserve the BIOS-computed detected bit. This is
1811 	 * supposed to be read-only.
1812 	 */
1813 	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1814 
1815 	/* Handle DP bits in common between all three register formats */
1816 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1817 	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
1818 
1819 	/* Split out the IBX/CPU vs CPT settings */
1820 
1821 	if (IS_GEN7(dev_priv) && port == PORT_A) {
1822 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1823 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1824 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1825 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1826 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1827 
1828 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1829 			intel_dp->DP |= DP_ENHANCED_FRAMING;
1830 
1831 		intel_dp->DP |= crtc->pipe << 29;
1832 	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
1833 		u32 trans_dp;
1834 
1835 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1836 
1837 		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1838 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1839 			trans_dp |= TRANS_DP_ENH_FRAMING;
1840 		else
1841 			trans_dp &= ~TRANS_DP_ENH_FRAMING;
1842 		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1843 	} else {
1844 		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
1845 			intel_dp->DP |= DP_COLOR_RANGE_16_235;
1846 
1847 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1848 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1849 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1850 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1851 		intel_dp->DP |= DP_LINK_TRAIN_OFF;
1852 
1853 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1854 			intel_dp->DP |= DP_ENHANCED_FRAMING;
1855 
1856 		if (IS_CHERRYVIEW(dev_priv))
1857 			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1858 		else if (crtc->pipe == PIPE_B)
1859 			intel_dp->DP |= DP_PIPEB_SELECT;
1860 	}
1861 }
1862 
1863 #define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1864 #define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1865 
1866 #define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1867 #define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
1868 
1869 #define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1870 #define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
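/*
 * Mask/value pairs matched against PP_STATUS by wait_panel_status():
 * "on" waits until panel power is up and the sequencer idle, "off"
 * until power is fully down, and "cycle" until the sequencer has also
 * cleared the power-cycle (T11/T12) delay.
 */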
1871 
1872 static void intel_pps_verify_state(struct drm_i915_private *dev_priv,
1873 				   struct intel_dp *intel_dp);
1874 
1875 static void wait_panel_status(struct intel_dp *intel_dp,
1876 				       u32 mask,
1877 				       u32 value)
1878 {
1879 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1880 	struct drm_i915_private *dev_priv = to_i915(dev);
1881 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
1882 
1883 	lockdep_assert_held(&dev_priv->pps_mutex);
1884 
1885 	intel_pps_verify_state(dev_priv, intel_dp);
1886 
1887 	pp_stat_reg = _pp_stat_reg(intel_dp);
1888 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1889 
1890 	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1891 			mask, value,
1892 			I915_READ(pp_stat_reg),
1893 			I915_READ(pp_ctrl_reg));
1894 
1895 	if (intel_wait_for_register(dev_priv,
1896 				    pp_stat_reg, mask, value,
1897 				    5000))
1898 		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1899 				I915_READ(pp_stat_reg),
1900 				I915_READ(pp_ctrl_reg));
1901 
1902 	DRM_DEBUG_KMS("Wait complete\n");
1903 }
1904 
1905 static void wait_panel_on(struct intel_dp *intel_dp)
1906 {
1907 	DRM_DEBUG_KMS("Wait for panel power on\n");
1908 	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1909 }
1910 
1911 static void wait_panel_off(struct intel_dp *intel_dp)
1912 {
1913 	DRM_DEBUG_KMS("Wait for panel power off time\n");
1914 	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1915 }
1916 
1917 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1918 {
1919 	ktime_t panel_power_on_time;
1920 	s64 panel_power_off_duration;
1921 
1922 	DRM_DEBUG_KMS("Wait for panel power cycle\n");
1923 
1924 	/* take the difference of current time and panel power off time
1925 	 * and then make the panel wait for t11_t12 if needed. */
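	/* e.g. with an assumed 500 ms T11+T12 and the panel off for only
	 * 300 ms so far, we sleep the remaining ~200 ms below. */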
1926 	panel_power_on_time = ktime_get_boottime();
1927 	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
1928 
1929 	/* When we disable the VDD override bit last we have to do the manual
1930 	 * wait. */
1931 	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
1932 		wait_remaining_ms_from_jiffies(jiffies,
1933 				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);
1934 
1935 	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1936 }
1937 
1938 static void wait_backlight_on(struct intel_dp *intel_dp)
1939 {
1940 	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1941 				       intel_dp->backlight_on_delay);
1942 }
1943 
1944 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1945 {
1946 	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1947 				       intel_dp->backlight_off_delay);
1948 }
1949 
1950 /* Read the current pp_control value, unlocking the register if it
1951  * is locked.
1952  */
1953 
1954 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1955 {
1956 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1957 	struct drm_i915_private *dev_priv = to_i915(dev);
1958 	u32 control;
1959 
1960 	lockdep_assert_held(&dev_priv->pps_mutex);
1961 
1962 	control = I915_READ(_pp_ctrl_reg(intel_dp));
1963 	if (WARN_ON(!HAS_DDI(dev_priv) &&
1964 		    (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
1965 		control &= ~PANEL_UNLOCK_MASK;
1966 		control |= PANEL_UNLOCK_REGS;
1967 	}
1968 	return control;
1969 }
1970 
1971 /*
1972  * Must be paired with edp_panel_vdd_off().
1973  * Must hold pps_mutex around the whole on/off sequence.
1974  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1975  */
1976 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1977 {
1978 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1979 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1980 	struct drm_i915_private *dev_priv = to_i915(dev);
1981 	u32 pp;
1982 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
1983 	bool need_to_disable = !intel_dp->want_panel_vdd;
1984 
1985 	lockdep_assert_held(&dev_priv->pps_mutex);
1986 
1987 	if (!is_edp(intel_dp))
1988 		return false;
1989 
1990 	cancel_delayed_work(&intel_dp->panel_vdd_work);
1991 	intel_dp->want_panel_vdd = true;
1992 
1993 	if (edp_have_panel_vdd(intel_dp))
1994 		return need_to_disable;
1995 
1996 	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
1997 
1998 	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1999 		      port_name(intel_dig_port->port));
2000 
2001 	if (!edp_have_panel_power(intel_dp))
2002 		wait_panel_power_cycle(intel_dp);
2003 
2004 	pp = ironlake_get_pp_control(intel_dp);
2005 	pp |= EDP_FORCE_VDD;
2006 
2007 	pp_stat_reg = _pp_stat_reg(intel_dp);
2008 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2009 
2010 	I915_WRITE(pp_ctrl_reg, pp);
2011 	POSTING_READ(pp_ctrl_reg);
2012 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2013 			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2014 	/*
2015 	 * If the panel wasn't on, delay before accessing aux channel
2016 	 */
2017 	if (!edp_have_panel_power(intel_dp)) {
2018 		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2019 			      port_name(intel_dig_port->port));
2020 		msleep(intel_dp->panel_power_up_delay);
2021 	}
2022 
2023 	return need_to_disable;
2024 }
2025 
2026 /*
2027  * Must be paired with intel_edp_panel_vdd_off() or
2028  * intel_edp_panel_off().
2029  * Nested calls to these functions are not allowed since
2030  * we drop the lock. Caller must use some higher level
2031  * locking to prevent nested calls from other threads.
2032  */
2033 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2034 {
2035 	bool vdd;
2036 
2037 	if (!is_edp(intel_dp))
2038 		return;
2039 
2040 	pps_lock(intel_dp);
2041 	vdd = edp_panel_vdd_on(intel_dp);
2042 	pps_unlock(intel_dp);
2043 
2044 	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2045 	     port_name(dp_to_dig_port(intel_dp)->port));
2046 }
2047 
2048 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
2049 {
2050 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2051 	struct drm_i915_private *dev_priv = to_i915(dev);
2052 	struct intel_digital_port *intel_dig_port =
2053 		dp_to_dig_port(intel_dp);
2054 	u32 pp;
2055 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
2056 
2057 	lockdep_assert_held(&dev_priv->pps_mutex);
2058 
2059 	WARN_ON(intel_dp->want_panel_vdd);
2060 
2061 	if (!edp_have_panel_vdd(intel_dp))
2062 		return;
2063 
2064 	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
2065 		      port_name(intel_dig_port->port));
2066 
2067 	pp = ironlake_get_pp_control(intel_dp);
2068 	pp &= ~EDP_FORCE_VDD;
2069 
2070 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2071 	pp_stat_reg = _pp_stat_reg(intel_dp);
2072 
2073 	I915_WRITE(pp_ctrl_reg, pp);
2074 	POSTING_READ(pp_ctrl_reg);
2075 
2076 	/* Make sure sequencer is idle before allowing subsequent activity */
2077 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2078 			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2079 
2080 	if ((pp & PANEL_POWER_ON) == 0)
2081 		intel_dp->panel_power_off_time = ktime_get_boottime();
2082 
2083 	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
2084 }
2085 
2086 static void edp_panel_vdd_work(struct work_struct *__work)
2087 {
2088 	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
2089 						 struct intel_dp, panel_vdd_work);
2090 
2091 	pps_lock(intel_dp);
2092 	if (!intel_dp->want_panel_vdd)
2093 		edp_panel_vdd_off_sync(intel_dp);
2094 	pps_unlock(intel_dp);
2095 }
2096 
2097 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2098 {
2099 	unsigned long delay;
2100 
2101 	/*
2102 	 * Queue the timer to fire a long time from now (relative to the power
2103 	 * down delay) to keep the panel power up across a sequence of
2104 	 * operations.
2105 	 */
2106 	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
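	/* e.g. an assumed 500 ms panel_power_cycle_delay queues the off
	 * work 2500 ms out. */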
2107 	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2108 }
2109 
2110 /*
2111  * Must be paired with edp_panel_vdd_on().
2112  * Must hold pps_mutex around the whole on/off sequence.
2113  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2114  */
2115 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2116 {
2117 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
2118 
2119 	lockdep_assert_held(&dev_priv->pps_mutex);
2120 
2121 	if (!is_edp(intel_dp))
2122 		return;
2123 
2124 	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2125 	     port_name(dp_to_dig_port(intel_dp)->port));
2126 
2127 	intel_dp->want_panel_vdd = false;
2128 
2129 	if (sync)
2130 		edp_panel_vdd_off_sync(intel_dp);
2131 	else
2132 		edp_panel_vdd_schedule_off(intel_dp);
2133 }
2134 
2135 static void edp_panel_on(struct intel_dp *intel_dp)
2136 {
2137 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2138 	struct drm_i915_private *dev_priv = to_i915(dev);
2139 	u32 pp;
2140 	i915_reg_t pp_ctrl_reg;
2141 
2142 	lockdep_assert_held(&dev_priv->pps_mutex);
2143 
2144 	if (!is_edp(intel_dp))
2145 		return;
2146 
2147 	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2148 		      port_name(dp_to_dig_port(intel_dp)->port));
2149 
2150 	if (WARN(edp_have_panel_power(intel_dp),
2151 		 "eDP port %c panel power already on\n",
2152 		 port_name(dp_to_dig_port(intel_dp)->port)))
2153 		return;
2154 
2155 	wait_panel_power_cycle(intel_dp);
2156 
2157 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2158 	pp = ironlake_get_pp_control(intel_dp);
2159 	if (IS_GEN5(dev_priv)) {
2160 		/* ILK workaround: disable reset around power sequence */
2161 		pp &= ~PANEL_POWER_RESET;
2162 		I915_WRITE(pp_ctrl_reg, pp);
2163 		POSTING_READ(pp_ctrl_reg);
2164 	}
2165 
2166 	pp |= PANEL_POWER_ON;
2167 	if (!IS_GEN5(dev_priv))
2168 		pp |= PANEL_POWER_RESET;
2169 
2170 	I915_WRITE(pp_ctrl_reg, pp);
2171 	POSTING_READ(pp_ctrl_reg);
2172 
2173 	wait_panel_on(intel_dp);
2174 	intel_dp->last_power_on = jiffies;
2175 
2176 	if (IS_GEN5(dev_priv)) {
2177 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2178 		I915_WRITE(pp_ctrl_reg, pp);
2179 		POSTING_READ(pp_ctrl_reg);
2180 	}
2181 }
2182 
2183 void intel_edp_panel_on(struct intel_dp *intel_dp)
2184 {
2185 	if (!is_edp(intel_dp))
2186 		return;
2187 
2188 	pps_lock(intel_dp);
2189 	edp_panel_on(intel_dp);
2190 	pps_unlock(intel_dp);
2191 }
2192 
2193 
2194 static void edp_panel_off(struct intel_dp *intel_dp)
2195 {
2196 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2197 	struct drm_i915_private *dev_priv = to_i915(dev);
2198 	u32 pp;
2199 	i915_reg_t pp_ctrl_reg;
2200 
2201 	lockdep_assert_held(&dev_priv->pps_mutex);
2202 
2203 	if (!is_edp(intel_dp))
2204 		return;
2205 
2206 	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2207 		      port_name(dp_to_dig_port(intel_dp)->port));
2208 
2209 	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2210 	     port_name(dp_to_dig_port(intel_dp)->port));
2211 
2212 	pp = ironlake_get_pp_control(intel_dp);
2213 	/* We need to switch off panel power _and_ force vdd, for otherwise some
2214 	 * panels get very unhappy and cease to work. */
2215 	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2216 		EDP_BLC_ENABLE);
2217 
2218 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2219 
2220 	intel_dp->want_panel_vdd = false;
2221 
2222 	I915_WRITE(pp_ctrl_reg, pp);
2223 	POSTING_READ(pp_ctrl_reg);
2224 
2225 	intel_dp->panel_power_off_time = ktime_get_boottime();
2226 	wait_panel_off(intel_dp);
2227 
2228 	/* We got a reference when we enabled the VDD. */
2229 	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
2230 }
2231 
2232 void intel_edp_panel_off(struct intel_dp *intel_dp)
2233 {
2234 	if (!is_edp(intel_dp))
2235 		return;
2236 
2237 	pps_lock(intel_dp);
2238 	edp_panel_off(intel_dp);
2239 	pps_unlock(intel_dp);
2240 }
2241 
2242 /* Enable backlight in the panel power control. */
2243 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2244 {
2245 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2246 	struct drm_device *dev = intel_dig_port->base.base.dev;
2247 	struct drm_i915_private *dev_priv = to_i915(dev);
2248 	u32 pp;
2249 	i915_reg_t pp_ctrl_reg;
2250 
2251 	/*
2252 	 * If we enable the backlight right away following a panel power
2253 	 * on, we may see slight flicker as the panel syncs with the eDP
2254 	 * link.  So delay a bit to make sure the image is solid before
2255 	 * allowing it to appear.
2256 	 */
2257 	wait_backlight_on(intel_dp);
2258 
2259 	pps_lock(intel_dp);
2260 
2261 	pp = ironlake_get_pp_control(intel_dp);
2262 	pp |= EDP_BLC_ENABLE;
2263 
2264 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2265 
2266 	I915_WRITE(pp_ctrl_reg, pp);
2267 	POSTING_READ(pp_ctrl_reg);
2268 
2269 	pps_unlock(intel_dp);
2270 }
2271 
2272 /* Enable backlight PWM and backlight PP control. */
2273 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2274 {
2275 	if (!is_edp(intel_dp))
2276 		return;
2277 
2278 	DRM_DEBUG_KMS("\n");
2279 
2280 	intel_panel_enable_backlight(intel_dp->attached_connector);
2281 	_intel_edp_backlight_on(intel_dp);
2282 }
2283 
2284 /* Disable backlight in the panel power control. */
2285 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2286 {
2287 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2288 	struct drm_i915_private *dev_priv = to_i915(dev);
2289 	u32 pp;
2290 	i915_reg_t pp_ctrl_reg;
2291 
2292 	if (!is_edp(intel_dp))
2293 		return;
2294 
2295 	pps_lock(intel_dp);
2296 
2297 	pp = ironlake_get_pp_control(intel_dp);
2298 	pp &= ~EDP_BLC_ENABLE;
2299 
2300 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2301 
2302 	I915_WRITE(pp_ctrl_reg, pp);
2303 	POSTING_READ(pp_ctrl_reg);
2304 
2305 	pps_unlock(intel_dp);
2306 
2307 	intel_dp->last_backlight_off = jiffies;
2308 	edp_wait_backlight_off(intel_dp);
2309 }
2310 
2311 /* Disable backlight PP control and backlight PWM. */
2312 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2313 {
2314 	if (!is_edp(intel_dp))
2315 		return;
2316 
2317 	DRM_DEBUG_KMS("\n");
2318 
2319 	_intel_edp_backlight_off(intel_dp);
2320 	intel_panel_disable_backlight(intel_dp->attached_connector);
2321 }
2322 
2323 /*
2324  * Hook for controlling the panel power control backlight through the bl_power
2325  * sysfs attribute. Take care to handle multiple calls.
2326  */
2327 static void intel_edp_backlight_power(struct intel_connector *connector,
2328 				      bool enable)
2329 {
2330 	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2331 	bool is_enabled;
2332 
2333 	pps_lock(intel_dp);
2334 	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2335 	pps_unlock(intel_dp);
2336 
2337 	if (is_enabled == enable)
2338 		return;
2339 
2340 	DRM_DEBUG_KMS("panel power control backlight %s\n",
2341 		      enable ? "enable" : "disable");
2342 
2343 	if (enable)
2344 		_intel_edp_backlight_on(intel_dp);
2345 	else
2346 		_intel_edp_backlight_off(intel_dp);
2347 }
2348 
2349 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2350 {
2351 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2352 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2353 	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2354 
2355 	I915_STATE_WARN(cur_state != state,
2356 			"DP port %c state assertion failure (expected %s, current %s)\n",
2357 			port_name(dig_port->port),
2358 			onoff(state), onoff(cur_state));
2359 }
2360 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2361 
2362 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2363 {
2364 	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2365 
2366 	I915_STATE_WARN(cur_state != state,
2367 			"eDP PLL state assertion failure (expected %s, current %s)\n",
2368 			onoff(state), onoff(cur_state));
2369 }
2370 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2371 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2372 
2373 static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
2374 				struct intel_crtc_state *pipe_config)
2375 {
2376 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2377 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2378 
2379 	assert_pipe_disabled(dev_priv, crtc->pipe);
2380 	assert_dp_port_disabled(intel_dp);
2381 	assert_edp_pll_disabled(dev_priv);
2382 
2383 	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2384 		      pipe_config->port_clock);
2385 
2386 	intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2387 
2388 	if (pipe_config->port_clock == 162000)
2389 		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2390 	else
2391 		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2392 
2393 	I915_WRITE(DP_A, intel_dp->DP);
2394 	POSTING_READ(DP_A);
2395 	udelay(500);
2396 
2397 	/*
2398 	 * [DevILK] Work around required when enabling DP PLL
2399 	 * while a pipe is enabled going to FDI:
2400 	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
2401 	 * 2. Program DP PLL enable
2402 	 */
2403 	if (IS_GEN5(dev_priv))
2404 		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
2405 
2406 	intel_dp->DP |= DP_PLL_ENABLE;
2407 
2408 	I915_WRITE(DP_A, intel_dp->DP);
2409 	POSTING_READ(DP_A);
2410 	udelay(200);
2411 }
2412 
2413 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2414 {
2415 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2416 	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2417 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2418 
2419 	assert_pipe_disabled(dev_priv, crtc->pipe);
2420 	assert_dp_port_disabled(intel_dp);
2421 	assert_edp_pll_enabled(dev_priv);
2422 
2423 	DRM_DEBUG_KMS("disabling eDP PLL\n");
2424 
2425 	intel_dp->DP &= ~DP_PLL_ENABLE;
2426 
2427 	I915_WRITE(DP_A, intel_dp->DP);
2428 	POSTING_READ(DP_A);
2429 	udelay(200);
2430 }
2431 
2432 /* If the sink supports it, try to set the power state appropriately */
2433 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2434 {
2435 	int ret, i;
2436 
2437 	/* Should have a valid DPCD by this point */
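	/* DP_SET_POWER was only added in DPCD 1.1, so bail on older sinks. */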
2438 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2439 		return;
2440 
2441 	if (mode != DRM_MODE_DPMS_ON) {
2442 		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2443 					 DP_SET_POWER_D3);
2444 	} else {
2445 		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
2446 
2447 		/*
2448 		 * When turning on, we may need to retry a few times, sleeping
2449 		 * 1 ms between attempts, to give the sink time to wake up.
2450 		 */
2451 		for (i = 0; i < 3; i++) {
2452 			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2453 						 DP_SET_POWER_D0);
2454 			if (ret == 1)
2455 				break;
2456 			msleep(1);
2457 		}
2458 
2459 		if (ret == 1 && lspcon->active)
2460 			lspcon_wait_pcon_mode(lspcon);
2461 	}
2462 
2463 	if (ret != 1)
2464 		DRM_DEBUG_KMS("failed to %s sink power state\n",
2465 			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2466 }
2467 
2468 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2469 				  enum i915_pipe *pipe)
2470 {
2471 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2472 	enum port port = dp_to_dig_port(intel_dp)->port;
2473 	struct drm_device *dev = encoder->base.dev;
2474 	struct drm_i915_private *dev_priv = to_i915(dev);
2475 	u32 tmp;
2476 	bool ret;
2477 
2478 	if (!intel_display_power_get_if_enabled(dev_priv,
2479 						encoder->power_domain))
2480 		return false;
2481 
2482 	ret = false;
2483 
2484 	tmp = I915_READ(intel_dp->output_reg);
2485 
2486 	if (!(tmp & DP_PORT_EN))
2487 		goto out;
2488 
2489 	if (IS_GEN7(dev_priv) && port == PORT_A) {
2490 		*pipe = PORT_TO_PIPE_CPT(tmp);
2491 	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2492 		enum i915_pipe p;
2493 
2494 		for_each_pipe(dev_priv, p) {
2495 			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2496 			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2497 				*pipe = p;
2498 				ret = true;
2499 
2500 				goto out;
2501 			}
2502 		}
2503 
2504 		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2505 			      i915_mmio_reg_offset(intel_dp->output_reg));
2506 	} else if (IS_CHERRYVIEW(dev_priv)) {
2507 		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
2508 	} else {
2509 		*pipe = PORT_TO_PIPE(tmp);
2510 	}
2511 
2512 	ret = true;
2513 
2514 out:
2515 	intel_display_power_put(dev_priv, encoder->power_domain);
2516 
2517 	return ret;
2518 }
2519 
2520 static void intel_dp_get_config(struct intel_encoder *encoder,
2521 				struct intel_crtc_state *pipe_config)
2522 {
2523 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2524 	u32 tmp, flags = 0;
2525 	struct drm_device *dev = encoder->base.dev;
2526 	struct drm_i915_private *dev_priv = to_i915(dev);
2527 	enum port port = dp_to_dig_port(intel_dp)->port;
2528 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2529 
2530 	tmp = I915_READ(intel_dp->output_reg);
2531 
2532 	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2533 
2534 	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2535 		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2536 
2537 		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2538 			flags |= DRM_MODE_FLAG_PHSYNC;
2539 		else
2540 			flags |= DRM_MODE_FLAG_NHSYNC;
2541 
2542 		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2543 			flags |= DRM_MODE_FLAG_PVSYNC;
2544 		else
2545 			flags |= DRM_MODE_FLAG_NVSYNC;
2546 	} else {
2547 		if (tmp & DP_SYNC_HS_HIGH)
2548 			flags |= DRM_MODE_FLAG_PHSYNC;
2549 		else
2550 			flags |= DRM_MODE_FLAG_NHSYNC;
2551 
2552 		if (tmp & DP_SYNC_VS_HIGH)
2553 			flags |= DRM_MODE_FLAG_PVSYNC;
2554 		else
2555 			flags |= DRM_MODE_FLAG_NVSYNC;
2556 	}
2557 
2558 	pipe_config->base.adjusted_mode.flags |= flags;
2559 
2560 	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
2561 		pipe_config->limited_color_range = true;
2562 
2563 	pipe_config->lane_count =
2564 		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2565 
2566 	intel_dp_get_m_n(crtc, pipe_config);
2567 
2568 	if (port == PORT_A) {
2569 		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
2570 			pipe_config->port_clock = 162000;
2571 		else
2572 			pipe_config->port_clock = 270000;
2573 	}
2574 
2575 	pipe_config->base.adjusted_mode.crtc_clock =
2576 		intel_dotclock_calculate(pipe_config->port_clock,
2577 					 &pipe_config->dp_m_n);
2578 
2579 	if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
2580 	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
2581 		/*
2582 		 * This is a big fat ugly hack.
2583 		 *
2584 		 * Some machines in UEFI boot mode provide us a VBT that has 18
2585 		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2586 		 * unknown we fail to light up. Yet the same BIOS boots up with
2587 		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2588 		 * max, not what it tells us to use.
2589 		 *
2590 		 * Note: This will still be broken if the eDP panel is not lit
2591 		 * up by the BIOS, and thus we can't get the mode at module
2592 		 * load.
2593 		 */
2594 		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2595 			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
2596 		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
2597 	}
2598 }
2599 
2600 static void intel_disable_dp(struct intel_encoder *encoder,
2601 			     struct intel_crtc_state *old_crtc_state,
2602 			     struct drm_connector_state *old_conn_state)
2603 {
2604 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2605 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2606 
2607 	if (old_crtc_state->has_audio)
2608 		intel_audio_codec_disable(encoder);
2609 
2610 	if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv))
2611 		intel_psr_disable(intel_dp);
2612 
2613 	/* Make sure the panel is off before trying to change the mode. But also
2614 	 * ensure that we have vdd while we switch off the panel. */
2615 	intel_edp_panel_vdd_on(intel_dp);
2616 	intel_edp_backlight_off(intel_dp);
2617 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2618 	intel_edp_panel_off(intel_dp);
2619 
2620 	/* disable the port before the pipe on g4x */
2621 	if (INTEL_GEN(dev_priv) < 5)
2622 		intel_dp_link_down(intel_dp);
2623 }
2624 
2625 static void ilk_post_disable_dp(struct intel_encoder *encoder,
2626 				struct intel_crtc_state *old_crtc_state,
2627 				struct drm_connector_state *old_conn_state)
2628 {
2629 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2630 	enum port port = dp_to_dig_port(intel_dp)->port;
2631 
2632 	intel_dp_link_down(intel_dp);
2633 
2634 	/* Only ilk+ has port A */
2635 	if (port == PORT_A)
2636 		ironlake_edp_pll_off(intel_dp);
2637 }
2638 
2639 static void vlv_post_disable_dp(struct intel_encoder *encoder,
2640 				struct intel_crtc_state *old_crtc_state,
2641 				struct drm_connector_state *old_conn_state)
2642 {
2643 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2644 
2645 	intel_dp_link_down(intel_dp);
2646 }
2647 
2648 static void chv_post_disable_dp(struct intel_encoder *encoder,
2649 				struct intel_crtc_state *old_crtc_state,
2650 				struct drm_connector_state *old_conn_state)
2651 {
2652 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2653 	struct drm_device *dev = encoder->base.dev;
2654 	struct drm_i915_private *dev_priv = to_i915(dev);
2655 
2656 	intel_dp_link_down(intel_dp);
2657 
2658 	mutex_lock(&dev_priv->sb_lock);
2659 
2660 	/* Assert data lane reset */
2661 	chv_data_lane_soft_reset(encoder, true);
2662 
2663 	mutex_unlock(&dev_priv->sb_lock);
2664 }
2665 
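/*
 * Map the requested training pattern onto the port register image.
 * Three layouts exist: DDI platforms program DP_TP_CTL, IVB port A and
 * CPT PCH ports use the _CPT link-train bits, and everything older
 * (g4x/VLV/CHV) uses the original DP register bits.
 */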
2666 static void
2667 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2668 			 uint32_t *DP,
2669 			 uint8_t dp_train_pat)
2670 {
2671 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2672 	struct drm_device *dev = intel_dig_port->base.base.dev;
2673 	struct drm_i915_private *dev_priv = to_i915(dev);
2674 	enum port port = intel_dig_port->port;
2675 
2676 	if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
2677 		DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
2678 			      dp_train_pat & DP_TRAINING_PATTERN_MASK);
2679 
2680 	if (HAS_DDI(dev_priv)) {
2681 		uint32_t temp = I915_READ(DP_TP_CTL(port));
2682 
2683 		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2684 			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2685 		else
2686 			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2687 
2688 		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2689 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2690 		case DP_TRAINING_PATTERN_DISABLE:
2691 			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2692 
2693 			break;
2694 		case DP_TRAINING_PATTERN_1:
2695 			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2696 			break;
2697 		case DP_TRAINING_PATTERN_2:
2698 			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2699 			break;
2700 		case DP_TRAINING_PATTERN_3:
2701 			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2702 			break;
2703 		}
2704 		I915_WRITE(DP_TP_CTL(port), temp);
2705 
2706 	} else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
2707 		   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
2708 		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
2709 
2710 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2711 		case DP_TRAINING_PATTERN_DISABLE:
2712 			*DP |= DP_LINK_TRAIN_OFF_CPT;
2713 			break;
2714 		case DP_TRAINING_PATTERN_1:
2715 			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
2716 			break;
2717 		case DP_TRAINING_PATTERN_2:
2718 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2719 			break;
2720 		case DP_TRAINING_PATTERN_3:
2721 			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
2722 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2723 			break;
2724 		}
2725 
2726 	} else {
2727 		if (IS_CHERRYVIEW(dev_priv))
2728 			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
2729 		else
2730 			*DP &= ~DP_LINK_TRAIN_MASK;
2731 
2732 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2733 		case DP_TRAINING_PATTERN_DISABLE:
2734 			*DP |= DP_LINK_TRAIN_OFF;
2735 			break;
2736 		case DP_TRAINING_PATTERN_1:
2737 			*DP |= DP_LINK_TRAIN_PAT_1;
2738 			break;
2739 		case DP_TRAINING_PATTERN_2:
2740 			*DP |= DP_LINK_TRAIN_PAT_2;
2741 			break;
2742 		case DP_TRAINING_PATTERN_3:
2743 			if (IS_CHERRYVIEW(dev_priv)) {
2744 				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
2745 			} else {
2746 				DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
2747 				*DP |= DP_LINK_TRAIN_PAT_2;
2748 			}
2749 			break;
2750 		}
2751 	}
2752 }
2753 
2754 static void intel_dp_enable_port(struct intel_dp *intel_dp,
2755 				 struct intel_crtc_state *old_crtc_state)
2756 {
2757 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2758 	struct drm_i915_private *dev_priv = to_i915(dev);
2759 
2760 	/* enable with pattern 1 (as per spec) */
2761 
2762 	intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
2763 
2764 	/*
2765 	 * Magic for VLV/CHV. We _must_ first set up the register
2766 	 * without actually enabling the port, and then do another
2767 	 * write to enable the port. Otherwise link training will
2768 	 * fail when the power sequencer is freshly used for this port.
2769 	 */
2770 	intel_dp->DP |= DP_PORT_EN;
2771 	if (old_crtc_state->has_audio)
2772 		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
2773 
2774 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2775 	POSTING_READ(intel_dp->output_reg);
2776 }
2777 
2778 static void intel_enable_dp(struct intel_encoder *encoder,
2779 			    struct intel_crtc_state *pipe_config,
2780 			    struct drm_connector_state *conn_state)
2781 {
2782 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2783 	struct drm_device *dev = encoder->base.dev;
2784 	struct drm_i915_private *dev_priv = to_i915(dev);
2785 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2786 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2787 	enum i915_pipe pipe = crtc->pipe;
2788 
2789 	if (WARN_ON(dp_reg & DP_PORT_EN))
2790 		return;
2791 
2792 	pps_lock(intel_dp);
2793 
2794 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2795 		vlv_init_panel_power_sequencer(intel_dp);
2796 
2797 	intel_dp_enable_port(intel_dp, pipe_config);
2798 
2799 	edp_panel_vdd_on(intel_dp);
2800 	edp_panel_on(intel_dp);
2801 	edp_panel_vdd_off(intel_dp, true);
2802 
2803 	pps_unlock(intel_dp);
2804 
2805 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2806 		unsigned int lane_mask = 0x0;
2807 
2808 		if (IS_CHERRYVIEW(dev_priv))
2809 			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
2810 
2811 		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2812 				    lane_mask);
2813 	}
2814 
2815 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2816 	intel_dp_start_link_train(intel_dp);
2817 	intel_dp_stop_link_train(intel_dp);
2818 
2819 	if (pipe_config->has_audio) {
2820 		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2821 				 pipe_name(pipe));
2822 		intel_audio_codec_enable(encoder, pipe_config, conn_state);
2823 	}
2824 }
2825 
2826 static void g4x_enable_dp(struct intel_encoder *encoder,
2827 			  struct intel_crtc_state *pipe_config,
2828 			  struct drm_connector_state *conn_state)
2829 {
2830 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2831 
2832 	intel_enable_dp(encoder, pipe_config, conn_state);
2833 	intel_edp_backlight_on(intel_dp);
2834 }
2835 
2836 static void vlv_enable_dp(struct intel_encoder *encoder,
2837 			  struct intel_crtc_state *pipe_config,
2838 			  struct drm_connector_state *conn_state)
2839 {
2840 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2841 
2842 	intel_edp_backlight_on(intel_dp);
2843 	intel_psr_enable(intel_dp);
2844 }
2845 
2846 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
2847 			      struct intel_crtc_state *pipe_config,
2848 			      struct drm_connector_state *conn_state)
2849 {
2850 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2851 	enum port port = dp_to_dig_port(intel_dp)->port;
2852 
2853 	intel_dp_prepare(encoder, pipe_config);
2854 
2855 	/* Only ilk+ has port A */
2856 	if (port == PORT_A)
2857 		ironlake_edp_pll_on(intel_dp, pipe_config);
2858 }
2859 
2860 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2861 {
2862 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2863 	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
2864 	enum i915_pipe pipe = intel_dp->pps_pipe;
2865 	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
2866 
2867 	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
2868 
2869 	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2870 		return;
2871 
2872 	edp_panel_vdd_off_sync(intel_dp);
2873 
2874 	/*
2875 	 * VLV seems to get confused when multiple power sequencers
2876 	 * have the same port selected (even if only one has power/vdd
2877 	 * enabled). The failure manifests as vlv_wait_port_ready()
2878 	 * failing. CHV on the other hand doesn't seem to mind having the
2879 	 * same port selected in multiple power sequencers, but let's
2880 	 * always clear the port select when logically disconnecting a
2881 	 * power sequencer
2882 	 */
2883 	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2884 		      pipe_name(pipe), port_name(intel_dig_port->port));
2885 	I915_WRITE(pp_on_reg, 0);
2886 	POSTING_READ(pp_on_reg);
2887 
2888 	intel_dp->pps_pipe = INVALID_PIPE;
2889 }
2890 
2891 static void vlv_steal_power_sequencer(struct drm_device *dev,
2892 				      enum i915_pipe pipe)
2893 {
2894 	struct drm_i915_private *dev_priv = to_i915(dev);
2895 	struct intel_encoder *encoder;
2896 
2897 	lockdep_assert_held(&dev_priv->pps_mutex);
2898 
2899 	for_each_intel_encoder(dev, encoder) {
2900 		struct intel_dp *intel_dp;
2901 		enum port port;
2902 
2903 		if (encoder->type != INTEL_OUTPUT_DP &&
2904 		    encoder->type != INTEL_OUTPUT_EDP)
2905 			continue;
2906 
2907 		intel_dp = enc_to_intel_dp(&encoder->base);
2908 		port = dp_to_dig_port(intel_dp)->port;
2909 
2910 		WARN(intel_dp->active_pipe == pipe,
2911 		     "stealing pipe %c power sequencer from active (e)DP port %c\n",
2912 		     pipe_name(pipe), port_name(port));
2913 
2914 		if (intel_dp->pps_pipe != pipe)
2915 			continue;
2916 
2917 		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2918 			      pipe_name(pipe), port_name(port));
2919 
2920 		/* make sure vdd is off before we steal it */
2921 		vlv_detach_power_sequencer(intel_dp);
2922 	}
2923 }
2924 
2925 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2926 {
2927 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2928 	struct intel_encoder *encoder = &intel_dig_port->base;
2929 	struct drm_device *dev = encoder->base.dev;
2930 	struct drm_i915_private *dev_priv = to_i915(dev);
2931 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2932 
2933 	lockdep_assert_held(&dev_priv->pps_mutex);
2934 
2935 	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
2936 
2937 	if (intel_dp->pps_pipe != INVALID_PIPE &&
2938 	    intel_dp->pps_pipe != crtc->pipe) {
2939 		/*
2940 		 * If another power sequencer was being used on this
2941 		 * port previously make sure to turn off vdd there while
2942 		 * we still have control of it.
2943 		 */
2944 		vlv_detach_power_sequencer(intel_dp);
2945 	}
2946 
2947 	/*
2948 	 * We may be stealing the power
2949 	 * sequencer from another port.
2950 	 */
2951 	vlv_steal_power_sequencer(dev, crtc->pipe);
2952 
2953 	intel_dp->active_pipe = crtc->pipe;
2954 
2955 	if (!is_edp(intel_dp))
2956 		return;
2957 
2958 	/* now it's all ours */
2959 	intel_dp->pps_pipe = crtc->pipe;
2960 
2961 	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2962 		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2963 
2964 	/* init power sequencer on this pipe and port */
2965 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
2966 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
2967 }
2968 
2969 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
2970 			      struct intel_crtc_state *pipe_config,
2971 			      struct drm_connector_state *conn_state)
2972 {
2973 	vlv_phy_pre_encoder_enable(encoder);
2974 
2975 	intel_enable_dp(encoder, pipe_config, conn_state);
2976 }
2977 
2978 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
2979 				  struct intel_crtc_state *pipe_config,
2980 				  struct drm_connector_state *conn_state)
2981 {
2982 	intel_dp_prepare(encoder, pipe_config);
2983 
2984 	vlv_phy_pre_pll_enable(encoder);
2985 }
2986 
2987 static void chv_pre_enable_dp(struct intel_encoder *encoder,
2988 			      struct intel_crtc_state *pipe_config,
2989 			      struct drm_connector_state *conn_state)
2990 {
2991 	chv_phy_pre_encoder_enable(encoder);
2992 
2993 	intel_enable_dp(encoder, pipe_config, conn_state);
2994 
2995 	/* Second common lane will stay alive on its own now */
2996 	chv_phy_release_cl2_override(encoder);
2997 }
2998 
2999 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
3000 				  struct intel_crtc_state *pipe_config,
3001 				  struct drm_connector_state *conn_state)
3002 {
3003 	intel_dp_prepare(encoder, pipe_config);
3004 
3005 	chv_phy_pre_pll_enable(encoder);
3006 }
3007 
3008 static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
3009 				    struct intel_crtc_state *pipe_config,
3010 				    struct drm_connector_state *conn_state)
3011 {
3012 	chv_phy_post_pll_disable(encoder);
3013 }
3014 
3015 /*
3016  * Fetch AUX CH registers 0x202 - 0x207 which contain
3017  * link status information
3018  */
3019 bool
3020 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3021 {
3022 	return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3023 				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3024 }
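/*
 * A typical caller (sketch) pairs this with the DRM helpers before
 * trusting the link, e.g.:
 *
 *	uint8_t link_status[DP_LINK_STATUS_SIZE];
 *
 *	if (intel_dp_get_link_status(intel_dp, link_status) &&
 *	    drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))
 *		return;		(link still good, skip retraining)
 */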
3025 
3026 static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
3027 {
3028 	uint8_t psr_caps = 0;
3029 
3030 	drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps);
3031 	return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
3032 }
3033 
3034 static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
3035 {
3036 	uint8_t dprx = 0;
3037 
3038 	drm_dp_dpcd_readb(&intel_dp->aux,
3039 			DP_DPRX_FEATURE_ENUMERATION_LIST,
3040 			&dprx);
3041 	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
3042 }
3043 
3044 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
3045 {
3046 	uint8_t alpm_caps = 0;
3047 
3048 	drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &alpm_caps);
3049 	return alpm_caps & DP_ALPM_CAP;
3050 }
3051 
3052 /* These are source-specific values. */
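/*
 * Note the DP-mandated trade-off in the tables below: the higher the
 * voltage swing, the less pre-emphasis headroom remains, down to none
 * at the maximum swing level.
 */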
3053 uint8_t
3054 intel_dp_voltage_max(struct intel_dp *intel_dp)
3055 {
3056 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
3057 	enum port port = dp_to_dig_port(intel_dp)->port;
3058 
3059 	if (IS_GEN9_LP(dev_priv))
3060 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3061 	else if (INTEL_GEN(dev_priv) >= 9) {
3062 		struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3063 		return intel_ddi_dp_voltage_max(encoder);
3064 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3065 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3066 	else if (IS_GEN7(dev_priv) && port == PORT_A)
3067 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3068 	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3069 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3070 	else
3071 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3072 }
3073 
3074 uint8_t
3075 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3076 {
3077 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
3078 	enum port port = dp_to_dig_port(intel_dp)->port;
3079 
3080 	if (INTEL_GEN(dev_priv) >= 9) {
3081 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3082 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3083 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3084 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3085 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3086 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3087 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3088 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3089 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3090 		default:
3091 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3092 		}
3093 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3094 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3095 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3096 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3097 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3098 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3099 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3100 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3101 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3102 		default:
3103 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3104 		}
3105 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3106 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3107 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3108 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3109 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3110 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3111 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3112 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3113 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3114 		default:
3115 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3116 		}
3117 	} else if (IS_GEN7(dev_priv) && port == PORT_A) {
3118 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3119 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3120 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3121 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3122 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3123 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3124 		default:
3125 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3126 		}
3127 	} else {
3128 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3129 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3130 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3131 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3132 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3133 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3134 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3135 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3136 		default:
3137 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3138 		}
3139 	}
3140 }
3141 
3142 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3143 {
3144 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3145 	unsigned long demph_reg_value, preemph_reg_value,
3146 		uniqtranscale_reg_value;
3147 	uint8_t train_set = intel_dp->train_set[0];
3148 
3149 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3150 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3151 		preemph_reg_value = 0x0004000;
3152 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3153 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3154 			demph_reg_value = 0x2B405555;
3155 			uniqtranscale_reg_value = 0x552AB83A;
3156 			break;
3157 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3158 			demph_reg_value = 0x2B404040;
3159 			uniqtranscale_reg_value = 0x5548B83A;
3160 			break;
3161 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3162 			demph_reg_value = 0x2B245555;
3163 			uniqtranscale_reg_value = 0x5560B83A;
3164 			break;
3165 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3166 			demph_reg_value = 0x2B405555;
3167 			uniqtranscale_reg_value = 0x5598DA3A;
3168 			break;
3169 		default:
3170 			return 0;
3171 		}
3172 		break;
3173 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3174 		preemph_reg_value = 0x0002000;
3175 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3176 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3177 			demph_reg_value = 0x2B404040;
3178 			uniqtranscale_reg_value = 0x5552B83A;
3179 			break;
3180 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3181 			demph_reg_value = 0x2B404848;
3182 			uniqtranscale_reg_value = 0x5580B83A;
3183 			break;
3184 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3185 			demph_reg_value = 0x2B404040;
3186 			uniqtranscale_reg_value = 0x55ADDA3A;
3187 			break;
3188 		default:
3189 			return 0;
3190 		}
3191 		break;
3192 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3193 		preemph_reg_value = 0x0000000;
3194 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3195 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3196 			demph_reg_value = 0x2B305555;
3197 			uniqtranscale_reg_value = 0x5570B83A;
3198 			break;
3199 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3200 			demph_reg_value = 0x2B2B4040;
3201 			uniqtranscale_reg_value = 0x55ADDA3A;
3202 			break;
3203 		default:
3204 			return 0;
3205 		}
3206 		break;
3207 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3208 		preemph_reg_value = 0x0006000;
3209 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3210 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3211 			demph_reg_value = 0x1B405555;
3212 			uniqtranscale_reg_value = 0x55ADDA3A;
3213 			break;
3214 		default:
3215 			return 0;
3216 		}
3217 		break;
3218 	default:
3219 		return 0;
3220 	}
3221 
3222 	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3223 				 uniqtranscale_reg_value, 0);
3224 
3225 	return 0;
3226 }
3227 
3228 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3229 {
3230 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3231 	u32 deemph_reg_value, margin_reg_value;
3232 	bool uniq_trans_scale = false;
3233 	uint8_t train_set = intel_dp->train_set[0];
3234 
3235 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3236 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3237 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3238 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3239 			deemph_reg_value = 128;
3240 			margin_reg_value = 52;
3241 			break;
3242 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3243 			deemph_reg_value = 128;
3244 			margin_reg_value = 77;
3245 			break;
3246 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3247 			deemph_reg_value = 128;
3248 			margin_reg_value = 102;
3249 			break;
3250 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3251 			deemph_reg_value = 128;
3252 			margin_reg_value = 154;
3253 			uniq_trans_scale = true;
3254 			break;
3255 		default:
3256 			return 0;
3257 		}
3258 		break;
3259 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3260 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3261 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3262 			deemph_reg_value = 85;
3263 			margin_reg_value = 78;
3264 			break;
3265 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3266 			deemph_reg_value = 85;
3267 			margin_reg_value = 116;
3268 			break;
3269 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3270 			deemph_reg_value = 85;
3271 			margin_reg_value = 154;
3272 			break;
3273 		default:
3274 			return 0;
3275 		}
3276 		break;
3277 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3278 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3279 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3280 			deemph_reg_value = 64;
3281 			margin_reg_value = 104;
3282 			break;
3283 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3284 			deemph_reg_value = 64;
3285 			margin_reg_value = 154;
3286 			break;
3287 		default:
3288 			return 0;
3289 		}
3290 		break;
3291 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3292 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3293 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3294 			deemph_reg_value = 43;
3295 			margin_reg_value = 154;
3296 			break;
3297 		default:
3298 			return 0;
3299 		}
3300 		break;
3301 	default:
3302 		return 0;
3303 	}
3304 
3305 	chv_set_phy_signal_level(encoder, deemph_reg_value,
3306 				 margin_reg_value, uniq_trans_scale);
3307 
3308 	return 0;
3309 }
3310 
3311 static uint32_t
3312 gen4_signal_levels(uint8_t train_set)
3313 {
3314 	uint32_t	signal_levels = 0;
3315 
3316 	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3317 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3318 	default:
3319 		signal_levels |= DP_VOLTAGE_0_4;
3320 		break;
3321 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3322 		signal_levels |= DP_VOLTAGE_0_6;
3323 		break;
3324 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3325 		signal_levels |= DP_VOLTAGE_0_8;
3326 		break;
3327 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3328 		signal_levels |= DP_VOLTAGE_1_2;
3329 		break;
3330 	}
3331 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3332 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3333 	default:
3334 		signal_levels |= DP_PRE_EMPHASIS_0;
3335 		break;
3336 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3337 		signal_levels |= DP_PRE_EMPHASIS_3_5;
3338 		break;
3339 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3340 		signal_levels |= DP_PRE_EMPHASIS_6;
3341 		break;
3342 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3343 		signal_levels |= DP_PRE_EMPHASIS_9_5;
3344 		break;
3345 	}
3346 	return signal_levels;
3347 }
3348 
3349 /* Gen6's DP voltage swing and pre-emphasis control */
3350 static uint32_t
3351 gen6_edp_signal_levels(uint8_t train_set)
3352 {
3353 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3354 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3355 	switch (signal_levels) {
3356 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3357 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3358 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3359 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3360 		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3361 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3362 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3363 		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3364 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3365 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3366 		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3367 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3368 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3369 		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3370 	default:
3371 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3372 			      "0x%x\n", signal_levels);
3373 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3374 	}
3375 }
3376 
3377 /* Gen7's DP voltage swing and pre-emphasis control */
3378 static uint32_t
3379 gen7_edp_signal_levels(uint8_t train_set)
3380 {
3381 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3382 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3383 	switch (signal_levels) {
3384 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3385 		return EDP_LINK_TRAIN_400MV_0DB_IVB;
3386 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3387 		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3388 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3389 		return EDP_LINK_TRAIN_400MV_6DB_IVB;
3390 
3391 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3392 		return EDP_LINK_TRAIN_600MV_0DB_IVB;
3393 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3394 		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3395 
3396 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3397 		return EDP_LINK_TRAIN_800MV_0DB_IVB;
3398 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3399 		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3400 
3401 	default:
3402 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3403 			      "0x%x\n", signal_levels);
3404 		return EDP_LINK_TRAIN_500MV_0DB_IVB;
3405 	}
3406 }
3407 
3408 void
3409 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3410 {
3411 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3412 	enum port port = intel_dig_port->port;
3413 	struct drm_device *dev = intel_dig_port->base.base.dev;
3414 	struct drm_i915_private *dev_priv = to_i915(dev);
3415 	uint32_t signal_levels, mask = 0;
3416 	uint8_t train_set = intel_dp->train_set[0];
3417 
3418 	if (HAS_DDI(dev_priv)) {
3419 		signal_levels = ddi_signal_levels(intel_dp);
3420 
3421 		if (IS_GEN9_LP(dev_priv))
3422 			signal_levels = 0;
3423 		else
3424 			mask = DDI_BUF_EMP_MASK;
3425 	} else if (IS_CHERRYVIEW(dev_priv)) {
3426 		signal_levels = chv_signal_levels(intel_dp);
3427 	} else if (IS_VALLEYVIEW(dev_priv)) {
3428 		signal_levels = vlv_signal_levels(intel_dp);
3429 	} else if (IS_GEN7(dev_priv) && port == PORT_A) {
3430 		signal_levels = gen7_edp_signal_levels(train_set);
3431 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3432 	} else if (IS_GEN6(dev_priv) && port == PORT_A) {
3433 		signal_levels = gen6_edp_signal_levels(train_set);
3434 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3435 	} else {
3436 		signal_levels = gen4_signal_levels(train_set);
3437 		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3438 	}
3439 
3440 	if (mask)
3441 		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3442 
3443 	DRM_DEBUG_KMS("Using vswing level %d\n",
3444 		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3445 	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3446 		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3447 			DP_TRAIN_PRE_EMPHASIS_SHIFT);
3448 
3449 	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3450 
3451 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3452 	POSTING_READ(intel_dp->output_reg);
3453 }
3454 
3455 void
3456 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3457 				       uint8_t dp_train_pat)
3458 {
3459 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3460 	struct drm_i915_private *dev_priv =
3461 		to_i915(intel_dig_port->base.base.dev);
3462 
3463 	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3464 
3465 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3466 	POSTING_READ(intel_dp->output_reg);
3467 }
3468 
3469 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3470 {
3471 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3472 	struct drm_device *dev = intel_dig_port->base.base.dev;
3473 	struct drm_i915_private *dev_priv = to_i915(dev);
3474 	enum port port = intel_dig_port->port;
3475 	uint32_t val;
3476 
3477 	if (!HAS_DDI(dev_priv))
3478 		return;
3479 
3480 	val = I915_READ(DP_TP_CTL(port));
3481 	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3482 	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3483 	I915_WRITE(DP_TP_CTL(port), val);
3484 
3485 	/*
3486 	 * On PORT_A we can have only eDP in SST mode. There, the only reason
3487 	 * we need to set idle transmission mode is to work around a HW issue
3488 	 * where we enable the pipe while not in idle link-training mode.
3489 	 * In this case there is a requirement to wait for a minimum number
3490 	 * of idle patterns to be sent.
3491 	 */
3492 	if (port == PORT_A)
3493 		return;
3494 
3495 	if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
3496 				    DP_TP_STATUS_IDLE_DONE,
3497 				    DP_TP_STATUS_IDLE_DONE,
3498 				    1))
3499 		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3500 }
3501 
3502 static void
3503 intel_dp_link_down(struct intel_dp *intel_dp)
3504 {
3505 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3506 	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3507 	enum port port = intel_dig_port->port;
3508 	struct drm_device *dev = intel_dig_port->base.base.dev;
3509 	struct drm_i915_private *dev_priv = to_i915(dev);
3510 	uint32_t DP = intel_dp->DP;
3511 
3512 	if (WARN_ON(HAS_DDI(dev_priv)))
3513 		return;
3514 
3515 	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3516 		return;
3517 
3518 	DRM_DEBUG_KMS("\n");
3519 
3520 	if ((IS_GEN7(dev_priv) && port == PORT_A) ||
3521 	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3522 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
3523 		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3524 	} else {
3525 		if (IS_CHERRYVIEW(dev_priv))
3526 			DP &= ~DP_LINK_TRAIN_MASK_CHV;
3527 		else
3528 			DP &= ~DP_LINK_TRAIN_MASK;
3529 		DP |= DP_LINK_TRAIN_PAT_IDLE;
3530 	}
3531 	I915_WRITE(intel_dp->output_reg, DP);
3532 	POSTING_READ(intel_dp->output_reg);
3533 
3534 	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3535 	I915_WRITE(intel_dp->output_reg, DP);
3536 	POSTING_READ(intel_dp->output_reg);
3537 
3538 	/*
3539 	 * HW workaround for IBX, we need to move the port
3540 	 * to transcoder A after disabling it to allow the
3541 	 * matching HDMI port to be enabled on transcoder A.
3542 	 */
3543 	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
3544 		/*
3545 		 * We get CPU/PCH FIFO underruns on the other pipe when
3546 		 * doing the workaround. Sweep them under the rug.
3547 		 */
3548 		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3549 		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3550 
3551 		/* always enable with pattern 1 (as per spec) */
3552 		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3553 		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3554 		I915_WRITE(intel_dp->output_reg, DP);
3555 		POSTING_READ(intel_dp->output_reg);
3556 
3557 		DP &= ~DP_PORT_EN;
3558 		I915_WRITE(intel_dp->output_reg, DP);
3559 		POSTING_READ(intel_dp->output_reg);
3560 
3561 		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
3562 		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3563 		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3564 	}
3565 
3566 	msleep(intel_dp->panel_power_down_delay);
3567 
3568 	intel_dp->DP = DP;
3569 
3570 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3571 		pps_lock(intel_dp);
3572 		intel_dp->active_pipe = INVALID_PIPE;
3573 		pps_unlock(intel_dp);
3574 	}
3575 }
3576 
3577 bool
3578 intel_dp_read_dpcd(struct intel_dp *intel_dp)
3579 {
3580 	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
3581 			     sizeof(intel_dp->dpcd)) < 0)
3582 		return false; /* aux transfer failed */
3583 
3584 	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3585 
3586 	return intel_dp->dpcd[DP_DPCD_REV] != 0;
3587 }
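
/*
 * Minimal usage sketch, mirroring the callers below: a DPCD revision
 * of zero is treated as "no sink present".
 *
 *	if (!intel_dp_read_dpcd(intel_dp))
 *		return connector_status_disconnected;
 */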
3588 
3589 static bool
3590 intel_edp_init_dpcd(struct intel_dp *intel_dp)
3591 {
3592 	struct drm_i915_private *dev_priv =
3593 		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
3594 
3595 	/* this function is meant to be called only once */
3596 	WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
3597 
3598 	if (!intel_dp_read_dpcd(intel_dp))
3599 		return false;
3600 
3601 	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
3602 			 drm_dp_is_branch(intel_dp->dpcd));
3603 
3604 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
3605 		dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
3606 			DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
3607 
3608 	/* Check if the panel supports PSR */
3609 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
3610 			 intel_dp->psr_dpcd,
3611 			 sizeof(intel_dp->psr_dpcd));
3612 	if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3613 		dev_priv->psr.sink_support = true;
3614 		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3615 	}
3616 
3617 	if (INTEL_GEN(dev_priv) >= 9 &&
3618 	    (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3619 		uint8_t frame_sync_cap;
3620 
3621 		dev_priv->psr.sink_support = true;
3622 		drm_dp_dpcd_read(&intel_dp->aux,
3623 				 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3624 				 &frame_sync_cap, 1);
3625 		dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3626 		/* PSR2 needs frame sync as well */
3627 		dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3628 		DRM_DEBUG_KMS("PSR2 %s on sink\n",
3629 			      dev_priv->psr.psr2_support ? "supported" : "not supported");
3630 
3631 		if (dev_priv->psr.psr2_support) {
3632 			dev_priv->psr.y_cord_support =
3633 				intel_dp_get_y_cord_status(intel_dp);
3634 			dev_priv->psr.colorimetry_support =
3635 				intel_dp_get_colorimetry_status(intel_dp);
3636 			dev_priv->psr.alpm =
3637 				intel_dp_get_alpm_status(intel_dp);
3638 		}
3639 
3640 	}
3641 
3642 	/* Read the eDP Display control capabilities registers */
3643 	if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3644 	    drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
3645 			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
3646 			     sizeof(intel_dp->edp_dpcd))
3647 		DRM_DEBUG_KMS("EDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
3648 			      intel_dp->edp_dpcd);
3649 
3650 	/* Intermediate frequency support */
3651 	if (intel_dp->edp_dpcd[0] >= 0x03) { /* eDP v1.4 or higher */
3652 		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3653 		int i;
3654 
3655 		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
3656 				sink_rates, sizeof(sink_rates));
3657 
3658 		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3659 			int val = le16_to_cpu(sink_rates[i]);
3660 
3661 			if (val == 0)
3662 				break;
3663 
3664 			/* Value read multiplied by 200kHz gives the per-lane
3665 			 * link rate in kHz. The source rates are, however,
3666 			 * stored in terms of LS_Clk kHz. The full conversion
3667 			 * back to symbols is
3668 			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
3669 			 */
3670 			intel_dp->sink_rates[i] = (val * 200) / 10;
3671 		}
3672 		intel_dp->num_sink_rates = i;
3673 	}
3674 
3675 	return true;
3676 }
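
/*
 * Worked example for the sink-rate conversion above: a sink that
 * supports 1.62 Gbps stores val = 8100 in DP_SUPPORTED_LINK_RATES
 * (8100 * 200 kHz = 1.62 GHz), and (8100 * 200) / 10 = 162000 is that
 * rate expressed in LS_Clk kHz, the same unit the source rate tables
 * in this file use.
 */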
3677 
3678 
3679 static bool
3680 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3681 {
3682 	if (!intel_dp_read_dpcd(intel_dp))
3683 		return false;
3684 
3685 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
3686 			     &intel_dp->sink_count, 1) < 0)
3687 		return false;
3688 
3689 	/*
3690 	 * The sink count can change between short pulse HPD interrupts,
3691 	 * hence a member variable in intel_dp is used to track any
3692 	 * changes reported between those interrupts.
3693 	 */
3694 	intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);
3695 
3696 	/*
3697 	 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
3698 	 * a dongle is present but no display. Unless we need to know
3699 	 * whether a dongle is present or not, we don't need to update
3700 	 * the downstream port information, so an early return here saves
3701 	 * time by skipping operations which are not required.
3702 	 */
3703 	if (!is_edp(intel_dp) && !intel_dp->sink_count)
3704 		return false;
3705 
3706 	if (!drm_dp_is_branch(intel_dp->dpcd))
3707 		return true; /* native DP sink */
3708 
3709 	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3710 		return true; /* no per-port downstream info */
3711 
3712 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3713 			     intel_dp->downstream_ports,
3714 			     DP_MAX_DOWNSTREAM_PORTS) < 0)
3715 		return false; /* downstream port status fetch failed */
3716 
3717 	return true;
3718 }
3719 
3720 static bool
3721 intel_dp_can_mst(struct intel_dp *intel_dp)
3722 {
3723 	u8 buf[1];
3724 
3725 	if (!i915.enable_dp_mst)
3726 		return false;
3727 
3728 	if (!intel_dp->can_mst)
3729 		return false;
3730 
3731 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3732 		return false;
3733 
3734 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1) != 1)
3735 		return false;
3736 
3737 	return buf[0] & DP_MST_CAP;
3738 }
3739 
3740 static void
3741 intel_dp_configure_mst(struct intel_dp *intel_dp)
3742 {
3743 	if (!i915.enable_dp_mst)
3744 		return;
3745 
3746 	if (!intel_dp->can_mst)
3747 		return;
3748 
3749 	intel_dp->is_mst = intel_dp_can_mst(intel_dp);
3750 
3751 	if (intel_dp->is_mst)
3752 		DRM_DEBUG_KMS("Sink is MST capable\n");
3753 	else
3754 		DRM_DEBUG_KMS("Sink is not MST capable\n");
3755 
3756 	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
3757 					intel_dp->is_mst);
3758 }
3759 
3760 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
3761 {
3762 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3763 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3764 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3765 	u8 buf;
3766 	int ret = 0;
3767 	int count = 0;
3768 	int attempts = 10;
3769 
3770 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3771 		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3772 		ret = -EIO;
3773 		goto out;
3774 	}
3775 
3776 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3777 			       buf & ~DP_TEST_SINK_START) < 0) {
3778 		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3779 		ret = -EIO;
3780 		goto out;
3781 	}
3782 
3783 	do {
3784 		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
3785 
3786 		if (drm_dp_dpcd_readb(&intel_dp->aux,
3787 				      DP_TEST_SINK_MISC, &buf) < 0) {
3788 			ret = -EIO;
3789 			goto out;
3790 		}
3791 		count = buf & DP_TEST_COUNT_MASK;
3792 	} while (--attempts && count);
3793 
3794 	if (attempts == 0) {
3795 		DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
3796 		ret = -ETIMEDOUT;
3797 	}
3798 
3799  out:
3800 	hsw_enable_ips(intel_crtc);
3801 	return ret;
3802 }
3803 
3804 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
3805 {
3806 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3807 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3808 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3809 	u8 buf;
3810 	int ret;
3811 
3812 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3813 		return -EIO;
3814 
3815 	if (!(buf & DP_TEST_CRC_SUPPORTED))
3816 		return -ENOTTY;
3817 
3818 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3819 		return -EIO;
3820 
3821 	if (buf & DP_TEST_SINK_START) {
3822 		ret = intel_dp_sink_crc_stop(intel_dp);
3823 		if (ret)
3824 			return ret;
3825 	}
3826 
3827 	hsw_disable_ips(intel_crtc);
3828 
3829 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3830 			       buf | DP_TEST_SINK_START) < 0) {
3831 		hsw_enable_ips(intel_crtc);
3832 		return -EIO;
3833 	}
3834 
3835 	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
3836 	return 0;
3837 }
3838 
3839 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3840 {
3841 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3842 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3843 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3844 	u8 buf;
3845 	int count, ret;
3846 	int attempts = 6;
3847 
3848 	ret = intel_dp_sink_crc_start(intel_dp);
3849 	if (ret)
3850 		return ret;
3851 
3852 	do {
3853 		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
3854 
3855 		if (drm_dp_dpcd_readb(&intel_dp->aux,
3856 				      DP_TEST_SINK_MISC, &buf) < 0) {
3857 			ret = -EIO;
3858 			goto stop;
3859 		}
3860 		count = buf & DP_TEST_COUNT_MASK;
3861 
3862 	} while (--attempts && count == 0);
3863 
3864 	if (attempts == 0) {
3865 		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
3866 		ret = -ETIMEDOUT;
3867 		goto stop;
3868 	}
3869 
3870 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
3871 		ret = -EIO;
3872 		goto stop;
3873 	}
3874 
3875 stop:
3876 	intel_dp_sink_crc_stop(intel_dp);
3877 	return ret;
3878 }
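
/*
 * Hypothetical caller sketch (the debugfs sink-CRC code is the real
 * consumer): the six bytes read from DP_TEST_CRC_R_CR onwards hold
 * the R/Cr, G/Y and B/Cb CRCs, two bytes each.
 *
 *	u8 crc[6];
 *
 *	if (intel_dp_sink_crc(intel_dp, crc) == 0)
 *		DRM_DEBUG_KMS("sink CRC: %*ph\n", 6, crc);
 */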
3879 
3880 static bool
3881 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3882 {
3883 	return drm_dp_dpcd_read(&intel_dp->aux,
3884 				       DP_DEVICE_SERVICE_IRQ_VECTOR,
3885 				       sink_irq_vector, 1) == 1;
3886 }
3887 
3888 static bool
3889 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3890 {
3891 	int ret;
3892 
3893 	ret = drm_dp_dpcd_read(&intel_dp->aux,
3894 					     DP_SINK_COUNT_ESI,
3895 					     sink_irq_vector, 14);
3896 	if (ret != 14)
3897 		return false;
3898 
3899 	return true;
3900 }
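
/*
 * The 14-byte ESI read above covers DPCD 0x2002-0x200f, so index 10
 * corresponds to 0x200c, the start of the ESI lane status fields;
 * that is why the MST handler below passes &esi[10] to
 * drm_dp_channel_eq_ok().
 */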
3901 
3902 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
3903 {
3904 	int status = 0;
3905 	int min_lane_count = 1;
3906 	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
3907 	int link_rate_index, test_link_rate;
3908 	uint8_t test_lane_count, test_link_bw;
3909 	/*
3910 	 * Read the TEST_LANE_COUNT and TEST_LINK_RATE fields
3911 	 * (DP CTS 1.2, sections 4.3.1.11 and 3.1.4).
3912 	 */
3913 	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
3914 				   &test_lane_count);
3915 
3916 	if (status <= 0) {
3917 		DRM_DEBUG_KMS("Lane count read failed\n");
3918 		return DP_TEST_NAK;
3919 	}
3920 	test_lane_count &= DP_MAX_LANE_COUNT_MASK;
3921 	/* Validate the requested lane count */
3922 	if (test_lane_count < min_lane_count ||
3923 	    test_lane_count > intel_dp->max_sink_lane_count)
3924 		return DP_TEST_NAK;
3925 
3926 	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
3927 				   &test_link_bw);
3928 	if (status <= 0) {
3929 		DRM_DEBUG_KMS("Link Rate read failed\n");
3930 		return DP_TEST_NAK;
3931 	}
3932 	/* Validate the requested link rate */
3933 	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
3934 	link_rate_index = intel_dp_link_rate_index(intel_dp,
3935 						   common_rates,
3936 						   test_link_rate);
3937 	if (link_rate_index < 0)
3938 		return DP_TEST_NAK;
3939 
3940 	intel_dp->compliance.test_lane_count = test_lane_count;
3941 	intel_dp->compliance.test_link_rate = test_link_rate;
3942 
3943 	return DP_TEST_ACK;
3944 }
3945 
3946 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
3947 {
3948 	uint8_t test_pattern;
3949 	uint8_t test_misc;
3950 	__be16 h_width, v_height;
3951 	int status = 0;
3952 
3953 	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
3954 	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_PATTERN,
3955 				  &test_pattern, 1);
3956 	if (status <= 0) {
3957 		DRM_DEBUG_KMS("Test pattern read failed\n");
3958 		return DP_TEST_NAK;
3959 	}
3960 	if (test_pattern != DP_COLOR_RAMP)
3961 		return DP_TEST_NAK;
3962 
3963 	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
3964 				  &h_width, 2);
3965 	if (status <= 0) {
3966 		DRM_DEBUG_KMS("H Width read failed\n");
3967 		return DP_TEST_NAK;
3968 	}
3969 
3970 	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
3971 				  &v_height, 2);
3972 	if (status <= 0) {
3973 		DRM_DEBUG_KMS("V Height read failed\n");
3974 		return DP_TEST_NAK;
3975 	}
3976 
3977 	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_MISC0,
3978 				  &test_misc, 1);
3979 	if (status <= 0) {
3980 		DRM_DEBUG_KMS("TEST MISC read failed\n");
3981 		return DP_TEST_NAK;
3982 	}
3983 	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
3984 		return DP_TEST_NAK;
3985 	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
3986 		return DP_TEST_NAK;
3987 	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
3988 	case DP_TEST_BIT_DEPTH_6:
3989 		intel_dp->compliance.test_data.bpc = 6;
3990 		break;
3991 	case DP_TEST_BIT_DEPTH_8:
3992 		intel_dp->compliance.test_data.bpc = 8;
3993 		break;
3994 	default:
3995 		return DP_TEST_NAK;
3996 	}
3997 
3998 	intel_dp->compliance.test_data.video_pattern = test_pattern;
3999 	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
4000 	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
4001 	/* Set test active flag here so userspace doesn't interrupt things */
4002 	intel_dp->compliance.test_active = 1;
4003 
4004 	return DP_TEST_ACK;
4005 }
4006 
4007 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4008 {
4009 	uint8_t test_result = DP_TEST_ACK;
4010 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4011 	struct drm_connector *connector = &intel_connector->base;
4012 
4013 	if (intel_connector->detect_edid == NULL ||
4014 	    connector->edid_corrupt ||
4015 	    intel_dp->aux.i2c_defer_count > 6) {
4016 		/* Check EDID read for NACKs, DEFERs and corruption
4017 		 * (DP CTS 1.2 Core r1.1)
4018 		 *    4.2.2.4 : Failed EDID read, I2C_NAK
4019 		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
4020 		 *    4.2.2.6 : EDID corruption detected
4021 		 * Use failsafe mode for all cases
4022 		 */
4023 		if (intel_dp->aux.i2c_nack_count > 0 ||
4024 			intel_dp->aux.i2c_defer_count > 0)
4025 			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4026 				      intel_dp->aux.i2c_nack_count,
4027 				      intel_dp->aux.i2c_defer_count);
4028 		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
4029 	} else {
4030 		struct edid *block = intel_connector->detect_edid;
4031 
4032 		/* We have to write the checksum
4033 		 * of the last block read
4034 		 */
4035 		block += intel_connector->detect_edid->extensions;
4036 
4037 		if (!drm_dp_dpcd_write(&intel_dp->aux,
4038 					DP_TEST_EDID_CHECKSUM,
4039 					&block->checksum,
4040 					1))
4041 			DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4042 
4043 		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4044 		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
4045 	}
4046 
4047 	/* Set test active flag here so userspace doesn't interrupt things */
4048 	intel_dp->compliance.test_active = 1;
4049 
4050 	return test_result;
4051 }
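
/*
 * Note on the checksum write above: struct edid is exactly one
 * 128-byte block, so "block += extensions" steps the pointer to the
 * last block that was read, whose checksum byte the test expects to
 * find in DP_TEST_EDID_CHECKSUM.
 */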
4052 
4053 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4054 {
4055 	uint8_t test_result = DP_TEST_NAK;
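	/* PHY test patterns are not implemented at this revision; always NAK. */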
4056 	return test_result;
4057 }
4058 
4059 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4060 {
4061 	uint8_t response = DP_TEST_NAK;
4062 	uint8_t request = 0;
4063 	int status;
4064 
4065 	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4066 	if (status <= 0) {
4067 		DRM_DEBUG_KMS("Could not read test request from sink\n");
4068 		goto update_status;
4069 	}
4070 
4071 	switch (request) {
4072 	case DP_TEST_LINK_TRAINING:
4073 		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4074 		response = intel_dp_autotest_link_training(intel_dp);
4075 		break;
4076 	case DP_TEST_LINK_VIDEO_PATTERN:
4077 		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4078 		response = intel_dp_autotest_video_pattern(intel_dp);
4079 		break;
4080 	case DP_TEST_LINK_EDID_READ:
4081 		DRM_DEBUG_KMS("EDID test requested\n");
4082 		response = intel_dp_autotest_edid(intel_dp);
4083 		break;
4084 	case DP_TEST_LINK_PHY_TEST_PATTERN:
4085 		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4086 		response = intel_dp_autotest_phy_pattern(intel_dp);
4087 		break;
4088 	default:
4089 		DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4090 		break;
4091 	}
4092 
4093 	if (response & DP_TEST_ACK)
4094 		intel_dp->compliance.test_type = request;
4095 
4096 update_status:
4097 	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4098 	if (status <= 0)
4099 		DRM_DEBUG_KMS("Could not write test response to sink\n");
4100 }
4101 
4102 static int
4103 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4104 {
4105 	bool bret;
4106 
4107 	if (intel_dp->is_mst) {
4108 		u8 esi[16] = { 0 };
4109 		int ret = 0;
4110 		int retry;
4111 		bool handled;
4112 		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4113 go_again:
4114 		if (bret == true) {
4115 
4116 			/* check link status - esi[10] = 0x200c */
4117 			if (intel_dp->active_mst_links &&
4118 			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4119 				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4120 				intel_dp_start_link_train(intel_dp);
4121 				intel_dp_stop_link_train(intel_dp);
4122 			}
4123 
4124 			DRM_DEBUG_KMS("got esi %3ph\n", esi);
4125 			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4126 
4127 			if (handled) {
4128 				for (retry = 0; retry < 3; retry++) {
4129 					int wret;
4130 					wret = drm_dp_dpcd_write(&intel_dp->aux,
4131 								 DP_SINK_COUNT_ESI+1,
4132 								 &esi[1], 3);
4133 					if (wret == 3) {
4134 						break;
4135 					}
4136 				}
4137 
4138 				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4139 				if (bret == true) {
4140 					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4141 					goto go_again;
4142 				}
4143 			} else
4144 				ret = 0;
4145 
4146 			return ret;
4147 		} else {
4148 			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4149 			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4150 			intel_dp->is_mst = false;
4151 			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4152 			/* send a hotplug event */
4153 			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4154 		}
4155 	}
4156 	return -EINVAL;
4157 }
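
/*
 * The three-byte write-back above (starting at DP_SINK_COUNT_ESI + 1,
 * i.e. DPCD 0x2003) acknowledges the serviced event bits so the sink
 * can deassert its IRQ; the handler then re-reads the ESI block in
 * case new events arrived in the meantime.
 */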
4158 
4159 static void
4160 intel_dp_retrain_link(struct intel_dp *intel_dp)
4161 {
4162 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
4163 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4164 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
4165 
4166 	/* Suppress underruns caused by re-training */
4167 	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
4168 	if (crtc->config->has_pch_encoder)
4169 		intel_set_pch_fifo_underrun_reporting(dev_priv,
4170 						      intel_crtc_pch_transcoder(crtc), false);
4171 
4172 	intel_dp_start_link_train(intel_dp);
4173 	intel_dp_stop_link_train(intel_dp);
4174 
4175 	/* Keep underrun reporting disabled until things are stable */
4176 	intel_wait_for_vblank(dev_priv, crtc->pipe);
4177 
4178 	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
4179 	if (crtc->config->has_pch_encoder)
4180 		intel_set_pch_fifo_underrun_reporting(dev_priv,
4181 						      intel_crtc_pch_transcoder(crtc), true);
4182 }
4183 
4184 static void
4185 intel_dp_check_link_status(struct intel_dp *intel_dp)
4186 {
4187 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4188 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4189 	u8 link_status[DP_LINK_STATUS_SIZE];
4190 
4191 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4192 
4193 	if (!intel_dp_get_link_status(intel_dp, link_status)) {
4194 		DRM_ERROR("Failed to get link status\n");
4195 		return;
4196 	}
4197 
4198 	if (!intel_encoder->base.crtc)
4199 		return;
4200 
4201 	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4202 		return;
4203 
4204 	/* FIXME: we need to synchronize this sort of stuff with hardware
4205 	 * readout. Currently fast link training doesn't work on boot-up. */
4206 	if (!intel_dp->lane_count)
4207 		return;
4208 
4209 	/* Retrain if Channel EQ or CR not ok */
4210 	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4211 		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4212 			      intel_encoder->base.name);
4213 
4214 		intel_dp_retrain_link(intel_dp);
4215 	}
4216 }
4217 
4218 /*
4219  * According to DP spec
4220  * 5.1.2:
4221  *  1. Read DPCD
4222  *  2. Configure link according to Receiver Capabilities
4223  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4224  *  4. Check link status on receipt of hot-plug interrupt
4225  *
4226  * intel_dp_short_pulse - handles short pulse interrupts
4227  * when full detection is not required.
4228  * Returns %true if the short pulse was handled and full detection
4229  * is NOT required, %false otherwise.
4230  */
4231 static bool
4232 intel_dp_short_pulse(struct intel_dp *intel_dp)
4233 {
4234 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4235 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4236 	u8 sink_irq_vector = 0;
4237 	u8 old_sink_count = intel_dp->sink_count;
4238 	bool ret;
4239 
4240 	/*
4241 	 * Clear the compliance test variables to allow capturing
4242 	 * the values for the next automated test request.
4243 	 */
4244 	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
4245 
4246 	/*
4247 	 * Now read the DPCD to see if it's actually running.
4248 	 * If the current value of the sink count doesn't match
4249 	 * the value that was stored earlier, or the DPCD read failed,
4250 	 * we need to do full detection.
4251 	 */
4252 	ret = intel_dp_get_dpcd(intel_dp);
4253 
4254 	if ((old_sink_count != intel_dp->sink_count) || !ret) {
4255 		/* No need to proceed if we are going to do full detect */
4256 		return false;
4257 	}
4258 
4259 	/* Try to read the source of the interrupt */
4260 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4261 	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
4262 	    sink_irq_vector != 0) {
4263 		/* Clear interrupt source */
4264 		drm_dp_dpcd_writeb(&intel_dp->aux,
4265 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
4266 				   sink_irq_vector);
4267 
4268 		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4269 			intel_dp_handle_test_request(intel_dp);
4270 		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4271 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4272 	}
4273 
4274 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4275 	intel_dp_check_link_status(intel_dp);
4276 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
4277 	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
4278 		DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
4279 		/* Send a Hotplug Uevent to userspace to start modeset */
4280 		drm_kms_helper_hotplug_event(intel_encoder->base.dev);
4281 	}
4282 
4283 	return true;
4284 }
4285 
4286 /* XXX this is probably wrong for multiple downstream ports */
4287 static enum drm_connector_status
4288 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4289 {
4290 	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
4291 	uint8_t *dpcd = intel_dp->dpcd;
4292 	uint8_t type;
4293 
4294 	if (lspcon->active)
4295 		lspcon_resume(lspcon);
4296 
4297 	if (!intel_dp_get_dpcd(intel_dp))
4298 		return connector_status_disconnected;
4299 
4300 	if (is_edp(intel_dp))
4301 		return connector_status_connected;
4302 
4303 	/* if there's no downstream port, we're done */
4304 	if (!drm_dp_is_branch(dpcd))
4305 		return connector_status_connected;
4306 
4307 	/* If we're HPD-aware, SINK_COUNT changes dynamically */
4308 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4309 	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4310 
4311 		return intel_dp->sink_count ?
4312 		connector_status_connected : connector_status_disconnected;
4313 	}
4314 
4315 	if (intel_dp_can_mst(intel_dp))
4316 		return connector_status_connected;
4317 
4318 	/* If no HPD, poke DDC gently */
4319 	if (drm_probe_ddc(&intel_dp->aux.ddc))
4320 		return connector_status_connected;
4321 
4322 	/* Well we tried, say unknown for unreliable port types */
4323 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4324 		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4325 		if (type == DP_DS_PORT_TYPE_VGA ||
4326 		    type == DP_DS_PORT_TYPE_NON_EDID)
4327 			return connector_status_unknown;
4328 	} else {
4329 		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4330 			DP_DWN_STRM_PORT_TYPE_MASK;
4331 		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4332 		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
4333 			return connector_status_unknown;
4334 	}
4335 
4336 	/* Anything else is out of spec, warn and ignore */
4337 	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4338 	return connector_status_disconnected;
4339 }
4340 
4341 static enum drm_connector_status
4342 edp_detect(struct intel_dp *intel_dp)
4343 {
4344 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4345 	struct drm_i915_private *dev_priv = to_i915(dev);
4346 	enum drm_connector_status status;
4347 
4348 	status = intel_panel_detect(dev_priv);
4349 	if (status == connector_status_unknown)
4350 		status = connector_status_connected;
4351 
4352 	return status;
4353 }
4354 
4355 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4356 				       struct intel_digital_port *port)
4357 {
4358 	u32 bit;
4359 
4360 	switch (port->port) {
4361 	case PORT_A:
4362 		return true;
4363 	case PORT_B:
4364 		bit = SDE_PORTB_HOTPLUG;
4365 		break;
4366 	case PORT_C:
4367 		bit = SDE_PORTC_HOTPLUG;
4368 		break;
4369 	case PORT_D:
4370 		bit = SDE_PORTD_HOTPLUG;
4371 		break;
4372 	default:
4373 		MISSING_CASE(port->port);
4374 		return false;
4375 	}
4376 
4377 	return I915_READ(SDEISR) & bit;
4378 }
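
/*
 * The *_digital_port_connected() helpers above and below all follow
 * the same shape: map the port to its live hot-plug status bit and
 * test that bit in the platform's hot-plug register (SDEISR,
 * PORT_HOTPLUG_STAT or GEN8_DE_PORT_ISR). PORT_A is reported as
 * always connected on the PCH platforms, where it carries eDP.
 */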
4379 
4380 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4381 				       struct intel_digital_port *port)
4382 {
4383 	u32 bit;
4384 
4385 	switch (port->port) {
4386 	case PORT_A:
4387 		return true;
4388 	case PORT_B:
4389 		bit = SDE_PORTB_HOTPLUG_CPT;
4390 		break;
4391 	case PORT_C:
4392 		bit = SDE_PORTC_HOTPLUG_CPT;
4393 		break;
4394 	case PORT_D:
4395 		bit = SDE_PORTD_HOTPLUG_CPT;
4396 		break;
4397 	case PORT_E:
4398 		bit = SDE_PORTE_HOTPLUG_SPT;
4399 		break;
4400 	default:
4401 		MISSING_CASE(port->port);
4402 		return false;
4403 	}
4404 
4405 	return I915_READ(SDEISR) & bit;
4406 }
4407 
4408 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4409 				       struct intel_digital_port *port)
4410 {
4411 	u32 bit;
4412 
4413 	switch (port->port) {
4414 	case PORT_B:
4415 		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4416 		break;
4417 	case PORT_C:
4418 		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4419 		break;
4420 	case PORT_D:
4421 		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4422 		break;
4423 	default:
4424 		MISSING_CASE(port->port);
4425 		return false;
4426 	}
4427 
4428 	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4429 }
4430 
4431 static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4432 					struct intel_digital_port *port)
4433 {
4434 	u32 bit;
4435 
4436 	switch (port->port) {
4437 	case PORT_B:
4438 		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4439 		break;
4440 	case PORT_C:
4441 		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4442 		break;
4443 	case PORT_D:
4444 		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4445 		break;
4446 	default:
4447 		MISSING_CASE(port->port);
4448 		return false;
4449 	}
4450 
4451 	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4452 }
4453 
4454 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4455 				       struct intel_digital_port *intel_dig_port)
4456 {
4457 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4458 	enum port port;
4459 	u32 bit;
4460 
4461 	intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4462 	switch (port) {
4463 	case PORT_A:
4464 		bit = BXT_DE_PORT_HP_DDIA;
4465 		break;
4466 	case PORT_B:
4467 		bit = BXT_DE_PORT_HP_DDIB;
4468 		break;
4469 	case PORT_C:
4470 		bit = BXT_DE_PORT_HP_DDIC;
4471 		break;
4472 	default:
4473 		MISSING_CASE(port);
4474 		return false;
4475 	}
4476 
4477 	return I915_READ(GEN8_DE_PORT_ISR) & bit;
4478 }
4479 
4480 /*
4481  * intel_digital_port_connected - is the specified port connected?
4482  * @dev_priv: i915 private structure
4483  * @port: the port to test
4484  *
4485  * Return %true if @port is connected, %false otherwise.
4486  */
4487 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4488 				  struct intel_digital_port *port)
4489 {
4490 	if (HAS_PCH_IBX(dev_priv))
4491 		return ibx_digital_port_connected(dev_priv, port);
4492 	else if (HAS_PCH_SPLIT(dev_priv))
4493 		return cpt_digital_port_connected(dev_priv, port);
4494 	else if (IS_GEN9_LP(dev_priv))
4495 		return bxt_digital_port_connected(dev_priv, port);
4496 	else if (IS_GM45(dev_priv))
4497 		return gm45_digital_port_connected(dev_priv, port);
4498 	else
4499 		return g4x_digital_port_connected(dev_priv, port);
4500 }
4501 
4502 static struct edid *
4503 intel_dp_get_edid(struct intel_dp *intel_dp)
4504 {
4505 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4506 
4507 	/* use cached edid if we have one */
4508 	if (intel_connector->edid) {
4509 		/* invalid edid */
4510 		if (IS_ERR(intel_connector->edid))
4511 			return NULL;
4512 
4513 		return drm_edid_duplicate(intel_connector->edid);
4514 	} else
4515 		return drm_get_edid(&intel_connector->base,
4516 				    &intel_dp->aux.ddc);
4517 }
4518 
4519 static void
4520 intel_dp_set_edid(struct intel_dp *intel_dp)
4521 {
4522 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4523 	struct edid *edid;
4524 
4525 	intel_dp_unset_edid(intel_dp);
4526 	edid = intel_dp_get_edid(intel_dp);
4527 	intel_connector->detect_edid = edid;
4528 
4529 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4530 		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4531 	else
4532 		intel_dp->has_audio = drm_detect_monitor_audio(edid);
4533 }
4534 
4535 static void
4536 intel_dp_unset_edid(struct intel_dp *intel_dp)
4537 {
4538 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4539 
4540 	kfree(intel_connector->detect_edid);
4541 	intel_connector->detect_edid = NULL;
4542 
4543 	intel_dp->has_audio = false;
4544 }
4545 
4546 static int
4547 intel_dp_long_pulse(struct intel_connector *intel_connector)
4548 {
4549 	struct drm_connector *connector = &intel_connector->base;
4550 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4551 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4552 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4553 	struct drm_device *dev = connector->dev;
4554 	enum drm_connector_status status;
4555 	u8 sink_irq_vector = 0;
4556 
4557 	WARN_ON(!drm_modeset_is_locked(&connector->dev->mode_config.connection_mutex));
4558 
4559 	intel_display_power_get(to_i915(dev), intel_dp->aux_power_domain);
4560 
4561 	/* Can't disconnect eDP, but you can close the lid... */
4562 	if (is_edp(intel_dp))
4563 		status = edp_detect(intel_dp);
4564 	else if (intel_digital_port_connected(to_i915(dev),
4565 					      dp_to_dig_port(intel_dp)))
4566 		status = intel_dp_detect_dpcd(intel_dp);
4567 	else
4568 		status = connector_status_disconnected;
4569 
4570 	if (status == connector_status_disconnected) {
4571 		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
4572 
4573 		if (intel_dp->is_mst) {
4574 			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
4575 				      intel_dp->is_mst,
4576 				      intel_dp->mst_mgr.mst_state);
4577 			intel_dp->is_mst = false;
4578 			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4579 							intel_dp->is_mst);
4580 		}
4581 
4582 		goto out;
4583 	}
4584 
4585 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4586 		intel_encoder->type = INTEL_OUTPUT_DP;
4587 
4588 	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
4589 		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
4590 		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
4591 
4592 	if (intel_dp->reset_link_params) {
4593 		/* Set the max lane count for sink */
4594 		intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
4595 
4596 		/* Set the max link BW for sink */
4597 		intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
4598 
4599 		intel_dp->reset_link_params = false;
4600 	}
4601 
4602 	intel_dp_print_rates(intel_dp);
4603 
4604 	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4605 			 drm_dp_is_branch(intel_dp->dpcd));
4606 
4607 	intel_dp_configure_mst(intel_dp);
4608 
4609 	if (intel_dp->is_mst) {
4610 		/*
4611 		 * If we are in MST mode then this connector
4612 		 * won't appear connected or have an EDID
4613 		 * attached to it.
4614 		 */
4615 		status = connector_status_disconnected;
4616 		goto out;
4617 	} else {
4618 		/*
4619 		 * If the display is now connected, check link status: there
4620 		 * have been known issues of link loss triggering a long
4621 		 * pulse.
4622 		 *
4623 		 * Some sinks (e.g. the ASUS PB287Q) seem to perform a
4624 		 * weird HPD ping-pong during modesets, so we can apparently
4625 		 * end up with HPD going low during a modeset and then
4626 		 * going back up soon after. Once that happens we must
4627 		 * retrain the link to get a picture, in case no userspace
4628 		 * component reacted to the intermittent HPD dip.
4629 		 */
4630 		intel_dp_check_link_status(intel_dp);
4631 	}
4632 
4633 	/*
4634 	 * Clear the NACK and defer counts so we get their exact values
4635 	 * while reading the EDID; they are required by Compliance tests
4636 	 * 4.2.2.4 and 4.2.2.5.
4637 	 */
4638 	intel_dp->aux.i2c_nack_count = 0;
4639 	intel_dp->aux.i2c_defer_count = 0;
4640 
4641 	intel_dp_set_edid(intel_dp);
4642 	if (is_edp(intel_dp) || intel_connector->detect_edid)
4643 		status = connector_status_connected;
4644 	intel_dp->detect_done = true;
4645 
4646 	/* Try to read the source of the interrupt */
4647 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4648 	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
4649 	    sink_irq_vector != 0) {
4650 		/* Clear interrupt source */
4651 		drm_dp_dpcd_writeb(&intel_dp->aux,
4652 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
4653 				   sink_irq_vector);
4654 
4655 		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4656 			intel_dp_handle_test_request(intel_dp);
4657 		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4658 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4659 	}
4660 
4661 out:
4662 	if (status != connector_status_connected && !intel_dp->is_mst)
4663 		intel_dp_unset_edid(intel_dp);
4664 
4665 	intel_display_power_put(to_i915(dev), intel_dp->aux_power_domain);
4666 	return status;
4667 }
4668 
4669 static int
4670 intel_dp_detect(struct drm_connector *connector,
4671 		struct drm_modeset_acquire_ctx *ctx,
4672 		bool force)
4673 {
4674 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4675 	int status = connector->status;
4676 
4677 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4678 		      connector->base.id, connector->name);
4679 
4680 	/* If full detection has not been performed yet, do it now */
4681 	if (!intel_dp->detect_done)
4682 		status = intel_dp_long_pulse(intel_dp->attached_connector);
4683 
4684 	intel_dp->detect_done = false;
4685 
4686 	return status;
4687 }
4688 
4689 static void
4690 intel_dp_force(struct drm_connector *connector)
4691 {
4692 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4693 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4694 	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4695 
4696 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4697 		      connector->base.id, connector->name);
4698 	intel_dp_unset_edid(intel_dp);
4699 
4700 	if (connector->status != connector_status_connected)
4701 		return;
4702 
4703 	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
4704 
4705 	intel_dp_set_edid(intel_dp);
4706 
4707 	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
4708 
4709 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4710 		intel_encoder->type = INTEL_OUTPUT_DP;
4711 }
4712 
4713 static int intel_dp_get_modes(struct drm_connector *connector)
4714 {
4715 	struct intel_connector *intel_connector = to_intel_connector(connector);
4716 	struct edid *edid;
4717 
4718 	edid = intel_connector->detect_edid;
4719 	if (edid) {
4720 		int ret = intel_connector_update_modes(connector, edid);
4721 		if (ret)
4722 			return ret;
4723 	}
4724 
4725 	/* if eDP has no EDID, fall back to fixed mode */
4726 	if (is_edp(intel_attached_dp(connector)) &&
4727 	    intel_connector->panel.fixed_mode) {
4728 		struct drm_display_mode *mode;
4729 
4730 		mode = drm_mode_duplicate(connector->dev,
4731 					  intel_connector->panel.fixed_mode);
4732 		if (mode) {
4733 			drm_mode_probed_add(connector, mode);
4734 			return 1;
4735 		}
4736 	}
4737 
4738 	return 0;
4739 }
4740 
4741 static bool
4742 intel_dp_detect_audio(struct drm_connector *connector)
4743 {
4744 	bool has_audio = false;
4745 	struct edid *edid;
4746 
4747 	edid = to_intel_connector(connector)->detect_edid;
4748 	if (edid)
4749 		has_audio = drm_detect_monitor_audio(edid);
4750 
4751 	return has_audio;
4752 }
4753 
4754 static int
4755 intel_dp_set_property(struct drm_connector *connector,
4756 		      struct drm_property *property,
4757 		      uint64_t val)
4758 {
4759 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
4760 	struct intel_connector *intel_connector = to_intel_connector(connector);
4761 	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4762 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4763 	int ret;
4764 
4765 	ret = drm_object_property_set_value(&connector->base, property, val);
4766 	if (ret)
4767 		return ret;
4768 
4769 	if (property == dev_priv->force_audio_property) {
4770 		int i = val;
4771 		bool has_audio;
4772 
4773 		if (i == intel_dp->force_audio)
4774 			return 0;
4775 
4776 		intel_dp->force_audio = i;
4777 
4778 		if (i == HDMI_AUDIO_AUTO)
4779 			has_audio = intel_dp_detect_audio(connector);
4780 		else
4781 			has_audio = (i == HDMI_AUDIO_ON);
4782 
4783 		if (has_audio == intel_dp->has_audio)
4784 			return 0;
4785 
4786 		intel_dp->has_audio = has_audio;
4787 		goto done;
4788 	}
4789 
4790 	if (property == dev_priv->broadcast_rgb_property) {
4791 		bool old_auto = intel_dp->color_range_auto;
4792 		bool old_range = intel_dp->limited_color_range;
4793 
4794 		switch (val) {
4795 		case INTEL_BROADCAST_RGB_AUTO:
4796 			intel_dp->color_range_auto = true;
4797 			break;
4798 		case INTEL_BROADCAST_RGB_FULL:
4799 			intel_dp->color_range_auto = false;
4800 			intel_dp->limited_color_range = false;
4801 			break;
4802 		case INTEL_BROADCAST_RGB_LIMITED:
4803 			intel_dp->color_range_auto = false;
4804 			intel_dp->limited_color_range = true;
4805 			break;
4806 		default:
4807 			return -EINVAL;
4808 		}
4809 
4810 		if (old_auto == intel_dp->color_range_auto &&
4811 		    old_range == intel_dp->limited_color_range)
4812 			return 0;
4813 
4814 		goto done;
4815 	}
4816 
4817 	if (is_edp(intel_dp) &&
4818 	    property == connector->dev->mode_config.scaling_mode_property) {
4819 		if (val == DRM_MODE_SCALE_NONE) {
4820 			DRM_DEBUG_KMS("no scaling not supported\n");
4821 			return -EINVAL;
4822 		}
4823 		if (HAS_GMCH_DISPLAY(dev_priv) &&
4824 		    val == DRM_MODE_SCALE_CENTER) {
4825 			DRM_DEBUG_KMS("centering not supported\n");
4826 			return -EINVAL;
4827 		}
4828 
4829 		if (intel_connector->panel.fitting_mode == val) {
4830 			/* the eDP scaling property is not changed */
4831 			return 0;
4832 		}
4833 		intel_connector->panel.fitting_mode = val;
4834 
4835 		goto done;
4836 	}
4837 
4838 	return -EINVAL;
4839 
4840 done:
4841 	if (intel_encoder->base.crtc)
4842 		intel_crtc_restore_mode(intel_encoder->base.crtc);
4843 
4844 	return 0;
4845 }
4846 
4847 static int
4848 intel_dp_connector_register(struct drm_connector *connector)
4849 {
4850 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4851 	int ret;
4852 
4853 	ret = intel_connector_register(connector);
4854 	if (ret)
4855 		return ret;
4856 
4857 	i915_debugfs_connector_add(connector);
4858 
4859 	DRM_DEBUG_KMS("registering %s bus for %s\n",
4860 		      intel_dp->aux.name, connector->kdev->kobj.name);
4861 
4862 	intel_dp->aux.dev = connector->kdev;
4863 	return drm_dp_aux_register(&intel_dp->aux);
4864 }
4865 
4866 static void
4867 intel_dp_connector_unregister(struct drm_connector *connector)
4868 {
4869 	drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
4870 	intel_connector_unregister(connector);
4871 }
4872 
4873 static void
4874 intel_dp_connector_destroy(struct drm_connector *connector)
4875 {
4876 	struct intel_connector *intel_connector = to_intel_connector(connector);
4877 
4878 	kfree(intel_connector->detect_edid);
4879 
4880 	if (!IS_ERR_OR_NULL(intel_connector->edid))
4881 		kfree(intel_connector->edid);
4882 
4883 	/* Can't call is_edp() since the encoder may have been destroyed
4884 	 * already. */
4885 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4886 		intel_panel_fini(&intel_connector->panel);
4887 
4888 	drm_connector_cleanup(connector);
4889 	kfree(connector);
4890 }
4891 
4892 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4893 {
4894 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4895 	struct intel_dp *intel_dp = &intel_dig_port->dp;
4896 
4897 	intel_dp_mst_encoder_cleanup(intel_dig_port);
4898 	if (is_edp(intel_dp)) {
4899 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4900 		/*
4901 		 * vdd might still be enabled due to the delayed vdd off.
4902 		 * Make sure vdd is actually turned off here.
4903 		 */
4904 		pps_lock(intel_dp);
4905 		edp_panel_vdd_off_sync(intel_dp);
4906 		pps_unlock(intel_dp);
4907 
4908 		if (intel_dp->edp_notifier.notifier_call) {
4909 			unregister_reboot_notifier(&intel_dp->edp_notifier);
4910 			intel_dp->edp_notifier.notifier_call = NULL;
4911 		}
4912 	}
4913 
4914 	intel_dp_aux_fini(intel_dp);
4915 
4916 	drm_encoder_cleanup(encoder);
4917 	kfree(intel_dig_port);
4918 }
4919 
4920 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4921 {
4922 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4923 
4924 	if (!is_edp(intel_dp))
4925 		return;
4926 
4927 	/*
4928 	 * vdd might still be enabled due to the delayed vdd off.
4929 	 * Make sure vdd is actually turned off here.
4930 	 */
4931 	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4932 	pps_lock(intel_dp);
4933 	edp_panel_vdd_off_sync(intel_dp);
4934 	pps_unlock(intel_dp);
4935 }
4936 
4937 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4938 {
4939 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4940 	struct drm_device *dev = intel_dig_port->base.base.dev;
4941 	struct drm_i915_private *dev_priv = to_i915(dev);
4942 
4943 	lockdep_assert_held(&dev_priv->pps_mutex);
4944 
4945 	if (!edp_have_panel_vdd(intel_dp))
4946 		return;
4947 
4948 	/*
4949 	 * The VDD bit needs a power domain reference, so if the bit is
4950 	 * already enabled when we boot or resume, grab this reference and
4951 	 * schedule a vdd off, so we don't hold on to the reference
4952 	 * indefinitely.
4953 	 */
4954 	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4955 	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
4956 
4957 	edp_panel_vdd_schedule_off(intel_dp);
4958 }
4959 
4960 static enum i915_pipe vlv_active_pipe(struct intel_dp *intel_dp)
4961 {
4962 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
4963 
4964 	if ((intel_dp->DP & DP_PORT_EN) == 0)
4965 		return INVALID_PIPE;
4966 
4967 	if (IS_CHERRYVIEW(dev_priv))
4968 		return DP_PORT_TO_PIPE_CHV(intel_dp->DP);
4969 	else
4970 		return PORT_TO_PIPE(intel_dp->DP);
4971 }
4972 
4973 void intel_dp_encoder_reset(struct drm_encoder *encoder)
4974 {
4975 	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
4976 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4977 	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
4978 
4979 	if (!HAS_DDI(dev_priv))
4980 		intel_dp->DP = I915_READ(intel_dp->output_reg);
4981 
4982 	if (lspcon->active)
4983 		lspcon_resume(lspcon);
4984 
4985 	intel_dp->reset_link_params = true;
4986 
4987 	pps_lock(intel_dp);
4988 
4989 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4990 		intel_dp->active_pipe = vlv_active_pipe(intel_dp);
4991 
4992 	if (is_edp(intel_dp)) {
4993 		/* Reinit the power sequencer, in case BIOS did something with it. */
4994 		intel_dp_pps_init(encoder->dev, intel_dp);
4995 		intel_edp_panel_vdd_sanitize(intel_dp);
4996 	}
4997 
4998 	pps_unlock(intel_dp);
4999 }
5000 
5001 static const struct drm_connector_funcs intel_dp_connector_funcs = {
5002 	.dpms = drm_atomic_helper_connector_dpms,
5003 	.force = intel_dp_force,
5004 	.fill_modes = drm_helper_probe_single_connector_modes,
5005 	.set_property = intel_dp_set_property,
5006 	.atomic_get_property = intel_connector_atomic_get_property,
5007 	.late_register = intel_dp_connector_register,
5008 	.early_unregister = intel_dp_connector_unregister,
5009 	.destroy = intel_dp_connector_destroy,
5010 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5011 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
5012 };
5013 
5014 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5015 	.detect_ctx = intel_dp_detect,
5016 	.get_modes = intel_dp_get_modes,
5017 	.mode_valid = intel_dp_mode_valid,
5018 };
5019 
5020 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
5021 	.reset = intel_dp_encoder_reset,
5022 	.destroy = intel_dp_encoder_destroy,
5023 };
5024 
5025 enum irqreturn
5026 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5027 {
5028 	struct intel_dp *intel_dp = &intel_dig_port->dp;
5029 	struct drm_device *dev = intel_dig_port->base.base.dev;
5030 	struct drm_i915_private *dev_priv = to_i915(dev);
5031 	enum irqreturn ret = IRQ_NONE;
5032 
5033 	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
5034 	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
5035 		intel_dig_port->base.type = INTEL_OUTPUT_DP;
5036 
5037 	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5038 		/*
5039 		 * vdd off can generate a long pulse on eDP which
5040 		 * would require vdd on to handle it, and thus we
5041 		 * would end up in an endless cycle of
5042 		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5043 		 */
5044 		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5045 			      port_name(intel_dig_port->port));
5046 		return IRQ_HANDLED;
5047 	}
5048 
5049 	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5050 		      port_name(intel_dig_port->port),
5051 		      long_hpd ? "long" : "short");
5052 
5053 	if (long_hpd) {
5054 		intel_dp->reset_link_params = true;
5055 		intel_dp->detect_done = false;
5056 		return IRQ_NONE;
5057 	}
5058 
5059 	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
5060 
5061 	if (intel_dp->is_mst) {
5062 		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
5063 			/*
5064 			 * If we were in MST mode, and device is not
5065 			 * there, get out of MST mode
5066 			 */
5067 			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5068 				      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5069 			intel_dp->is_mst = false;
5070 			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5071 							intel_dp->is_mst);
5072 			intel_dp->detect_done = false;
5073 			goto put_power;
5074 		}
5075 	}
5076 
5077 	if (!intel_dp->is_mst) {
5078 		if (!intel_dp_short_pulse(intel_dp)) {
5079 			intel_dp->detect_done = false;
5080 			goto put_power;
5081 		}
5082 	}
5083 
5084 	ret = IRQ_HANDLED;
5085 
5086 put_power:
5087 	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
5088 
5089 	return ret;
5090 }
5091 
5092 /* check the VBT to see whether the eDP is on another port */
5093 bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
5094 {
5095 	/*
5096 	 * eDP is not supported on g4x, so bail out early just
5097 	 * for a bit of extra safety in case the VBT is bonkers.
5098 	 */
5099 	if (INTEL_GEN(dev_priv) < 5)
5100 		return false;
5101 
5102 	if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
5103 		return true;
5104 
5105 	return intel_bios_is_port_edp(dev_priv, port);
5106 }
5107 
5108 void
5109 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5110 {
5111 	struct intel_connector *intel_connector = to_intel_connector(connector);
5112 
5113 	intel_attach_force_audio_property(connector);
5114 	intel_attach_broadcast_rgb_property(connector);
5115 	intel_dp->color_range_auto = true;
5116 
5117 	if (is_edp(intel_dp)) {
5118 		drm_mode_create_scaling_mode_property(connector->dev);
5119 		drm_object_attach_property(
5120 			&connector->base,
5121 			connector->dev->mode_config.scaling_mode_property,
5122 			DRM_MODE_SCALE_ASPECT);
5123 		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5124 	}
5125 }
5126 
5127 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5128 {
5129 	intel_dp->panel_power_off_time = ktime_get_boottime();
5130 	intel_dp->last_power_on = jiffies;
5131 	intel_dp->last_backlight_off = jiffies;
5132 }
5133 
5134 static void
5135 intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
5136 			   struct intel_dp *intel_dp, struct edp_power_seq *seq)
5137 {
5138 	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5139 	struct pps_registers regs;
5140 
5141 	intel_pps_get_registers(dev_priv, intel_dp, &regs);
5142 
5143 	/* Workaround: Need to write PP_CONTROL with the unlock key as
5144 	 * the very first thing. */
5145 	pp_ctl = ironlake_get_pp_control(intel_dp);
5146 
5147 	pp_on = I915_READ(regs.pp_on);
5148 	pp_off = I915_READ(regs.pp_off);
5149 	if (!IS_GEN9_LP(dev_priv)) {
5150 		I915_WRITE(regs.pp_ctrl, pp_ctl);
5151 		pp_div = I915_READ(regs.pp_div);
5152 	}
5153 
5154 	/* Pull timing values out of registers */
5155 	seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5156 		     PANEL_POWER_UP_DELAY_SHIFT;
5157 
5158 	seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5159 		  PANEL_LIGHT_ON_DELAY_SHIFT;
5160 
5161 	seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5162 		  PANEL_LIGHT_OFF_DELAY_SHIFT;
5163 
5164 	seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5165 		   PANEL_POWER_DOWN_DELAY_SHIFT;
5166 
5167 	if (IS_GEN9_LP(dev_priv)) {
5168 		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5169 			BXT_POWER_CYCLE_DELAY_SHIFT;
5170 		if (tmp > 0)
5171 			seq->t11_t12 = (tmp - 1) * 1000;
5172 		else
5173 			seq->t11_t12 = 0;
5174 	} else {
5175 		seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5176 		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5177 	}
5178 }
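
/*
 * All delays unpacked above are in the hardware's native 100 usec
 * units, so e.g. a raw t1_t3 of 2100 means 210 ms; get_delay() in
 * intel_dp_init_panel_power_sequencer() below divides by 10 to turn
 * them into milliseconds.
 */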
5179 
5180 static void
5181 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
5182 {
5183 	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5184 		      state_name,
5185 		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
5186 }
5187 
5188 static void
5189 intel_pps_verify_state(struct drm_i915_private *dev_priv,
5190 		       struct intel_dp *intel_dp)
5191 {
5192 	struct edp_power_seq hw;
5193 	struct edp_power_seq *sw = &intel_dp->pps_delays;
5194 
5195 	intel_pps_readout_hw_state(dev_priv, intel_dp, &hw);
5196 
5197 	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
5198 	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
5199 		DRM_ERROR("PPS state mismatch\n");
5200 		intel_pps_dump_state("sw", sw);
5201 		intel_pps_dump_state("hw", &hw);
5202 	}
5203 }
5204 
5205 static void
5206 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5207 				    struct intel_dp *intel_dp)
5208 {
5209 	struct drm_i915_private *dev_priv = to_i915(dev);
5210 	struct edp_power_seq cur, vbt, spec,
5211 		*final = &intel_dp->pps_delays;
5212 
5213 	lockdep_assert_held(&dev_priv->pps_mutex);
5214 
5215 	/* already initialized? */
5216 	if (final->t11_t12 != 0)
5217 		return;
5218 
5219 	intel_pps_readout_hw_state(dev_priv, intel_dp, &cur);
5220 
5221 	intel_pps_dump_state("cur", &cur);
5222 
5223 	vbt = dev_priv->vbt.edp.pps;
5224 
5225 	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5226 	 * our hw here, which are all in 100usec. */
5227 	spec.t1_t3 = 210 * 10;
5228 	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5229 	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5230 	spec.t10 = 500 * 10;
5231 	/* This one is special and actually in units of 100ms, but zero
5232 	 * based in the hw (so we need to add 100 ms). But the sw vbt
5233 	 * table multiplies it by 1000 to put it in units of 100usec,
5234 	 * too. */
5235 	spec.t11_t12 = (510 + 100) * 10;
5236 
5237 	intel_pps_dump_state("vbt", &vbt);
5238 
5239 	/* Use the max of the register settings and vbt. If both are
5240 	 * unset, fall back to the spec limits. */
5241 #define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
5242 				       spec.field : \
5243 				       max(cur.field, vbt.field))
5244 	assign_final(t1_t3);
5245 	assign_final(t8);
5246 	assign_final(t9);
5247 	assign_final(t10);
5248 	assign_final(t11_t12);
5249 #undef assign_final
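     	/*
     	 * For one field, assign_final(t1_t3) expands (illustratively) to:
     	 *
     	 *	final->t1_t3 = (max(cur.t1_t3, vbt.t1_t3) == 0 ?
     	 *		       spec.t1_t3 :
     	 *		       max(cur.t1_t3, vbt.t1_t3));
     	 *
     	 * i.e. a delay that neither the registers nor the VBT provide
     	 * falls back to the spec limit computed above.
     	 */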
5250 
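     	/*
     	 * Convert the hw's 100 usec units to whole milliseconds, rounding
     	 * up so we err towards waiting too long rather than too little:
     	 * e.g. t1_t3 = 2100 yields DIV_ROUND_UP(2100, 10) = 210 ms.
     	 */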
5251 #define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
5252 	intel_dp->panel_power_up_delay = get_delay(t1_t3);
5253 	intel_dp->backlight_on_delay = get_delay(t8);
5254 	intel_dp->backlight_off_delay = get_delay(t9);
5255 	intel_dp->panel_power_down_delay = get_delay(t10);
5256 	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5257 #undef get_delay
5258 
5259 	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5260 		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5261 		      intel_dp->panel_power_cycle_delay);
5262 
5263 	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5264 		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5265 
5266 	/*
5267 	 * We override the HW backlight delays to 1 because we do manual waits
5268 	 * on them. For T8, even BSpec recommends doing it. For T9, if we
5269 	 * don't do this, we'll end up waiting for the backlight off delay
5270 	 * twice: once when we do the manual sleep, and once when we disable
5271 	 * the panel and wait for the PP_STATUS bit to become zero.
5272 	 */
5273 	final->t8 = 1;
5274 	final->t9 = 1;
5275 }
5276 
5277 static void
5278 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5279 					      struct intel_dp *intel_dp,
5280 					      bool force_disable_vdd)
5281 {
5282 	struct drm_i915_private *dev_priv = to_i915(dev);
5283 	u32 pp_on, pp_off, pp_div, port_sel = 0;
5284 	int div = dev_priv->rawclk_freq / 1000;
5285 	struct pps_registers regs;
5286 	enum port port = dp_to_dig_port(intel_dp)->port;
5287 	const struct edp_power_seq *seq = &intel_dp->pps_delays;
5288 
5289 	lockdep_assert_held(&dev_priv->pps_mutex);
5290 
5291 	intel_pps_get_registers(dev_priv, intel_dp, &regs);
5292 
5293 	/*
5294 	 * On some VLV machines the BIOS can leave the VDD
5295 	 * enabled even on power sequencers which aren't
5296 	 * hooked up to any port. This would mess up the
5297 	 * power domain tracking the first time we pick
5298 	 * one of these power sequencers for use since
5299 	 * edp_panel_vdd_on() would notice that the VDD was
5300 	 * already on and therefore wouldn't grab the power
5301 	 * domain reference. Disable VDD first to avoid this.
5302 	 * This also avoids spuriously turning the VDD on as
5303 	 * soon as the new power sequencer gets initialized.
5304 	 */
5305 	if (force_disable_vdd) {
5306 		u32 pp = ironlake_get_pp_control(intel_dp);
5307 
5308 		WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
5309 
5310 		if (pp & EDP_FORCE_VDD)
5311 			DRM_DEBUG_KMS("VDD already on, disabling first\n");
5312 
5313 		pp &= ~EDP_FORCE_VDD;
5314 
5315 		I915_WRITE(regs.pp_ctrl, pp);
5316 	}
5317 
5318 	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5319 		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
5320 	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5321 		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5322 	/* Compute the divisor for the pp clock, simply matching the Bspec
5323 	 * formula. */
5324 	if (IS_GEN9_LP(dev_priv)) {
5325 		pp_div = I915_READ(regs.pp_ctrl);
5326 		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5327 		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5328 				<< BXT_POWER_CYCLE_DELAY_SHIFT);
5329 	} else {
5330 		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5331 		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5332 				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
5333 	}
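     	/*
     	 * Illustrative numbers for the non-BXT branch, assuming a 24 MHz
     	 * raw clock (rawclk_freq = 24000 kHz): div = 24, so the reference
     	 * divider field becomes (100 * 24) / 2 - 1 = 1199, and a 500 ms
     	 * power cycle delay (t11_t12 = 5000 in 100 usec units) is
     	 * programmed as DIV_ROUND_UP(5000, 1000) = 5.
     	 */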
5334 
5335 	/* Haswell doesn't have any port selection bits for the panel
5336 	 * power sequencer any more. */
5337 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5338 		port_sel = PANEL_PORT_SELECT_VLV(port);
5339 	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
5340 		if (port == PORT_A)
5341 			port_sel = PANEL_PORT_SELECT_DPA;
5342 		else
5343 			port_sel = PANEL_PORT_SELECT_DPD;
5344 	}
5345 
5346 	pp_on |= port_sel;
5347 
5348 	I915_WRITE(regs.pp_on, pp_on);
5349 	I915_WRITE(regs.pp_off, pp_off);
5350 	if (IS_GEN9_LP(dev_priv))
5351 		I915_WRITE(regs.pp_ctrl, pp_div);
5352 	else
5353 		I915_WRITE(regs.pp_div, pp_div);
5354 
5355 	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5356 		      I915_READ(regs.pp_on),
5357 		      I915_READ(regs.pp_off),
5358 		      IS_GEN9_LP(dev_priv) ?
5359 		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
5360 		      I915_READ(regs.pp_div));
5361 }
5362 
5363 static void intel_dp_pps_init(struct drm_device *dev,
5364 			      struct intel_dp *intel_dp)
5365 {
5366 	struct drm_i915_private *dev_priv = to_i915(dev);
5367 
5368 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5369 		vlv_initial_power_sequencer_setup(intel_dp);
5370 	} else {
5371 		intel_dp_init_panel_power_sequencer(dev, intel_dp);
5372 		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
5373 	}
5374 }
5375 
5376 /**
5377  * intel_dp_set_drrs_state - program registers for RR switch to take effect
5378  * @dev_priv: i915 device
5379  * @crtc_state: a pointer to the active intel_crtc_state
5380  * @refresh_rate: RR to be programmed
5381  *
5382  * This function gets called when refresh rate (RR) has to be changed from
5383  * one frequency to another. Switches can be between high and low RR
5384  * supported by the panel or to any other RR based on media playback (in
5385  * this case, RR value needs to be passed from user space).
5386  *
5387  * The caller of this function needs to take a lock on dev_priv->drrs.
5388  */
5389 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
5390 				    struct intel_crtc_state *crtc_state,
5391 				    int refresh_rate)
5392 {
5393 	struct intel_encoder *encoder;
5394 	struct intel_digital_port *dig_port = NULL;
5395 	struct intel_dp *intel_dp = dev_priv->drrs.dp;
5396 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
5397 	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5398 
5399 	if (refresh_rate <= 0) {
5400 		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5401 		return;
5402 	}
5403 
5404 	if (intel_dp == NULL) {
5405 		DRM_DEBUG_KMS("DRRS not supported.\n");
5406 		return;
5407 	}
5408 
5409 	/*
5410 	 * FIXME: This needs proper synchronization with psr state for some
5411 	 * platforms that cannot have PSR and DRRS enabled at the same time.
5412 	 */
5413 
5414 	dig_port = dp_to_dig_port(intel_dp);
5415 	encoder = &dig_port->base;
5416 	intel_crtc = to_intel_crtc(encoder->base.crtc);
5417 
5418 	if (!intel_crtc) {
5419 		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5420 		return;
5421 	}
5422 
5423 	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5424 		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5425 		return;
5426 	}
5427 
5428 	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5429 			refresh_rate)
5430 		index = DRRS_LOW_RR;
5431 
5432 	if (index == dev_priv->drrs.refresh_rate_type) {
5433 		DRM_DEBUG_KMS(
5434 			"DRRS requested for previously set RR...ignoring\n");
5435 		return;
5436 	}
5437 
5438 	if (!crtc_state->base.active) {
5439 		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5440 		return;
5441 	}
5442 
5443 	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
5444 		switch (index) {
5445 		case DRRS_HIGH_RR:
5446 			intel_dp_set_m_n(intel_crtc, M1_N1);
5447 			break;
5448 		case DRRS_LOW_RR:
5449 			intel_dp_set_m_n(intel_crtc, M2_N2);
5450 			break;
5451 		case DRRS_MAX_RR:
5452 		default:
5453 			DRM_ERROR("Unsupported refresh rate type\n");
5454 		}
5455 	} else if (INTEL_GEN(dev_priv) > 6) {
5456 		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
5457 		u32 val;
5458 
5459 		val = I915_READ(reg);
5460 		if (index > DRRS_HIGH_RR) {
5461 			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5462 				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5463 			else
5464 				val |= PIPECONF_EDP_RR_MODE_SWITCH;
5465 		} else {
5466 			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5467 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5468 			else
5469 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5470 		}
5471 		I915_WRITE(reg, val);
5472 	}
5473 
5474 	dev_priv->drrs.refresh_rate_type = index;
5475 
5476 	DRM_DEBUG_KMS("eDP refresh rate set to %d Hz\n", refresh_rate);
5477 }
5478 
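     /*
      * A minimal caller sketch (hypothetical, mirroring the invalidate and
      * flush paths below): dev_priv->drrs.mutex must be held across the
      * call, and the refresh rate typically comes from the panel's fixed
      * or downclock mode:
      *
      *	mutex_lock(&dev_priv->drrs.mutex);
      *	intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
      *		intel_dp->attached_connector->panel.fixed_mode->vrefresh);
      *	mutex_unlock(&dev_priv->drrs.mutex);
      */
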
5479 /**
5480  * intel_edp_drrs_enable - init drrs struct if supported
5481  * @intel_dp: DP struct
5482  * @crtc_state: A pointer to the active crtc state.
5483  *
5484  * Initializes frontbuffer_bits and drrs.dp
5485  */
5486 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
5487 			   struct intel_crtc_state *crtc_state)
5488 {
5489 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5490 	struct drm_i915_private *dev_priv = to_i915(dev);
5491 
5492 	if (!crtc_state->has_drrs) {
5493 		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5494 		return;
5495 	}
5496 
5497 	mutex_lock(&dev_priv->drrs.mutex);
5498 	if (WARN_ON(dev_priv->drrs.dp)) {
5499 		DRM_ERROR("DRRS already enabled\n");
5500 		goto unlock;
5501 	}
5502 
5503 	dev_priv->drrs.busy_frontbuffer_bits = 0;
5504 
5505 	dev_priv->drrs.dp = intel_dp;
5506 
5507 unlock:
5508 	mutex_unlock(&dev_priv->drrs.mutex);
5509 }
5510 
5511 /**
5512  * intel_edp_drrs_disable - Disable DRRS
5513  * @intel_dp: DP struct
5514  * @old_crtc_state: Pointer to old crtc_state.
5515  *
5516  */
5517 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
5518 			    struct intel_crtc_state *old_crtc_state)
5519 {
5520 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5521 	struct drm_i915_private *dev_priv = to_i915(dev);
5522 
5523 	if (!old_crtc_state->has_drrs)
5524 		return;
5525 
5526 	mutex_lock(&dev_priv->drrs.mutex);
5527 	if (!dev_priv->drrs.dp) {
5528 		mutex_unlock(&dev_priv->drrs.mutex);
5529 		return;
5530 	}
5531 
5532 	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5533 		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
5534 			intel_dp->attached_connector->panel.fixed_mode->vrefresh);
5535 
5536 	dev_priv->drrs.dp = NULL;
5537 	mutex_unlock(&dev_priv->drrs.mutex);
5538 
5539 	cancel_delayed_work_sync(&dev_priv->drrs.work);
5540 }
5541 
5542 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5543 {
5544 	struct drm_i915_private *dev_priv =
5545 		container_of(work, typeof(*dev_priv), drrs.work.work);
5546 	struct intel_dp *intel_dp;
5547 
5548 	mutex_lock(&dev_priv->drrs.mutex);
5549 
5550 	intel_dp = dev_priv->drrs.dp;
5551 
5552 	if (!intel_dp)
5553 		goto unlock;
5554 
5555 	/*
5556 	 * The delayed work can race with an invalidate, hence we need to
5557 	 * recheck.
5558 	 */
5559 
5560 	if (dev_priv->drrs.busy_frontbuffer_bits)
5561 		goto unlock;
5562 
5563 	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
5564 		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
5565 
5566 		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5567 			intel_dp->attached_connector->panel.downclock_mode->vrefresh);
5568 	}
5569 
5570 unlock:
5571 	mutex_unlock(&dev_priv->drrs.mutex);
5572 }
5573 
5574 /**
5575  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5576  * @dev_priv: i915 device
5577  * @frontbuffer_bits: frontbuffer plane tracking bits
5578  *
5579  * This function gets called every time rendering on the given planes starts.
5580  * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
5581  *
5582  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5583  */
5584 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
5585 			       unsigned int frontbuffer_bits)
5586 {
5587 	struct drm_crtc *crtc;
5588 	enum i915_pipe pipe;
5589 
5590 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5591 		return;
5592 
5593 	cancel_delayed_work(&dev_priv->drrs.work);
5594 
5595 	mutex_lock(&dev_priv->drrs.mutex);
5596 	if (!dev_priv->drrs.dp) {
5597 		mutex_unlock(&dev_priv->drrs.mutex);
5598 		return;
5599 	}
5600 
5601 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5602 	pipe = to_intel_crtc(crtc)->pipe;
5603 
5604 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5605 	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5606 
5607 	/* invalidate means busy screen hence upclock */
5608 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5609 		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5610 			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
5611 
5612 	mutex_unlock(&dev_priv->drrs.mutex);
5613 }
5614 
5615 /**
5616  * intel_edp_drrs_flush - Restart Idleness DRRS
5617  * @dev_priv: i915 device
5618  * @frontbuffer_bits: frontbuffer plane tracking bits
5619  *
5620  * This function gets called every time rendering on the given planes has
5621  * completed or a flip on a crtc has completed, so DRRS should be upclocked
5622  * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted if no
5623  * other planes are dirty.
5624  *
5625  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5626  */
5627 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
5628 			  unsigned int frontbuffer_bits)
5629 {
5630 	struct drm_crtc *crtc;
5631 	enum i915_pipe pipe;
5632 
5633 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5634 		return;
5635 
5636 	cancel_delayed_work(&dev_priv->drrs.work);
5637 
5638 	mutex_lock(&dev_priv->drrs.mutex);
5639 	if (!dev_priv->drrs.dp) {
5640 		mutex_unlock(&dev_priv->drrs.mutex);
5641 		return;
5642 	}
5643 
5644 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5645 	pipe = to_intel_crtc(crtc)->pipe;
5646 
5647 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5648 	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5649 
5650 	/* flush means busy screen hence upclock */
5651 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5652 		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5653 				dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
5654 
5655 	/*
5656 	 * flush also means no more activity hence schedule downclock, if all
5657 	 * other fbs are quiescent too
5658 	 */
5659 	if (!dev_priv->drrs.busy_frontbuffer_bits)
5660 		schedule_delayed_work(&dev_priv->drrs.work,
5661 				msecs_to_jiffies(1000));
5662 	mutex_unlock(&dev_priv->drrs.mutex);
5663 }
5664 
5665 /**
5666  * DOC: Display Refresh Rate Switching (DRRS)
5667  *
5668  * Display Refresh Rate Switching (DRRS) is a power conservation feature
5669  * which enables switching between low and high refresh rates
5670  * dynamically, based on the usage scenario. This feature is applicable
5671  * for internal panels.
5672  *
5673  * Indication that the panel supports DRRS is given by the panel EDID, which
5674  * would list multiple refresh rates for one resolution.
5675  *
5676  * DRRS is of 2 types - static and seamless.
5677  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5678  * (may appear as a blink on screen) and is used in dock-undock scenario.
5679  * Seamless DRRS involves changing RR without any visual effect to the user
5680  * and can be used during normal system usage. This is done by programming
5681  * certain registers.
5682  *
5683  * Support for static/seamless DRRS may be indicated in the VBT based on
5684  * inputs from the panel spec.
5685  *
5686  * DRRS saves power by switching to low RR based on usage scenarios.
5687  *
5688  * The implementation is based on frontbuffer tracking.  When
5689  * there is a disturbance on the screen triggered by user activity or a periodic
5690  * system activity, DRRS is disabled (RR is changed to high RR).  When there is
5691  * no movement on screen, after a timeout of 1 second, a switch to low RR is
5692  * made.
5693  *
5694  * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
5695  * and intel_edp_drrs_flush() are called.
5696  *
5697  * DRRS can be further extended to support other internal panels and also
5698  * the scenario of video playback wherein RR is set based on the rate
5699  * requested by userspace.
5700  */
5701 
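     /*
      * A rough sketch of the interaction described above (the real call
      * sites live in the frontbuffer tracking code, not in this file):
      *
      *	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
      *	... rendering / flip ...
      *	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
      *
      * After roughly one second with no busy frontbuffer bits, the delayed
      * work in intel_edp_drrs_downclock_work() drops to the low RR.
      */
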
5702 /**
5703  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5704  * @intel_connector: eDP connector
5705  * @fixed_mode: preferred mode of panel
5706  *
5707  * This function is called only once, at driver load, to initialize the
5708  * basic DRRS work and mutex.
5709  *
5710  * Returns:
5711  * Downclock mode if panel supports it, else return NULL.
5712  * DRRS support is determined by the presence of downclock mode (apart
5713  * from VBT setting).
5714  */
5715 static struct drm_display_mode *
5716 intel_dp_drrs_init(struct intel_connector *intel_connector,
5717 		struct drm_display_mode *fixed_mode)
5718 {
5719 	struct drm_connector *connector = &intel_connector->base;
5720 	struct drm_device *dev = connector->dev;
5721 	struct drm_i915_private *dev_priv = to_i915(dev);
5722 	struct drm_display_mode *downclock_mode = NULL;
5723 
5724 	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5725 	lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE);
5726 
5727 	if (INTEL_GEN(dev_priv) <= 6) {
5728 		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5729 		return NULL;
5730 	}
5731 
5732 	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5733 		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5734 		return NULL;
5735 	}
5736 
5737 	downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode,
5738 						    connector);
5739 
5740 	if (!downclock_mode) {
5741 		DRM_DEBUG_KMS("Downclock mode not found. DRRS not supported\n");
5742 		return NULL;
5743 	}
5744 
5745 	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5746 
5747 	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5748 	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5749 	return downclock_mode;
5750 }
5751 
5752 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5753 				     struct intel_connector *intel_connector)
5754 {
5755 	struct drm_connector *connector = &intel_connector->base;
5756 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5757 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5758 	struct drm_device *dev = intel_encoder->base.dev;
5759 	struct drm_i915_private *dev_priv = to_i915(dev);
5760 	struct drm_display_mode *fixed_mode = NULL;
5761 	struct drm_display_mode *downclock_mode = NULL;
5762 	bool has_dpcd;
5763 	struct drm_display_mode *scan;
5764 	struct edid *edid;
5765 	enum i915_pipe pipe = INVALID_PIPE;
5766 
5767 	if (!is_edp(intel_dp))
5768 		return true;
5769 
5770 	/*
5771 	 * On IBX/CPT we may get here with LVDS already registered. Since the
5772 	 * driver uses the only internal power sequencer available for both
5773 	 * eDP and LVDS, bail out early in this case to prevent interfering
5774 	 * with an already powered-on LVDS power sequencer.
5775 	 */
5776 	if (intel_get_lvds_encoder(dev)) {
5777 		WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
5778 		DRM_INFO("LVDS was detected, not registering eDP\n");
5779 
5780 		return false;
5781 	}
5782 
5783 	pps_lock(intel_dp);
5784 
5785 	intel_dp_init_panel_power_timestamps(intel_dp);
5786 	intel_dp_pps_init(dev, intel_dp);
5787 	intel_edp_panel_vdd_sanitize(intel_dp);
5788 
5789 	pps_unlock(intel_dp);
5790 
5791 	/* Cache DPCD and EDID for edp. */
5792 	has_dpcd = intel_edp_init_dpcd(intel_dp);
5793 
5794 	if (!has_dpcd) {
5795 		/* if this fails, presume the device is a ghost */
5796 		DRM_INFO("failed to retrieve link info, disabling eDP\n");
5797 		goto out_vdd_off;
5798 	}
5799 
5800 	mutex_lock(&dev->mode_config.mutex);
5801 	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5802 	if (edid) {
5803 		if (drm_add_edid_modes(connector, edid)) {
5804 			drm_mode_connector_update_edid_property(connector,
5805 								edid);
5806 			drm_edid_to_eld(connector, edid);
5807 		} else {
5808 			kfree(edid);
5809 			edid = ERR_PTR(-EINVAL);
5810 		}
5811 	} else {
5812 		edid = ERR_PTR(-ENOENT);
5813 	}
5814 	intel_connector->edid = edid;
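     	/*
     	 * Note the convention: intel_connector->edid is now either a
     	 * valid EDID, ERR_PTR(-EINVAL) for a corrupt one, or
     	 * ERR_PTR(-ENOENT) when none was read, so consumers are expected
     	 * to check with IS_ERR() rather than for NULL, e.g.
     	 * (illustrative):
     	 *
     	 *	if (!IS_ERR_OR_NULL(intel_connector->edid))
     	 *		... use the cached EDID ...
     	 */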
5815 
5816 	/* prefer fixed mode from EDID if available */
5817 	list_for_each_entry(scan, &connector->probed_modes, head) {
5818 		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5819 			fixed_mode = drm_mode_duplicate(dev, scan);
5820 			downclock_mode = intel_dp_drrs_init(
5821 						intel_connector, fixed_mode);
5822 			break;
5823 		}
5824 	}
5825 
5826 	/* fallback to VBT if available for eDP */
5827 	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5828 		fixed_mode = drm_mode_duplicate(dev,
5829 					dev_priv->vbt.lfp_lvds_vbt_mode);
5830 		if (fixed_mode) {
5831 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5832 			connector->display_info.width_mm = fixed_mode->width_mm;
5833 			connector->display_info.height_mm = fixed_mode->height_mm;
5834 		}
5835 	}
5836 	mutex_unlock(&dev->mode_config.mutex);
5837 
5838 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5839 		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5840 		register_reboot_notifier(&intel_dp->edp_notifier);
5841 
5842 		/*
5843 		 * Figure out the current pipe for the initial backlight setup.
5844 		 * If the current pipe isn't valid, try the PPS pipe, and if that
5845 		 * fails just assume pipe A.
5846 		 */
5847 		pipe = vlv_active_pipe(intel_dp);
5848 
5849 		if (pipe != PIPE_A && pipe != PIPE_B)
5850 			pipe = intel_dp->pps_pipe;
5851 
5852 		if (pipe != PIPE_A && pipe != PIPE_B)
5853 			pipe = PIPE_A;
5854 
5855 		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5856 			      pipe_name(pipe));
5857 	}
5858 
5859 	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5860 	intel_connector->panel.backlight.power = intel_edp_backlight_power;
5861 	intel_panel_setup_backlight(connector, pipe);
5862 
5863 	return true;
5864 
5865 out_vdd_off:
5866 	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5867 	/*
5868 	 * vdd might still be enabled due to the delayed vdd off.
5869 	 * Make sure vdd is actually turned off here.
5870 	 */
5871 	pps_lock(intel_dp);
5872 	edp_panel_vdd_off_sync(intel_dp);
5873 	pps_unlock(intel_dp);
5874 
5875 	return false;
5876 }
5877 
5878 /* Set up the hotplug pin and aux power domain. */
5879 static void
5880 intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port)
5881 {
5882 	struct intel_encoder *encoder = &intel_dig_port->base;
5883 	struct intel_dp *intel_dp = &intel_dig_port->dp;
5884 
5885 	switch (intel_dig_port->port) {
5886 	case PORT_A:
5887 		encoder->hpd_pin = HPD_PORT_A;
5888 		intel_dp->aux_power_domain = POWER_DOMAIN_AUX_A;
5889 		break;
5890 	case PORT_B:
5891 		encoder->hpd_pin = HPD_PORT_B;
5892 		intel_dp->aux_power_domain = POWER_DOMAIN_AUX_B;
5893 		break;
5894 	case PORT_C:
5895 		encoder->hpd_pin = HPD_PORT_C;
5896 		intel_dp->aux_power_domain = POWER_DOMAIN_AUX_C;
5897 		break;
5898 	case PORT_D:
5899 		encoder->hpd_pin = HPD_PORT_D;
5900 		intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
5901 		break;
5902 	case PORT_E:
5903 		encoder->hpd_pin = HPD_PORT_E;
5904 
5905 		/* FIXME: Check VBT for actual wiring of PORT E */
5906 		intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
5907 		break;
5908 	default:
5909 		MISSING_CASE(intel_dig_port->port);
5910 	}
5911 }
5912 
5913 bool
5914 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5915 			struct intel_connector *intel_connector)
5916 {
5917 	struct drm_connector *connector = &intel_connector->base;
5918 	struct intel_dp *intel_dp = &intel_dig_port->dp;
5919 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5920 	struct drm_device *dev = intel_encoder->base.dev;
5921 	struct drm_i915_private *dev_priv = to_i915(dev);
5922 	enum port port = intel_dig_port->port;
5923 	int type;
5924 
5925 	if (WARN(intel_dig_port->max_lanes < 1,
5926 		 "Not enough lanes (%d) for DP on port %c\n",
5927 		 intel_dig_port->max_lanes, port_name(port)))
5928 		return false;
5929 
5930 	intel_dp->reset_link_params = true;
5931 	intel_dp->pps_pipe = INVALID_PIPE;
5932 	intel_dp->active_pipe = INVALID_PIPE;
5933 
5934 	/* intel_dp vfuncs */
5935 	if (INTEL_GEN(dev_priv) >= 9)
5936 		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5937 	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5938 		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5939 	else if (HAS_PCH_SPLIT(dev_priv))
5940 		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5941 	else
5942 		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
5943 
5944 	if (INTEL_GEN(dev_priv) >= 9)
5945 		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5946 	else
5947 		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
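     	/*
     	 * The assignments above form a small per-platform dispatch table:
     	 * e.g. on SKL+ an AUX transfer ends up in skl_get_aux_send_ctl()
     	 * without branching on the platform for every transfer (the
     	 * actual call sites are in the AUX channel code earlier in this
     	 * file).
     	 */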
5948 
5949 	if (HAS_DDI(dev_priv))
5950 		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5951 
5952 	/* Preserve the current hw state. */
5953 	intel_dp->DP = I915_READ(intel_dp->output_reg);
5954 	intel_dp->attached_connector = intel_connector;
5955 
5956 	if (intel_dp_is_edp(dev_priv, port))
5957 		type = DRM_MODE_CONNECTOR_eDP;
5958 	else
5959 		type = DRM_MODE_CONNECTOR_DisplayPort;
5960 
5961 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5962 		intel_dp->active_pipe = vlv_active_pipe(intel_dp);
5963 
5964 	/*
5965 	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5966 	 * for DP the encoder type can be set by the caller to
5967 	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5968 	 */
5969 	if (type == DRM_MODE_CONNECTOR_eDP)
5970 		intel_encoder->type = INTEL_OUTPUT_EDP;
5971 
5972 	/* eDP only on port B and/or C on vlv/chv */
5973 	if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
5974 		    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
5975 		return false;
5976 
5977 	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5978 			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5979 			port_name(port));
5980 
5981 	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5982 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5983 
5984 	connector->interlace_allowed = true;
5985 	connector->doublescan_allowed = 0;
5986 
5987 	intel_dp_init_connector_port_info(intel_dig_port);
5988 
5989 	intel_dp_aux_init(intel_dp);
5990 
5991 	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5992 			  edp_panel_vdd_work);
5993 
5994 	intel_connector_attach_encoder(intel_connector, intel_encoder);
5995 
5996 	if (HAS_DDI(dev_priv))
5997 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5998 	else
5999 		intel_connector->get_hw_state = intel_connector_get_hw_state;
6000 
6001 	/* init MST on ports that can support it */
6002 	if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) &&
6003 	    (port == PORT_B || port == PORT_C || port == PORT_D))
6004 		intel_dp_mst_encoder_init(intel_dig_port,
6005 					  intel_connector->base.base.id);
6006 
6007 	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6008 		intel_dp_aux_fini(intel_dp);
6009 		intel_dp_mst_encoder_cleanup(intel_dig_port);
6010 		goto fail;
6011 	}
6012 
6013 	intel_dp_add_properties(intel_dp, connector);
6014 
6015 	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6016 	 * 0xd.  Failure to do so will result in spurious interrupts being
6017 	 * generated on the port when a cable is not attached.
6018 	 */
6019 	if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
6020 		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
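     		/*
     		 * For illustration: a readback of 0x12345678 becomes
     		 * 0x1234567d below -- only bits 3:0 change.
     		 */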
6021 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6022 	}
6023 
6024 	return true;
6025 
6026 fail:
6027 	drm_connector_cleanup(connector);
6028 
6029 	return false;
6030 }
6031 
6032 bool intel_dp_init(struct drm_i915_private *dev_priv,
6033 		   i915_reg_t output_reg,
6034 		   enum port port)
6035 {
6036 	struct intel_digital_port *intel_dig_port;
6037 	struct intel_encoder *intel_encoder;
6038 	struct drm_encoder *encoder;
6039 	struct intel_connector *intel_connector;
6040 
6041 	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6042 	if (!intel_dig_port)
6043 		return false;
6044 
6045 	intel_connector = intel_connector_alloc();
6046 	if (!intel_connector)
6047 		goto err_connector_alloc;
6048 
6049 	intel_encoder = &intel_dig_port->base;
6050 	encoder = &intel_encoder->base;
6051 
6052 	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
6053 			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
6054 			     "DP %c", port_name(port)))
6055 		goto err_encoder_init;
6056 
6057 	intel_encoder->compute_config = intel_dp_compute_config;
6058 	intel_encoder->disable = intel_disable_dp;
6059 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
6060 	intel_encoder->get_config = intel_dp_get_config;
6061 	intel_encoder->suspend = intel_dp_encoder_suspend;
6062 	if (IS_CHERRYVIEW(dev_priv)) {
6063 		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6064 		intel_encoder->pre_enable = chv_pre_enable_dp;
6065 		intel_encoder->enable = vlv_enable_dp;
6066 		intel_encoder->post_disable = chv_post_disable_dp;
6067 		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6068 	} else if (IS_VALLEYVIEW(dev_priv)) {
6069 		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6070 		intel_encoder->pre_enable = vlv_pre_enable_dp;
6071 		intel_encoder->enable = vlv_enable_dp;
6072 		intel_encoder->post_disable = vlv_post_disable_dp;
6073 	} else {
6074 		intel_encoder->pre_enable = g4x_pre_enable_dp;
6075 		intel_encoder->enable = g4x_enable_dp;
6076 		if (INTEL_GEN(dev_priv) >= 5)
6077 			intel_encoder->post_disable = ilk_post_disable_dp;
6078 	}
6079 
6080 	intel_dig_port->port = port;
6081 	intel_dig_port->dp.output_reg = output_reg;
6082 	intel_dig_port->max_lanes = 4;
6083 
6084 	intel_encoder->type = INTEL_OUTPUT_DP;
6085 	intel_encoder->power_domain = intel_port_to_power_domain(port);
6086 	if (IS_CHERRYVIEW(dev_priv)) {
6087 		if (port == PORT_D)
6088 			intel_encoder->crtc_mask = 1 << 2;
6089 		else
6090 			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6091 	} else {
6092 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6093 	}
6094 	intel_encoder->cloneable = 0;
6095 	intel_encoder->port = port;
6096 
6097 	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6098 	dev_priv->hotplug.irq_port[port] = intel_dig_port;
6099 
6100 	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6101 		goto err_init_connector;
6102 
6103 	return true;
6104 
6105 err_init_connector:
6106 	drm_encoder_cleanup(encoder);
6107 err_encoder_init:
6108 	kfree(intel_connector);
6109 err_connector_alloc:
6110 	kfree(intel_dig_port);
6111 	return false;
6112 }
6113 
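     /*
      * Hypothetical caller sketch (the real calls live in the display
      * setup code), e.g. for port B:
      *
      *	if (I915_READ(DP_B) & DP_DETECTED)
      *		intel_dp_init(dev_priv, DP_B, PORT_B);
      *
      * On failure, intel_dp_init() unwinds everything it allocated itself
      * and returns false.
      */
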
6114 void intel_dp_mst_suspend(struct drm_device *dev)
6115 {
6116 	struct drm_i915_private *dev_priv = to_i915(dev);
6117 	int i;
6118 
6119 	/* disable MST */
6120 	for (i = 0; i < I915_MAX_PORTS; i++) {
6121 		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6122 
6123 		if (!intel_dig_port || !intel_dig_port->dp.can_mst)
6124 			continue;
6125 
6126 		if (intel_dig_port->dp.is_mst)
6127 			drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6128 	}
6129 }
6130 
6131 void intel_dp_mst_resume(struct drm_device *dev)
6132 {
6133 	struct drm_i915_private *dev_priv = to_i915(dev);
6134 	int i;
6135 
6136 	for (i = 0; i < I915_MAX_PORTS; i++) {
6137 		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6138 		int ret;
6139 
6140 		if (!intel_dig_port || !intel_dig_port->dp.can_mst)
6141 			continue;
6142 
6143 		ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6144 		if (ret)
6145 			intel_dp_check_mst_status(&intel_dig_port->dp);
6146 	}
6147 }
6148