1 /*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/err.h>
32 #include <linux/module.h>
33 #include <linux/printk.h>
34 #include <linux/pm_qos.h>
35 #include <drm/drmP.h>
36 #include <drm/drm_crtc.h>
37 #include <drm/drm_crtc_helper.h>
38 #include <drm/drm_edid.h>
39 #include "intel_drv.h"
40 #include <drm/i915_drm.h>
41 #include "i915_drv.h"
42
43 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
44
/* Pairing of a DP link bandwidth code with the DPLL divider values
 * that produce that link rate on a given platform. */
struct dp_link_dpll {
	int link_bw;		/* DP_LINK_BW_* code */
	struct dpll dpll;	/* PLL dividers for this rate */
};
49
/* DPLL dividers for the supported DP link rates on gen4 (G4X). */
static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,	/* 1.62 GHz/lane */
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,	/* 2.7 GHz/lane */
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
56
/* DPLL dividers for the supported DP link rates on PCH-split platforms. */
static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,	/* 1.62 GHz/lane */
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,	/* 2.7 GHz/lane */
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
63
/* DPLL dividers for the supported DP link rates on Valleyview. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,	/* 1.62 GHz/lane */
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,	/* 2.7 GHz/lane */
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
70
71 /**
72 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
73 * @intel_dp: DP struct
74 *
75 * If a CPU or PCH DP output is attached to an eDP panel, this function
76 * will return true, and false otherwise.
77 */
is_edp(struct intel_dp * intel_dp)78 static bool is_edp(struct intel_dp *intel_dp)
79 {
80 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
81
82 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
83 }
84
intel_dp_to_dev(struct intel_dp * intel_dp)85 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
86 {
87 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
88
89 return intel_dig_port->base.base.dev;
90 }
91
intel_attached_dp(struct drm_connector * connector)92 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
93 {
94 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
95 }
96
97 static void intel_dp_link_down(struct intel_dp *intel_dp);
98 static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
99 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
100
/*
 * Return the highest DP link bandwidth code (DP_LINK_BW_*) usable by
 * this source/sink pair.  Reads the sink's advertised maximum from the
 * DPCD and clamps it to what the source can drive: 5.4GHz is only
 * allowed on HSW (non-ULX) or gen8+ sources talking to a DPCD 1.2+
 * sink; an unrecognized value falls back to 1.62GHz with a warning.
 */
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
	struct drm_device *dev = intel_dp->attached_connector->base.dev;

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		break;
	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
		if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
		     INTEL_INFO(dev)->gen >= 8) &&
		    intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
			max_link_bw = DP_LINK_BW_5_4;
		else
			max_link_bw = DP_LINK_BW_2_7;
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}
127
intel_dp_max_lane_count(struct intel_dp * intel_dp)128 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
129 {
130 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
131 struct drm_device *dev = intel_dig_port->base.base.dev;
132 u8 source_max, sink_max;
133
134 source_max = 4;
135 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
136 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
137 source_max = 2;
138
139 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
140
141 return min(source_max, sink_max);
142 }
143
144 /*
145 * The units on the numbers in the next two are... bizarre. Examples will
146 * make it clearer; this one parallels an example in the eDP spec.
147 *
148 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
149 *
150 * 270000 * 1 * 8 / 10 == 216000
151 *
152 * The actual data capacity of that configuration is 2.16Gbit/s, so the
153 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
154 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
155 * 119000. At 18bpp that's 2142000 kilobits per second.
156 *
157 * Thus the strange-looking division by 10 in intel_dp_link_required, to
158 * get the result in decakilobits instead of kilobits.
159 */
160
/*
 * Required link bandwidth, in decakilobits/second, to carry a mode
 * with @pixel_clock (kHz) at @bpp bits per pixel.  Rounds up so the
 * link is never under-provisioned (see the units note above).
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	return (kilobits + 9) / 10;	/* round up to decakilobits */
}
166
/*
 * Usable data rate, in decakilobits/second, of a link running at
 * @max_link_clock (kHz symbol rate) over @max_lanes lanes.  8b/10b
 * channel coding leaves 80% of the raw bit rate for data.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw = max_link_clock * max_lanes;

	return raw * 8 / 10;
}
172
173 static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector * connector,struct drm_display_mode * mode)174 intel_dp_mode_valid(struct drm_connector *connector,
175 struct drm_display_mode *mode)
176 {
177 struct intel_dp *intel_dp = intel_attached_dp(connector);
178 struct intel_connector *intel_connector = to_intel_connector(connector);
179 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
180 int target_clock = mode->clock;
181 int max_rate, mode_rate, max_lanes, max_link_clock;
182
183 if (is_edp(intel_dp) && fixed_mode) {
184 if (mode->hdisplay > fixed_mode->hdisplay)
185 return MODE_PANEL;
186
187 if (mode->vdisplay > fixed_mode->vdisplay)
188 return MODE_PANEL;
189
190 target_clock = fixed_mode->clock;
191 }
192
193 max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
194 max_lanes = intel_dp_max_lane_count(intel_dp);
195
196 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
197 mode_rate = intel_dp_link_required(target_clock, 18);
198
199 if (mode_rate > max_rate)
200 return MODE_CLOCK_HIGH;
201
202 if (mode->clock < 10000)
203 return MODE_CLOCK_LOW;
204
205 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
206 return MODE_H_ILLEGAL;
207
208 return MODE_OK;
209 }
210
/*
 * Pack up to four bytes from @src, most significant byte first, into
 * a single 32-bit word as expected by the AUX channel data registers.
 */
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i, n = src_bytes > 4 ? 4 : src_bytes;

	for (i = 0; i < n; i++)
		v |= (uint32_t)src[i] << (24 - i * 8);
	return v;
}
223
/*
 * Unpack a 32-bit AUX data-register word into up to four bytes of
 * @dst, most significant byte first (inverse of pack_aux()).
 */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i, n = dst_bytes > 4 ? 4 : dst_bytes;

	for (i = 0; i < n; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
233
234 /* hrawclock is 1/4 the FSB frequency */
235 static int
intel_hrawclk(struct drm_device * dev)236 intel_hrawclk(struct drm_device *dev)
237 {
238 struct drm_i915_private *dev_priv = dev->dev_private;
239 uint32_t clkcfg;
240
241 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
242 if (IS_VALLEYVIEW(dev))
243 return 200;
244
245 clkcfg = I915_READ(CLKCFG);
246 switch (clkcfg & CLKCFG_FSB_MASK) {
247 case CLKCFG_FSB_400:
248 return 100;
249 case CLKCFG_FSB_533:
250 return 133;
251 case CLKCFG_FSB_667:
252 return 166;
253 case CLKCFG_FSB_800:
254 return 200;
255 case CLKCFG_FSB_1067:
256 return 266;
257 case CLKCFG_FSB_1333:
258 return 333;
259 /* these two are just a guess; one of them might be right */
260 case CLKCFG_FSB_1600:
261 case CLKCFG_FSB_1600_ALT:
262 return 400;
263 default:
264 return 133;
265 }
266 }
267
268 static void
269 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
270 struct intel_dp *intel_dp,
271 struct edp_power_seq *out);
272 static void
273 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
274 struct intel_dp *intel_dp,
275 struct edp_power_seq *out);
276
277 static enum i915_pipe
vlv_power_sequencer_pipe(struct intel_dp * intel_dp)278 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
279 {
280 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
281 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
282 struct drm_device *dev = intel_dig_port->base.base.dev;
283 struct drm_i915_private *dev_priv = dev->dev_private;
284 enum port port = intel_dig_port->port;
285 enum i915_pipe pipe;
286
287 /* modeset should have pipe */
288 if (crtc)
289 return to_intel_crtc(crtc)->pipe;
290
291 /* init time, try to find a pipe with this port selected */
292 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
293 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
294 PANEL_PORT_SELECT_MASK;
295 if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
296 return pipe;
297 if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
298 return pipe;
299 }
300
301 /* shrug */
302 return PIPE_A;
303 }
304
_pp_ctrl_reg(struct intel_dp * intel_dp)305 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
306 {
307 struct drm_device *dev = intel_dp_to_dev(intel_dp);
308
309 if (HAS_PCH_SPLIT(dev))
310 return PCH_PP_CONTROL;
311 else
312 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
313 }
314
_pp_stat_reg(struct intel_dp * intel_dp)315 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
316 {
317 struct drm_device *dev = intel_dp_to_dev(intel_dp);
318
319 if (HAS_PCH_SPLIT(dev))
320 return PCH_PP_STATUS;
321 else
322 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
323 }
324
edp_have_panel_power(struct intel_dp * intel_dp)325 static bool edp_have_panel_power(struct intel_dp *intel_dp)
326 {
327 struct drm_device *dev = intel_dp_to_dev(intel_dp);
328 struct drm_i915_private *dev_priv = dev->dev_private;
329
330 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
331 }
332
edp_have_panel_vdd(struct intel_dp * intel_dp)333 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
334 {
335 struct drm_device *dev = intel_dp_to_dev(intel_dp);
336 struct drm_i915_private *dev_priv = dev->dev_private;
337
338 return !dev_priv->pm.suspended &&
339 (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
340 }
341
342 static void
intel_dp_check_edp(struct intel_dp * intel_dp)343 intel_dp_check_edp(struct intel_dp *intel_dp)
344 {
345 struct drm_device *dev = intel_dp_to_dev(intel_dp);
346 struct drm_i915_private *dev_priv = dev->dev_private;
347
348 if (!is_edp(intel_dp))
349 return;
350
351 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
352 WARN(1, "eDP powered off while attempting aux channel communication.\n");
353 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
354 I915_READ(_pp_stat_reg(intel_dp)),
355 I915_READ(_pp_ctrl_reg(intel_dp)));
356 }
357 }
358
/*
 * Wait (10ms max) for the pending AUX transaction to finish, i.e. for
 * DP_AUX_CH_CTL_SEND_BUSY to clear.  Uses the AUX/GMBUS completion
 * interrupt when available, otherwise polls atomically.  Returns the
 * last-read AUX_CH_CTL status word; on timeout an error is logged and
 * the stale (still-busy) status is returned for the caller to inspect.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* C re-reads the control register and tests the busy bit. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
#ifdef __NetBSD__
	/* NetBSD: interrupt wait needs the gmbus spin lock; fall back to
	 * atomic polling during early boot (cold) or without an irq. */
	if (has_aux_irq && !cold) {
		int ret;
		spin_lock(&dev_priv->gmbus_wait_lock);
		DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret,
		    &dev_priv->gmbus_wait_queue, &dev_priv->gmbus_wait_lock,
		    msecs_to_jiffies_timeout(10),
		    C);
		if (ret < 0)		/* Failure: pretend same as done. */
			done = true;
		else if (ret == 0)	/* Timed out: not done. */
			done = false;
		else			/* Succeeded (ret > 0): done. */
			done = true;
		spin_unlock(&dev_priv->gmbus_wait_lock);
	} else {
		done = wait_for_atomic(C, 10) == 0;
	}
#else
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
#endif
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
402
i9xx_get_aux_clock_divider(struct intel_dp * intel_dp,int index)403 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
404 {
405 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
406 struct drm_device *dev = intel_dig_port->base.base.dev;
407
408 /*
409 * The clock divider is based off the hrawclk, and would like to run at
410 * 2MHz. So, take the hrawclk value and divide by 2 and use that
411 */
412 return index ? 0 : intel_hrawclk(dev) / 2;
413 }
414
ilk_get_aux_clock_divider(struct intel_dp * intel_dp,int index)415 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
416 {
417 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
418 struct drm_device *dev = intel_dig_port->base.base.dev;
419
420 if (index)
421 return 0;
422
423 if (intel_dig_port->port == PORT_A) {
424 if (IS_GEN6(dev) || IS_GEN7(dev))
425 return 200; /* SNB & IVB eDP input clock at 400Mhz */
426 else
427 return 225; /* eDP input clock at 450Mhz */
428 } else {
429 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
430 }
431 }
432
hsw_get_aux_clock_divider(struct intel_dp * intel_dp,int index)433 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
434 {
435 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
436 struct drm_device *dev = intel_dig_port->base.base.dev;
437 struct drm_i915_private *dev_priv = dev->dev_private;
438
439 if (intel_dig_port->port == PORT_A) {
440 if (index)
441 return 0;
442 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
443 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
444 /* Workaround for non-ULT HSW */
445 switch (index) {
446 case 0: return 63;
447 case 1: return 72;
448 default: return 0;
449 }
450 } else {
451 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
452 }
453 }
454
/* VLV AUX always runs from a fixed 200MHz reference: divider is 100. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
459
/*
 * Build the AUX_CH_CTL word that kicks off a transaction on pre-DDI
 * hardware: busy/done/error bits, timeout select, message size,
 * precharge time and the 2x bit-clock divider.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	/* SNB uses a shorter precharge (3 x 2us) than later parts. */
	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* BDW eDP (port A) gets the longer 600us AUX timeout. */
	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
489
/*
 * Perform one raw AUX channel transaction: load @send_bytes from
 * @send into the data registers, start the transfer, and copy up to
 * @recv_size reply bytes into @recv.  Returns the number of bytes
 * received, or a negative errno: -EBUSY for a stuck/unfinished
 * channel, -EIO on receive error, -ETIMEDOUT when the sink does not
 * answer, -E2BIG for oversized messages.  Handles eDP VDD, PM QoS and
 * display runtime PM around the transfer.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;	/* data registers follow the ctl reg */
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	/* On eDP the panel may need VDD forced on to answer AUX. */
	vdd = _edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry the transfer with each supported AUX clock divider
	 * (platform hook returns 0 when the list is exhausted). */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   pack_aux(send + i, send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	return ret;
}
618
#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux transfer hook: translate a drm_dp_aux_msg into the raw
 * byte stream intel_dp_aux_ch() expects (4-byte header followed by
 * the payload) and decode the sink's reply code into msg->reply.
 * Returns the payload size on success or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* AUX header: request, 20-bit address, transfer length minus one. */
	txbuf[0] = msg->request << 4;
	txbuf[1] = msg->address >> 8;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		/* A zero-sized write is an address-only transaction. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 1;	/* just the reply code */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			/* Return payload size. */
			ret = msg->size;
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;	/* reply code + payload */

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
683
/*
 * Set up the AUX channel for @intel_dp: select the AUX_CH_CTL
 * register for the port, register the drm_dp_aux i2c adapter, and
 * (on Linux only) publish a sysfs link from the connector to the
 * DDC device.  Registration failure is logged but not fatal.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();	/* DP on an unexpected port */
	}

	/* Pre-DDI parts address the AUX channel relative to the port reg. */
	if (!HAS_DDI(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

#ifdef __NetBSD__
	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      device_xname(connector->base.dev->dev));
#else
	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);
#endif

	ret = drm_dp_aux_register_i2c_bus(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register_i2c_bus() for %s failed (%d)\n",
			  name, ret);
		return;
	}

#ifndef __NetBSD__
	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
	}
#endif
}
746
747 static void
intel_dp_connector_unregister(struct intel_connector * intel_connector)748 intel_dp_connector_unregister(struct intel_connector *intel_connector)
749 {
750 #ifndef __NetBSD__
751 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
752 #endif
753
754 #ifndef __NetBSD__
755 sysfs_remove_link(&intel_connector->base.kdev->kobj,
756 intel_dp->aux.ddc.dev.kobj.name);
757 #endif
758 intel_connector_unregister(intel_connector);
759 }
760
761 static void
intel_dp_set_clock(struct intel_encoder * encoder,struct intel_crtc_config * pipe_config,int link_bw)762 intel_dp_set_clock(struct intel_encoder *encoder,
763 struct intel_crtc_config *pipe_config, int link_bw)
764 {
765 struct drm_device *dev = encoder->base.dev;
766 const struct dp_link_dpll *divisor = NULL;
767 int i, count = 0;
768
769 if (IS_G4X(dev)) {
770 divisor = gen4_dpll;
771 count = ARRAY_SIZE(gen4_dpll);
772 } else if (IS_HASWELL(dev)) {
773 /* Haswell has special-purpose DP DDI clocks. */
774 } else if (HAS_PCH_SPLIT(dev)) {
775 divisor = pch_dpll;
776 count = ARRAY_SIZE(pch_dpll);
777 } else if (IS_VALLEYVIEW(dev)) {
778 divisor = vlv_dpll;
779 count = ARRAY_SIZE(vlv_dpll);
780 }
781
782 if (divisor && count) {
783 for (i = 0; i < count; i++) {
784 if (link_bw == divisor[i].link_bw) {
785 pipe_config->dpll = divisor[i].dpll;
786 pipe_config->clock_set = true;
787 break;
788 }
789 }
790 }
791 }
792
/*
 * Compute the DP link configuration (lane count, link rate, pipe bpp,
 * m/n values) for the mode in @pipe_config.  Walks bpp downward from
 * the pipe bpp to 6bpc, and for each bpp tries increasing lane counts
 * and link rates until the mode's bandwidth fits.  eDP panels may
 * constrain bpp, lane count and rate via VBT.  Returns false when no
 * combination can carry the mode.
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
	int bpp, mode_rate;
	static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
	int link_avail, link_clock;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;

	/* eDP: replace the requested mode with the panel's fixed mode and
	 * set up panel fitting to scale into it. */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		if (IS_BROADWELL(dev)) {
			/* Yes, it's an ugly hack. */
			min_lane_count = max_lane_count;
			DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
				      min_lane_count);
		} else if (dev_priv->vbt.edp_lanes) {
			min_lane_count = min(dev_priv->vbt.edp_lanes,
					     max_lane_count);
			DRM_DEBUG_KMS("using min %u lanes per VBT\n",
				      min_lane_count);
		}

		if (dev_priv->vbt.edp_rate) {
			min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
			DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
				      bws[min_clock]);
		}
	}

	/* Search: highest bpp first, then fewest lanes, then lowest rate. */
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
			for (clock = min_clock; clock <= max_clock; clock++) {
				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->link_bw = bws[clock];
	intel_dp->lane_count = lane_count;
	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
922
/*
 * Program the CPU eDP PLL frequency select in DP_A to match the port
 * clock chosen for the attached CRTC (162MHz vs 270MHz link), and
 * mirror the selection into the cached intel_dp->DP value.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config.port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	/* Give the PLL time to settle before the port is enabled. */
	udelay(500);
}
952
/*
 * Build the DP port register value (cached in intel_dp->DP) for the
 * configured mode: lane count, sync polarity, audio, enhanced
 * framing, pipe select — handling the IBX/CPU, IVB-CPU and CPT
 * register layout differences — and program the CPU eDP PLL where
 * needed.  The value is written to the hardware later, at enable time.
 */
static void intel_dp_mode_set(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(&encoder->base, adjusted_mode);
	}

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		/* IVB CPU eDP: CPT-style layout on the CPU port. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		/* IBX/CPU layout. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;
	} else {
		/* CPT PCH: most bits live in TRANS_DP_CTL instead. */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}

	if (port == PORT_A && !IS_VALLEYVIEW(dev))
		ironlake_set_pll_cpu_edp(intel_dp);
}
1030
/*
 * Panel power sequencer poll targets for wait_panel_status(): each pair
 * selects the PP_STATUS bits to watch (mask) and the settled state to
 * wait for (value).  The literal 0 terms keep the three mask/value pairs
 * visually aligned field-by-field.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1039
/*
 * Poll the panel power status register until (status & mask) == value,
 * logging the register state before and after.  Gives up with an error
 * after 5 seconds (polling every 10ms).
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 stat_reg = _pp_stat_reg(intel_dp);
	u32 ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(stat_reg), I915_READ(ctrl_reg));

	if (_wait_for((I915_READ(stat_reg) & mask) == value, 5000, 10))
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(stat_reg), I915_READ(ctrl_reg));

	DRM_DEBUG_KMS("Wait complete\n");
}
1064
/* Block until the power sequencer reports the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1070
/* Block until the power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1076
/*
 * Honour the panel's mandatory power-cycle delay: wait out the remainder
 * of panel_power_cycle_delay since the last power-off, then wait for the
 * sequencer itself to reach the off/idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1088
/* Wait out the remaining panel-power-on -> backlight-on delay. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1094
/* Wait out the remaining backlight-off -> panel-power-off delay. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1100
1101 /* Read the current pp_control value, unlocking the register if it
1102 * is locked
1103 */
1104
ironlake_get_pp_control(struct intel_dp * intel_dp)1105 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1106 {
1107 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1108 struct drm_i915_private *dev_priv = dev->dev_private;
1109 u32 control;
1110
1111 control = I915_READ(_pp_ctrl_reg(intel_dp));
1112 control &= ~PANEL_UNLOCK_MASK;
1113 control |= PANEL_UNLOCK_REGS;
1114 return control;
1115 }
1116
/*
 * Force panel VDD on so the AUX channel can be used before full panel
 * power-up.  Takes a runtime PM reference when VDD actually transitions
 * from off to on (dropped again in edp_panel_vdd_off_sync()).
 *
 * Returns true if this call is the one that requested VDD (i.e. the
 * caller is responsible for balancing it with a vdd-off).
 */
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	if (!is_edp(intel_dp))
		return false;

	intel_dp->want_panel_vdd = true;

	/* VDD already up (e.g. left on by the BIOS): nothing more to do. */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	intel_runtime_pm_get(dev_priv);

	DRM_DEBUG_KMS("Turning eDP VDD on\n");

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1160
intel_edp_panel_vdd_on(struct intel_dp * intel_dp)1161 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1162 {
1163 if (is_edp(intel_dp)) {
1164 bool vdd = _edp_panel_vdd_on(intel_dp);
1165
1166 WARN(!vdd, "eDP VDD already requested on\n");
1167 }
1168 }
1169
/*
 * Actually turn the VDD override off, if nobody wants it on any more.
 * Must be called with mode_config.mutex held.  Drops the runtime PM
 * reference taken in _edp_panel_vdd_on().
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

	if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("Turning eDP VDD off\n");

		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_FORCE_VDD;

		pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		pp_stat_reg = _pp_stat_reg(intel_dp);

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

		/* Dropping VDD with panel power off starts a power cycle;
		 * record when it began so later waits can honour the delay. */
		if ((pp & POWER_TARGET_ON) == 0)
			intel_dp->last_power_cycle = jiffies;

		intel_runtime_pm_put(dev_priv);
	}
}
1201
/*
 * Delayed-work callback that turns panel VDD off after the grace period
 * scheduled by edp_panel_vdd_off(sync=false).  Takes mode_config.mutex
 * because edp_panel_vdd_off_sync() asserts it is held.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edp_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}
1212
/*
 * Release a VDD request.  With sync=true the override is dropped
 * immediately; otherwise the drop is deferred via delayed work so a
 * burst of back-to-back operations keeps panel power up.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (!sync) {
		/*
		 * Queue the timer to fire a long time from now (relative to
		 * the power down delay) to keep the panel power up across a
		 * sequence of operations.
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
		return;
	}

	edp_panel_vdd_off_sync(intel_dp);
}
1234
/*
 * Run the panel power-on sequence: honour the power-cycle delay, set
 * POWER_TARGET_ON, wait for the sequencer to reach the on state, and
 * record the power-on timestamp for later backlight delays.
 * No-op for non-eDP ports or if the panel is already powered.
 */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
1279
/*
 * Run the panel power-off sequence.  Callers must hold a VDD request
 * (WARN otherwise); panel power and the VDD override are dropped in one
 * register write, and the runtime PM reference taken when VDD was
 * enabled is released here.
 */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	edp_wait_backlight_off(intel_dp);

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	/* VDD is being dropped by the write below, so clear the request
	 * flag before it lands. */
	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	intel_runtime_pm_put(dev_priv);
}
1315
/*
 * Enable the eDP backlight via the power sequencer, after honouring the
 * panel-on -> backlight-on delay, then hand over to the generic panel
 * backlight code.
 */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_panel_enable_backlight(intel_dp->attached_connector);
}
1345
/*
 * Disable the eDP backlight (generic backlight first, then the power
 * sequencer enable bit) and record when it went off so the later panel
 * power-off can honour the backlight-off delay.
 */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(intel_dp->attached_connector);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	intel_dp->last_backlight_off = jiffies;
}
1368
/*
 * Enable the CPU eDP PLL via DP_A.  The pipe must be disabled and both
 * the PLL and the port must currently be off (asserted below).
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
1394
/*
 * Disable the CPU eDP PLL via DP_A.  The pipe must already be disabled
 * and the port off; the PLL is expected to still be running (asserted).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}
1419
1420 /* If the sink supports it, try to set the power state appropriately */
intel_dp_sink_dpms(struct intel_dp * intel_dp,int mode)1421 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1422 {
1423 int ret, i;
1424
1425 /* Should have a valid DPCD by this point */
1426 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
1427 return;
1428
1429 if (mode != DRM_MODE_DPMS_ON) {
1430 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
1431 DP_SET_POWER_D3);
1432 if (ret != 1)
1433 DRM_DEBUG_DRIVER("failed to write sink power state\n");
1434 } else {
1435 /*
1436 * When turning on, we need to retry for 1ms to give the sink
1437 * time to wake up.
1438 */
1439 for (i = 0; i < 3; i++) {
1440 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
1441 DP_SET_POWER_D0);
1442 if (ret == 1)
1443 break;
1444 msleep(1);
1445 }
1446 }
1447 }
1448
/*
 * Read back the hardware state of this DP port.  Returns true if the
 * port is enabled and stores the pipe driving it in *pipe.  On CPT PCH
 * the pipe is found by scanning TRANS_DP_CTL for the matching port
 * select; elsewhere it is encoded in the port register itself.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum i915_pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/* Don't touch the register if its power well is down. */
	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		/* IVB CPU eDP encodes the pipe CPT-style in the port reg. */
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		/* CPT PCH: find the transcoder that selects this port. */
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			/* Unknown register: report enabled, *pipe untouched. */
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}
1505
/*
 * Fill pipe_config from the hardware state of this DP port: sync
 * polarity flags, link m/n values, port clock (for CPU eDP) and the
 * derived dotclock.  Also contains the VBT eDP bpp override hack
 * described inline.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_config *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	/* Sync polarity lives in the port register, except on CPT PCH
	 * ports where it moved to TRANS_DP_CTL. */
	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
		tmp = I915_READ(intel_dp->output_reg);
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->adjusted_mode.flags |= flags;

	pipe_config->has_dp_encoder = true;

	intel_dp_get_m_n(crtc, pipe_config);

	/* CPU eDP: link rate comes from the eDP PLL frequency select. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
1582
/* True if the attached eDP sink advertised PSR support. */
static bool is_edp_psr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return dev_priv->psr.sink_support;
}
1589
intel_edp_is_psr_enabled(struct drm_device * dev)1590 static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1591 {
1592 struct drm_i915_private *dev_priv = dev->dev_private;
1593
1594 if (!HAS_PSR(dev))
1595 return false;
1596
1597 return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1598 }
1599
/*
 * Program the PSR VSC SDP into the video DIP data registers, zero-padding
 * the buffer beyond sizeof(struct edp_vsc_psr), then re-enable the VSC DIP.
 */
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
				    struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per BSPec (Pipe Video Data Island Packet), we need to disable
	   the video DIP being updated before program video DIP data buffer
	   registers for DIP being updated. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	/* Copy the packet in 32-bit chunks and clear the remainder. */
	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
		if (i < sizeof(struct edp_vsc_psr))
			I915_WRITE(data_reg + i, *data++);
		else
			I915_WRITE(data_reg + i, 0);
	}

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}
1628
/*
 * One-time PSR setup: write the VSC SDP header and mask off events
 * (memup/hpd/lpsp) that would otherwise cause continuous PSR exits.
 * Guarded by intel_dp->psr_setup_done so it runs only once.
 */
static void intel_edp_psr_setup(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_vsc_psr psr_vsc;

	if (intel_dp->psr_setup_done)
		return;

	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x2;
	psr_vsc.sdp_header.HB3 = 0x8;
	intel_edp_psr_write_vsc(intel_dp, &psr_vsc);

	/* Avoid continuous PSR exit by masking memup and hpd */
	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

	intel_dp->psr_setup_done = true;
}
1652
/*
 * Enable PSR on the sink via DPCD and program the dedicated PSR AUX
 * registers the hardware uses to wake the sink.  Main-link-active is
 * only requested when the sink needs retraining on PSR exit.
 */
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t aux_clock_divider;
	int precharge = 0x3;
	int msg_size = 5;       /* Header(4) + Message(1) */

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Enable PSR in sink */
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
	else
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);

	/* Setup AUX registers */
	I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
	I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
	I915_WRITE(EDP_PSR_AUX_CTL(dev),
		   DP_AUX_CH_CTL_TIME_OUT_400us |
		   (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
1680
/*
 * Enable PSR on the source (host) side by programming EDP_PSR_CTL with
 * link-standby vs link-disable mode, sleep/idle timing, and the enable
 * bit.  Link standby is used when the sink can exit PSR without
 * retraining.
 */
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t max_sleep_time = 0x1f;
	uint32_t idle_frames = 1;
	uint32_t val = 0x0;
	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
		val |= EDP_PSR_LINK_STANDBY;
		val |= EDP_PSR_TP2_TP3_TIME_0us;
		val |= EDP_PSR_TP1_TIME_0us;
		val |= EDP_PSR_SKIP_AUX_EXIT;
	} else
		val |= EDP_PSR_LINK_DISABLE;

	/* BDW has no minimum link entry time field, hence the 0 there. */
	I915_WRITE(EDP_PSR_CTL(dev), val |
		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
		   EDP_PSR_ENABLE);
}
1704
intel_edp_psr_match_conditions(struct intel_dp * intel_dp)1705 static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1706 {
1707 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1708 struct drm_device *dev = dig_port->base.base.dev;
1709 struct drm_i915_private *dev_priv = dev->dev_private;
1710 struct drm_crtc *crtc = dig_port->base.base.crtc;
1711 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1712 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj;
1713 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
1714
1715 dev_priv->psr.source_ok = false;
1716
1717 if (!HAS_PSR(dev)) {
1718 DRM_DEBUG_KMS("PSR not supported on this platform\n");
1719 return false;
1720 }
1721
1722 if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
1723 (dig_port->port != PORT_A)) {
1724 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
1725 return false;
1726 }
1727
1728 if (!i915.enable_psr) {
1729 DRM_DEBUG_KMS("PSR disable by flag\n");
1730 return false;
1731 }
1732
1733 crtc = dig_port->base.base.crtc;
1734 if (crtc == NULL) {
1735 DRM_DEBUG_KMS("crtc not active for PSR\n");
1736 return false;
1737 }
1738
1739 intel_crtc = to_intel_crtc(crtc);
1740 if (!intel_crtc_active(crtc)) {
1741 DRM_DEBUG_KMS("crtc not active for PSR\n");
1742 return false;
1743 }
1744
1745 obj = to_intel_framebuffer(crtc->primary->fb)->obj;
1746 if (obj->tiling_mode != I915_TILING_X ||
1747 obj->fence_reg == I915_FENCE_REG_NONE) {
1748 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
1749 return false;
1750 }
1751
1752 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
1753 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
1754 return false;
1755 }
1756
1757 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
1758 S3D_ENABLE) {
1759 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
1760 return false;
1761 }
1762
1763 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
1764 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
1765 return false;
1766 }
1767
1768 dev_priv->psr.source_ok = true;
1769 return true;
1770 }
1771
/*
 * Enable PSR end-to-end (one-time setup, sink side, then source side),
 * but only when the source conditions match and PSR is not already on.
 */
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (intel_edp_psr_match_conditions(intel_dp) &&
	    !intel_edp_is_psr_enabled(dev)) {
		/* Setup PSR once */
		intel_edp_psr_setup(intel_dp);

		/* Enable PSR on the panel */
		intel_edp_psr_enable_sink(intel_dp);

		/* Enable PSR on the host */
		intel_edp_psr_enable_source(intel_dp);
	}
}
1789
/*
 * Public PSR enable entry point.
 * NOTE(review): the two conditions checked here are re-checked inside
 * intel_edp_psr_do_enable(), so match_conditions runs twice (it also
 * updates dev_priv->psr.source_ok as a side effect) — presumably kept
 * for the early-out; confirm before simplifying.
 */
void intel_edp_psr_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (intel_edp_psr_match_conditions(intel_dp) &&
	    !intel_edp_is_psr_enabled(dev))
		intel_edp_psr_do_enable(intel_dp);
}
1798
/*
 * Disable PSR on the source and wait (up to 2s) for the PSR state
 * machine to return to idle.  No-op if PSR is not currently enabled.
 */
void intel_edp_psr_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!intel_edp_is_psr_enabled(dev))
		return;

	I915_WRITE(EDP_PSR_CTL(dev),
		   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);

	/* Wait till PSR is idle */
	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
		       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
		DRM_ERROR("Timed out waiting for PSR Idle State\n");
}
1815
/*
 * Re-evaluate PSR for the (single) eDP encoder on this device: disable
 * PSR if the source conditions no longer match, enable it if they do
 * and it is currently off.  Returns early if the sink never advertised
 * PSR support.
 */
void intel_edp_psr_update(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_dp *intel_dp = NULL;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
		if (encoder->type == INTEL_OUTPUT_EDP) {
			intel_dp = enc_to_intel_dp(&encoder->base);

			if (!is_edp_psr(dev))
				return;

			if (!intel_edp_psr_match_conditions(intel_dp))
				intel_edp_psr_disable(intel_dp);
			else
				if (!intel_edp_is_psr_enabled(dev))
					intel_edp_psr_do_enable(intel_dp);
		}
}
1835
/*
 * Encoder disable hook: backlight off, sink to D3, panel power off —
 * all under a VDD request so panel registers stay reachable.  Link
 * teardown for PCH ports happens here; CPU eDP / VLV defer it to
 * intel_post_disable_dp() (after the pipe is off).
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
	if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
		intel_dp_link_down(intel_dp);
}
1853
intel_post_disable_dp(struct intel_encoder * encoder)1854 static void intel_post_disable_dp(struct intel_encoder *encoder)
1855 {
1856 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1857 enum port port = dp_to_dig_port(intel_dp)->port;
1858 struct drm_device *dev = encoder->base.dev;
1859
1860 if (port == PORT_A || IS_VALLEYVIEW(dev)) {
1861 intel_dp_link_down(intel_dp);
1862 if (!IS_VALLEYVIEW(dev))
1863 ironlake_edp_pll_off(intel_dp);
1864 }
1865 }
1866
/*
 * Common DP enable path: wake the sink, start link training (which
 * enables the port), power the panel on, then finish and stop link
 * training.  Bails with a WARN if the port is unexpectedly already
 * enabled.  The VDD request brackets the AUX traffic before panel
 * power is up.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	intel_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);
}
1885
/* G4x/ILK enable hook: common DP enable followed by backlight on. */
static void g4x_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_enable_dp(encoder);
	intel_edp_backlight_on(intel_dp);
}
1893
/* VLV enable hook: the port was already enabled in the pre-enable
 * stage (vlv_pre_enable_dp), so only the backlight remains. */
static void vlv_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_edp_backlight_on(intel_dp);
}
1900
/* Pre-enable hook: CPU eDP (port A) needs its PLL running before the
 * pipe/port are enabled. */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);

	if (dport->port == PORT_A)
		ironlake_edp_pll_on(intel_dp);
}
1909
/*
 * VLV pre-enable hook: program the DPIO PHY PCS registers for this
 * channel, initialize the eDP power sequencer for this pipe/port, then
 * run the common DP enable and wait for the PHY to report ready.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	struct edp_power_seq power_seq;
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	/* NOTE(review): the value read above is immediately discarded by
	 * the "val = 0" below, making the read appear dead; it may only
	 * matter for its bus side effects — confirm before removing. */
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->dpio_lock);

	if (is_edp(intel_dp)) {
		/* init power sequencer on this pipe and port */
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
							      &power_seq);
	}

	intel_enable_dp(encoder);

	vlv_wait_port_ready(dev_priv, dport);
}
1948
/*
 * VLV pre-PLL-enable hook: put the PHY TX lanes back into their default
 * reset state and apply the inter-pair skew workaround, all under the
 * DPIO lock. The hex constants are opaque PHY tuning values.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->dpio_lock);
}
1976
1977 /*
1978 * Native read with retry for link status and receiver capability reads for
1979 * cases where the sink may still be asleep.
1980 *
1981 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
1982 * supposed to retry 3 times per the spec.
1983 */
1984 static ssize_t
intel_dp_dpcd_read_wake(struct drm_dp_aux * aux,unsigned int offset,void * buffer,size_t size)1985 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
1986 void *buffer, size_t size)
1987 {
1988 ssize_t ret;
1989 int i;
1990
1991 for (i = 0; i < 3; i++) {
1992 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
1993 if (ret == size)
1994 return ret;
1995 msleep(1);
1996 }
1997
1998 return ret;
1999 }
2000
2001 /*
2002 * Fetch AUX CH registers 0x202 - 0x207 which contain
2003 * link status information
2004 */
2005 static bool
intel_dp_get_link_status(struct intel_dp * intel_dp,uint8_t link_status[DP_LINK_STATUS_SIZE])2006 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2007 {
2008 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2009 DP_LANE0_1_STATUS,
2010 link_status,
2011 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2012 }
2013
2014 /*
2015 * These are source-specific values; current Intel hardware supports
2016 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
2017 */
2018
2019 static uint8_t
intel_dp_voltage_max(struct intel_dp * intel_dp)2020 intel_dp_voltage_max(struct intel_dp *intel_dp)
2021 {
2022 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2023 enum port port = dp_to_dig_port(intel_dp)->port;
2024
2025 if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
2026 return DP_TRAIN_VOLTAGE_SWING_1200;
2027 else if (IS_GEN7(dev) && port == PORT_A)
2028 return DP_TRAIN_VOLTAGE_SWING_800;
2029 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2030 return DP_TRAIN_VOLTAGE_SWING_1200;
2031 else
2032 return DP_TRAIN_VOLTAGE_SWING_800;
2033 }
2034
2035 static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp * intel_dp,uint8_t voltage_swing)2036 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2037 {
2038 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2039 enum port port = dp_to_dig_port(intel_dp)->port;
2040
2041 if (IS_BROADWELL(dev)) {
2042 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2043 case DP_TRAIN_VOLTAGE_SWING_400:
2044 case DP_TRAIN_VOLTAGE_SWING_600:
2045 return DP_TRAIN_PRE_EMPHASIS_6;
2046 case DP_TRAIN_VOLTAGE_SWING_800:
2047 return DP_TRAIN_PRE_EMPHASIS_3_5;
2048 case DP_TRAIN_VOLTAGE_SWING_1200:
2049 default:
2050 return DP_TRAIN_PRE_EMPHASIS_0;
2051 }
2052 } else if (IS_HASWELL(dev)) {
2053 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2054 case DP_TRAIN_VOLTAGE_SWING_400:
2055 return DP_TRAIN_PRE_EMPHASIS_9_5;
2056 case DP_TRAIN_VOLTAGE_SWING_600:
2057 return DP_TRAIN_PRE_EMPHASIS_6;
2058 case DP_TRAIN_VOLTAGE_SWING_800:
2059 return DP_TRAIN_PRE_EMPHASIS_3_5;
2060 case DP_TRAIN_VOLTAGE_SWING_1200:
2061 default:
2062 return DP_TRAIN_PRE_EMPHASIS_0;
2063 }
2064 } else if (IS_VALLEYVIEW(dev)) {
2065 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2066 case DP_TRAIN_VOLTAGE_SWING_400:
2067 return DP_TRAIN_PRE_EMPHASIS_9_5;
2068 case DP_TRAIN_VOLTAGE_SWING_600:
2069 return DP_TRAIN_PRE_EMPHASIS_6;
2070 case DP_TRAIN_VOLTAGE_SWING_800:
2071 return DP_TRAIN_PRE_EMPHASIS_3_5;
2072 case DP_TRAIN_VOLTAGE_SWING_1200:
2073 default:
2074 return DP_TRAIN_PRE_EMPHASIS_0;
2075 }
2076 } else if (IS_GEN7(dev) && port == PORT_A) {
2077 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2078 case DP_TRAIN_VOLTAGE_SWING_400:
2079 return DP_TRAIN_PRE_EMPHASIS_6;
2080 case DP_TRAIN_VOLTAGE_SWING_600:
2081 case DP_TRAIN_VOLTAGE_SWING_800:
2082 return DP_TRAIN_PRE_EMPHASIS_3_5;
2083 default:
2084 return DP_TRAIN_PRE_EMPHASIS_0;
2085 }
2086 } else {
2087 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2088 case DP_TRAIN_VOLTAGE_SWING_400:
2089 return DP_TRAIN_PRE_EMPHASIS_6;
2090 case DP_TRAIN_VOLTAGE_SWING_600:
2091 return DP_TRAIN_PRE_EMPHASIS_6;
2092 case DP_TRAIN_VOLTAGE_SWING_800:
2093 return DP_TRAIN_PRE_EMPHASIS_3_5;
2094 case DP_TRAIN_VOLTAGE_SWING_1200:
2095 default:
2096 return DP_TRAIN_PRE_EMPHASIS_0;
2097 }
2098 }
2099 }
2100
/*
 * Program VLV DPIO PHY swing/pre-emphasis registers from the current
 * training request in train_set[0]. The de-emphasis, pre-emphasis and
 * "uniqtranscale" values are opaque per-level tuning constants.
 *
 * Always returns 0: on VLV the levels live entirely in the PHY, so the
 * caller leaves the DP port register untouched (it uses mask 0).
 * Unsupported swing/pre-emphasis combinations return early without
 * touching the hardware.
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* Pick the register constants for the requested level pair. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_1200:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* Write the levels to the PHY; TX_DW5 gates the update (off/on). */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
2200
2201 static void
intel_get_adjust_train(struct intel_dp * intel_dp,const uint8_t link_status[DP_LINK_STATUS_SIZE])2202 intel_get_adjust_train(struct intel_dp *intel_dp,
2203 const uint8_t link_status[DP_LINK_STATUS_SIZE])
2204 {
2205 uint8_t v = 0;
2206 uint8_t p = 0;
2207 int lane;
2208 uint8_t voltage_max;
2209 uint8_t preemph_max;
2210
2211 for (lane = 0; lane < intel_dp->lane_count; lane++) {
2212 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
2213 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
2214
2215 if (this_v > v)
2216 v = this_v;
2217 if (this_p > p)
2218 p = this_p;
2219 }
2220
2221 voltage_max = intel_dp_voltage_max(intel_dp);
2222 if (v >= voltage_max)
2223 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
2224
2225 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
2226 if (p >= preemph_max)
2227 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
2228
2229 for (lane = 0; lane < 4; lane++)
2230 intel_dp->train_set[lane] = v | p;
2231 }
2232
2233 static uint32_t
intel_gen4_signal_levels(uint8_t train_set)2234 intel_gen4_signal_levels(uint8_t train_set)
2235 {
2236 uint32_t signal_levels = 0;
2237
2238 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2239 case DP_TRAIN_VOLTAGE_SWING_400:
2240 default:
2241 signal_levels |= DP_VOLTAGE_0_4;
2242 break;
2243 case DP_TRAIN_VOLTAGE_SWING_600:
2244 signal_levels |= DP_VOLTAGE_0_6;
2245 break;
2246 case DP_TRAIN_VOLTAGE_SWING_800:
2247 signal_levels |= DP_VOLTAGE_0_8;
2248 break;
2249 case DP_TRAIN_VOLTAGE_SWING_1200:
2250 signal_levels |= DP_VOLTAGE_1_2;
2251 break;
2252 }
2253 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2254 case DP_TRAIN_PRE_EMPHASIS_0:
2255 default:
2256 signal_levels |= DP_PRE_EMPHASIS_0;
2257 break;
2258 case DP_TRAIN_PRE_EMPHASIS_3_5:
2259 signal_levels |= DP_PRE_EMPHASIS_3_5;
2260 break;
2261 case DP_TRAIN_PRE_EMPHASIS_6:
2262 signal_levels |= DP_PRE_EMPHASIS_6;
2263 break;
2264 case DP_TRAIN_PRE_EMPHASIS_9_5:
2265 signal_levels |= DP_PRE_EMPHASIS_9_5;
2266 break;
2267 }
2268 return signal_levels;
2269 }
2270
2271 /* Gen6's DP voltage swing and pre-emphasis control */
2272 static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)2273 intel_gen6_edp_signal_levels(uint8_t train_set)
2274 {
2275 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2276 DP_TRAIN_PRE_EMPHASIS_MASK);
2277 switch (signal_levels) {
2278 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2279 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2280 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2281 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2282 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
2283 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2284 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2285 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
2286 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2287 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2288 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
2289 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2290 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
2291 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
2292 default:
2293 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2294 "0x%x\n", signal_levels);
2295 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2296 }
2297 }
2298
2299 /* Gen7's DP voltage swing and pre-emphasis control */
2300 static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)2301 intel_gen7_edp_signal_levels(uint8_t train_set)
2302 {
2303 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2304 DP_TRAIN_PRE_EMPHASIS_MASK);
2305 switch (signal_levels) {
2306 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2307 return EDP_LINK_TRAIN_400MV_0DB_IVB;
2308 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2309 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
2310 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2311 return EDP_LINK_TRAIN_400MV_6DB_IVB;
2312
2313 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2314 return EDP_LINK_TRAIN_600MV_0DB_IVB;
2315 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2316 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
2317
2318 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2319 return EDP_LINK_TRAIN_800MV_0DB_IVB;
2320 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2321 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
2322
2323 default:
2324 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2325 "0x%x\n", signal_levels);
2326 return EDP_LINK_TRAIN_500MV_0DB_IVB;
2327 }
2328 }
2329
2330 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
2331 static uint32_t
intel_hsw_signal_levels(uint8_t train_set)2332 intel_hsw_signal_levels(uint8_t train_set)
2333 {
2334 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2335 DP_TRAIN_PRE_EMPHASIS_MASK);
2336 switch (signal_levels) {
2337 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2338 return DDI_BUF_EMP_400MV_0DB_HSW;
2339 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2340 return DDI_BUF_EMP_400MV_3_5DB_HSW;
2341 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2342 return DDI_BUF_EMP_400MV_6DB_HSW;
2343 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
2344 return DDI_BUF_EMP_400MV_9_5DB_HSW;
2345
2346 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2347 return DDI_BUF_EMP_600MV_0DB_HSW;
2348 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2349 return DDI_BUF_EMP_600MV_3_5DB_HSW;
2350 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2351 return DDI_BUF_EMP_600MV_6DB_HSW;
2352
2353 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2354 return DDI_BUF_EMP_800MV_0DB_HSW;
2355 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2356 return DDI_BUF_EMP_800MV_3_5DB_HSW;
2357 default:
2358 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2359 "0x%x\n", signal_levels);
2360 return DDI_BUF_EMP_400MV_0DB_HSW;
2361 }
2362 }
2363
2364 static uint32_t
intel_bdw_signal_levels(uint8_t train_set)2365 intel_bdw_signal_levels(uint8_t train_set)
2366 {
2367 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2368 DP_TRAIN_PRE_EMPHASIS_MASK);
2369 switch (signal_levels) {
2370 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2371 return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
2372 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2373 return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */
2374 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2375 return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */
2376
2377 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2378 return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */
2379 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2380 return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */
2381 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2382 return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */
2383
2384 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2385 return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */
2386 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2387 return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */
2388
2389 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
2390 return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */
2391
2392 default:
2393 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2394 "0x%x\n", signal_levels);
2395 return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
2396 }
2397 }
2398
/*
 * Properly updates "DP" with the correct signal levels.
 *
 * Picks the platform-specific translation of train_set[0] and merges it
 * into *DP under the matching bit mask. On VLV the levels are written
 * directly to the PHY by intel_vlv_signal_levels(), so the returned
 * value is 0 and the mask is 0 (the port register is left untouched).
 */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	uint8_t train_set = intel_dp->train_set[0];

	/* Order matters: BDW/HSW checked before the generic gen7/gen6 cases. */
	if (IS_BROADWELL(dev)) {
		signal_levels = intel_bdw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_HASWELL(dev)) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = intel_vlv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	*DP = (*DP & ~mask) | signal_levels;
}
2433
/*
 * Program the requested DP training pattern into the source hardware and
 * mirror it to the sink over AUX.
 *
 * Source side differs per platform: DDI parts use DP_TP_CTL, CPT PCH
 * ports use the _CPT link-train bits in the port register, everything
 * else uses the original g4x bits. The sink side writes
 * DP_TRAINING_PATTERN_SET, followed by the per-lane
 * DP_TRAINING_LANEx_SET values from intel_dp->train_set unless training
 * is being disabled.
 *
 * Returns true if the DPCD write transferred the expected byte count.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* CPT has no TPS3 encoding; fall back to TPS2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* No TPS3 on these parts either; fall back to TPS2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
				buf, len);

	return ret == len;
}
2530
/*
 * Restart training from a clean state: zero the cached vswing/
 * pre-emphasis values, program the matching hardware signal levels,
 * then start the given training pattern. Returns the result of
 * intel_dp_set_link_train().
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
2539
/*
 * One adjust step of link training: fold the sink's per-lane requests
 * from link_status into train_set, reprogram the source signal levels,
 * and push the new DP_TRAINING_LANEx_SET values to the sink.
 *
 * Returns true if all lane-set bytes were written over AUX.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_get_adjust_train(intel_dp, link_status);
	intel_dp_set_signal_levels(intel_dp, DP);

	/* Apply the new levels on the source before telling the sink. */
	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
				intel_dp->train_set, intel_dp->lane_count);

	return ret == intel_dp->lane_count;
}
2560
/*
 * Switch a DDI port into idle-pattern transmission via DP_TP_CTL after
 * link training completes. No-op on non-DDI hardware.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	/* On the other ports, wait for the HW to confirm idle was sent. */
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
2591
/*
 * Enable corresponding port and start training pattern 1 (clock
 * recovery). Writes the link configuration (bandwidth, lane count,
 * spreading) to the sink, then iterates: check link status, and either
 * bail (success/failure), restart from scratch after too many full
 * retries at max swing, give up after 5 tries at the same voltage, or
 * apply the sink's requested adjustments and loop. The final port value
 * is cached in intel_dp->DP.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);

	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes are at max swing; restart from scratch,
			 * up to 5 full restarts before giving up. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	intel_dp->DP = DP;
}
2682
/*
 * Channel-equalization phase of link training (training pattern 2, or 3
 * when the sink supports it). Loops checking link status: falls back to
 * clock recovery if CR is lost, retries equalization up to 5 times
 * before redoing CR, and gives up after 5 CR restarts. Ends by putting
 * DDI ports into idle-pattern mode and caching the port value.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
2763
/* End link training: disable the training pattern so the link carries
 * normal symbols again. */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
2769
/*
 * Take the DP link down: put the port into idle training, then disable
 * it, honoring the IBX transcoder-select workaround. No-op on DDI (see
 * comment below) and, defensively, when the port is already off.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t DP = intel_dp->DP;

	/*
	 * DDI code has a strict mode set sequence and we should try to respect
	 * it, otherwise we might hang the machine in many different ways. So we
	 * really should be disabling the port only on a complete crtc_disable
	 * sequence. This function is just called under two conditions on DDI
	 * code:
	 * - Link train failed while doing crtc_enable, and on this case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port on this case, so
	 *   when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (HAS_DDI(dev))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Drop into the idle training pattern before disabling the port. */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	/* We don't really know why we're doing this */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (WARN_ON(crtc == NULL)) {
			/* We should never try to disable a port without a crtc
			 * attached. For paranoia keep the code around for a
			 * bit. */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}
2849
/*
 * Read and cache the sink's DPCD receiver capabilities, plus eDP PSR
 * capability, TPS3 support, and (where applicable) per-port downstream
 * info. Returns false if the AUX transfer fails or no DPCD is present.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

	/* NOTE(review): a short but non-negative read passes this check;
	 * presumably the DP_DPCD_REV == 0 test below catches stale data —
	 * confirm whether this should compare against sizeof() instead. */
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}
	}

	/* Training Pattern 3 support */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
		intel_dp->use_tps3 = true;
		DRM_DEBUG_KMS("Displayport TPS3 supported");
	} else
		intel_dp->use_tps3 = false;

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
2904
/*
 * Log the sink and branch IEEE OUIs for debugging.  Only done when the
 * sink advertises OUI support; VDD must be up on eDP panels for the
 * AUX reads to succeed.
 */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	/* AUX access on eDP needs panel VDD powered up. */
	intel_edp_panel_vdd_on(intel_dp);

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Sink OUI: %02hhx%02hhx%02hhx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Branch OUI: %02hhx%02hhx%02hhx\n",
			      buf[0], buf[1], buf[2]);

	/* NOTE(review): second arg looks like "sync"; false defers the
	 * actual VDD-off to the delayed work — confirm against helper. */
	edp_panel_vdd_off(intel_dp, false);
}
2925
/*
 * Ask the sink to compute a test CRC over the incoming stream and copy
 * the six CRC bytes into 'crc'.  Returns 0 on success, -ENOTTY if the
 * sink doesn't support CRC generation, -EAGAIN on AUX failure.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 misc;
	int i;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &misc) < 0)
		return -EAGAIN;

	if (!(misc & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       DP_TEST_SINK_START) < 0)
		return -EAGAIN;

	/* Wait 2 vblanks to be sure we will have the correct CRC value */
	for (i = 0; i < 2; i++)
		intel_wait_for_vblank(dev, crtc->pipe);

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
		return -EAGAIN;

	/* Best-effort stop; the CRC has already been captured. */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
	return 0;
}
2954
2955 static bool
intel_dp_get_sink_irq(struct intel_dp * intel_dp,u8 * sink_irq_vector)2956 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2957 {
2958 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2959 DP_DEVICE_SERVICE_IRQ_VECTOR,
2960 sink_irq_vector, 1) == 1;
2961 }
2962
/*
 * Minimal automated-test handling: reply NAK to any test request so
 * the sink knows we won't run it.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
2969
2970 /*
2971 * According to DP spec
2972 * 5.1.2:
2973 * 1. Read DPCD
2974 * 2. Configure link according to Receiver Capabilities
2975 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
2976 * 4. Check link status on receipt of hot-plug interrupt
2977 */
2978
2979 void
intel_dp_check_link_status(struct intel_dp * intel_dp)2980 intel_dp_check_link_status(struct intel_dp *intel_dp)
2981 {
2982 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
2983 u8 sink_irq_vector;
2984 u8 link_status[DP_LINK_STATUS_SIZE];
2985
2986 if (!intel_encoder->connectors_active)
2987 return;
2988
2989 if (WARN_ON(!intel_encoder->base.crtc))
2990 return;
2991
2992 /* Try to read receiver status if the link appears to be up */
2993 if (!intel_dp_get_link_status(intel_dp, link_status)) {
2994 return;
2995 }
2996
2997 /* Now read the DPCD to see if it's actually running */
2998 if (!intel_dp_get_dpcd(intel_dp)) {
2999 return;
3000 }
3001
3002 /* Try to read the source of the interrupt */
3003 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3004 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
3005 /* Clear interrupt source */
3006 drm_dp_dpcd_writeb(&intel_dp->aux,
3007 DP_DEVICE_SERVICE_IRQ_VECTOR,
3008 sink_irq_vector);
3009
3010 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
3011 intel_dp_handle_test_request(intel_dp);
3012 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
3013 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
3014 }
3015
3016 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3017 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
3018 drm_get_encoder_name(&intel_encoder->base));
3019 intel_dp_start_link_train(intel_dp);
3020 intel_dp_complete_link_train(intel_dp);
3021 intel_dp_stop_link_train(intel_dp);
3022 }
3023 }
3024
3025 /* XXX this is probably wrong for multiple downstream ports */
3026 static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp * intel_dp)3027 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
3028 {
3029 uint8_t *dpcd = intel_dp->dpcd;
3030 uint8_t type;
3031
3032 if (!intel_dp_get_dpcd(intel_dp))
3033 return connector_status_disconnected;
3034
3035 /* if there's no downstream port, we're done */
3036 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
3037 return connector_status_connected;
3038
3039 /* If we're HPD-aware, SINK_COUNT changes dynamically */
3040 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3041 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
3042 uint8_t reg;
3043
3044 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
3045 ®, 1) < 0)
3046 return connector_status_unknown;
3047
3048 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
3049 : connector_status_disconnected;
3050 }
3051
3052 /* If no HPD, poke DDC gently */
3053 if (drm_probe_ddc(&intel_dp->aux.ddc))
3054 return connector_status_connected;
3055
3056 /* Well we tried, say unknown for unreliable port types */
3057 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
3058 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
3059 if (type == DP_DS_PORT_TYPE_VGA ||
3060 type == DP_DS_PORT_TYPE_NON_EDID)
3061 return connector_status_unknown;
3062 } else {
3063 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3064 DP_DWN_STRM_PORT_TYPE_MASK;
3065 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
3066 type == DP_DWN_STRM_PORT_TYPE_OTHER)
3067 return connector_status_unknown;
3068 }
3069
3070 /* Anything else is out of spec, warn and ignore */
3071 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
3072 return connector_status_disconnected;
3073 }
3074
3075 static enum drm_connector_status
ironlake_dp_detect(struct intel_dp * intel_dp)3076 ironlake_dp_detect(struct intel_dp *intel_dp)
3077 {
3078 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3079 struct drm_i915_private *dev_priv = dev->dev_private;
3080 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3081 enum drm_connector_status status;
3082
3083 /* Can't disconnect eDP, but you can close the lid... */
3084 if (is_edp(intel_dp)) {
3085 status = intel_panel_detect(dev);
3086 if (status == connector_status_unknown)
3087 status = connector_status_connected;
3088 return status;
3089 }
3090
3091 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
3092 return connector_status_disconnected;
3093
3094 return intel_dp_detect_dpcd(intel_dp);
3095 }
3096
3097 static enum drm_connector_status
g4x_dp_detect(struct intel_dp * intel_dp)3098 g4x_dp_detect(struct intel_dp *intel_dp)
3099 {
3100 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3101 struct drm_i915_private *dev_priv = dev->dev_private;
3102 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3103 uint32_t bit;
3104
3105 /* Can't disconnect eDP, but you can close the lid... */
3106 if (is_edp(intel_dp)) {
3107 enum drm_connector_status status;
3108
3109 status = intel_panel_detect(dev);
3110 if (status == connector_status_unknown)
3111 status = connector_status_connected;
3112 return status;
3113 }
3114
3115 if (IS_VALLEYVIEW(dev)) {
3116 switch (intel_dig_port->port) {
3117 case PORT_B:
3118 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
3119 break;
3120 case PORT_C:
3121 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
3122 break;
3123 case PORT_D:
3124 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
3125 break;
3126 default:
3127 return connector_status_unknown;
3128 }
3129 } else {
3130 switch (intel_dig_port->port) {
3131 case PORT_B:
3132 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
3133 break;
3134 case PORT_C:
3135 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
3136 break;
3137 case PORT_D:
3138 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
3139 break;
3140 default:
3141 return connector_status_unknown;
3142 }
3143 }
3144
3145 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
3146 return connector_status_disconnected;
3147
3148 return intel_dp_detect_dpcd(intel_dp);
3149 }
3150
3151 static struct edid *
intel_dp_get_edid(struct drm_connector * connector,struct i2c_adapter * adapter)3152 intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
3153 {
3154 struct intel_connector *intel_connector = to_intel_connector(connector);
3155
3156 /* use cached edid if we have one */
3157 if (intel_connector->edid) {
3158 /* invalid edid */
3159 if (IS_ERR(intel_connector->edid))
3160 return NULL;
3161
3162 return drm_edid_duplicate(intel_connector->edid);
3163 }
3164
3165 return drm_get_edid(connector, adapter);
3166 }
3167
3168 static int
intel_dp_get_edid_modes(struct drm_connector * connector,struct i2c_adapter * adapter)3169 intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
3170 {
3171 struct intel_connector *intel_connector = to_intel_connector(connector);
3172
3173 /* use cached edid if we have one */
3174 if (intel_connector->edid) {
3175 /* invalid edid */
3176 if (IS_ERR(intel_connector->edid))
3177 return 0;
3178
3179 return intel_connector_update_modes(connector,
3180 intel_connector->edid);
3181 }
3182
3183 return intel_ddc_get_modes(connector, adapter);
3184 }
3185
/*
 * drm_connector_funcs.detect callback: determine whether a sink is
 * attached and cache its audio capability for later modesets.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	struct edid *edid = NULL;

	/* Keep the device and the port's power domain up for the whole
	 * detect cycle; released at 'out'. */
	intel_runtime_pm_get(dev_priv);

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector));

	intel_dp->has_audio = false;

	/* PCH-split platforms and g4x/VLV use different live-status logic. */
	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	/* Audio: honor a user override, otherwise ask the EDID. */
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			kfree(edid);
		}
	}

	/* DDI encoders may start as UNKNOWN; settle on DP now, but never
	 * rewrite an eDP encoder's type. */
	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

out:
	intel_display_power_put(dev_priv, power_domain);

	intel_runtime_pm_put(dev_priv);

	return status;
}
3239
intel_dp_get_modes(struct drm_connector * connector)3240 static int intel_dp_get_modes(struct drm_connector *connector)
3241 {
3242 struct intel_dp *intel_dp = intel_attached_dp(connector);
3243 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3244 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3245 struct intel_connector *intel_connector = to_intel_connector(connector);
3246 struct drm_device *dev = connector->dev;
3247 struct drm_i915_private *dev_priv = dev->dev_private;
3248 enum intel_display_power_domain power_domain;
3249 int ret;
3250
3251 /* We should parse the EDID data and find out if it has an audio sink
3252 */
3253
3254 power_domain = intel_display_port_power_domain(intel_encoder);
3255 intel_display_power_get(dev_priv, power_domain);
3256
3257 ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc);
3258 intel_display_power_put(dev_priv, power_domain);
3259 if (ret)
3260 return ret;
3261
3262 /* if eDP has no EDID, fall back to fixed mode */
3263 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
3264 struct drm_display_mode *mode;
3265 mode = drm_mode_duplicate(dev,
3266 intel_connector->panel.fixed_mode);
3267 if (mode) {
3268 drm_mode_probed_add(connector, mode);
3269 return 1;
3270 }
3271 }
3272 return 0;
3273 }
3274
3275 static bool
intel_dp_detect_audio(struct drm_connector * connector)3276 intel_dp_detect_audio(struct drm_connector *connector)
3277 {
3278 struct intel_dp *intel_dp = intel_attached_dp(connector);
3279 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3280 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3281 struct drm_device *dev = connector->dev;
3282 struct drm_i915_private *dev_priv = dev->dev_private;
3283 enum intel_display_power_domain power_domain;
3284 struct edid *edid;
3285 bool has_audio = false;
3286
3287 power_domain = intel_display_port_power_domain(intel_encoder);
3288 intel_display_power_get(dev_priv, power_domain);
3289
3290 edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
3291 if (edid) {
3292 has_audio = drm_detect_monitor_audio(edid);
3293 kfree(edid);
3294 }
3295
3296 intel_display_power_put(dev_priv, power_domain);
3297
3298 return has_audio;
3299 }
3300
/*
 * drm_connector_funcs.set_property callback: handles the force-audio,
 * broadcast-RGB and (eDP-only) scaling-mode properties.  Any accepted
 * change that affects the output triggers a modeset restore at 'done'.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value on the drm object first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO means: re-detect from the monitor's EDID. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* Skip the modeset when the effective state is unchanged. */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* No effective change: avoid the expensive restore. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
3388
/*
 * drm_connector_funcs.destroy callback: free the cached EDID, tear
 * down eDP panel state, and release the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* The cache may hold an ERR_PTR sentinel for a bad EDID; only
	 * real allocations get freed. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
3405
/*
 * drm_encoder_funcs.destroy callback: unregister the AUX/i2c bus,
 * flush any pending panel-VDD-off work, and free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		/* Cancel the delayed work first so it can't race with us,
		 * then force VDD off under the mode_config lock. */
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		mutex_lock(&dev->mode_config.mutex);
		edp_panel_vdd_off_sync(intel_dp);
		mutex_unlock(&dev->mode_config.mutex);
	}
	kfree(intel_dig_port);
}
3422
/* Connector ops shared by DP and eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_connector_destroy,
};
3430
/* Probe helpers: mode enumeration/validation and encoder lookup. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
3436
/* Encoder ops: only teardown is needed here. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};
3440
/*
 * Hotplug hook: revalidate the link on a hot-plug event and retrain
 * if needed.
 */
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	intel_dp_check_link_status(intel_dp);
}
3448
3449 /* Return which DP Port should be selected for Transcoder DP control */
3450 int
intel_trans_dp_port_sel(struct drm_crtc * crtc)3451 intel_trans_dp_port_sel(struct drm_crtc *crtc)
3452 {
3453 struct drm_device *dev = crtc->dev;
3454 struct intel_encoder *intel_encoder;
3455 struct intel_dp *intel_dp;
3456
3457 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
3458 intel_dp = enc_to_intel_dp(&intel_encoder->base);
3459
3460 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
3461 intel_encoder->type == INTEL_OUTPUT_EDP)
3462 return intel_dp->output_reg;
3463 }
3464
3465 return -1;
3466 }
3467
3468 /* check the VBT to see whether the eDP is on DP-D port */
intel_dp_is_edp(struct drm_device * dev,enum port port)3469 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
3470 {
3471 struct drm_i915_private *dev_priv = dev->dev_private;
3472 union child_device_config *p_child;
3473 int i;
3474 static const short port_mapping[] = {
3475 [PORT_B] = PORT_IDPB,
3476 [PORT_C] = PORT_IDPC,
3477 [PORT_D] = PORT_IDPD,
3478 };
3479
3480 if (port == PORT_A)
3481 return true;
3482
3483 if (!dev_priv->vbt.child_dev_num)
3484 return false;
3485
3486 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
3487 p_child = dev_priv->vbt.child_dev + i;
3488
3489 if (p_child->common.dvo_port == port_mapping[port] &&
3490 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
3491 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
3492 return true;
3493 }
3494 return false;
3495 }
3496
/*
 * Attach the standard DP connector properties (force-audio, broadcast
 * RGB) plus, for eDP panels, the panel-fitter scaling mode.
 */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	/* Default to automatic full/limited range selection. */
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
3515
/*
 * Seed the panel power-sequencing timestamps with the current time so
 * the first power transitions honor the mandated panel delays.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->last_power_cycle = jiffies;
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
3522
/*
 * Compute the eDP panel power-sequencing delays: read the current
 * values from the PPS registers, merge them with the VBT values
 * (taking the max of each field), fall back to eDP-spec limits where
 * both are unset, and store the resulting per-phase delays (in ms) in
 * intel_dp.  The merged sequence is also returned through 'out' for
 * the subsequent register programming.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec, final;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	/* PCH platforms have one PPS block; VLV has one per pipe. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_ctrl_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers (hw units: 100 usec). */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* Power-cycle delay is stored in 100 ms units; scale to match. */
	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert 100 usec units to ms, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	if (out)
		*out = final;
}
3622
/*
 * Program the merged power-sequencer delays from 'seq' into the PPS
 * registers, including the reference-clock divisor and, where the hw
 * has one, the panel port-select field.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	/* Reference clock for the PPS divisor differs per platform. */
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;

	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		if (dp_to_dig_port(intel_dp)->port == PORT_B)
			port_sel = PANEL_PORT_SELECT_DPB_VLV;
		else
			port_sel = PANEL_PORT_SELECT_DPC_VLV;
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (dp_to_dig_port(intel_dp)->port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
3688
/*
 * Finish connector setup for eDP panels: take a power reference if the
 * BIOS left VDD enabled, cache DPCD and EDID, program the power
 * sequencer, pick the fixed panel mode (EDID preferred, VBT fallback)
 * and set up the backlight.
 *
 * Returns false when the panel is a "ghost" (no DPCD responds), true
 * otherwise — including for non-eDP ports, where this is a no-op.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector,
				     struct edp_power_seq *power_seq)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;

	if (!is_edp(intel_dp))
		return true;

	/* The VDD bit needs a power domain reference, so if the bit is already
	 * enabled when we boot, grab this reference. */
	if (edp_have_panel_vdd(intel_dp)) {
		enum intel_display_power_domain power_domain;
		power_domain = intel_display_port_power_domain(intel_encoder);
		intel_display_power_get(dev_priv, power_domain);
	}

	/* Cache DPCD and EDID for edp. */
	intel_edp_panel_vdd_on(intel_dp);
	has_dpcd = intel_dp_get_dpcd(intel_dp);
	edp_panel_vdd_off(intel_dp, false);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* Unusable EDID: cache an error sentinel instead. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
	intel_panel_setup_backlight(connector);

	return true;
}
3771
/*
 * intel_dp_init_connector - create and wire up the DRM connector for a
 * DP/eDP digital port.
 *
 * Fills in the intel_dp vfuncs for the platform, preserves the current
 * hardware state of the output register, registers the connector with the
 * DRM core, assigns the hotplug pin for the port, and (for eDP) sets up
 * the panel power sequencing before probing the panel via
 * intel_edp_init_connector().
 *
 * Returns true on success.  On failure every resource acquired here is
 * unwound (AUX i2c bus, pending VDD work, sysfs entry, connector) and
 * false is returned; the caller then tears down the encoder.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
#ifdef __NetBSD__
	/*
	 * NetBSD's compiler environment apparently dislikes `{ 0 }` here;
	 * zero-initialize via a static const template instead.
	 */
	static const struct edp_power_seq zero_power_seq;
	struct edp_power_seq power_seq = zero_power_seq;
#else
	struct edp_power_seq power_seq = { 0 };
#endif
	int type;

	/* intel_dp vfuncs */
	/* Pick the AUX clock divider routine for this platform generation. */
	if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	/* One AUX send-control builder covers all platforms handled here. */
	intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	/* Register the connector with the DRM core. */
	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	/* Deferred work to drop panel VDD after an AUX transaction. */
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		BUG();
	}

	/*
	 * eDP panels need their power sequencing timings computed before we
	 * touch the AUX channel below; presumably AUX transactions require
	 * panel power — TODO confirm against the power sequencer code.
	 */
	if (is_edp(intel_dp)) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	intel_dp->psr_setup_done = false;

	/* Probe the panel; on failure unwind everything set up above. */
	if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
		drm_dp_aux_unregister_i2c_bus(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled; force it off now, with
			 * mode_config.mutex held as the vdd code expects.
			 */
			mutex_lock(&dev->mode_config.mutex);
			edp_panel_vdd_off_sync(intel_dp);
			mutex_unlock(&dev->mode_config.mutex);
		}
		drm_sysfs_connector_remove(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;
}
3894
3895 void
intel_dp_init(struct drm_device * dev,int output_reg,enum port port)3896 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3897 {
3898 struct intel_digital_port *intel_dig_port;
3899 struct intel_encoder *intel_encoder;
3900 struct drm_encoder *encoder;
3901 struct intel_connector *intel_connector;
3902
3903 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
3904 if (!intel_dig_port)
3905 return;
3906
3907 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
3908 if (!intel_connector) {
3909 kfree(intel_dig_port);
3910 return;
3911 }
3912
3913 intel_encoder = &intel_dig_port->base;
3914 encoder = &intel_encoder->base;
3915
3916 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
3917 DRM_MODE_ENCODER_TMDS);
3918
3919 intel_encoder->compute_config = intel_dp_compute_config;
3920 intel_encoder->mode_set = intel_dp_mode_set;
3921 intel_encoder->disable = intel_disable_dp;
3922 intel_encoder->post_disable = intel_post_disable_dp;
3923 intel_encoder->get_hw_state = intel_dp_get_hw_state;
3924 intel_encoder->get_config = intel_dp_get_config;
3925 if (IS_VALLEYVIEW(dev)) {
3926 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
3927 intel_encoder->pre_enable = vlv_pre_enable_dp;
3928 intel_encoder->enable = vlv_enable_dp;
3929 } else {
3930 intel_encoder->pre_enable = g4x_pre_enable_dp;
3931 intel_encoder->enable = g4x_enable_dp;
3932 }
3933
3934 intel_dig_port->port = port;
3935 intel_dig_port->dp.output_reg = output_reg;
3936
3937 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3938 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
3939 intel_encoder->cloneable = 0;
3940 intel_encoder->hot_plug = intel_dp_hot_plug;
3941
3942 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
3943 drm_encoder_cleanup(encoder);
3944 kfree(intel_dig_port);
3945 kfree(intel_connector);
3946 }
3947 }
3948