1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 * $FreeBSD: src/sys/dev/drm2/i915/intel_dp.c,v 1.1 2012/05/22 11:07:44 kib Exp $ 27 */ 28 29 #include <drm/drmP.h> 30 #include <drm/drm_crtc.h> 31 #include <drm/drm_crtc_helper.h> 32 #include <drm/drm_edid.h> 33 #include "intel_drv.h" 34 #include <drm/i915_drm.h> 35 #include "i915_drv.h" 36 37 #define DP_RECEIVER_CAP_SIZE 0xf 38 #define DP_LINK_STATUS_SIZE 6 39 #define DP_LINK_CHECK_TIMEOUT (10 * 1000) 40 41 #define DP_LINK_CONFIGURATION_SIZE 9 42 43 /** 44 * is_edp - is the given port attached to an eDP panel (either CPU or PCH) 45 * @intel_dp: DP struct 46 * 47 * If a CPU or PCH DP output is attached to an eDP panel, this function 48 * will return true, and false otherwise. 49 */ 50 static bool is_edp(struct intel_dp *intel_dp) 51 { 52 return intel_dp->base.type == INTEL_OUTPUT_EDP; 53 } 54 55 /** 56 * is_pch_edp - is the port on the PCH and attached to an eDP panel? 57 * @intel_dp: DP struct 58 * 59 * Returns true if the given DP struct corresponds to a PCH DP port attached 60 * to an eDP panel, false otherwise. Helpful for determining whether we 61 * may need FDI resources for a given DP output or not. 62 */ 63 static bool is_pch_edp(struct intel_dp *intel_dp) 64 { 65 return intel_dp->is_pch_edp; 66 } 67 68 /** 69 * is_cpu_edp - is the port on the CPU and attached to an eDP panel? 70 * @intel_dp: DP struct 71 * 72 * Returns true if the given DP struct corresponds to a CPU eDP port. 73 */ 74 static bool is_cpu_edp(struct intel_dp *intel_dp) 75 { 76 return is_edp(intel_dp) && !is_pch_edp(intel_dp); 77 } 78 79 static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) 80 { 81 return container_of(encoder, struct intel_dp, base.base); 82 } 83 84 static struct intel_dp *intel_attached_dp(struct drm_connector *connector) 85 { 86 return container_of(intel_attached_encoder(connector), 87 struct intel_dp, base); 88 } 89 90 /** 91 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP? 92 * @encoder: DRM encoder 93 * 94 * Return true if @encoder corresponds to a PCH attached eDP panel. Needed 95 * by intel_display.c. 96 */ 97 bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) 98 { 99 struct intel_dp *intel_dp; 100 101 if (!encoder) 102 return false; 103 104 intel_dp = enc_to_intel_dp(encoder); 105 106 return is_pch_edp(intel_dp); 107 } 108 109 static void intel_dp_start_link_train(struct intel_dp *intel_dp); 110 static void intel_dp_complete_link_train(struct intel_dp *intel_dp); 111 static void intel_dp_link_down(struct intel_dp *intel_dp); 112 113 void 114 intel_edp_link_config(struct intel_encoder *intel_encoder, 115 int *lane_num, int *link_bw) 116 { 117 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); 118 119 *lane_num = intel_dp->lane_count; 120 if (intel_dp->link_bw == DP_LINK_BW_1_62) 121 *link_bw = 162000; 122 else if (intel_dp->link_bw == DP_LINK_BW_2_7) 123 *link_bw = 270000; 124 } 125 126 static int 127 intel_dp_max_lane_count(struct intel_dp *intel_dp) 128 { 129 int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; 130 switch (max_lane_count) { 131 case 1: case 2: case 4: 132 break; 133 default: 134 max_lane_count = 4; 135 } 136 return max_lane_count; 137 } 138 139 static int 140 intel_dp_max_link_bw(struct intel_dp *intel_dp) 141 { 142 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 143 144 switch (max_link_bw) { 145 case DP_LINK_BW_1_62: 146 case DP_LINK_BW_2_7: 147 break; 148 default: 149 max_link_bw = DP_LINK_BW_1_62; 150 break; 151 } 152 return max_link_bw; 153 } 154 155 static int 156 intel_dp_link_clock(uint8_t link_bw) 157 { 158 if (link_bw == DP_LINK_BW_2_7) 159 return 270000; 160 else 161 return 162000; 162 } 163 164 /* 165 * The units on the numbers in the next two are... bizarre. Examples will 166 * make it clearer; this one parallels an example in the eDP spec. 167 * 168 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: 169 * 170 * 270000 * 1 * 8 / 10 == 216000 171 * 172 * The actual data capacity of that configuration is 2.16Gbit/s, so the 173 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - 174 * or equivalently, kilopixels per second - so for 1680x1050R it'd be 175 * 119000. At 18bpp that's 2142000 kilobits per second. 176 * 177 * Thus the strange-looking division by 10 in intel_dp_link_required, to 178 * get the result in decakilobits instead of kilobits. 179 */ 180 181 static int 182 intel_dp_link_required(int pixel_clock, int bpp) 183 { 184 return (pixel_clock * bpp + 9) / 10; 185 } 186 187 static int 188 intel_dp_max_data_rate(int max_link_clock, int max_lanes) 189 { 190 return (max_link_clock * max_lanes * 8) / 10; 191 } 192 193 static bool 194 intel_dp_adjust_dithering(struct intel_dp *intel_dp, 195 const struct drm_display_mode *mode, 196 struct drm_display_mode *adjusted_mode) 197 { 198 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); 199 int max_lanes = intel_dp_max_lane_count(intel_dp); 200 int max_rate, mode_rate; 201 202 mode_rate = intel_dp_link_required(mode->clock, 24); 203 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 204 205 if (mode_rate > max_rate) { 206 mode_rate = intel_dp_link_required(mode->clock, 18); 207 if (mode_rate > max_rate) 208 return false; 209 210 if (adjusted_mode) 211 adjusted_mode->private_flags 212 |= INTEL_MODE_DP_FORCE_6BPC; 213 214 return true; 215 } 216 217 return true; 218 } 219 220 static int 221 intel_dp_mode_valid(struct drm_connector *connector, 222 struct drm_display_mode *mode) 223 { 224 struct intel_dp *intel_dp = intel_attached_dp(connector); 225 226 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { 227 if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay) 228 return MODE_PANEL; 229 230 if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay) 231 return MODE_PANEL; 232 } 233 234 if (!intel_dp_adjust_dithering(intel_dp, mode, NULL)) 235 return MODE_CLOCK_HIGH; 236 237 if (mode->clock < 10000) 238 return MODE_CLOCK_LOW; 239 240 return MODE_OK; 241 } 242 243 static uint32_t 244 pack_aux(uint8_t *src, int src_bytes) 245 { 246 int i; 247 uint32_t v = 0; 248 249 if (src_bytes > 4) 250 src_bytes = 4; 251 for (i = 0; i < src_bytes; i++) 252 v |= ((uint32_t) src[i]) << ((3-i) * 8); 253 return v; 254 } 255 256 static void 257 unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) 258 { 259 int i; 260 if (dst_bytes > 4) 261 dst_bytes = 4; 262 for (i = 0; i < dst_bytes; i++) 263 dst[i] = src >> ((3-i) * 8); 264 } 265 266 /* hrawclock is 1/4 the FSB frequency */ 267 static int 268 intel_hrawclk(struct drm_device *dev) 269 { 270 struct drm_i915_private *dev_priv = dev->dev_private; 271 uint32_t clkcfg; 272 273 clkcfg = I915_READ(CLKCFG); 274 switch (clkcfg & CLKCFG_FSB_MASK) { 275 case CLKCFG_FSB_400: 276 return 100; 277 case CLKCFG_FSB_533: 278 return 133; 279 case CLKCFG_FSB_667: 280 return 166; 281 case CLKCFG_FSB_800: 282 return 200; 283 case CLKCFG_FSB_1067: 284 return 266; 285 case CLKCFG_FSB_1333: 286 return 333; 287 /* these two are just a guess; one of them might be right */ 288 case CLKCFG_FSB_1600: 289 case CLKCFG_FSB_1600_ALT: 290 return 400; 291 default: 292 return 133; 293 } 294 } 295 296 static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) 297 { 298 struct drm_device *dev = intel_dp->base.base.dev; 299 struct drm_i915_private *dev_priv = dev->dev_private; 300 301 return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0; 302 } 303 304 static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) 305 { 306 struct drm_device *dev = intel_dp->base.base.dev; 307 struct drm_i915_private *dev_priv = dev->dev_private; 308 309 return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0; 310 } 311 312 static void 313 intel_dp_check_edp(struct intel_dp *intel_dp) 314 { 315 struct drm_device *dev = intel_dp->base.base.dev; 316 struct drm_i915_private *dev_priv = dev->dev_private; 317 318 if (!is_edp(intel_dp)) 319 return; 320 if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { 321 kprintf("eDP powered off while attempting aux channel communication.\n"); 322 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", 323 I915_READ(PCH_PP_STATUS), 324 I915_READ(PCH_PP_CONTROL)); 325 } 326 } 327 328 static int 329 intel_dp_aux_ch(struct intel_dp *intel_dp, 330 uint8_t *send, int send_bytes, 331 uint8_t *recv, int recv_size) 332 { 333 uint32_t output_reg = intel_dp->output_reg; 334 struct drm_device *dev = intel_dp->base.base.dev; 335 struct drm_i915_private *dev_priv = dev->dev_private; 336 uint32_t ch_ctl = output_reg + 0x10; 337 uint32_t ch_data = ch_ctl + 4; 338 int i; 339 int recv_bytes; 340 uint32_t status; 341 uint32_t aux_clock_divider; 342 int try, precharge = 5; 343 344 intel_dp_check_edp(intel_dp); 345 /* The clock divider is based off the hrawclk, 346 * and would like to run at 2MHz. So, take the 347 * hrawclk value and divide by 2 and use that 348 * 349 * Note that PCH attached eDP panels should use a 125MHz input 350 * clock divider. 351 */ 352 if (is_cpu_edp(intel_dp)) { 353 if (IS_GEN6(dev) || IS_GEN7(dev)) 354 aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ 355 else 356 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 357 } else if (HAS_PCH_SPLIT(dev)) 358 aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */ 359 else 360 aux_clock_divider = intel_hrawclk(dev) / 2; 361 362 /* Try to wait for any previous AUX channel activity */ 363 for (try = 0; try < 3; try++) { 364 status = I915_READ(ch_ctl); 365 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 366 break; 367 DELAY(1000); 368 } 369 370 if (try == 3) { 371 kprintf("dp_aux_ch not started status 0x%08x\n", 372 I915_READ(ch_ctl)); 373 return -EBUSY; 374 } 375 376 /* Must try at least 3 times according to DP spec */ 377 for (try = 0; try < 5; try++) { 378 /* Load the send data into the aux channel data registers */ 379 for (i = 0; i < send_bytes; i += 4) 380 I915_WRITE(ch_data + i, 381 pack_aux(send + i, send_bytes - i)); 382 383 /* Send the command and wait for it to complete */ 384 I915_WRITE(ch_ctl, 385 DP_AUX_CH_CTL_SEND_BUSY | 386 DP_AUX_CH_CTL_TIME_OUT_400us | 387 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 388 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 389 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | 390 DP_AUX_CH_CTL_DONE | 391 DP_AUX_CH_CTL_TIME_OUT_ERROR | 392 DP_AUX_CH_CTL_RECEIVE_ERROR); 393 for (;;) { 394 status = I915_READ(ch_ctl); 395 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 396 break; 397 DELAY(100); 398 } 399 400 /* Clear done status and any errors */ 401 I915_WRITE(ch_ctl, 402 status | 403 DP_AUX_CH_CTL_DONE | 404 DP_AUX_CH_CTL_TIME_OUT_ERROR | 405 DP_AUX_CH_CTL_RECEIVE_ERROR); 406 407 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | 408 DP_AUX_CH_CTL_RECEIVE_ERROR)) 409 continue; 410 if (status & DP_AUX_CH_CTL_DONE) 411 break; 412 } 413 414 if ((status & DP_AUX_CH_CTL_DONE) == 0) { 415 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); 416 return -EBUSY; 417 } 418 419 /* Check for timeout or receive error. 420 * Timeouts occur when the sink is not connected 421 */ 422 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { 423 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); 424 return -EIO; 425 } 426 427 /* Timeouts occur when the device isn't connected, so they're 428 * "normal" -- don't fill the kernel log with these */ 429 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { 430 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); 431 return -ETIMEDOUT; 432 } 433 434 /* Unload any bytes sent back from the other side */ 435 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> 436 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); 437 if (recv_bytes > recv_size) 438 recv_bytes = recv_size; 439 440 for (i = 0; i < recv_bytes; i += 4) 441 unpack_aux(I915_READ(ch_data + i), 442 recv + i, recv_bytes - i); 443 444 return recv_bytes; 445 } 446 447 /* Write data to the aux channel in native mode */ 448 static int 449 intel_dp_aux_native_write(struct intel_dp *intel_dp, 450 uint16_t address, uint8_t *send, int send_bytes) 451 { 452 int ret; 453 uint8_t msg[20]; 454 int msg_bytes; 455 uint8_t ack; 456 457 intel_dp_check_edp(intel_dp); 458 if (send_bytes > 16) 459 return -1; 460 msg[0] = AUX_NATIVE_WRITE << 4; 461 msg[1] = address >> 8; 462 msg[2] = address & 0xff; 463 msg[3] = send_bytes - 1; 464 memcpy(&msg[4], send, send_bytes); 465 msg_bytes = send_bytes + 4; 466 for (;;) { 467 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); 468 if (ret < 0) 469 return ret; 470 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 471 break; 472 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 473 DELAY(100); 474 else 475 return -EIO; 476 } 477 return send_bytes; 478 } 479 480 /* Write a single byte to the aux channel in native mode */ 481 static int 482 intel_dp_aux_native_write_1(struct intel_dp *intel_dp, 483 uint16_t address, uint8_t byte) 484 { 485 return intel_dp_aux_native_write(intel_dp, address, &byte, 1); 486 } 487 488 /* read bytes from a native aux channel */ 489 static int 490 intel_dp_aux_native_read(struct intel_dp *intel_dp, 491 uint16_t address, uint8_t *recv, int recv_bytes) 492 { 493 uint8_t msg[4]; 494 int msg_bytes; 495 uint8_t reply[20]; 496 int reply_bytes; 497 uint8_t ack; 498 int ret; 499 500 intel_dp_check_edp(intel_dp); 501 msg[0] = AUX_NATIVE_READ << 4; 502 msg[1] = address >> 8; 503 msg[2] = address & 0xff; 504 msg[3] = recv_bytes - 1; 505 506 msg_bytes = 4; 507 reply_bytes = recv_bytes + 1; 508 509 for (;;) { 510 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, 511 reply, reply_bytes); 512 if (ret == 0) 513 return -EPROTO; 514 if (ret < 0) 515 return ret; 516 ack = reply[0]; 517 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { 518 memcpy(recv, reply + 1, ret - 1); 519 return ret - 1; 520 } 521 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 522 DELAY(100); 523 else 524 return -EIO; 525 } 526 } 527 528 static int 529 intel_dp_i2c_aux_ch(device_t idev, int mode, uint8_t write_byte, 530 uint8_t *read_byte) 531 { 532 struct iic_dp_aux_data *data; 533 struct intel_dp *intel_dp; 534 uint16_t address; 535 uint8_t msg[5]; 536 uint8_t reply[2]; 537 unsigned retry; 538 int msg_bytes; 539 int reply_bytes; 540 int ret; 541 542 data = device_get_softc(idev); 543 intel_dp = data->priv; 544 address = data->address; 545 546 intel_dp_check_edp(intel_dp); 547 /* Set up the command byte */ 548 if (mode & MODE_I2C_READ) 549 msg[0] = AUX_I2C_READ << 4; 550 else 551 msg[0] = AUX_I2C_WRITE << 4; 552 553 if (!(mode & MODE_I2C_STOP)) 554 msg[0] |= AUX_I2C_MOT << 4; 555 556 msg[1] = address >> 8; 557 msg[2] = address; 558 559 switch (mode) { 560 case MODE_I2C_WRITE: 561 msg[3] = 0; 562 msg[4] = write_byte; 563 msg_bytes = 5; 564 reply_bytes = 1; 565 break; 566 case MODE_I2C_READ: 567 msg[3] = 0; 568 msg_bytes = 4; 569 reply_bytes = 2; 570 break; 571 default: 572 msg_bytes = 3; 573 reply_bytes = 1; 574 break; 575 } 576 577 for (retry = 0; retry < 5; retry++) { 578 ret = intel_dp_aux_ch(intel_dp, 579 msg, msg_bytes, 580 reply, reply_bytes); 581 if (ret < 0) { 582 DRM_DEBUG_KMS("aux_ch failed %d\n", ret); 583 return (-ret); 584 } 585 586 switch (reply[0] & AUX_NATIVE_REPLY_MASK) { 587 case AUX_NATIVE_REPLY_ACK: 588 /* I2C-over-AUX Reply field is only valid 589 * when paired with AUX ACK. 590 */ 591 break; 592 case AUX_NATIVE_REPLY_NACK: 593 DRM_DEBUG_KMS("aux_ch native nack\n"); 594 return (EREMOTEIO); 595 case AUX_NATIVE_REPLY_DEFER: 596 DELAY(100); 597 continue; 598 default: 599 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", 600 reply[0]); 601 return (EREMOTEIO); 602 } 603 604 switch (reply[0] & AUX_I2C_REPLY_MASK) { 605 case AUX_I2C_REPLY_ACK: 606 if (mode == MODE_I2C_READ) { 607 *read_byte = reply[1]; 608 } 609 return (0/*reply_bytes - 1*/); 610 case AUX_I2C_REPLY_NACK: 611 DRM_DEBUG_KMS("aux_i2c nack\n"); 612 return (EREMOTEIO); 613 case AUX_I2C_REPLY_DEFER: 614 DRM_DEBUG_KMS("aux_i2c defer\n"); 615 DELAY(100); 616 break; 617 default: 618 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); 619 return (EREMOTEIO); 620 } 621 } 622 623 DRM_ERROR("too many retries, giving up\n"); 624 return (EREMOTEIO); 625 } 626 627 static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp); 628 static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); 629 630 static int 631 intel_dp_i2c_init(struct intel_dp *intel_dp, 632 struct intel_connector *intel_connector, const char *name) 633 { 634 int ret; 635 636 DRM_DEBUG_KMS("i2c_init %s\n", name); 637 638 ironlake_edp_panel_vdd_on(intel_dp); 639 ret = iic_dp_aux_add_bus(intel_connector->base.dev->dev, name, 640 intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus, 641 &intel_dp->adapter); 642 ironlake_edp_panel_vdd_off(intel_dp, false); 643 return (ret); 644 } 645 646 static bool 647 intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, 648 struct drm_display_mode *adjusted_mode) 649 { 650 struct drm_device *dev = encoder->dev; 651 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 652 int lane_count, clock; 653 int max_lane_count = intel_dp_max_lane_count(intel_dp); 654 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; 655 int bpp; 656 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 657 658 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { 659 intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode); 660 intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, 661 mode, adjusted_mode); 662 } 663 664 if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, adjusted_mode)) 665 return false; 666 667 bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24; 668 669 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 670 for (clock = 0; clock <= max_clock; clock++) { 671 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 672 673 if (intel_dp_link_required(adjusted_mode->clock, bpp) 674 <= link_avail) { 675 intel_dp->link_bw = bws[clock]; 676 intel_dp->lane_count = lane_count; 677 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); 678 DRM_DEBUG_KMS("Display port link bw %02x lane " 679 "count %d clock %d\n", 680 intel_dp->link_bw, intel_dp->lane_count, 681 adjusted_mode->clock); 682 return true; 683 } 684 } 685 } 686 687 return false; 688 } 689 690 struct intel_dp_m_n { 691 uint32_t tu; 692 uint32_t gmch_m; 693 uint32_t gmch_n; 694 uint32_t link_m; 695 uint32_t link_n; 696 }; 697 698 static void 699 intel_reduce_ratio(uint32_t *num, uint32_t *den) 700 { 701 while (*num > 0xffffff || *den > 0xffffff) { 702 *num >>= 1; 703 *den >>= 1; 704 } 705 } 706 707 static void 708 intel_dp_compute_m_n(int bpp, 709 int nlanes, 710 int pixel_clock, 711 int link_clock, 712 struct intel_dp_m_n *m_n) 713 { 714 m_n->tu = 64; 715 m_n->gmch_m = (pixel_clock * bpp) >> 3; 716 m_n->gmch_n = link_clock * nlanes; 717 intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 718 m_n->link_m = pixel_clock; 719 m_n->link_n = link_clock; 720 intel_reduce_ratio(&m_n->link_m, &m_n->link_n); 721 } 722 723 void 724 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 725 struct drm_display_mode *adjusted_mode) 726 { 727 struct drm_device *dev = crtc->dev; 728 struct drm_mode_config *mode_config = &dev->mode_config; 729 struct drm_encoder *encoder; 730 struct drm_i915_private *dev_priv = dev->dev_private; 731 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 732 int lane_count = 4; 733 struct intel_dp_m_n m_n; 734 int pipe = intel_crtc->pipe; 735 736 /* 737 * Find the lane count in the intel_encoder private 738 */ 739 list_for_each_entry(encoder, &mode_config->encoder_list, head) { 740 struct intel_dp *intel_dp; 741 742 if (encoder->crtc != crtc) 743 continue; 744 745 intel_dp = enc_to_intel_dp(encoder); 746 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || 747 intel_dp->base.type == INTEL_OUTPUT_EDP) 748 { 749 lane_count = intel_dp->lane_count; 750 break; 751 } 752 } 753 754 /* 755 * Compute the GMCH and Link ratios. The '3' here is 756 * the number of bytes_per_pixel post-LUT, which we always 757 * set up for 8-bits of R/G/B, or 3 bytes total. 758 */ 759 intel_dp_compute_m_n(intel_crtc->bpp, lane_count, 760 mode->clock, adjusted_mode->clock, &m_n); 761 762 if (HAS_PCH_SPLIT(dev)) { 763 I915_WRITE(TRANSDATA_M1(pipe), 764 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | 765 m_n.gmch_m); 766 I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); 767 I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); 768 I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); 769 } else { 770 I915_WRITE(PIPE_GMCH_DATA_M(pipe), 771 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | 772 m_n.gmch_m); 773 I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); 774 I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); 775 I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); 776 } 777 } 778 779 static void ironlake_edp_pll_on(struct drm_encoder *encoder); 780 static void ironlake_edp_pll_off(struct drm_encoder *encoder); 781 782 static void 783 intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, 784 struct drm_display_mode *adjusted_mode) 785 { 786 struct drm_device *dev = encoder->dev; 787 struct drm_i915_private *dev_priv = dev->dev_private; 788 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 789 struct drm_crtc *crtc = intel_dp->base.base.crtc; 790 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 791 792 /* Turn on the eDP PLL if needed */ 793 if (is_edp(intel_dp)) { 794 if (!is_pch_edp(intel_dp)) 795 ironlake_edp_pll_on(encoder); 796 else 797 ironlake_edp_pll_off(encoder); 798 } 799 800 /* 801 * There are four kinds of DP registers: 802 * 803 * IBX PCH 804 * SNB CPU 805 * IVB CPU 806 * CPT PCH 807 * 808 * IBX PCH and CPU are the same for almost everything, 809 * except that the CPU DP PLL is configured in this 810 * register 811 * 812 * CPT PCH is quite different, having many bits moved 813 * to the TRANS_DP_CTL register instead. That 814 * configuration happens (oddly) in ironlake_pch_enable 815 */ 816 817 /* Preserve the BIOS-computed detected bit. This is 818 * supposed to be read-only. 819 */ 820 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; 821 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 822 823 /* Handle DP bits in common between all three register formats */ 824 825 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 826 827 switch (intel_dp->lane_count) { 828 case 1: 829 intel_dp->DP |= DP_PORT_WIDTH_1; 830 break; 831 case 2: 832 intel_dp->DP |= DP_PORT_WIDTH_2; 833 break; 834 case 4: 835 intel_dp->DP |= DP_PORT_WIDTH_4; 836 break; 837 } 838 if (intel_dp->has_audio) { 839 DRM_DEBUG_KMS("Enabling DP audio on pipe %c\n", 840 pipe_name(intel_crtc->pipe)); 841 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 842 intel_write_eld(encoder, adjusted_mode); 843 } 844 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 845 intel_dp->link_configuration[0] = intel_dp->link_bw; 846 intel_dp->link_configuration[1] = intel_dp->lane_count; 847 /* 848 * Check for DPCD version > 1.1 and enhanced framing support 849 */ 850 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 851 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { 852 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 853 } 854 855 /* Split out the IBX/CPU vs CPT settings */ 856 857 if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) { 858 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 859 intel_dp->DP |= DP_SYNC_HS_HIGH; 860 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 861 intel_dp->DP |= DP_SYNC_VS_HIGH; 862 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 863 864 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 865 intel_dp->DP |= DP_ENHANCED_FRAMING; 866 867 intel_dp->DP |= intel_crtc->pipe << 29; 868 869 /* don't miss out required setting for eDP */ 870 intel_dp->DP |= DP_PLL_ENABLE; 871 if (adjusted_mode->clock < 200000) 872 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 873 else 874 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 875 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { 876 intel_dp->DP |= intel_dp->color_range; 877 878 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 879 intel_dp->DP |= DP_SYNC_HS_HIGH; 880 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 881 intel_dp->DP |= DP_SYNC_VS_HIGH; 882 intel_dp->DP |= DP_LINK_TRAIN_OFF; 883 884 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 885 intel_dp->DP |= DP_ENHANCED_FRAMING; 886 887 if (intel_crtc->pipe == 1) 888 intel_dp->DP |= DP_PIPEB_SELECT; 889 890 if (is_cpu_edp(intel_dp)) { 891 /* don't miss out required setting for eDP */ 892 intel_dp->DP |= DP_PLL_ENABLE; 893 if (adjusted_mode->clock < 200000) 894 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 895 else 896 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 897 } 898 } else { 899 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 900 } 901 } 902 903 #define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 904 #define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 905 906 #define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 907 #define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 908 909 #define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 910 #define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 911 912 static void ironlake_wait_panel_status(struct intel_dp *intel_dp, 913 u32 mask, 914 u32 value) 915 { 916 struct drm_device *dev = intel_dp->base.base.dev; 917 struct drm_i915_private *dev_priv = dev->dev_private; 918 919 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", 920 mask, value, 921 I915_READ(PCH_PP_STATUS), 922 I915_READ(PCH_PP_CONTROL)); 923 924 if (_intel_wait_for(dev, 925 (I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10, "915iwp")) { 926 DRM_ERROR("Panel status timeout: status %08x control %08x\n", 927 I915_READ(PCH_PP_STATUS), 928 I915_READ(PCH_PP_CONTROL)); 929 } 930 } 931 932 static void ironlake_wait_panel_on(struct intel_dp *intel_dp) 933 { 934 DRM_DEBUG_KMS("Wait for panel power on\n"); 935 ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 936 } 937 938 static void ironlake_wait_panel_off(struct intel_dp *intel_dp) 939 { 940 DRM_DEBUG_KMS("Wait for panel power off time\n"); 941 ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 942 } 943 944 static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp) 945 { 946 DRM_DEBUG_KMS("Wait for panel power cycle\n"); 947 ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 948 } 949 950 951 /* Read the current pp_control value, unlocking the register if it 952 * is locked 953 */ 954 955 static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv) 956 { 957 u32 control = I915_READ(PCH_PP_CONTROL); 958 959 control &= ~PANEL_UNLOCK_MASK; 960 control |= PANEL_UNLOCK_REGS; 961 return control; 962 } 963 964 static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) 965 { 966 struct drm_device *dev = intel_dp->base.base.dev; 967 struct drm_i915_private *dev_priv = dev->dev_private; 968 u32 pp; 969 970 if (!is_edp(intel_dp)) 971 return; 972 DRM_DEBUG_KMS("Turn eDP VDD on\n"); 973 974 if (intel_dp->want_panel_vdd) 975 kprintf("eDP VDD already requested on\n"); 976 977 intel_dp->want_panel_vdd = true; 978 979 if (ironlake_edp_have_panel_vdd(intel_dp)) { 980 DRM_DEBUG_KMS("eDP VDD already on\n"); 981 return; 982 } 983 984 if (!ironlake_edp_have_panel_power(intel_dp)) 985 ironlake_wait_panel_power_cycle(intel_dp); 986 987 pp = ironlake_get_pp_control(dev_priv); 988 pp |= EDP_FORCE_VDD; 989 I915_WRITE(PCH_PP_CONTROL, pp); 990 POSTING_READ(PCH_PP_CONTROL); 991 DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", 992 I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); 993 994 /* 995 * If the panel wasn't on, delay before accessing aux channel 996 */ 997 if (!ironlake_edp_have_panel_power(intel_dp)) { 998 DRM_DEBUG_KMS("eDP was not running\n"); 999 DELAY(intel_dp->panel_power_up_delay * 1000); 1000 } 1001 } 1002 1003 static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) 1004 { 1005 struct drm_device *dev = intel_dp->base.base.dev; 1006 struct drm_i915_private *dev_priv = dev->dev_private; 1007 u32 pp; 1008 1009 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { 1010 pp = ironlake_get_pp_control(dev_priv); 1011 pp &= ~EDP_FORCE_VDD; 1012 I915_WRITE(PCH_PP_CONTROL, pp); 1013 POSTING_READ(PCH_PP_CONTROL); 1014 1015 /* Make sure sequencer is idle before allowing subsequent activity */ 1016 DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", 1017 I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); 1018 1019 DELAY(intel_dp->panel_power_down_delay * 1000); 1020 } 1021 } 1022 1023 static void ironlake_panel_vdd_work(struct work_struct *__work) 1024 { 1025 struct intel_dp *intel_dp = container_of(to_delayed_work(__work), 1026 struct intel_dp, panel_vdd_work); 1027 struct drm_device *dev = intel_dp->base.base.dev; 1028 1029 lockmgr(&dev->mode_config.mutex, LK_EXCLUSIVE); 1030 ironlake_panel_vdd_off_sync(intel_dp); 1031 lockmgr(&dev->mode_config.mutex, LK_RELEASE); 1032 } 1033 1034 static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 1035 { 1036 if (!is_edp(intel_dp)) 1037 return; 1038 1039 DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); 1040 if (!intel_dp->want_panel_vdd) 1041 kprintf("eDP VDD not forced on\n"); 1042 1043 intel_dp->want_panel_vdd = false; 1044 1045 if (sync) { 1046 ironlake_panel_vdd_off_sync(intel_dp); 1047 } else { 1048 /* 1049 * Queue the timer to fire a long 1050 * time from now (relative to the power down delay) 1051 * to keep the panel power up across a sequence of operations 1052 */ 1053 schedule_delayed_work(&intel_dp->panel_vdd_work, 1054 msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5)); 1055 } 1056 } 1057 1058 static void ironlake_edp_panel_on(struct intel_dp *intel_dp) 1059 { 1060 struct drm_device *dev = intel_dp->base.base.dev; 1061 struct drm_i915_private *dev_priv = dev->dev_private; 1062 u32 pp; 1063 1064 if (!is_edp(intel_dp)) 1065 return; 1066 1067 DRM_DEBUG_KMS("Turn eDP power on\n"); 1068 1069 if (ironlake_edp_have_panel_power(intel_dp)) { 1070 DRM_DEBUG_KMS("eDP power already on\n"); 1071 return; 1072 } 1073 1074 ironlake_wait_panel_power_cycle(intel_dp); 1075 1076 pp = ironlake_get_pp_control(dev_priv); 1077 if (IS_GEN5(dev)) { 1078 /* ILK workaround: disable reset around power sequence */ 1079 pp &= ~PANEL_POWER_RESET; 1080 I915_WRITE(PCH_PP_CONTROL, pp); 1081 POSTING_READ(PCH_PP_CONTROL); 1082 } 1083 1084 pp |= POWER_TARGET_ON; 1085 if (!IS_GEN5(dev)) 1086 pp |= PANEL_POWER_RESET; 1087 1088 I915_WRITE(PCH_PP_CONTROL, pp); 1089 POSTING_READ(PCH_PP_CONTROL); 1090 1091 ironlake_wait_panel_on(intel_dp); 1092 1093 if (IS_GEN5(dev)) { 1094 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 1095 I915_WRITE(PCH_PP_CONTROL, pp); 1096 POSTING_READ(PCH_PP_CONTROL); 1097 } 1098 } 1099 1100 static void ironlake_edp_panel_off(struct intel_dp *intel_dp) 1101 { 1102 struct drm_device *dev = intel_dp->base.base.dev; 1103 struct drm_i915_private *dev_priv = dev->dev_private; 1104 u32 pp; 1105 1106 if (!is_edp(intel_dp)) 1107 return; 1108 1109 DRM_DEBUG_KMS("Turn eDP power off\n"); 1110 1111 if (intel_dp->want_panel_vdd) 1112 kprintf("Cannot turn power off while VDD is on\n"); 1113 1114 pp = ironlake_get_pp_control(dev_priv); 1115 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); 1116 I915_WRITE(PCH_PP_CONTROL, pp); 1117 POSTING_READ(PCH_PP_CONTROL); 1118 1119 ironlake_wait_panel_off(intel_dp); 1120 } 1121 1122 static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) 1123 { 1124 struct drm_device *dev = intel_dp->base.base.dev; 1125 struct drm_i915_private *dev_priv = dev->dev_private; 1126 u32 pp; 1127 1128 if (!is_edp(intel_dp)) 1129 return; 1130 1131 DRM_DEBUG_KMS("\n"); 1132 /* 1133 * If we enable the backlight right away following a panel power 1134 * on, we may see slight flicker as the panel syncs with the eDP 1135 * link. So delay a bit to make sure the image is solid before 1136 * allowing it to appear. 1137 */ 1138 DELAY(intel_dp->backlight_on_delay * 1000); 1139 pp = ironlake_get_pp_control(dev_priv); 1140 pp |= EDP_BLC_ENABLE; 1141 I915_WRITE(PCH_PP_CONTROL, pp); 1142 POSTING_READ(PCH_PP_CONTROL); 1143 } 1144 1145 static void ironlake_edp_backlight_off(struct intel_dp *intel_dp) 1146 { 1147 struct drm_device *dev = intel_dp->base.base.dev; 1148 struct drm_i915_private *dev_priv = dev->dev_private; 1149 u32 pp; 1150 1151 if (!is_edp(intel_dp)) 1152 return; 1153 1154 DRM_DEBUG_KMS("\n"); 1155 pp = ironlake_get_pp_control(dev_priv); 1156 pp &= ~EDP_BLC_ENABLE; 1157 I915_WRITE(PCH_PP_CONTROL, pp); 1158 POSTING_READ(PCH_PP_CONTROL); 1159 DELAY(intel_dp->backlight_off_delay * 1000); 1160 } 1161 1162 static void ironlake_edp_pll_on(struct drm_encoder *encoder) 1163 { 1164 struct drm_device *dev = encoder->dev; 1165 struct drm_i915_private *dev_priv = dev->dev_private; 1166 u32 dpa_ctl; 1167 1168 DRM_DEBUG_KMS("\n"); 1169 dpa_ctl = I915_READ(DP_A); 1170 dpa_ctl |= DP_PLL_ENABLE; 1171 I915_WRITE(DP_A, dpa_ctl); 1172 POSTING_READ(DP_A); 1173 DELAY(200); 1174 } 1175 1176 static void ironlake_edp_pll_off(struct drm_encoder *encoder) 1177 { 1178 struct drm_device *dev = encoder->dev; 1179 struct drm_i915_private *dev_priv = dev->dev_private; 1180 u32 dpa_ctl; 1181 1182 dpa_ctl = I915_READ(DP_A); 1183 dpa_ctl &= ~DP_PLL_ENABLE; 1184 I915_WRITE(DP_A, dpa_ctl); 1185 POSTING_READ(DP_A); 1186 DELAY(200); 1187 } 1188 1189 /* If the sink supports it, try to set the power state appropriately */ 1190 static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) 1191 { 1192 int ret, i; 1193 1194 /* Should have a valid DPCD by this point */ 1195 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 1196 return; 1197 1198 if (mode != DRM_MODE_DPMS_ON) { 1199 ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER, 1200 DP_SET_POWER_D3); 1201 if (ret != 1) 1202 DRM_DEBUG("failed to write sink power state\n"); 1203 } else { 1204 /* 1205 * When turning on, we need to retry for 1ms to give the sink 1206 * time to wake up. 1207 */ 1208 for (i = 0; i < 3; i++) { 1209 ret = intel_dp_aux_native_write_1(intel_dp, 1210 DP_SET_POWER, 1211 DP_SET_POWER_D0); 1212 if (ret == 1) 1213 break; 1214 DELAY(1000); 1215 } 1216 } 1217 } 1218 1219 static void intel_dp_prepare(struct drm_encoder *encoder) 1220 { 1221 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1222 1223 ironlake_edp_backlight_off(intel_dp); 1224 ironlake_edp_panel_off(intel_dp); 1225 1226 /* Wake up the sink first */ 1227 ironlake_edp_panel_vdd_on(intel_dp); 1228 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1229 intel_dp_link_down(intel_dp); 1230 ironlake_edp_panel_vdd_off(intel_dp, false); 1231 1232 /* Make sure the panel is off before trying to 1233 * change the mode 1234 */ 1235 } 1236 1237 static void intel_dp_commit(struct drm_encoder *encoder) 1238 { 1239 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1240 struct drm_device *dev = encoder->dev; 1241 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); 1242 1243 ironlake_edp_panel_vdd_on(intel_dp); 1244 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1245 intel_dp_start_link_train(intel_dp); 1246 ironlake_edp_panel_on(intel_dp); 1247 ironlake_edp_panel_vdd_off(intel_dp, true); 1248 intel_dp_complete_link_train(intel_dp); 1249 ironlake_edp_backlight_on(intel_dp); 1250 1251 intel_dp->dpms_mode = DRM_MODE_DPMS_ON; 1252 1253 if (HAS_PCH_CPT(dev)) 1254 intel_cpt_verify_modeset(dev, intel_crtc->pipe); 1255 } 1256 1257 static void 1258 intel_dp_dpms(struct drm_encoder *encoder, int mode) 1259 { 1260 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1261 struct drm_device *dev = encoder->dev; 1262 struct drm_i915_private *dev_priv = dev->dev_private; 1263 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 1264 1265 if (mode != DRM_MODE_DPMS_ON) { 1266 ironlake_edp_backlight_off(intel_dp); 1267 ironlake_edp_panel_off(intel_dp); 1268 1269 ironlake_edp_panel_vdd_on(intel_dp); 1270 intel_dp_sink_dpms(intel_dp, mode); 1271 intel_dp_link_down(intel_dp); 1272 ironlake_edp_panel_vdd_off(intel_dp, false); 1273 1274 if (is_cpu_edp(intel_dp)) 1275 ironlake_edp_pll_off(encoder); 1276 } else { 1277 if (is_cpu_edp(intel_dp)) 1278 ironlake_edp_pll_on(encoder); 1279 1280 ironlake_edp_panel_vdd_on(intel_dp); 1281 intel_dp_sink_dpms(intel_dp, mode); 1282 if (!(dp_reg & DP_PORT_EN)) { 1283 intel_dp_start_link_train(intel_dp); 1284 ironlake_edp_panel_on(intel_dp); 1285 ironlake_edp_panel_vdd_off(intel_dp, true); 1286 intel_dp_complete_link_train(intel_dp); 1287 } else 1288 ironlake_edp_panel_vdd_off(intel_dp, false); 1289 ironlake_edp_backlight_on(intel_dp); 1290 } 1291 intel_dp->dpms_mode = mode; 1292 } 1293 /* 1294 * Native read with retry for link status and receiver capability reads for 1295 * cases where the sink may still be asleep. 1296 */ 1297 static bool 1298 intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, 1299 uint8_t *recv, int recv_bytes) 1300 { 1301 int ret, i; 1302 1303 /* 1304 * Sinks are *supposed* to come up within 1ms from an off state, 1305 * but we're also supposed to retry 3 times per the spec. 1306 */ 1307 for (i = 0; i < 3; i++) { 1308 ret = intel_dp_aux_native_read(intel_dp, address, recv, 1309 recv_bytes); 1310 if (ret == recv_bytes) 1311 return true; 1312 DELAY(1000); 1313 } 1314 1315 return false; 1316 } 1317 1318 /* 1319 * Fetch AUX CH registers 0x202 - 0x207 which contain 1320 * link status information 1321 */ 1322 static bool 1323 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1324 { 1325 return intel_dp_aux_native_read_retry(intel_dp, 1326 DP_LANE0_1_STATUS, 1327 link_status, 1328 DP_LINK_STATUS_SIZE); 1329 } 1330 1331 static uint8_t 1332 intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], 1333 int r) 1334 { 1335 return link_status[r - DP_LANE0_1_STATUS]; 1336 } 1337 1338 static uint8_t 1339 intel_get_adjust_request_voltage(uint8_t adjust_request[2], 1340 int lane) 1341 { 1342 int s = ((lane & 1) ? 1343 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : 1344 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); 1345 uint8_t l = adjust_request[lane>>1]; 1346 1347 return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; 1348 } 1349 1350 static uint8_t 1351 intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2], 1352 int lane) 1353 { 1354 int s = ((lane & 1) ? 1355 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : 1356 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); 1357 uint8_t l = adjust_request[lane>>1]; 1358 1359 return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; 1360 } 1361 1362 1363 #if 0 1364 static char *voltage_names[] = { 1365 "0.4V", "0.6V", "0.8V", "1.2V" 1366 }; 1367 static char *pre_emph_names[] = { 1368 "0dB", "3.5dB", "6dB", "9.5dB" 1369 }; 1370 static char *link_train_names[] = { 1371 "pattern 1", "pattern 2", "idle", "off" 1372 }; 1373 #endif 1374 1375 /* 1376 * These are source-specific values; current Intel hardware supports 1377 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB 1378 */ 1379 1380 static uint8_t 1381 intel_dp_voltage_max(struct intel_dp *intel_dp) 1382 { 1383 struct drm_device *dev = intel_dp->base.base.dev; 1384 1385 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) 1386 return DP_TRAIN_VOLTAGE_SWING_800; 1387 else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 1388 return DP_TRAIN_VOLTAGE_SWING_1200; 1389 else 1390 return DP_TRAIN_VOLTAGE_SWING_800; 1391 } 1392 1393 static uint8_t 1394 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) 1395 { 1396 struct drm_device *dev = intel_dp->base.base.dev; 1397 1398 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { 1399 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1400 case DP_TRAIN_VOLTAGE_SWING_400: 1401 return DP_TRAIN_PRE_EMPHASIS_6; 1402 case DP_TRAIN_VOLTAGE_SWING_600: 1403 case DP_TRAIN_VOLTAGE_SWING_800: 1404 return DP_TRAIN_PRE_EMPHASIS_3_5; 1405 default: 1406 return DP_TRAIN_PRE_EMPHASIS_0; 1407 } 1408 } else { 1409 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1410 case DP_TRAIN_VOLTAGE_SWING_400: 1411 return DP_TRAIN_PRE_EMPHASIS_6; 1412 case DP_TRAIN_VOLTAGE_SWING_600: 1413 return DP_TRAIN_PRE_EMPHASIS_6; 1414 case DP_TRAIN_VOLTAGE_SWING_800: 1415 return DP_TRAIN_PRE_EMPHASIS_3_5; 1416 case DP_TRAIN_VOLTAGE_SWING_1200: 1417 default: 1418 return DP_TRAIN_PRE_EMPHASIS_0; 1419 } 1420 } 1421 } 1422 1423 static void 1424 intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1425 { 1426 uint8_t v = 0; 1427 uint8_t p = 0; 1428 int lane; 1429 uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS); 1430 uint8_t voltage_max; 1431 uint8_t preemph_max; 1432 1433 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1434 uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane); 1435 uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane); 1436 1437 if (this_v > v) 1438 v = this_v; 1439 if (this_p > p) 1440 p = this_p; 1441 } 1442 1443 voltage_max = intel_dp_voltage_max(intel_dp); 1444 if (v >= voltage_max) 1445 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; 1446 1447 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); 1448 if (p >= preemph_max) 1449 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 1450 1451 for (lane = 0; lane < 4; lane++) 1452 intel_dp->train_set[lane] = v | p; 1453 } 1454 1455 static uint32_t 1456 intel_dp_signal_levels(uint8_t train_set) 1457 { 1458 uint32_t signal_levels = 0; 1459 1460 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 1461 case DP_TRAIN_VOLTAGE_SWING_400: 1462 default: 1463 signal_levels |= DP_VOLTAGE_0_4; 1464 break; 1465 case DP_TRAIN_VOLTAGE_SWING_600: 1466 signal_levels |= DP_VOLTAGE_0_6; 1467 break; 1468 case DP_TRAIN_VOLTAGE_SWING_800: 1469 signal_levels |= DP_VOLTAGE_0_8; 1470 break; 1471 case DP_TRAIN_VOLTAGE_SWING_1200: 1472 signal_levels |= DP_VOLTAGE_1_2; 1473 break; 1474 } 1475 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 1476 case DP_TRAIN_PRE_EMPHASIS_0: 1477 default: 1478 signal_levels |= DP_PRE_EMPHASIS_0; 1479 break; 1480 case DP_TRAIN_PRE_EMPHASIS_3_5: 1481 signal_levels |= DP_PRE_EMPHASIS_3_5; 1482 break; 1483 case DP_TRAIN_PRE_EMPHASIS_6: 1484 signal_levels |= DP_PRE_EMPHASIS_6; 1485 break; 1486 case DP_TRAIN_PRE_EMPHASIS_9_5: 1487 signal_levels |= DP_PRE_EMPHASIS_9_5; 1488 break; 1489 } 1490 return signal_levels; 1491 } 1492 1493 /* Gen6's DP voltage swing and pre-emphasis control */ 1494 static uint32_t 1495 intel_gen6_edp_signal_levels(uint8_t train_set) 1496 { 1497 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1498 DP_TRAIN_PRE_EMPHASIS_MASK); 1499 switch (signal_levels) { 1500 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1501 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 1502 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 1503 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 1504 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 1505 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1506 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: 1507 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 1508 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 1509 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 1510 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 1511 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1512 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: 1513 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 1514 default: 1515 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1516 "0x%x\n", signal_levels); 1517 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 1518 } 1519 } 1520 1521 /* Gen7's DP voltage swing and pre-emphasis control */ 1522 static uint32_t 1523 intel_gen7_edp_signal_levels(uint8_t train_set) 1524 { 1525 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1526 DP_TRAIN_PRE_EMPHASIS_MASK); 1527 switch (signal_levels) { 1528 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1529 return EDP_LINK_TRAIN_400MV_0DB_IVB; 1530 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 1531 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 1532 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1533 return EDP_LINK_TRAIN_400MV_6DB_IVB; 1534 1535 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 1536 return EDP_LINK_TRAIN_600MV_0DB_IVB; 1537 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 1538 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 1539 1540 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1541 return EDP_LINK_TRAIN_800MV_0DB_IVB; 1542 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 1543 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 1544 1545 default: 1546 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1547 "0x%x\n", signal_levels); 1548 return EDP_LINK_TRAIN_500MV_0DB_IVB; 1549 } 1550 } 1551 1552 static uint8_t 1553 intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], 1554 int lane) 1555 { 1556 int s = (lane & 1) * 4; 1557 uint8_t l = link_status[lane>>1]; 1558 1559 return (l >> s) & 0xf; 1560 } 1561 1562 /* Check for clock recovery is done on all channels */ 1563 static bool 1564 intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) 1565 { 1566 int lane; 1567 uint8_t lane_status; 1568 1569 for (lane = 0; lane < lane_count; lane++) { 1570 lane_status = intel_get_lane_status(link_status, lane); 1571 if ((lane_status & DP_LANE_CR_DONE) == 0) 1572 return false; 1573 } 1574 return true; 1575 } 1576 1577 /* Check to see if channel eq is done on all channels */ 1578 #define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\ 1579 DP_LANE_CHANNEL_EQ_DONE|\ 1580 DP_LANE_SYMBOL_LOCKED) 1581 static bool 1582 intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1583 { 1584 uint8_t lane_align; 1585 uint8_t lane_status; 1586 int lane; 1587 1588 lane_align = intel_dp_link_status(link_status, 1589 DP_LANE_ALIGN_STATUS_UPDATED); 1590 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) 1591 return false; 1592 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1593 lane_status = intel_get_lane_status(link_status, lane); 1594 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) 1595 return false; 1596 } 1597 return true; 1598 } 1599 1600 static bool 1601 intel_dp_set_link_train(struct intel_dp *intel_dp, 1602 uint32_t dp_reg_value, 1603 uint8_t dp_train_pat) 1604 { 1605 struct drm_device *dev = intel_dp->base.base.dev; 1606 struct drm_i915_private *dev_priv = dev->dev_private; 1607 int ret; 1608 1609 I915_WRITE(intel_dp->output_reg, dp_reg_value); 1610 POSTING_READ(intel_dp->output_reg); 1611 1612 intel_dp_aux_native_write_1(intel_dp, 1613 DP_TRAINING_PATTERN_SET, 1614 dp_train_pat); 1615 1616 ret = intel_dp_aux_native_write(intel_dp, 1617 DP_TRAINING_LANE0_SET, 1618 intel_dp->train_set, 1619 intel_dp->lane_count); 1620 if (ret != intel_dp->lane_count) 1621 return false; 1622 1623 return true; 1624 } 1625 1626 /* Enable corresponding port and start training pattern 1 */ 1627 static void 1628 intel_dp_start_link_train(struct intel_dp *intel_dp) 1629 { 1630 struct drm_device *dev = intel_dp->base.base.dev; 1631 struct drm_i915_private *dev_priv = dev->dev_private; 1632 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); 1633 int i; 1634 uint8_t voltage; 1635 bool clock_recovery = false; 1636 int voltage_tries, loop_tries; 1637 u32 reg; 1638 uint32_t DP = intel_dp->DP; 1639 1640 /* Enable output, wait for it to become active */ 1641 I915_WRITE(intel_dp->output_reg, intel_dp->DP); 1642 POSTING_READ(intel_dp->output_reg); 1643 intel_wait_for_vblank(dev, intel_crtc->pipe); 1644 1645 /* Write the link configuration data */ 1646 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 1647 intel_dp->link_configuration, 1648 DP_LINK_CONFIGURATION_SIZE); 1649 1650 DP |= DP_PORT_EN; 1651 1652 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) 1653 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1654 else 1655 DP &= ~DP_LINK_TRAIN_MASK; 1656 memset(intel_dp->train_set, 0, 4); 1657 voltage = 0xff; 1658 voltage_tries = 0; 1659 loop_tries = 0; 1660 clock_recovery = false; 1661 for (;;) { 1662 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1663 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1664 uint32_t signal_levels; 1665 1666 1667 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { 1668 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); 1669 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; 1670 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1671 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1672 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1673 } else { 1674 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1675 DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels); 1676 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1677 } 1678 1679 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) 1680 reg = DP | DP_LINK_TRAIN_PAT_1_CPT; 1681 else 1682 reg = DP | DP_LINK_TRAIN_PAT_1; 1683 1684 if (!intel_dp_set_link_train(intel_dp, reg, 1685 DP_TRAINING_PATTERN_1)) 1686 break; 1687 /* Set training pattern 1 */ 1688 1689 DELAY(100); 1690 if (!intel_dp_get_link_status(intel_dp, link_status)) { 1691 DRM_ERROR("failed to get link status\n"); 1692 break; 1693 } 1694 1695 if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1696 DRM_DEBUG_KMS("clock recovery OK\n"); 1697 clock_recovery = true; 1698 break; 1699 } 1700 1701 /* Check to see if we've tried the max voltage */ 1702 for (i = 0; i < intel_dp->lane_count; i++) 1703 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1704 break; 1705 if (i == intel_dp->lane_count) { 1706 ++loop_tries; 1707 if (loop_tries == 5) { 1708 DRM_DEBUG_KMS("too many full retries, give up\n"); 1709 break; 1710 } 1711 memset(intel_dp->train_set, 0, 4); 1712 voltage_tries = 0; 1713 continue; 1714 } 1715 1716 /* Check to see if we've tried the same voltage 5 times */ 1717 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 1718 ++voltage_tries; 1719 if (voltage_tries == 5) { 1720 DRM_DEBUG_KMS("too many voltage retries, give up\n"); 1721 break; 1722 } 1723 } else 1724 voltage_tries = 0; 1725 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1726 1727 /* Compute new intel_dp->train_set as requested by target */ 1728 intel_get_adjust_train(intel_dp, link_status); 1729 } 1730 1731 intel_dp->DP = DP; 1732 } 1733 1734 static void 1735 intel_dp_complete_link_train(struct intel_dp *intel_dp) 1736 { 1737 struct drm_device *dev = intel_dp->base.base.dev; 1738 struct drm_i915_private *dev_priv = dev->dev_private; 1739 bool channel_eq = false; 1740 int tries, cr_tries; 1741 u32 reg; 1742 uint32_t DP = intel_dp->DP; 1743 1744 /* channel equalization */ 1745 tries = 0; 1746 cr_tries = 0; 1747 channel_eq = false; 1748 for (;;) { 1749 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1750 uint32_t signal_levels; 1751 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1752 1753 if (cr_tries > 5) { 1754 DRM_ERROR("failed to train DP, aborting\n"); 1755 intel_dp_link_down(intel_dp); 1756 break; 1757 } 1758 1759 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { 1760 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); 1761 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; 1762 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1763 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1764 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1765 } else { 1766 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1767 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1768 } 1769 1770 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) 1771 reg = DP | DP_LINK_TRAIN_PAT_2_CPT; 1772 else 1773 reg = DP | DP_LINK_TRAIN_PAT_2; 1774 1775 /* channel eq pattern */ 1776 if (!intel_dp_set_link_train(intel_dp, reg, 1777 DP_TRAINING_PATTERN_2)) 1778 break; 1779 1780 DELAY(400); 1781 if (!intel_dp_get_link_status(intel_dp, link_status)) 1782 break; 1783 1784 /* Make sure clock is still ok */ 1785 if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1786 intel_dp_start_link_train(intel_dp); 1787 cr_tries++; 1788 continue; 1789 } 1790 1791 if (intel_channel_eq_ok(intel_dp, link_status)) { 1792 channel_eq = true; 1793 break; 1794 } 1795 1796 /* Try 5 times, then try clock recovery if that fails */ 1797 if (tries > 5) { 1798 intel_dp_link_down(intel_dp); 1799 intel_dp_start_link_train(intel_dp); 1800 tries = 0; 1801 cr_tries++; 1802 continue; 1803 } 1804 1805 /* Compute new intel_dp->train_set as requested by target */ 1806 intel_get_adjust_train(intel_dp, link_status); 1807 ++tries; 1808 } 1809 1810 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) 1811 reg = DP | DP_LINK_TRAIN_OFF_CPT; 1812 else 1813 reg = DP | DP_LINK_TRAIN_OFF; 1814 1815 I915_WRITE(intel_dp->output_reg, reg); 1816 POSTING_READ(intel_dp->output_reg); 1817 intel_dp_aux_native_write_1(intel_dp, 1818 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); 1819 } 1820 1821 static void 1822 intel_dp_link_down(struct intel_dp *intel_dp) 1823 { 1824 struct drm_device *dev = intel_dp->base.base.dev; 1825 struct drm_i915_private *dev_priv = dev->dev_private; 1826 uint32_t DP = intel_dp->DP; 1827 1828 if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0) 1829 return; 1830 1831 DRM_DEBUG_KMS("\n"); 1832 1833 if (is_edp(intel_dp)) { 1834 DP &= ~DP_PLL_ENABLE; 1835 I915_WRITE(intel_dp->output_reg, DP); 1836 POSTING_READ(intel_dp->output_reg); 1837 DELAY(100); 1838 } 1839 1840 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { 1841 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1842 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 1843 } else { 1844 DP &= ~DP_LINK_TRAIN_MASK; 1845 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); 1846 } 1847 POSTING_READ(intel_dp->output_reg); 1848 1849 DELAY(17*1000); 1850 1851 if (is_edp(intel_dp)) { 1852 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) 1853 DP |= DP_LINK_TRAIN_OFF_CPT; 1854 else 1855 DP |= DP_LINK_TRAIN_OFF; 1856 } 1857 1858 1859 if (!HAS_PCH_CPT(dev) && 1860 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 1861 struct drm_crtc *crtc = intel_dp->base.base.crtc; 1862 1863 /* Hardware workaround: leaving our transcoder select 1864 * set to transcoder B while it's off will prevent the 1865 * corresponding HDMI output on transcoder A. 1866 * 1867 * Combine this with another hardware workaround: 1868 * transcoder select bit can only be cleared while the 1869 * port is enabled. 1870 */ 1871 DP &= ~DP_PIPEB_SELECT; 1872 I915_WRITE(intel_dp->output_reg, DP); 1873 1874 /* Changes to enable or select take place the vblank 1875 * after being written. 1876 */ 1877 if (crtc == NULL) { 1878 /* We can arrive here never having been attached 1879 * to a CRTC, for instance, due to inheriting 1880 * random state from the BIOS. 1881 * 1882 * If the pipe is not running, play safe and 1883 * wait for the clocks to stabilise before 1884 * continuing. 1885 */ 1886 POSTING_READ(intel_dp->output_reg); 1887 DELAY(50 * 1000); 1888 } else 1889 intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); 1890 } 1891 1892 DP &= ~DP_AUDIO_OUTPUT_ENABLE; 1893 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); 1894 POSTING_READ(intel_dp->output_reg); 1895 DELAY(intel_dp->panel_power_down_delay * 1000); 1896 } 1897 1898 static bool 1899 intel_dp_get_dpcd(struct intel_dp *intel_dp) 1900 { 1901 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, 1902 sizeof(intel_dp->dpcd)) && 1903 (intel_dp->dpcd[DP_DPCD_REV] != 0)) { 1904 return true; 1905 } 1906 1907 return false; 1908 } 1909 1910 static bool 1911 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) 1912 { 1913 int ret; 1914 1915 ret = intel_dp_aux_native_read_retry(intel_dp, 1916 DP_DEVICE_SERVICE_IRQ_VECTOR, 1917 sink_irq_vector, 1); 1918 if (!ret) 1919 return false; 1920 1921 return true; 1922 } 1923 1924 static void 1925 intel_dp_handle_test_request(struct intel_dp *intel_dp) 1926 { 1927 /* NAK by default */ 1928 intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK); 1929 } 1930 1931 /* 1932 * According to DP spec 1933 * 5.1.2: 1934 * 1. Read DPCD 1935 * 2. Configure link according to Receiver Capabilities 1936 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 1937 * 4. Check link status on receipt of hot-plug interrupt 1938 */ 1939 1940 static void 1941 intel_dp_check_link_status(struct intel_dp *intel_dp) 1942 { 1943 u8 sink_irq_vector; 1944 u8 link_status[DP_LINK_STATUS_SIZE]; 1945 1946 if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) 1947 return; 1948 1949 if (!intel_dp->base.base.crtc) 1950 return; 1951 1952 /* Try to read receiver status if the link appears to be up */ 1953 if (!intel_dp_get_link_status(intel_dp, link_status)) { 1954 intel_dp_link_down(intel_dp); 1955 return; 1956 } 1957 1958 /* Now read the DPCD to see if it's actually running */ 1959 if (!intel_dp_get_dpcd(intel_dp)) { 1960 intel_dp_link_down(intel_dp); 1961 return; 1962 } 1963 1964 /* Try to read the source of the interrupt */ 1965 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 1966 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) { 1967 /* Clear interrupt source */ 1968 intel_dp_aux_native_write_1(intel_dp, 1969 DP_DEVICE_SERVICE_IRQ_VECTOR, 1970 sink_irq_vector); 1971 1972 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) 1973 intel_dp_handle_test_request(intel_dp); 1974 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) 1975 DRM_DEBUG_KMS("CP or sink specific irq unhandled\n"); 1976 } 1977 1978 if (!intel_channel_eq_ok(intel_dp, link_status)) { 1979 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", 1980 drm_get_encoder_name(&intel_dp->base.base)); 1981 intel_dp_start_link_train(intel_dp); 1982 intel_dp_complete_link_train(intel_dp); 1983 } 1984 } 1985 1986 static enum drm_connector_status 1987 intel_dp_detect_dpcd(struct intel_dp *intel_dp) 1988 { 1989 if (intel_dp_get_dpcd(intel_dp)) 1990 return connector_status_connected; 1991 return connector_status_disconnected; 1992 } 1993 1994 static enum drm_connector_status 1995 ironlake_dp_detect(struct intel_dp *intel_dp) 1996 { 1997 enum drm_connector_status status; 1998 1999 /* Can't disconnect eDP, but you can close the lid... */ 2000 if (is_edp(intel_dp)) { 2001 status = intel_panel_detect(intel_dp->base.base.dev); 2002 if (status == connector_status_unknown) 2003 status = connector_status_connected; 2004 return status; 2005 } 2006 2007 return intel_dp_detect_dpcd(intel_dp); 2008 } 2009 2010 static enum drm_connector_status 2011 g4x_dp_detect(struct intel_dp *intel_dp) 2012 { 2013 struct drm_device *dev = intel_dp->base.base.dev; 2014 struct drm_i915_private *dev_priv = dev->dev_private; 2015 uint32_t temp, bit; 2016 2017 switch (intel_dp->output_reg) { 2018 case DP_B: 2019 bit = DPB_HOTPLUG_INT_STATUS; 2020 break; 2021 case DP_C: 2022 bit = DPC_HOTPLUG_INT_STATUS; 2023 break; 2024 case DP_D: 2025 bit = DPD_HOTPLUG_INT_STATUS; 2026 break; 2027 default: 2028 return connector_status_unknown; 2029 } 2030 2031 temp = I915_READ(PORT_HOTPLUG_STAT); 2032 2033 if ((temp & bit) == 0) 2034 return connector_status_disconnected; 2035 2036 return intel_dp_detect_dpcd(intel_dp); 2037 } 2038 2039 static struct edid * 2040 intel_dp_get_edid(struct drm_connector *connector, device_t adapter) 2041 { 2042 struct intel_dp *intel_dp = intel_attached_dp(connector); 2043 struct edid *edid; 2044 2045 ironlake_edp_panel_vdd_on(intel_dp); 2046 edid = drm_get_edid(connector, adapter); 2047 ironlake_edp_panel_vdd_off(intel_dp, false); 2048 return edid; 2049 } 2050 2051 static int 2052 intel_dp_get_edid_modes(struct drm_connector *connector, device_t adapter) 2053 { 2054 struct intel_dp *intel_dp = intel_attached_dp(connector); 2055 int ret; 2056 2057 ironlake_edp_panel_vdd_on(intel_dp); 2058 ret = intel_ddc_get_modes(connector, adapter); 2059 ironlake_edp_panel_vdd_off(intel_dp, false); 2060 return ret; 2061 } 2062 2063 2064 /** 2065 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. 2066 * 2067 * \return true if DP port is connected. 2068 * \return false if DP port is disconnected. 2069 */ 2070 static enum drm_connector_status 2071 intel_dp_detect(struct drm_connector *connector, bool force) 2072 { 2073 struct intel_dp *intel_dp = intel_attached_dp(connector); 2074 struct drm_device *dev = intel_dp->base.base.dev; 2075 enum drm_connector_status status; 2076 struct edid *edid = NULL; 2077 2078 intel_dp->has_audio = false; 2079 2080 if (HAS_PCH_SPLIT(dev)) 2081 status = ironlake_dp_detect(intel_dp); 2082 else 2083 status = g4x_dp_detect(intel_dp); 2084 if (status != connector_status_connected) 2085 return status; 2086 2087 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { 2088 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); 2089 } else { 2090 edid = intel_dp_get_edid(connector, intel_dp->adapter); 2091 if (edid) { 2092 intel_dp->has_audio = drm_detect_monitor_audio(edid); 2093 drm_free(edid, DRM_MEM_KMS); 2094 } 2095 } 2096 2097 return connector_status_connected; 2098 } 2099 2100 static int intel_dp_get_modes(struct drm_connector *connector) 2101 { 2102 struct intel_dp *intel_dp = intel_attached_dp(connector); 2103 struct drm_device *dev = intel_dp->base.base.dev; 2104 struct drm_i915_private *dev_priv = dev->dev_private; 2105 int ret; 2106 2107 /* We should parse the EDID data and find out if it has an audio sink 2108 */ 2109 2110 ret = intel_dp_get_edid_modes(connector, intel_dp->adapter); 2111 if (ret) { 2112 if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) { 2113 struct drm_display_mode *newmode; 2114 list_for_each_entry(newmode, &connector->probed_modes, 2115 head) { 2116 if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) { 2117 intel_dp->panel_fixed_mode = 2118 drm_mode_duplicate(dev, newmode); 2119 break; 2120 } 2121 } 2122 } 2123 return ret; 2124 } 2125 2126 /* if eDP has no EDID, try to use fixed panel mode from VBT */ 2127 if (is_edp(intel_dp)) { 2128 /* initialize panel mode from VBT if available for eDP */ 2129 if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) { 2130 intel_dp->panel_fixed_mode = 2131 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); 2132 if (intel_dp->panel_fixed_mode) { 2133 intel_dp->panel_fixed_mode->type |= 2134 DRM_MODE_TYPE_PREFERRED; 2135 } 2136 } 2137 if (intel_dp->panel_fixed_mode) { 2138 struct drm_display_mode *mode; 2139 mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode); 2140 drm_mode_probed_add(connector, mode); 2141 return 1; 2142 } 2143 } 2144 return 0; 2145 } 2146 2147 static bool 2148 intel_dp_detect_audio(struct drm_connector *connector) 2149 { 2150 struct intel_dp *intel_dp = intel_attached_dp(connector); 2151 struct edid *edid; 2152 bool has_audio = false; 2153 2154 edid = intel_dp_get_edid(connector, intel_dp->adapter); 2155 if (edid) { 2156 has_audio = drm_detect_monitor_audio(edid); 2157 2158 drm_free(edid, DRM_MEM_KMS); 2159 } 2160 2161 return has_audio; 2162 } 2163 2164 static int 2165 intel_dp_set_property(struct drm_connector *connector, 2166 struct drm_property *property, 2167 uint64_t val) 2168 { 2169 struct drm_i915_private *dev_priv = connector->dev->dev_private; 2170 struct intel_dp *intel_dp = intel_attached_dp(connector); 2171 int ret; 2172 2173 ret = drm_object_property_set_value(&connector->base, property, val); 2174 if (ret) 2175 return ret; 2176 2177 if (property == dev_priv->force_audio_property) { 2178 int i = val; 2179 bool has_audio; 2180 2181 if (i == intel_dp->force_audio) 2182 return 0; 2183 2184 intel_dp->force_audio = i; 2185 2186 if (i == HDMI_AUDIO_AUTO) 2187 has_audio = intel_dp_detect_audio(connector); 2188 else 2189 has_audio = (i == HDMI_AUDIO_ON); 2190 2191 if (has_audio == intel_dp->has_audio) 2192 return 0; 2193 2194 intel_dp->has_audio = has_audio; 2195 goto done; 2196 } 2197 2198 if (property == dev_priv->broadcast_rgb_property) { 2199 if (val == !!intel_dp->color_range) 2200 return 0; 2201 2202 intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0; 2203 goto done; 2204 } 2205 2206 return -EINVAL; 2207 2208 done: 2209 if (intel_dp->base.base.crtc) { 2210 struct drm_crtc *crtc = intel_dp->base.base.crtc; 2211 drm_crtc_helper_set_mode(crtc, &crtc->mode, 2212 crtc->x, crtc->y, 2213 crtc->fb); 2214 } 2215 2216 return 0; 2217 } 2218 2219 static void 2220 intel_dp_destroy(struct drm_connector *connector) 2221 { 2222 struct drm_device *dev = connector->dev; 2223 2224 if (intel_dpd_is_edp(dev)) 2225 intel_panel_destroy_backlight(dev); 2226 2227 #if 0 2228 drm_sysfs_connector_remove(connector); 2229 #endif 2230 drm_connector_cleanup(connector); 2231 drm_free(connector, DRM_MEM_KMS); 2232 } 2233 2234 static void intel_dp_encoder_destroy(struct drm_encoder *encoder) 2235 { 2236 struct drm_device *dev; 2237 struct intel_dp *intel_dp; 2238 2239 intel_dp = enc_to_intel_dp(encoder); 2240 dev = encoder->dev; 2241 2242 if (intel_dp->dp_iic_bus != NULL) { 2243 if (intel_dp->adapter != NULL) { 2244 device_delete_child(intel_dp->dp_iic_bus, 2245 intel_dp->adapter); 2246 } 2247 device_delete_child(dev->dev, intel_dp->dp_iic_bus); 2248 } 2249 drm_encoder_cleanup(encoder); 2250 if (is_edp(intel_dp)) { 2251 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 2252 ironlake_panel_vdd_off_sync(intel_dp); 2253 } 2254 drm_free(intel_dp, DRM_MEM_KMS); 2255 } 2256 2257 static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 2258 .dpms = intel_dp_dpms, 2259 .mode_fixup = intel_dp_mode_fixup, 2260 .prepare = intel_dp_prepare, 2261 .mode_set = intel_dp_mode_set, 2262 .commit = intel_dp_commit, 2263 }; 2264 2265 static const struct drm_connector_funcs intel_dp_connector_funcs = { 2266 .dpms = drm_helper_connector_dpms, 2267 .detect = intel_dp_detect, 2268 .fill_modes = drm_helper_probe_single_connector_modes, 2269 .set_property = intel_dp_set_property, 2270 .destroy = intel_dp_destroy, 2271 }; 2272 2273 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 2274 .get_modes = intel_dp_get_modes, 2275 .mode_valid = intel_dp_mode_valid, 2276 .best_encoder = intel_best_encoder, 2277 }; 2278 2279 static const struct drm_encoder_funcs intel_dp_enc_funcs = { 2280 .destroy = intel_dp_encoder_destroy, 2281 }; 2282 2283 static void 2284 intel_dp_hot_plug(struct intel_encoder *intel_encoder) 2285 { 2286 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); 2287 2288 intel_dp_check_link_status(intel_dp); 2289 } 2290 2291 /* Return which DP Port should be selected for Transcoder DP control */ 2292 int 2293 intel_trans_dp_port_sel(struct drm_crtc *crtc) 2294 { 2295 struct drm_device *dev = crtc->dev; 2296 struct drm_mode_config *mode_config = &dev->mode_config; 2297 struct drm_encoder *encoder; 2298 2299 list_for_each_entry(encoder, &mode_config->encoder_list, head) { 2300 struct intel_dp *intel_dp; 2301 2302 if (encoder->crtc != crtc) 2303 continue; 2304 2305 intel_dp = enc_to_intel_dp(encoder); 2306 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || 2307 intel_dp->base.type == INTEL_OUTPUT_EDP) 2308 return intel_dp->output_reg; 2309 } 2310 2311 return -1; 2312 } 2313 2314 /* check the VBT to see whether the eDP is on DP-D port */ 2315 bool intel_dpd_is_edp(struct drm_device *dev) 2316 { 2317 struct drm_i915_private *dev_priv = dev->dev_private; 2318 struct child_device_config *p_child; 2319 int i; 2320 2321 if (!dev_priv->child_dev_num) 2322 return false; 2323 2324 for (i = 0; i < dev_priv->child_dev_num; i++) { 2325 p_child = dev_priv->child_dev + i; 2326 2327 if (p_child->dvo_port == PORT_IDPD && 2328 p_child->device_type == DEVICE_TYPE_eDP) 2329 return true; 2330 } 2331 return false; 2332 } 2333 2334 static void 2335 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 2336 { 2337 intel_attach_force_audio_property(connector); 2338 intel_attach_broadcast_rgb_property(connector); 2339 } 2340 2341 void 2342 intel_dp_init(struct drm_device *dev, int output_reg) 2343 { 2344 struct drm_i915_private *dev_priv = dev->dev_private; 2345 struct drm_connector *connector; 2346 struct intel_dp *intel_dp; 2347 struct intel_encoder *intel_encoder; 2348 struct intel_connector *intel_connector; 2349 const char *name = NULL; 2350 int type; 2351 2352 intel_dp = kmalloc(sizeof(struct intel_dp), DRM_MEM_KMS, 2353 M_WAITOK | M_ZERO); 2354 2355 intel_dp->output_reg = output_reg; 2356 intel_dp->dpms_mode = -1; 2357 2358 intel_connector = kmalloc(sizeof(struct intel_connector), DRM_MEM_KMS, 2359 M_WAITOK | M_ZERO); 2360 intel_encoder = &intel_dp->base; 2361 2362 if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D) 2363 if (intel_dpd_is_edp(dev)) 2364 intel_dp->is_pch_edp = true; 2365 2366 if (output_reg == DP_A || is_pch_edp(intel_dp)) { 2367 type = DRM_MODE_CONNECTOR_eDP; 2368 intel_encoder->type = INTEL_OUTPUT_EDP; 2369 } else { 2370 type = DRM_MODE_CONNECTOR_DisplayPort; 2371 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 2372 } 2373 2374 connector = &intel_connector->base; 2375 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 2376 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 2377 2378 connector->polled = DRM_CONNECTOR_POLL_HPD; 2379 2380 if (output_reg == DP_B || output_reg == PCH_DP_B) 2381 intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); 2382 else if (output_reg == DP_C || output_reg == PCH_DP_C) 2383 intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); 2384 else if (output_reg == DP_D || output_reg == PCH_DP_D) 2385 intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); 2386 2387 if (is_edp(intel_dp)) { 2388 intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); 2389 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, 2390 ironlake_panel_vdd_work); 2391 } 2392 2393 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 2394 connector->interlace_allowed = true; 2395 connector->doublescan_allowed = 0; 2396 2397 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, 2398 DRM_MODE_ENCODER_TMDS); 2399 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); 2400 2401 intel_connector_attach_encoder(intel_connector, intel_encoder); 2402 #if 0 2403 drm_sysfs_connector_add(connector); 2404 #endif 2405 2406 /* Set up the DDC bus. */ 2407 switch (output_reg) { 2408 case DP_A: 2409 name = "DPDDC-A"; 2410 break; 2411 case DP_B: 2412 case PCH_DP_B: 2413 dev_priv->hotplug_supported_mask |= 2414 HDMIB_HOTPLUG_INT_STATUS; 2415 name = "DPDDC-B"; 2416 break; 2417 case DP_C: 2418 case PCH_DP_C: 2419 dev_priv->hotplug_supported_mask |= 2420 HDMIC_HOTPLUG_INT_STATUS; 2421 name = "DPDDC-C"; 2422 break; 2423 case DP_D: 2424 case PCH_DP_D: 2425 dev_priv->hotplug_supported_mask |= 2426 HDMID_HOTPLUG_INT_STATUS; 2427 name = "DPDDC-D"; 2428 break; 2429 } 2430 2431 /* Cache some DPCD data in the eDP case */ 2432 if (is_edp(intel_dp)) { 2433 bool ret; 2434 struct edp_power_seq cur, vbt; 2435 u32 pp_on, pp_off, pp_div; 2436 2437 pp_on = I915_READ(PCH_PP_ON_DELAYS); 2438 pp_off = I915_READ(PCH_PP_OFF_DELAYS); 2439 pp_div = I915_READ(PCH_PP_DIVISOR); 2440 2441 /* Pull timing values out of registers */ 2442 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> 2443 PANEL_POWER_UP_DELAY_SHIFT; 2444 2445 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> 2446 PANEL_LIGHT_ON_DELAY_SHIFT; 2447 2448 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> 2449 PANEL_LIGHT_OFF_DELAY_SHIFT; 2450 2451 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> 2452 PANEL_POWER_DOWN_DELAY_SHIFT; 2453 2454 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> 2455 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; 2456 2457 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 2458 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); 2459 2460 vbt = dev_priv->edp.pps; 2461 2462 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 2463 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); 2464 2465 #define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10) 2466 2467 intel_dp->panel_power_up_delay = get_delay(t1_t3); 2468 intel_dp->backlight_on_delay = get_delay(t8); 2469 intel_dp->backlight_off_delay = get_delay(t9); 2470 intel_dp->panel_power_down_delay = get_delay(t10); 2471 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 2472 2473 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", 2474 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, 2475 intel_dp->panel_power_cycle_delay); 2476 2477 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 2478 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 2479 2480 ironlake_edp_panel_vdd_on(intel_dp); 2481 ret = intel_dp_get_dpcd(intel_dp); 2482 ironlake_edp_panel_vdd_off(intel_dp, false); 2483 2484 if (ret) { 2485 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 2486 dev_priv->no_aux_handshake = 2487 intel_dp->dpcd[DP_MAX_DOWNSPREAD] & 2488 DP_NO_AUX_HANDSHAKE_LINK_TRAINING; 2489 } else { 2490 /* if this fails, presume the device is a ghost */ 2491 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 2492 intel_dp_encoder_destroy(&intel_dp->base.base); 2493 intel_dp_destroy(&intel_connector->base); 2494 return; 2495 } 2496 } 2497 2498 intel_dp_i2c_init(intel_dp, intel_connector, name); 2499 2500 intel_encoder->hot_plug = intel_dp_hot_plug; 2501 2502 if (is_edp(intel_dp)) { 2503 dev_priv->int_edp_connector = connector; 2504 intel_panel_setup_backlight(dev); 2505 } 2506 2507 intel_dp_add_properties(intel_dp, connector); 2508 2509 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 2510 * 0xd. Failure to do so will result in spurious interrupts being 2511 * generated on the port when a cable is not attached. 2512 */ 2513 if (IS_G4X(dev) && !IS_GM45(dev)) { 2514 u32 temp = I915_READ(PEG_BAND_GAP_DATA); 2515 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 2516 } 2517 } 2518