1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28 #include <linux/i2c.h> 29 #include <linux/export.h> 30 #include <drm/drmP.h> 31 #include <drm/drm_crtc.h> 32 #include <drm/drm_crtc_helper.h> 33 #include <drm/drm_edid.h> 34 #include "intel_drv.h" 35 #include <drm/i915_drm.h> 36 #include "i915_drv.h" 37 38 #define DP_LINK_CHECK_TIMEOUT (10 * 1000) 39 40 /** 41 * is_edp - is the given port attached to an eDP panel (either CPU or PCH) 42 * @intel_dp: DP struct 43 * 44 * If a CPU or PCH DP output is attached to an eDP panel, this function 45 * will return true, and false otherwise. 
46 */ 47 static bool is_edp(struct intel_dp *intel_dp) 48 { 49 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 50 51 return intel_dig_port->base.type == INTEL_OUTPUT_EDP; 52 } 53 54 /** 55 * is_pch_edp - is the port on the PCH and attached to an eDP panel? 56 * @intel_dp: DP struct 57 * 58 * Returns true if the given DP struct corresponds to a PCH DP port attached 59 * to an eDP panel, false otherwise. Helpful for determining whether we 60 * may need FDI resources for a given DP output or not. 61 */ 62 static bool is_pch_edp(struct intel_dp *intel_dp) 63 { 64 return intel_dp->is_pch_edp; 65 } 66 67 /** 68 * is_cpu_edp - is the port on the CPU and attached to an eDP panel? 69 * @intel_dp: DP struct 70 * 71 * Returns true if the given DP struct corresponds to a CPU eDP port. 72 */ 73 static bool is_cpu_edp(struct intel_dp *intel_dp) 74 { 75 return is_edp(intel_dp) && !is_pch_edp(intel_dp); 76 } 77 78 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp) 79 { 80 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 81 82 return intel_dig_port->base.base.dev; 83 } 84 85 static struct intel_dp *intel_attached_dp(struct drm_connector *connector) 86 { 87 return enc_to_intel_dp(&intel_attached_encoder(connector)->base); 88 } 89 90 /** 91 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP? 92 * @encoder: DRM encoder 93 * 94 * Return true if @encoder corresponds to a PCH attached eDP panel. Needed 95 * by intel_display.c. 
96 */ 97 bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) 98 { 99 struct intel_dp *intel_dp; 100 101 if (!encoder) 102 return false; 103 104 intel_dp = enc_to_intel_dp(encoder); 105 106 return is_pch_edp(intel_dp); 107 } 108 109 static void intel_dp_link_down(struct intel_dp *intel_dp); 110 111 void 112 intel_edp_link_config(struct intel_encoder *intel_encoder, 113 int *lane_num, int *link_bw) 114 { 115 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 116 117 *lane_num = intel_dp->lane_count; 118 *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); 119 } 120 121 int 122 intel_edp_target_clock(struct intel_encoder *intel_encoder, 123 struct drm_display_mode *mode) 124 { 125 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 126 struct intel_connector *intel_connector = intel_dp->attached_connector; 127 128 if (intel_connector->panel.fixed_mode) 129 return intel_connector->panel.fixed_mode->clock; 130 else 131 return mode->clock; 132 } 133 134 static int 135 intel_dp_max_link_bw(struct intel_dp *intel_dp) 136 { 137 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 138 139 switch (max_link_bw) { 140 case DP_LINK_BW_1_62: 141 case DP_LINK_BW_2_7: 142 break; 143 default: 144 max_link_bw = DP_LINK_BW_1_62; 145 break; 146 } 147 return max_link_bw; 148 } 149 150 static int 151 intel_dp_link_clock(uint8_t link_bw) 152 { 153 if (link_bw == DP_LINK_BW_2_7) 154 return 270000; 155 else 156 return 162000; 157 } 158 159 /* 160 * The units on the numbers in the next two are... bizarre. Examples will 161 * make it clearer; this one parallels an example in the eDP spec. 162 * 163 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: 164 * 165 * 270000 * 1 * 8 / 10 == 216000 166 * 167 * The actual data capacity of that configuration is 2.16Gbit/s, so the 168 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - 169 * or equivalently, kilopixels per second - so for 1680x1050R it'd be 170 * 119000. 
At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

/* Bandwidth needed by a mode, in decakilobits/s; rounds up (+9 before /10). */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

/* Link capacity in decakilobits/s; *8/10 accounts for 8b/10b coding. */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

/*
 * Check whether @mode fits the link at 24bpp; if not, retry at 18bpp and,
 * when @adjust_mode is set, tag the mode so later stages force 6bpc
 * dithering. Returns false only if even 18bpp does not fit.
 */
static bool
intel_dp_adjust_dithering(struct intel_dp *intel_dp,
			  struct drm_display_mode *mode,
			  bool adjust_mode)
{
	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
	int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
	int max_rate, mode_rate;

	mode_rate = intel_dp_link_required(mode->clock, 24);
	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);

	if (mode_rate > max_rate) {
		mode_rate = intel_dp_link_required(mode->clock, 18);
		if (mode_rate > max_rate)
			return false;

		if (adjust_mode)
			mode->private_flags
				|= INTEL_MODE_DP_FORCE_6BPC;

		return true;
	}

	return true;
}

/*
 * Connector ->mode_valid hook: reject modes larger than a fixed eDP panel,
 * beyond link bandwidth, below 10MHz pixel clock, or double-clocked.
 */
static int
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;
	}

	if (!intel_dp_adjust_dithering(intel_dp, mode, false))
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

/* Pack up to 4 bytes big-endian into one 32-bit AUX data register value. */
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

/* Inverse of pack_aux: unpack up to 4 big-endian bytes from a register. */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

/* hrawclock is 1/4 the FSB frequency; returns the rate in MHz. */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

/* True when the PCH power sequencer reports the panel powered on. */
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
}

/* True when panel VDD is currently being forced on via PP_CONTROL. */
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
}

/* Warn (debug aid) if AUX traffic is attempted on a powered-off eDP panel. */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;
	if
(!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(PCH_PP_STATUS),
			      I915_READ(PCH_PP_CONTROL));
	}
}

/*
 * Perform one raw AUX channel transaction: write @send_bytes from @send,
 * then read back up to @recv_size bytes into @recv. Returns the number of
 * bytes received, or a negative errno (-EBUSY on a stuck/unfinished
 * channel, -EIO on receive error, -ETIMEDOUT when the sink is absent).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	uint32_t output_reg = intel_dp->output_reg;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Pre-Haswell: AUX control/data live at fixed offsets from the port reg */
	uint32_t ch_ctl = output_reg + 0x10;
	uint32_t ch_data = ch_ctl + 4;
	int i;
	int recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;

	/* Haswell moved the AUX registers to per-port locations */
	if (IS_HASWELL(dev)) {
		switch (intel_dig_port->port) {
		case PORT_A:
			ch_ctl = DPA_AUX_CH_CTL;
			ch_data = DPA_AUX_CH_DATA1;
			break;
		case PORT_B:
			ch_ctl = PCH_DPB_AUX_CH_CTL;
			ch_data = PCH_DPB_AUX_CH_DATA1;
			break;
		case PORT_C:
			ch_ctl = PCH_DPC_AUX_CH_CTL;
			ch_data = PCH_DPC_AUX_CH_DATA1;
			break;
		case PORT_D:
			ch_ctl = PCH_DPD_AUX_CH_CTL;
			ch_data = PCH_DPD_AUX_CH_DATA1;
			break;
		default:
			BUG();
		}
	}

	intel_dp_check_edp(intel_dp);
	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz. So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (is_cpu_edp(intel_dp)) {
		if (IS_HASWELL(dev))
			aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
		else if (IS_VALLEYVIEW(dev))
			aux_clock_divider = 100;
		else if (IS_GEN6(dev) || IS_GEN7(dev))
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
	} else if (HAS_PCH_SPLIT(dev))
		aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	else
		aux_clock_divider = intel_hrawclk(dev) / 2;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		return -EBUSY;
	}

	/* Must try at least 3 times according to DP spec */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
		/* Busy-wait for the hardware to drop SEND_BUSY */
		for (;;) {
			status = I915_READ(ch_ctl);
			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
				break;
			udelay(100);
		}

		/* Clear done status and any errors (write-1-to-clear bits) */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		return -EBUSY;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		return -EIO;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		return -ETIMEDOUT;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	return recv_bytes;
}

/* Write data to the aux channel in native mode.
 * Returns the number of bytes written or a negative errno; retries
 * forever on AUX_NATIVE_REPLY_DEFER.
 * NOTE(review): the send_bytes > 16 path returns bare -1 rather than a
 * -errno like the other failure paths -- confirm callers only test < 0.
 */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)
{
	int ret;
	uint8_t msg[20];
	int msg_bytes;
	uint8_t ack;

	intel_dp_check_edp(intel_dp);
	if (send_bytes > 16)
		return -1;
	/* 4-byte header: command, 16-bit address, length-1 */
	msg[0] = AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;
	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
			break;
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
	return send_bytes;
}

/* Write a
single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}

/* read bytes from a native aux channel.
 * Returns the number of payload bytes copied to @recv, or a negative
 * errno; -EPROTO flags a zero-length (headerless) reply.
 */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];
	int reply_bytes;
	uint8_t ack;
	int ret;

	intel_dp_check_edp(intel_dp);
	msg[0] = AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1; /* +1 for the reply/ack byte */

	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		ack = reply[0];
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
}

/*
 * I2C-over-AUX bit-transfer callback used by the DP i2c adapter
 * (DragonFly/FreeBSD iic glue: @idev carries the softc with our
 * intel_dp and the target i2c address). Transfers one byte per call,
 * retrying up to 5 times on DEFER replies.
 */
static int
intel_dp_i2c_aux_ch(device_t idev, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct iic_dp_aux_data *data;
	struct intel_dp *intel_dp;
	uint16_t address;

	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	data = device_get_softc(idev);
	intel_dp = data->priv;
	address = data->address;

	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	/* Middle-Of-Transaction: keep the i2c transfer open unless stopping */
	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		/* address-only (start/stop) transaction */
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return (0/*reply_bytes - 1*/);
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}

/*
 * Register the I2C-over-AUX adapter for this port. VDD is forced on
 * around bus registration because adding the bus probes the channel.
 */
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);

	ironlake_edp_panel_vdd_on(intel_dp);
	ret = iic_dp_aux_add_bus(intel_connector->base.dev->dev, name,
	    intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
	    &intel_dp->adapter);
	ironlake_edp_panel_vdd_off(intel_dp, false);
	return ret;
}

bool
intel_dp_mode_fixup(struct drm_encoder *encoder,
		    const struct drm_display_mode *mode,
		    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
	/* index into bws[]: 1 iff the sink supports the 2.7GHz rate */
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };

	/* eDP with a fixed panel mode: substitute it and apply panel fitting */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		intel_pch_panel_fitting(dev,
					intel_connector->panel.fitting_mode,
					mode, adjusted_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], adjusted_mode->clock);

	/* May set INTEL_MODE_DP_FORCE_6BPC if only 18bpp fits the link */
	if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
		return false;

	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);

	/* Pick the smallest link rate/lane count combination that fits */
	for (clock = 0; clock <= max_clock; clock++) {
		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);

			if (mode_rate <= link_avail) {
				intel_dp->link_bw = bws[clock];
				intel_dp->lane_count = lane_count;
				adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
				DRM_DEBUG_KMS("DP link bw %02x lane "
						"count %d clock %d bpp %d\n",
				       intel_dp->link_bw, intel_dp->lane_count,
				       adjusted_mode->clock, bpp);
				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
					      mode_rate, link_avail);
				return true;
			}
		}
	}

	return false;
}

/* GMCH/link M/N ratio pairs programmed into the data/link M-N registers */
struct intel_dp_m_n {
	uint32_t	tu;
	uint32_t	gmch_m;
	uint32_t	gmch_n;
	uint32_t	link_m;
	uint32_t	link_n;
};

/* Shift both terms right until each fits in the 24-bit register fields
 * (truncating; small precision loss is accepted here). */
static void
intel_reduce_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}

/* Derive the data (gmch) and link M/N values for the given bpp, lane
 * count, pixel clock and link clock; TU size is fixed at 64. */
static void
intel_dp_compute_m_n(int bpp,
		     int nlanes,
		     int pixel_clock,
		     int link_clock,
		     struct intel_dp_m_n *m_n)
{
	m_n->tu = 64;
	m_n->gmch_m = (pixel_clock * bpp) >> 3;
	m_n->gmch_n = link_clock * nlanes;
	intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}

/* Program the pipe/transcoder data and link M/N registers for a DP crtc. */
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
		 struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_dp *intel_dp;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int lane_count = 4;
	struct intel_dp_m_n m_n;
	int pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder =
intel_crtc->cpu_transcoder;
	int target_clock;

	/*
	 * Find the lane count in the intel_encoder private
	 */
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		intel_dp = enc_to_intel_dp(&intel_encoder->base);

		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
		{
			lane_count = intel_dp->lane_count;
			break;
		}
	}

	/* eDP may drive the fixed panel mode's clock rather than @mode's */
	target_clock = mode->clock;
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			target_clock = intel_edp_target_clock(intel_encoder,
							      mode);
			break;
		}
	}

	/*
	 * Compute the GMCH and Link ratios. The '3' here is
	 * the number of bytes_per_pixel post-LUT, which we always
	 * set up for 8-bits of R/G/B, or 3 bytes total.
	 */
	intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
			     target_clock, adjusted_mode->clock, &m_n);

	/* The M/N registers live in different blocks per platform generation */
	if (IS_HASWELL(dev)) {
		I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
			   TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
	} else if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
		I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
	} else if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
	} else {
		I915_WRITE(PIPE_GMCH_DATA_M(pipe),
			   TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
	}
}

void
intel_dp_init_link_config(struct intel_dp *intel_dp)
{
	/* Build the DPCD link-configuration block written during training:
	 * [0] link rate code, [1] lane count (+ enhanced framing), [8] 8b/10b */
	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
	intel_dp->link_configuration[0] = intel_dp->link_bw;
	intel_dp->link_configuration[1] = intel_dp->lane_count;
	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
	/*
	 * Check for DPCD version > 1.1 and enhanced framing support
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	}
}

/* Encoder ->mode_set hook: assemble the DP port register value (cached in
 * intel_dp->DP) for the chosen lane count, sync polarity, audio, framing
 * and, for CPU eDP, the PLL frequency. */
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;

	switch (intel_dp->lane_count) {
	case 1:
		intel_dp->DP |= DP_PORT_WIDTH_1;
		break;
	case 2:
		intel_dp->DP |= DP_PORT_WIDTH_2;
		break;
	case 4:
		intel_dp->DP |= DP_PORT_WIDTH_4;
		break;
	}
	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(intel_crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(encoder, adjusted_mode);
	}

	intel_dp_init_link_config(intel_dp);

	/* Split out the IBX/CPU vs CPT settings */

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		/* IVB CPU eDP: CPT-style layout with pipe select at bit 29 */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= intel_crtc->pipe << 29;

		/* don't miss out required setting for eDP */
		if (adjusted_mode->clock < 200000)
			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
		else
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		/* IBX PCH / SNB CPU layout */
		intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (intel_crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;

		if (is_cpu_edp(intel_dp)) {
			/* don't miss out required setting for eDP */
			if (adjusted_mode->clock < 200000)
				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
			else
				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
		}
	} else {
		/* CPT PCH: most configuration lives in TRANS_DP_CTL instead */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}

/* Mask/value pairs for polling the PCH panel power sequencer state */
#define IDLE_ON_MASK		(PP_ON | 0 	  | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | 0 	  | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE		(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

#define IDLE_CYCLE_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

/* Poll PCH_PP_STATUS (up to 5s) until (status & mask) == value. */
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(PCH_PP_STATUS),
		      I915_READ(PCH_PP_CONTROL));

	if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(PCH_PP_STATUS),
			  I915_READ(PCH_PP_CONTROL));
	}
}

static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}


/*
Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
{
	u32 control = I915_READ(PCH_PP_CONTROL);

	/* Replace the write-protect key field with the unlock value */
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

/* Force panel VDD on so the AUX channel can be used before the panel is
 * fully powered; pairs with ironlake_edp_panel_vdd_off(). */
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	/* Respect the panel's mandatory power-cycle delay before re-powering */
	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_FORCE_VDD;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
		      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}

/* Actually drop forced VDD, but only if nobody re-requested it meanwhile. */
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(dev_priv);
		pp &= ~EDP_FORCE_VDD;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

		msleep(intel_dp->panel_power_down_delay);
	}
}

/* Delayed-work callback that performs the deferred VDD off. */
static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	lockmgr(&dev->mode_config.mutex, LK_EXCLUSIVE);
	ironlake_panel_vdd_off_sync(intel_dp);
	lockmgr(&dev->mode_config.mutex, LK_RELEASE);
}

/* Release forced VDD; @sync drops it immediately, otherwise it is
 * deferred so a burst of AUX traffic doesn't power-cycle the panel. */
void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		ironlake_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}

/* Run the power sequencer to turn the eDP panel fully on. */
void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
/*
 * Turn the eDP panel power off.  Requires VDD to already be forced on
 * (asserted by the WARN) because some panels misbehave if power is cut
 * without VDD; both are dropped together here.
 */
void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(dev_priv);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	/* VDD was cleared above along with panel power. */
	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}

/*
 * Enable the eDP backlight (power-sequencer bit plus the panel backlight
 * controller), after the panel has had time to sync with the link.
 */
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	intel_panel_enable_backlight(dev, pipe);
}

/*
 * Disable the eDP backlight, then honour the panel's backlight-off delay
 * before the caller proceeds (e.g. to cut panel power).
 */
void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(dev);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(dev_priv);
	pp &= ~EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	msleep(intel_dp->backlight_off_delay);
}

/*
 * Enable the CPU eDP PLL (DP_A).  The pipe must be disabled; the WARNs
 * check that neither the PLL nor the port is already running.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much.
	 */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	/* 200 us for the PLL to stabilise before the port may be used. */
	udelay(200);
}
/*
 * Disable the CPU eDP PLL (DP_A).  The pipe must already be disabled and
 * the port off; the PLL must currently be running (WARN-checked).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}

/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	/* DPCD 1.0 sinks have no SET_POWER register. */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
						  DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = intel_dp_aux_native_write_1(intel_dp,
							  DP_SET_POWER,
							  DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}
}

/*
 * Read back which pipe (if any) this DP port is currently driving.
 * Returns false if the port is disabled; otherwise true, with *pipe set
 * when it can be determined (CPT PCH ports need a transcoder scan).
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum i915_pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		/* On CPT the pipe is not encoded in the port register; scan
		 * the transcoder DP control registers for our port select. */
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}

/*
 * Encoder disable hook: backlight off, panel power off (under forced VDD),
 * then link down — except for CPU eDP, whose link teardown must wait for
 * the post-disable hook below.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	ironlake_edp_panel_vdd_on(intel_dp);
	ironlake_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	ironlake_edp_panel_off(intel_dp);

	/* CPU eDP may only be disabled _after_ the CPU pipe/plane is
	 * disabled. */
	if (!is_cpu_edp(intel_dp))
		intel_dp_link_down(intel_dp);
}
/*
 * Post-disable hook: for CPU eDP the link and the DP_A PLL are torn down
 * only after the pipe/plane is off (see intel_disable_dp above).
 */
static void intel_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (is_cpu_edp(intel_dp)) {
		intel_dp_link_down(intel_dp);
		ironlake_edp_pll_off(intel_dp);
	}
}

/*
 * Encoder enable hook.  Ordering matters: VDD on for AUX access, wake the
 * sink, start link training, then real panel power, release VDD, finish
 * training, and finally the backlight.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	/* Port must not be enabled yet at this point. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	ironlake_edp_panel_on(intel_dp);
	ironlake_edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	ironlake_edp_backlight_on(intel_dp);
}

/* Pre-enable hook: CPU eDP needs its PLL running before the pipe comes up. */
static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (is_cpu_edp(intel_dp))
		ironlake_edp_pll_on(intel_dp);
}

/*
 * Native read with retry for link status and receiver capability reads for
 * cases where the sink may still be asleep.
 */
static bool
intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
			       uint8_t *recv, int recv_bytes)
{
	int ret, i;

	/*
	 * Sinks are *supposed* to come up within 1ms from an off state,
	 * but we're also supposed to retry 3 times per the spec.
	 */
	for (i = 0; i < 3; i++) {
		ret = intel_dp_aux_native_read(intel_dp, address, recv,
					       recv_bytes);
		if (ret == recv_bytes)
			return true;
		msleep(1);
	}

	return false;
}

/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 */
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	return intel_dp_aux_native_read_retry(intel_dp,
					      DP_LANE0_1_STATUS,
					      link_status,
					      DP_LINK_STATUS_SIZE);
}

#if 0
static char *voltage_names[] = {
	"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
	"0dB", "3.5dB", "6dB", "9.5dB"
};
static char *link_train_names[] = {
	"pattern 1", "pattern 2", "idle", "off"
};
#endif

/*
 * These are source-specific values; current Intel hardware supports
 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
 */

static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_800;
	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_1200;
	else
		return DP_TRAIN_VOLTAGE_SWING_800;
}

/*
 * Maximum pre-emphasis the source supports for a given voltage swing;
 * platform-dependent (HSW / IVB CPU eDP / everything else).
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_HASWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_9_5;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	}
}
1491 } 1492 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { 1493 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1494 case DP_TRAIN_VOLTAGE_SWING_400: 1495 return DP_TRAIN_PRE_EMPHASIS_6; 1496 case DP_TRAIN_VOLTAGE_SWING_600: 1497 case DP_TRAIN_VOLTAGE_SWING_800: 1498 return DP_TRAIN_PRE_EMPHASIS_3_5; 1499 default: 1500 return DP_TRAIN_PRE_EMPHASIS_0; 1501 } 1502 } else { 1503 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1504 case DP_TRAIN_VOLTAGE_SWING_400: 1505 return DP_TRAIN_PRE_EMPHASIS_6; 1506 case DP_TRAIN_VOLTAGE_SWING_600: 1507 return DP_TRAIN_PRE_EMPHASIS_6; 1508 case DP_TRAIN_VOLTAGE_SWING_800: 1509 return DP_TRAIN_PRE_EMPHASIS_3_5; 1510 case DP_TRAIN_VOLTAGE_SWING_1200: 1511 default: 1512 return DP_TRAIN_PRE_EMPHASIS_0; 1513 } 1514 } 1515 } 1516 1517 static void 1518 intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1519 { 1520 uint8_t v = 0; 1521 uint8_t p = 0; 1522 int lane; 1523 uint8_t voltage_max; 1524 uint8_t preemph_max; 1525 1526 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1527 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); 1528 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); 1529 1530 if (this_v > v) 1531 v = this_v; 1532 if (this_p > p) 1533 p = this_p; 1534 } 1535 1536 voltage_max = intel_dp_voltage_max(intel_dp); 1537 if (v >= voltage_max) 1538 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; 1539 1540 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); 1541 if (p >= preemph_max) 1542 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 1543 1544 for (lane = 0; lane < 4; lane++) 1545 intel_dp->train_set[lane] = v | p; 1546 } 1547 1548 static uint32_t 1549 intel_dp_signal_levels(uint8_t train_set) 1550 { 1551 uint32_t signal_levels = 0; 1552 1553 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 1554 case DP_TRAIN_VOLTAGE_SWING_400: 1555 default: 1556 signal_levels |= DP_VOLTAGE_0_4; 1557 break; 
1558 case DP_TRAIN_VOLTAGE_SWING_600: 1559 signal_levels |= DP_VOLTAGE_0_6; 1560 break; 1561 case DP_TRAIN_VOLTAGE_SWING_800: 1562 signal_levels |= DP_VOLTAGE_0_8; 1563 break; 1564 case DP_TRAIN_VOLTAGE_SWING_1200: 1565 signal_levels |= DP_VOLTAGE_1_2; 1566 break; 1567 } 1568 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 1569 case DP_TRAIN_PRE_EMPHASIS_0: 1570 default: 1571 signal_levels |= DP_PRE_EMPHASIS_0; 1572 break; 1573 case DP_TRAIN_PRE_EMPHASIS_3_5: 1574 signal_levels |= DP_PRE_EMPHASIS_3_5; 1575 break; 1576 case DP_TRAIN_PRE_EMPHASIS_6: 1577 signal_levels |= DP_PRE_EMPHASIS_6; 1578 break; 1579 case DP_TRAIN_PRE_EMPHASIS_9_5: 1580 signal_levels |= DP_PRE_EMPHASIS_9_5; 1581 break; 1582 } 1583 return signal_levels; 1584 } 1585 1586 /* Gen6's DP voltage swing and pre-emphasis control */ 1587 static uint32_t 1588 intel_gen6_edp_signal_levels(uint8_t train_set) 1589 { 1590 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1591 DP_TRAIN_PRE_EMPHASIS_MASK); 1592 switch (signal_levels) { 1593 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1594 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 1595 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 1596 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 1597 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 1598 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1599 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: 1600 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 1601 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 1602 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 1603 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 1604 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1605 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: 1606 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 1607 default: 1608 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1609 "0x%x\n", signal_levels); 1610 return 
EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 1611 } 1612 } 1613 1614 /* Gen7's DP voltage swing and pre-emphasis control */ 1615 static uint32_t 1616 intel_gen7_edp_signal_levels(uint8_t train_set) 1617 { 1618 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1619 DP_TRAIN_PRE_EMPHASIS_MASK); 1620 switch (signal_levels) { 1621 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1622 return EDP_LINK_TRAIN_400MV_0DB_IVB; 1623 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 1624 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 1625 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1626 return EDP_LINK_TRAIN_400MV_6DB_IVB; 1627 1628 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 1629 return EDP_LINK_TRAIN_600MV_0DB_IVB; 1630 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 1631 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 1632 1633 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1634 return EDP_LINK_TRAIN_800MV_0DB_IVB; 1635 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 1636 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 1637 1638 default: 1639 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1640 "0x%x\n", signal_levels); 1641 return EDP_LINK_TRAIN_500MV_0DB_IVB; 1642 } 1643 } 1644 1645 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ 1646 static uint32_t 1647 intel_dp_signal_levels_hsw(uint8_t train_set) 1648 { 1649 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1650 DP_TRAIN_PRE_EMPHASIS_MASK); 1651 switch (signal_levels) { 1652 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1653 return DDI_BUF_EMP_400MV_0DB_HSW; 1654 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 1655 return DDI_BUF_EMP_400MV_3_5DB_HSW; 1656 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1657 return DDI_BUF_EMP_400MV_6DB_HSW; 1658 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5: 1659 return DDI_BUF_EMP_400MV_9_5DB_HSW; 1660 1661 case 
/*
 * Program the requested training pattern into the hardware (DDI DP_TP_CTL
 * on Haswell, the port register's train field otherwise), then mirror it
 * into the sink's DPCD TRAINING_PATTERN_SET and push the per-lane drive
 * settings.  Returns false if the lane-set AUX write fails.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t dp_reg_value,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int ret;
	uint32_t temp;

	if (IS_HASWELL(dev)) {
		temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			/* Must send idle patterns and wait for idle-done
			 * before switching to the normal pixel stream. */
			temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
			I915_WRITE(DP_TP_CTL(port), temp);

			if (wait_for((I915_READ(DP_TP_STATUS(port)) &
				      DP_TP_STATUS_IDLE_DONE), 1))
				DRM_ERROR("Timed out waiting for DP idle patterns\n");

			temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) &&
		   (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Hardware can't do TPS3; fall back to pattern 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Hardware can't do TPS3; fall back to pattern 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, dp_reg_value);
	POSTING_READ(intel_dp->output_reg);

	intel_dp_aux_native_write_1(intel_dp,
				    DP_TRAINING_PATTERN_SET,
				    dp_train_pat);

	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
	    DP_TRAINING_PATTERN_DISABLE) {
		ret = intel_dp_aux_native_write(intel_dp,
						DP_TRAINING_LANE0_SET,
						intel_dp->train_set,
						intel_dp->lane_count);
		if (ret != intel_dp->lane_count)
			return false;
	}

	return true;
}

/* Enable corresponding port and start training pattern 1 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	bool clock_recovery = false;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;

	if (IS_HASWELL(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
				  intel_dp->link_configuration,
				  DP_LINK_CONFIGURATION_SIZE);

	DP |= DP_PORT_EN;

	memset(intel_dp->train_set, 0, 4);
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	clock_recovery = false;
	/* Clock-recovery loop: drive pattern 1, read status, adjust levels. */
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint8_t link_status[DP_LINK_STATUS_SIZE];
		uint32_t signal_levels;

		if (IS_HASWELL(dev)) {
			signal_levels = intel_dp_signal_levels_hsw(
							intel_dp->train_set[0]);
			DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
		} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
		} else {
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
		}
		DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
			      signal_levels);

		/* Set training pattern 1 */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_1 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			clock_recovery = true;
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes saturated: restart from zero, at most 5x. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_DEBUG_KMS("too many full retries, give up\n");
				break;
			}
			memset(intel_dp->train_set, 0, 4);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_DEBUG_KMS("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
	}

	intel_dp->DP = DP;
}
/*
 * Second training phase: channel equalization with pattern 2.  Re-runs
 * clock recovery if it is lost, retries equalization up to 5 times, and
 * gives up entirely after 5 clock-recovery restarts.  Always ends by
 * disabling the training pattern.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;

	/* channel equalization */
	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint32_t signal_levels;
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			intel_dp_link_down(intel_dp);
			break;
		}

		/* Same platform-specific level selection as pattern-1 phase. */
		if (IS_HASWELL(dev)) {
			signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
			DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
		} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) &&
			   !IS_VALLEYVIEW(dev)) {
			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
		} else {
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
		}

		/* channel eq pattern */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_2 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status))
			break;

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
		++tries;
	}

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n");

	intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
}
/*
 * Take the DP link down: idle the training field, clear the IBX pipe-B
 * select (hardware workaround), then disable the port.  intel_dp->DP is
 * deliberately left untouched so a later retrain reuses the same config.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/*
	 * DDI code has a strict mode set sequence and we should try to respect
	 * it, otherwise we might hang the machine in many different ways. So we
	 * really should be disabling the port only on a complete crtc_disable
	 * sequence. This function is just called under two conditions on DDI
	 * code:
	 * - Link train failed while doing crtc_enable, and on this case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port on this case, so
	 *   when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (IS_HASWELL(dev))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	/* 17 ms — roughly one vblank; presumably lets the idle pattern take
	 * effect before the workaround below.  TODO confirm rationale. */
	msleep(17);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (crtc == NULL) {
			/* We can arrive here never having been attached
			 * to a CRTC, for instance, due to inheriting
			 * random state from the BIOS.
			 *
			 * If the pipe is not running, play safe and
			 * wait for the clocks to stabilise before
			 * continuing.
			 */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
	}

	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}

/*
 * Read the sink's DPCD receiver-capability block, plus per-port downstream
 * info when a branch device reports it.  Returns false on AUX failure or
 * an absent/implausible DPCD.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) == 0)
		return false; /* aux transfer failed */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
					   intel_dp->downstream_ports,
					   DP_MAX_DOWNSTREAM_PORTS) == 0)
		return false; /* downstream port status fetch failed */

	return true;
}

/*
 * Debug helper: log the sink and branch IEEE OUIs if the sink advertises
 * OUI support.  Takes VDD around the AUX reads.
 */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	ironlake_edp_panel_vdd_off(intel_dp, false);
}

/*
 * Read the sink's service-IRQ vector (DPCD 0x201) into *sink_irq_vector.
 * Returns false if the AUX read fails.
 */
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	int ret;

	ret = intel_dp_aux_native_read_retry(intel_dp,
					     DP_DEVICE_SERVICE_IRQ_VECTOR,
					     sink_irq_vector, 1);
	if (!ret)
		return false;

	return true;
}
/* Respond to a sink automated-test request.  Currently unimplemented, so
 * NAK every request. */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 */

void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Only meaningful while the output is supposed to be active. */
	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain if equalization was lost (e.g. cable glitch). */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_encoder->base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
	}
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	bool hpd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
	if (hpd) {
		uint8_t reg;
		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
						    &reg, 1))
			return connector_status_unknown;
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(intel_dp->adapter))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
	if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
		return connector_status_unknown;

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum drm_connector_status status;

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		status = intel_panel_detect(dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	return intel_dp_detect_dpcd(intel_dp);
}
*/ 2215 if (is_edp(intel_dp)) { 2216 status = intel_panel_detect(dev); 2217 if (status == connector_status_unknown) 2218 status = connector_status_connected; 2219 return status; 2220 } 2221 2222 return intel_dp_detect_dpcd(intel_dp); 2223 } 2224 2225 static enum drm_connector_status 2226 g4x_dp_detect(struct intel_dp *intel_dp) 2227 { 2228 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2229 struct drm_i915_private *dev_priv = dev->dev_private; 2230 uint32_t bit; 2231 2232 switch (intel_dp->output_reg) { 2233 case DP_B: 2234 bit = DPB_HOTPLUG_LIVE_STATUS; 2235 break; 2236 case DP_C: 2237 bit = DPC_HOTPLUG_LIVE_STATUS; 2238 break; 2239 case DP_D: 2240 bit = DPD_HOTPLUG_LIVE_STATUS; 2241 break; 2242 default: 2243 return connector_status_unknown; 2244 } 2245 2246 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) 2247 return connector_status_disconnected; 2248 2249 return intel_dp_detect_dpcd(intel_dp); 2250 } 2251 2252 static struct edid * 2253 intel_dp_get_edid(struct drm_connector *connector, struct device *adapter) 2254 { 2255 struct intel_connector *intel_connector = to_intel_connector(connector); 2256 2257 /* use cached edid if we have one */ 2258 if (intel_connector->edid) { 2259 struct edid *edid; 2260 int size; 2261 2262 /* invalid edid */ 2263 if (IS_ERR(intel_connector->edid)) 2264 return NULL; 2265 2266 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH; 2267 edid = kmalloc(size, M_DRM, M_WAITOK); 2268 if (!edid) 2269 return NULL; 2270 2271 memcpy(edid, intel_connector->edid, size); 2272 return edid; 2273 } 2274 2275 return drm_get_edid(connector, adapter); 2276 } 2277 2278 static int 2279 intel_dp_get_edid_modes(struct drm_connector *connector, struct device *adapter) 2280 { 2281 struct intel_connector *intel_connector = to_intel_connector(connector); 2282 2283 /* use cached edid if we have one */ 2284 if (intel_connector->edid) { 2285 /* invalid edid */ 2286 if (IS_ERR(intel_connector->edid)) 2287 return 0; 2288 2289 return 
intel_connector_update_modes(connector, 2290 intel_connector->edid); 2291 } 2292 2293 return intel_ddc_get_modes(connector, adapter); 2294 } 2295 2296 2297 /** 2298 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. 2299 * 2300 * \return true if DP port is connected. 2301 * \return false if DP port is disconnected. 2302 */ 2303 static enum drm_connector_status 2304 intel_dp_detect(struct drm_connector *connector, bool force) 2305 { 2306 struct intel_dp *intel_dp = intel_attached_dp(connector); 2307 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2308 struct intel_encoder *intel_encoder = &intel_dig_port->base; 2309 struct drm_device *dev = connector->dev; 2310 enum drm_connector_status status; 2311 struct edid *edid = NULL; 2312 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; 2313 2314 intel_dp->has_audio = false; 2315 2316 if (HAS_PCH_SPLIT(dev)) 2317 status = ironlake_dp_detect(intel_dp); 2318 else 2319 status = g4x_dp_detect(intel_dp); 2320 2321 ksnprintf(dpcd_hex_dump, 2322 sizeof(dpcd_hex_dump), 2323 "%02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n", 2324 intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2], 2325 intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5], 2326 intel_dp->dpcd[6], intel_dp->dpcd[7]); 2327 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); 2328 2329 if (status != connector_status_connected) 2330 return status; 2331 2332 intel_dp_probe_oui(intel_dp); 2333 2334 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { 2335 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); 2336 } else { 2337 edid = intel_dp_get_edid(connector, intel_dp->adapter); 2338 if (edid) { 2339 intel_dp->has_audio = drm_detect_monitor_audio(edid); 2340 kfree(edid, M_DRM); 2341 } 2342 } 2343 2344 if (intel_encoder->type != INTEL_OUTPUT_EDP) 2345 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 2346 return connector_status_connected; 2347 } 2348 2349 static int intel_dp_get_modes(struct drm_connector *connector) 2350 { 2351 struct 
intel_dp *intel_dp = intel_attached_dp(connector); 2352 struct intel_connector *intel_connector = to_intel_connector(connector); 2353 struct drm_device *dev = connector->dev; 2354 int ret; 2355 2356 /* We should parse the EDID data and find out if it has an audio sink 2357 */ 2358 2359 ret = intel_dp_get_edid_modes(connector, intel_dp->adapter); 2360 if (ret) 2361 return ret; 2362 2363 /* if eDP has no EDID, fall back to fixed mode */ 2364 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 2365 struct drm_display_mode *mode; 2366 mode = drm_mode_duplicate(dev, 2367 intel_connector->panel.fixed_mode); 2368 if (mode) { 2369 drm_mode_probed_add(connector, mode); 2370 return 1; 2371 } 2372 } 2373 return 0; 2374 } 2375 2376 static bool 2377 intel_dp_detect_audio(struct drm_connector *connector) 2378 { 2379 struct intel_dp *intel_dp = intel_attached_dp(connector); 2380 struct edid *edid; 2381 bool has_audio = false; 2382 2383 edid = intel_dp_get_edid(connector, intel_dp->adapter); 2384 if (edid) { 2385 has_audio = drm_detect_monitor_audio(edid); 2386 kfree(edid, M_DRM); 2387 } 2388 2389 return has_audio; 2390 } 2391 2392 static int 2393 intel_dp_set_property(struct drm_connector *connector, 2394 struct drm_property *property, 2395 uint64_t val) 2396 { 2397 struct drm_i915_private *dev_priv = connector->dev->dev_private; 2398 struct intel_connector *intel_connector = to_intel_connector(connector); 2399 struct intel_encoder *intel_encoder = intel_attached_encoder(connector); 2400 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2401 int ret; 2402 2403 ret = drm_object_property_set_value(&connector->base, property, val); 2404 if (ret) 2405 return ret; 2406 2407 if (property == dev_priv->force_audio_property) { 2408 int i = val; 2409 bool has_audio; 2410 2411 if (i == intel_dp->force_audio) 2412 return 0; 2413 2414 intel_dp->force_audio = i; 2415 2416 if (i == HDMI_AUDIO_AUTO) 2417 has_audio = intel_dp_detect_audio(connector); 2418 else 2419 
has_audio = (i == HDMI_AUDIO_ON); 2420 2421 if (has_audio == intel_dp->has_audio) 2422 return 0; 2423 2424 intel_dp->has_audio = has_audio; 2425 goto done; 2426 } 2427 2428 if (property == dev_priv->broadcast_rgb_property) { 2429 if (val == !!intel_dp->color_range) 2430 return 0; 2431 2432 intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0; 2433 goto done; 2434 } 2435 2436 if (is_edp(intel_dp) && 2437 property == connector->dev->mode_config.scaling_mode_property) { 2438 if (val == DRM_MODE_SCALE_NONE) { 2439 DRM_DEBUG_KMS("no scaling not supported\n"); 2440 return -EINVAL; 2441 } 2442 2443 if (intel_connector->panel.fitting_mode == val) { 2444 /* the eDP scaling property is not changed */ 2445 return 0; 2446 } 2447 intel_connector->panel.fitting_mode = val; 2448 2449 goto done; 2450 } 2451 2452 return -EINVAL; 2453 2454 done: 2455 if (intel_encoder->base.crtc) { 2456 struct drm_crtc *crtc = intel_encoder->base.crtc; 2457 intel_set_mode(crtc, &crtc->mode, 2458 crtc->x, crtc->y, crtc->fb); 2459 } 2460 2461 return 0; 2462 } 2463 2464 static void 2465 intel_dp_destroy(struct drm_connector *connector) 2466 { 2467 struct intel_dp *intel_dp = intel_attached_dp(connector); 2468 struct intel_connector *intel_connector = to_intel_connector(connector); 2469 2470 if (!IS_ERR_OR_NULL(intel_connector->edid)) 2471 kfree(intel_connector->edid, M_DRM); 2472 2473 if (is_edp(intel_dp)) 2474 intel_panel_fini(&intel_connector->panel); 2475 2476 #if 0 2477 drm_sysfs_connector_remove(connector); 2478 #endif 2479 drm_connector_cleanup(connector); 2480 kfree(connector, M_DRM); 2481 } 2482 2483 void intel_dp_encoder_destroy(struct drm_encoder *encoder) 2484 { 2485 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 2486 struct intel_dp *intel_dp = &intel_dig_port->dp; 2487 struct drm_device *dev = encoder->dev; 2488 2489 if (intel_dp->dp_iic_bus != NULL) { 2490 if (intel_dp->adapter != NULL) { 2491 device_delete_child(intel_dp->dp_iic_bus, 2492 intel_dp->adapter); 
2493 } 2494 device_delete_child(dev->dev, intel_dp->dp_iic_bus); 2495 } 2496 drm_encoder_cleanup(encoder); 2497 if (is_edp(intel_dp)) { 2498 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 2499 ironlake_panel_vdd_off_sync(intel_dp); 2500 } 2501 kfree(intel_dig_port, M_DRM); 2502 } 2503 2504 static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 2505 .mode_fixup = intel_dp_mode_fixup, 2506 .mode_set = intel_dp_mode_set, 2507 .disable = intel_encoder_noop, 2508 }; 2509 2510 static const struct drm_connector_funcs intel_dp_connector_funcs = { 2511 .dpms = intel_connector_dpms, 2512 .detect = intel_dp_detect, 2513 .fill_modes = drm_helper_probe_single_connector_modes, 2514 .set_property = intel_dp_set_property, 2515 .destroy = intel_dp_destroy, 2516 }; 2517 2518 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 2519 .get_modes = intel_dp_get_modes, 2520 .mode_valid = intel_dp_mode_valid, 2521 .best_encoder = intel_best_encoder, 2522 }; 2523 2524 static const struct drm_encoder_funcs intel_dp_enc_funcs = { 2525 .destroy = intel_dp_encoder_destroy, 2526 }; 2527 2528 static void 2529 intel_dp_hot_plug(struct intel_encoder *intel_encoder) 2530 { 2531 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2532 2533 intel_dp_check_link_status(intel_dp); 2534 } 2535 2536 /* Return which DP Port should be selected for Transcoder DP control */ 2537 int 2538 intel_trans_dp_port_sel(struct drm_crtc *crtc) 2539 { 2540 struct drm_device *dev = crtc->dev; 2541 struct intel_encoder *intel_encoder; 2542 struct intel_dp *intel_dp; 2543 2544 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 2545 intel_dp = enc_to_intel_dp(&intel_encoder->base); 2546 2547 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 2548 intel_encoder->type == INTEL_OUTPUT_EDP) 2549 return intel_dp->output_reg; 2550 } 2551 2552 return -1; 2553 } 2554 2555 /* check the VBT to see whether the eDP is on DP-D port */ 2556 bool 
intel_dpd_is_edp(struct drm_device *dev) 2557 { 2558 struct drm_i915_private *dev_priv = dev->dev_private; 2559 struct child_device_config *p_child; 2560 int i; 2561 2562 if (!dev_priv->child_dev_num) 2563 return false; 2564 2565 for (i = 0; i < dev_priv->child_dev_num; i++) { 2566 p_child = dev_priv->child_dev + i; 2567 2568 if (p_child->dvo_port == PORT_IDPD && 2569 p_child->device_type == DEVICE_TYPE_eDP) 2570 return true; 2571 } 2572 return false; 2573 } 2574 2575 static void 2576 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 2577 { 2578 struct intel_connector *intel_connector = to_intel_connector(connector); 2579 2580 intel_attach_force_audio_property(connector); 2581 intel_attach_broadcast_rgb_property(connector); 2582 2583 if (is_edp(intel_dp)) { 2584 drm_mode_create_scaling_mode_property(connector->dev); 2585 drm_object_attach_property( 2586 &connector->base, 2587 connector->dev->mode_config.scaling_mode_property, 2588 DRM_MODE_SCALE_ASPECT); 2589 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; 2590 } 2591 } 2592 2593 static void 2594 intel_dp_init_panel_power_sequencer(struct drm_device *dev, 2595 struct intel_dp *intel_dp, 2596 struct edp_power_seq *out) 2597 { 2598 struct drm_i915_private *dev_priv = dev->dev_private; 2599 struct edp_power_seq cur, vbt, spec, final; 2600 u32 pp_on, pp_off, pp_div, pp; 2601 2602 /* Workaround: Need to write PP_CONTROL with the unlock key as 2603 * the very first thing. 
*/ 2604 pp = ironlake_get_pp_control(dev_priv); 2605 I915_WRITE(PCH_PP_CONTROL, pp); 2606 2607 pp_on = I915_READ(PCH_PP_ON_DELAYS); 2608 pp_off = I915_READ(PCH_PP_OFF_DELAYS); 2609 pp_div = I915_READ(PCH_PP_DIVISOR); 2610 2611 /* Pull timing values out of registers */ 2612 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> 2613 PANEL_POWER_UP_DELAY_SHIFT; 2614 2615 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> 2616 PANEL_LIGHT_ON_DELAY_SHIFT; 2617 2618 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> 2619 PANEL_LIGHT_OFF_DELAY_SHIFT; 2620 2621 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> 2622 PANEL_POWER_DOWN_DELAY_SHIFT; 2623 2624 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> 2625 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; 2626 2627 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 2628 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); 2629 2630 vbt = dev_priv->edp.pps; 2631 2632 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of 2633 * our hw here, which are all in 100usec. */ 2634 spec.t1_t3 = 210 * 10; 2635 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ 2636 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ 2637 spec.t10 = 500 * 10; 2638 /* This one is special and actually in units of 100ms, but zero 2639 * based in the hw (so we need to add 100 ms). But the sw vbt 2640 * table multiplies it with 1000 to make it in units of 100usec, 2641 * too. */ 2642 spec.t11_t12 = (510 + 100) * 10; 2643 2644 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 2645 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); 2646 2647 /* Use the max of the register settings and vbt. If both are 2648 * unset, fall back to the spec limits. */ 2649 #define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? 
\ 2650 spec.field : \ 2651 max(cur.field, vbt.field)) 2652 assign_final(t1_t3); 2653 assign_final(t8); 2654 assign_final(t9); 2655 assign_final(t10); 2656 assign_final(t11_t12); 2657 #undef assign_final 2658 2659 #define get_delay(field) (DIV_ROUND_UP(final.field, 10)) 2660 intel_dp->panel_power_up_delay = get_delay(t1_t3); 2661 intel_dp->backlight_on_delay = get_delay(t8); 2662 intel_dp->backlight_off_delay = get_delay(t9); 2663 intel_dp->panel_power_down_delay = get_delay(t10); 2664 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 2665 #undef get_delay 2666 2667 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", 2668 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, 2669 intel_dp->panel_power_cycle_delay); 2670 2671 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 2672 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 2673 2674 if (out) 2675 *out = final; 2676 } 2677 2678 static void 2679 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, 2680 struct intel_dp *intel_dp, 2681 struct edp_power_seq *seq) 2682 { 2683 struct drm_i915_private *dev_priv = dev->dev_private; 2684 u32 pp_on, pp_off, pp_div; 2685 2686 /* And finally store the new values in the power sequencer. */ 2687 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | 2688 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); 2689 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | 2690 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); 2691 /* Compute the divisor for the pp clock, simply match the Bspec 2692 * formula. */ 2693 pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1) 2694 << PP_REFERENCE_DIVIDER_SHIFT; 2695 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) 2696 << PANEL_POWER_CYCLE_DELAY_SHIFT); 2697 2698 /* Haswell doesn't have any port selection bits for the panel 2699 * power sequencer any more. 
*/ 2700 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 2701 if (is_cpu_edp(intel_dp)) 2702 pp_on |= PANEL_POWER_PORT_DP_A; 2703 else 2704 pp_on |= PANEL_POWER_PORT_DP_D; 2705 } 2706 2707 I915_WRITE(PCH_PP_ON_DELAYS, pp_on); 2708 I915_WRITE(PCH_PP_OFF_DELAYS, pp_off); 2709 I915_WRITE(PCH_PP_DIVISOR, pp_div); 2710 2711 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 2712 I915_READ(PCH_PP_ON_DELAYS), 2713 I915_READ(PCH_PP_OFF_DELAYS), 2714 I915_READ(PCH_PP_DIVISOR)); 2715 } 2716 2717 void 2718 intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 2719 struct intel_connector *intel_connector) 2720 { 2721 struct drm_connector *connector = &intel_connector->base; 2722 struct intel_dp *intel_dp = &intel_dig_port->dp; 2723 struct intel_encoder *intel_encoder = &intel_dig_port->base; 2724 struct drm_device *dev = intel_encoder->base.dev; 2725 struct drm_i915_private *dev_priv = dev->dev_private; 2726 struct drm_display_mode *fixed_mode = NULL; 2727 struct edp_power_seq power_seq = { 0 }; 2728 enum port port = intel_dig_port->port; 2729 const char *name = NULL; 2730 int type; 2731 2732 /* Preserve the current hw state. */ 2733 intel_dp->DP = I915_READ(intel_dp->output_reg); 2734 intel_dp->attached_connector = intel_connector; 2735 2736 if (HAS_PCH_SPLIT(dev) && port == PORT_D) 2737 if (intel_dpd_is_edp(dev)) 2738 intel_dp->is_pch_edp = true; 2739 2740 /* 2741 * FIXME : We need to initialize built-in panels before external panels. 2742 * For X0, DP_C is fixed as eDP. 
Revisit this as part of VLV eDP cleanup 2743 */ 2744 if (IS_VALLEYVIEW(dev) && port == PORT_C) { 2745 type = DRM_MODE_CONNECTOR_eDP; 2746 intel_encoder->type = INTEL_OUTPUT_EDP; 2747 } else if (port == PORT_A || is_pch_edp(intel_dp)) { 2748 type = DRM_MODE_CONNECTOR_eDP; 2749 intel_encoder->type = INTEL_OUTPUT_EDP; 2750 } else { 2751 /* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for 2752 * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't 2753 * rewrite it. 2754 */ 2755 type = DRM_MODE_CONNECTOR_DisplayPort; 2756 } 2757 2758 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 2759 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 2760 2761 connector->polled = DRM_CONNECTOR_POLL_HPD; 2762 connector->interlace_allowed = true; 2763 connector->doublescan_allowed = 0; 2764 2765 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, 2766 ironlake_panel_vdd_work); 2767 2768 intel_connector_attach_encoder(intel_connector, intel_encoder); 2769 #if 0 2770 drm_sysfs_connector_add(connector); 2771 #endif 2772 2773 if (IS_HASWELL(dev)) 2774 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 2775 else 2776 intel_connector->get_hw_state = intel_connector_get_hw_state; 2777 2778 2779 /* Set up the DDC bus. */ 2780 switch (port) { 2781 case PORT_A: 2782 name = "DPDDC-A"; 2783 break; 2784 case PORT_B: 2785 dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS; 2786 name = "DPDDC-B"; 2787 break; 2788 case PORT_C: 2789 dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS; 2790 name = "DPDDC-C"; 2791 break; 2792 case PORT_D: 2793 dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS; 2794 name = "DPDDC-D"; 2795 break; 2796 default: 2797 WARN(1, "Invalid port %c\n", port_name(port)); 2798 break; 2799 } 2800 2801 if (is_edp(intel_dp)) 2802 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 2803 2804 intel_dp_i2c_init(intel_dp, intel_connector, name); 2805 2806 /* Cache DPCD and EDID for edp. 
*/ 2807 if (is_edp(intel_dp)) { 2808 bool ret; 2809 struct drm_display_mode *scan; 2810 struct edid *edid; 2811 2812 ironlake_edp_panel_vdd_on(intel_dp); 2813 ret = intel_dp_get_dpcd(intel_dp); 2814 ironlake_edp_panel_vdd_off(intel_dp, false); 2815 2816 if (ret) { 2817 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 2818 dev_priv->no_aux_handshake = 2819 intel_dp->dpcd[DP_MAX_DOWNSPREAD] & 2820 DP_NO_AUX_HANDSHAKE_LINK_TRAINING; 2821 } else { 2822 /* if this fails, presume the device is a ghost */ 2823 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 2824 intel_dp_encoder_destroy(&intel_encoder->base); 2825 intel_dp_destroy(connector); 2826 return; 2827 } 2828 2829 /* We now know it's not a ghost, init power sequence regs. */ 2830 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 2831 &power_seq); 2832 2833 ironlake_edp_panel_vdd_on(intel_dp); 2834 edid = drm_get_edid(connector, intel_dp->adapter); 2835 if (edid) { 2836 if (drm_add_edid_modes(connector, edid)) { 2837 drm_mode_connector_update_edid_property(connector, edid); 2838 drm_edid_to_eld(connector, edid); 2839 } else { 2840 kfree(edid, M_DRM); 2841 edid = ERR_PTR(-EINVAL); 2842 } 2843 } else { 2844 edid = ERR_PTR(-ENOENT); 2845 } 2846 intel_connector->edid = edid; 2847 2848 /* prefer fixed mode from EDID if available */ 2849 list_for_each_entry(scan, &connector->probed_modes, head) { 2850 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { 2851 fixed_mode = drm_mode_duplicate(dev, scan); 2852 break; 2853 } 2854 } 2855 2856 /* fallback to VBT if available for eDP */ 2857 if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) { 2858 fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); 2859 if (fixed_mode) 2860 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 2861 } 2862 2863 ironlake_edp_panel_vdd_off(intel_dp, false); 2864 } 2865 2866 if (is_edp(intel_dp)) { 2867 intel_panel_init(&intel_connector->panel, fixed_mode); 2868 intel_panel_setup_backlight(connector); 2869 } 2870 2871 
intel_dp_add_properties(intel_dp, connector); 2872 2873 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 2874 * 0xd. Failure to do so will result in spurious interrupts being 2875 * generated on the port when a cable is not attached. 2876 */ 2877 if (IS_G4X(dev) && !IS_GM45(dev)) { 2878 u32 temp = I915_READ(PEG_BAND_GAP_DATA); 2879 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 2880 } 2881 } 2882 2883 void 2884 intel_dp_init(struct drm_device *dev, int output_reg, enum port port) 2885 { 2886 struct intel_digital_port *intel_dig_port; 2887 struct intel_encoder *intel_encoder; 2888 struct drm_encoder *encoder; 2889 struct intel_connector *intel_connector; 2890 2891 intel_dig_port = kmalloc(sizeof(struct intel_digital_port), M_DRM, 2892 M_WAITOK | M_ZERO); 2893 if (!intel_dig_port) 2894 return; 2895 2896 intel_connector = kmalloc(sizeof(struct intel_connector), M_DRM, 2897 M_WAITOK | M_ZERO); 2898 if (!intel_connector) { 2899 kfree(intel_dig_port, M_DRM); 2900 return; 2901 } 2902 2903 intel_encoder = &intel_dig_port->base; 2904 encoder = &intel_encoder->base; 2905 2906 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, 2907 DRM_MODE_ENCODER_TMDS); 2908 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); 2909 2910 intel_encoder->enable = intel_enable_dp; 2911 intel_encoder->pre_enable = intel_pre_enable_dp; 2912 intel_encoder->disable = intel_disable_dp; 2913 intel_encoder->post_disable = intel_post_disable_dp; 2914 intel_encoder->get_hw_state = intel_dp_get_hw_state; 2915 2916 intel_dig_port->port = port; 2917 intel_dig_port->dp.output_reg = output_reg; 2918 2919 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 2920 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 2921 intel_encoder->cloneable = false; 2922 intel_encoder->hot_plug = intel_dp_hot_plug; 2923 2924 intel_dp_init_connector(intel_dig_port, intel_connector); 2925 } 2926