/*	$NetBSD: intel_dp.c,v 1.5 2021/12/19 12:03:57 riastradh Exp $	*/

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_dp.c,v 1.5 2021/12/19 12:03:57 riastradh Exp $");

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below only provides the fixed rates and excludes the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct intel_dp *intel_attached_dp(struct intel_connector *connector)
{
	return enc_to_intel_dp(intel_attached_encoder(connector));
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	int source_max = intel_dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since, 8 bits of data
	 * is transmitted every LS_Clk per lane, there is no need to account for
	 * the channel encoding that is done in the PHY layer here.
	 */

	return max_link_clock * max_lanes;
}

static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !IS_ELKHARTLAKE(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[dig_port->base.port];
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;

	/* This should only be done once */
	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
					u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}

static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
			 intel_dp_mode_to_fec_clock(mode_clock);
	DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;
	DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
			      bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}

static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay)
{
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
			      max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = min_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		if (valid_dsc_slicecount[i] >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						    false))
			break;
		if (min_slice_count <= valid_dsc_slicecount[i])
			return valid_dsc_slicecount[i];
	}

	DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
	return 0;
}

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return intel_mode_valid_max_plane_size(dev_priv, mode);
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}

#define with_pps_lock(dp, wf)						\
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		 pipe_name(pipe), intel_dig_port->base.base.base.id,
		 intel_dig_port->base.base.name))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		      pipe_name(pipe), intel_dig_port->base.base.base.id,
		      intel_dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable temporarily it if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
				intel_dp->active_pipe != intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		      pipe_name(intel_dp->pps_pipe),
		      intel_dig_port->base.base.base.id,
		      intel_dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for [ENCODER:%d:%s]\n",
			      intel_dig_port->base.base.base.id,
			      intel_dig_port->base.base.name);
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		      intel_dig_port->base.base.base.id,
		      intel_dig_port->base.base.name,
		      pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_GEN9_LP(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
   This function is only applicable when the panel PM state is not to be tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg  = PP_DIVISOR(pipe);
			pp_div = I915_READ(pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			I915_WRITE(pp_div_reg, pp_div | 0x1F);
			I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
#ifdef __NetBSD__
	if (!cold) {
		int ret;
		spin_lock(&i915->gmbus_wait_lock);
		DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret,
		    &i915->gmbus_wait_queue, &i915->gmbus_wait_lock,
		    msecs_to_jiffies_timeout(timeout_ms),
		    C);
		if (ret < 0)		/* Failure: pretend same as done.  */
			done = true;
		else if (ret == 0)	/* Timed out: not done.  */
			done = false;
		else			/* Succeeded (ret > 0): done.  */
			done = true;
		spin_unlock(&i915->gmbus_wait_lock);
	} else {
		done = wait_for_atomic(C, timeout_ms) == 0;
	}
#else
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

#endif

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		DRM_ERROR("%s did not complete or timeout within %ums (status 0x%08x)\n",
			  intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
	else
		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(intel_dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_phy_is_tc(i915, phy) &&
	    intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(intel_dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(intel_dig_port);
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(intel_dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(intel_dig_port);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}

static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}


static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
	case AUX_CH_G:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
	case AUX_CH_G:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(__UNCONST(intel_dp->aux.name));
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	char str[128]; /* FIXME: too big for stack? */

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (WARN_ON(len <= 0))
		return 162000;

	return intel_dp->common_rates[len - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (WARN_ON(i < 0))
		i = 0;

	return i;
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}

static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* On TGL, FEC is supported on all Pipes */
	if (INTEL_GEN(dev_priv) >= 12)
		return true;

	if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
		return true;

	return false;
}

static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}

static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable)
		return false;

	return intel_dsc_source_support(encoder, crtc_state) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}

static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpp = pipe_config->pipe_bpp;
	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);

	if (bpc > 0)
		bpp = min(bpp, 3*bpc);

	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}

/* Adjust link config limits based on compliance test requests. */
*/ 1957 void 1958 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, 1959 struct intel_crtc_state *pipe_config, 1960 struct link_config_limits *limits) 1961 { 1962 /* For DP Compliance we override the computed bpp for the pipe */ 1963 if (intel_dp->compliance.test_data.bpc != 0) { 1964 int bpp = 3 * intel_dp->compliance.test_data.bpc; 1965 1966 limits->min_bpp = limits->max_bpp = bpp; 1967 pipe_config->dither_force_disable = bpp == 6 * 3; 1968 1969 DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp); 1970 } 1971 1972 /* Use values requested by Compliance Test Request */ 1973 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 1974 int index; 1975 1976 /* Validate the compliance test data since max values 1977 * might have changed due to link train fallback. 1978 */ 1979 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, 1980 intel_dp->compliance.test_lane_count)) { 1981 index = intel_dp_rate_index(intel_dp->common_rates, 1982 intel_dp->num_common_rates, 1983 intel_dp->compliance.test_link_rate); 1984 if (index >= 0) 1985 limits->min_clock = limits->max_clock = index; 1986 limits->min_lane_count = limits->max_lane_count = 1987 intel_dp->compliance.test_lane_count; 1988 } 1989 } 1990 } 1991 1992 static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp) 1993 { 1994 /* 1995 * The bpp value is assumed to be for RGB format. For YCbCr 4:2:0 1996 * output the number of bits per pixel is half the RGB value, 1997 * hence the division by two below. 1998 */ 1999 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2000 bpp /= 2; 2001 2002 return bpp; 2003 } 2004 2005 /* Optimize link config in order: max bpp, min clock, min lanes */ 2006 static int 2007 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, 2008 struct intel_crtc_state *pipe_config, 2009 const struct link_config_limits *limits) 2010 { 2011 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2012 int bpp, clock, lane_count; 2013 int mode_rate, link_clock, link_avail; 2014 2015 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { 2016 int output_bpp = intel_dp_output_bpp(pipe_config, bpp); 2017 2018 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 2019 output_bpp); 2020 2021 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { 2022 for (lane_count = limits->min_lane_count; 2023 lane_count <= limits->max_lane_count; 2024 lane_count <<= 1) { 2025 link_clock = intel_dp->common_rates[clock]; 2026 link_avail = intel_dp_max_data_rate(link_clock, 2027 lane_count); 2028 2029 if (mode_rate <= link_avail) { 2030 pipe_config->lane_count = lane_count; 2031 pipe_config->pipe_bpp = bpp; 2032 pipe_config->port_clock = link_clock; 2033 2034 return 0; 2035 } 2036 } 2037 } 2038 } 2039 2040 return -EINVAL; 2041 } 2042 2043 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) 2044 { 2045 int i, num_bpc; 2046 u8 dsc_bpc[3] = {0}; 2047 2048 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, 2049 dsc_bpc); 2050 for (i = 0; i < num_bpc; i++) { 2051 if (dsc_max_bpc >= dsc_bpc[i]) 2052 return dsc_bpc[i] * 3; 2053 } 2054 2055 return 0; 2056 } 2057 2058 #define DSC_SUPPORTED_VERSION_MIN 1 2059 2060 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, 2061 struct intel_crtc_state *crtc_state) 2062 { 2063 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2064 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 2065 u8 line_buf_depth; 2066 int ret; 2067 2068 
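/*
 * Overview of the steps below: start from the baseline parameters filled
 * in by intel_dsc_compute_params(), pick a slice height of 8 or 4 when
 * pic_height is a multiple of it and fall back to 2 otherwise (e.g. a
 * 2160-line mode gets a slice height of 8), take the DSC version, RGB
 * conversion support, line buffer depth and block prediction capability
 * from the sink's DSC DPCD, and finally derive the rate control
 * parameters with drm_dsc_compute_rc_parameters().
 */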
ret = intel_dsc_compute_params(encoder, crtc_state); 2069 if (ret) 2070 return ret; 2071 2072 /* 2073 * Slice Height of 8 works for all currently available panels. So start 2074 * with that if pic_height is an integral multiple of 8. Eventually add 2075 * logic to try multiple slice heights. 2076 */ 2077 if (vdsc_cfg->pic_height % 8 == 0) 2078 vdsc_cfg->slice_height = 8; 2079 else if (vdsc_cfg->pic_height % 4 == 0) 2080 vdsc_cfg->slice_height = 4; 2081 else 2082 vdsc_cfg->slice_height = 2; 2083 2084 vdsc_cfg->dsc_version_major = 2085 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2086 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 2087 vdsc_cfg->dsc_version_minor = 2088 min(DSC_SUPPORTED_VERSION_MIN, 2089 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2090 DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); 2091 2092 vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 2093 DP_DSC_RGB; 2094 2095 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); 2096 if (!line_buf_depth) { 2097 DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n"); 2098 return -EINVAL; 2099 } 2100 2101 if (vdsc_cfg->dsc_version_minor == 2) 2102 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? 2103 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; 2104 else 2105 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? 2106 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; 2107 2108 vdsc_cfg->block_pred_enable = 2109 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 2110 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 2111 2112 return drm_dsc_compute_rc_parameters(vdsc_cfg); 2113 } 2114 2115 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, 2116 struct intel_crtc_state *pipe_config, 2117 struct drm_connector_state *conn_state, 2118 struct link_config_limits *limits) 2119 { 2120 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2121 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 2122 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2123 u8 dsc_max_bpc; 2124 int pipe_bpp; 2125 int ret; 2126 2127 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && 2128 intel_dp_supports_fec(intel_dp, pipe_config); 2129 2130 if (!intel_dp_supports_dsc(intel_dp, pipe_config)) 2131 return -EINVAL; 2132 2133 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 2134 if (INTEL_GEN(dev_priv) >= 12) 2135 dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc); 2136 else 2137 dsc_max_bpc = min_t(u8, 10, 2138 conn_state->max_requested_bpc); 2139 2140 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); 2141 2142 /* Min Input BPC for ICL+ is 8 */ 2143 if (pipe_bpp < 8 * 3) { 2144 DRM_DEBUG_KMS("No DSC support for less than 8bpc\n"); 2145 return -EINVAL; 2146 } 2147 2148 /* 2149 * For now enable DSC for max bpp, max link rate, max lane count. 2150 * Optimize this later for the minimum possible link rate/lane count 2151 * with DSC enabled for the requested mode. 
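 * Even with those maximums the config below can still fail: a non-eDP
 * sink must report a usable compressed bpp and slice count, and modes
 * whose pixel clock exceeds the max CD clock additionally need more
 * than one slice so the stream can be split across two VDSC instances.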
2152 */ 2153 pipe_config->pipe_bpp = pipe_bpp; 2154 pipe_config->port_clock = intel_dp->common_rates[limits->max_clock]; 2155 pipe_config->lane_count = limits->max_lane_count; 2156 2157 if (intel_dp_is_edp(intel_dp)) { 2158 pipe_config->dsc.compressed_bpp = 2159 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, 2160 pipe_config->pipe_bpp); 2161 pipe_config->dsc.slice_count = 2162 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 2163 true); 2164 } else { 2165 u16 dsc_max_output_bpp; 2166 u8 dsc_dp_slice_count; 2167 2168 dsc_max_output_bpp = 2169 intel_dp_dsc_get_output_bpp(dev_priv, 2170 pipe_config->port_clock, 2171 pipe_config->lane_count, 2172 adjusted_mode->crtc_clock, 2173 adjusted_mode->crtc_hdisplay); 2174 dsc_dp_slice_count = 2175 intel_dp_dsc_get_slice_count(intel_dp, 2176 adjusted_mode->crtc_clock, 2177 adjusted_mode->crtc_hdisplay); 2178 if (!dsc_max_output_bpp || !dsc_dp_slice_count) { 2179 DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n"); 2180 return -EINVAL; 2181 } 2182 pipe_config->dsc.compressed_bpp = min_t(u16, 2183 dsc_max_output_bpp >> 4, 2184 pipe_config->pipe_bpp); 2185 pipe_config->dsc.slice_count = dsc_dp_slice_count; 2186 } 2187 /* 2188 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate 2189 * is greater than the maximum Cdclock and if slice count is even 2190 * then we need to use 2 VDSC instances. 2191 */ 2192 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) { 2193 if (pipe_config->dsc.slice_count > 1) { 2194 pipe_config->dsc.dsc_split = true; 2195 } else { 2196 DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n"); 2197 return -EINVAL; 2198 } 2199 } 2200 2201 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); 2202 if (ret < 0) { 2203 DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d " 2204 "Compressed BPP = %d\n", 2205 pipe_config->pipe_bpp, 2206 pipe_config->dsc.compressed_bpp); 2207 return ret; 2208 } 2209 2210 pipe_config->dsc.compression_enable = true; 2211 DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d " 2212 "Compressed Bpp = %d Slice Count = %d\n", 2213 pipe_config->pipe_bpp, 2214 pipe_config->dsc.compressed_bpp, 2215 pipe_config->dsc.slice_count); 2216 2217 return 0; 2218 } 2219 2220 int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state) 2221 { 2222 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) 2223 return 6 * 3; 2224 else 2225 return 8 * 3; 2226 } 2227 2228 static int 2229 intel_dp_compute_link_config(struct intel_encoder *encoder, 2230 struct intel_crtc_state *pipe_config, 2231 struct drm_connector_state *conn_state) 2232 { 2233 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2234 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2235 struct link_config_limits limits; 2236 int common_len; 2237 int ret; 2238 2239 common_len = intel_dp_common_len_rate_limit(intel_dp, 2240 intel_dp->max_link_rate); 2241 2242 /* No common link rates between source and sink */ 2243 WARN_ON(common_len <= 0); 2244 2245 limits.min_clock = 0; 2246 limits.max_clock = common_len - 1; 2247 2248 limits.min_lane_count = 1; 2249 limits.max_lane_count = intel_dp_max_lane_count(intel_dp); 2250 2251 limits.min_bpp = intel_dp_min_bpp(pipe_config); 2252 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); 2253 2254 if (intel_dp_is_edp(intel_dp)) { 2255 /* 2256 * Use the maximum clock and number of lanes the eDP panel 2257 * advertizes being capable of. 
The panels are generally 2258 * designed to support only a single clock and lane 2259 * configuration, and typically these values correspond to the 2260 * native resolution of the panel. 2261 */ 2262 limits.min_lane_count = limits.max_lane_count; 2263 limits.min_clock = limits.max_clock; 2264 } 2265 2266 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); 2267 2268 DRM_DEBUG_KMS("DP link computation with max lane count %i " 2269 "max rate %d max bpp %d pixel clock %iKHz\n", 2270 limits.max_lane_count, 2271 intel_dp->common_rates[limits.max_clock], 2272 limits.max_bpp, adjusted_mode->crtc_clock); 2273 2274 /* 2275 * Optimize for slow and wide. This is the place to add alternative 2276 * optimization policy. 2277 */ 2278 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); 2279 2280 /* enable compression if the mode doesn't fit available BW */ 2281 DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en); 2282 if (ret || intel_dp->force_dsc_en) { 2283 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, 2284 conn_state, &limits); 2285 if (ret < 0) 2286 return ret; 2287 } 2288 2289 if (pipe_config->dsc.compression_enable) { 2290 DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", 2291 pipe_config->lane_count, pipe_config->port_clock, 2292 pipe_config->pipe_bpp, 2293 pipe_config->dsc.compressed_bpp); 2294 2295 DRM_DEBUG_KMS("DP link rate required %i available %i\n", 2296 intel_dp_link_required(adjusted_mode->crtc_clock, 2297 pipe_config->dsc.compressed_bpp), 2298 intel_dp_max_data_rate(pipe_config->port_clock, 2299 pipe_config->lane_count)); 2300 } else { 2301 DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n", 2302 pipe_config->lane_count, pipe_config->port_clock, 2303 pipe_config->pipe_bpp); 2304 2305 DRM_DEBUG_KMS("DP link rate required %i available %i\n", 2306 intel_dp_link_required(adjusted_mode->crtc_clock, 2307 pipe_config->pipe_bpp), 2308 intel_dp_max_data_rate(pipe_config->port_clock, 2309 pipe_config->lane_count)); 2310 } 2311 return 0; 2312 } 2313 2314 static int 2315 intel_dp_ycbcr420_config(struct intel_dp *intel_dp, 2316 struct drm_connector *connector, 2317 struct intel_crtc_state *crtc_state) 2318 { 2319 const struct drm_display_info *info = &connector->display_info; 2320 const struct drm_display_mode *adjusted_mode = 2321 &crtc_state->hw.adjusted_mode; 2322 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2323 int ret; 2324 2325 if (!drm_mode_is_420_only(info, adjusted_mode) || 2326 !intel_dp_get_colorimetry_status(intel_dp) || 2327 !connector->ycbcr_420_allowed) 2328 return 0; 2329 2330 crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; 2331 2332 /* YCBCR 420 output conversion needs a scaler */ 2333 ret = skl_update_scaler_crtc(crtc_state); 2334 if (ret) { 2335 DRM_DEBUG_KMS("Scaler allocation for output failed\n"); 2336 return ret; 2337 } 2338 2339 intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN); 2340 2341 return 0; 2342 } 2343 2344 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, 2345 const struct drm_connector_state *conn_state) 2346 { 2347 const struct intel_digital_connector_state *intel_conn_state = 2348 const_container_of(conn_state, struct intel_digital_connector_state, base); 2349 const struct drm_display_mode *adjusted_mode = 2350 &crtc_state->hw.adjusted_mode; 2351 2352 /* 2353 * Our YCbCr output is always limited range. 
2354 * crtc_state->limited_color_range only applies to RGB, 2355 * and it must never be set for YCbCr or we risk setting 2356 * some conflicting bits in PIPECONF which will mess up 2357 * the colors on the monitor. 2358 */ 2359 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 2360 return false; 2361 2362 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 2363 /* 2364 * See: 2365 * CEA-861-E - 5.1 Default Encoding Parameters 2366 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 2367 */ 2368 return crtc_state->pipe_bpp != 18 && 2369 drm_default_rgb_quant_range(adjusted_mode) == 2370 HDMI_QUANTIZATION_RANGE_LIMITED; 2371 } else { 2372 return intel_conn_state->broadcast_rgb == 2373 INTEL_BROADCAST_RGB_LIMITED; 2374 } 2375 } 2376 2377 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, 2378 enum port port) 2379 { 2380 if (IS_G4X(dev_priv)) 2381 return false; 2382 if (INTEL_GEN(dev_priv) < 12 && port == PORT_A) 2383 return false; 2384 2385 return true; 2386 } 2387 2388 int 2389 intel_dp_compute_config(struct intel_encoder *encoder, 2390 struct intel_crtc_state *pipe_config, 2391 struct drm_connector_state *conn_state) 2392 { 2393 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2394 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2395 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2396 struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); 2397 enum port port = encoder->port; 2398 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); 2399 struct intel_connector *intel_connector = intel_dp->attached_connector; 2400 struct intel_digital_connector_state *intel_conn_state = 2401 to_intel_digital_connector_state(conn_state); 2402 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 2403 DP_DPCD_QUIRK_CONSTANT_N); 2404 int ret = 0, output_bpp; 2405 2406 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) 2407 pipe_config->has_pch_encoder = true; 2408 2409 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 2410 2411 if (lspcon->active) 2412 lspcon_ycbcr420_config(&intel_connector->base, pipe_config); 2413 else 2414 ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base, 2415 pipe_config); 2416 2417 if (ret) 2418 return ret; 2419 2420 pipe_config->has_drrs = false; 2421 if (!intel_dp_port_has_audio(dev_priv, port)) 2422 pipe_config->has_audio = false; 2423 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 2424 pipe_config->has_audio = intel_dp->has_audio; 2425 else 2426 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; 2427 2428 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 2429 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 2430 adjusted_mode); 2431 2432 if (INTEL_GEN(dev_priv) >= 9) { 2433 ret = skl_update_scaler_crtc(pipe_config); 2434 if (ret) 2435 return ret; 2436 } 2437 2438 if (HAS_GMCH(dev_priv)) 2439 intel_gmch_panel_fitting(intel_crtc, pipe_config, 2440 conn_state->scaling_mode); 2441 else 2442 intel_pch_panel_fitting(intel_crtc, pipe_config, 2443 conn_state->scaling_mode); 2444 } 2445 2446 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 2447 return -EINVAL; 2448 2449 if (HAS_GMCH(dev_priv) && 2450 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 2451 return -EINVAL; 2452 2453 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 2454 return -EINVAL; 2455 2456 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay)) 2457 return -EINVAL; 2458 2459 ret = 
intel_dp_compute_link_config(encoder, pipe_config, conn_state); 2460 if (ret < 0) 2461 return ret; 2462 2463 pipe_config->limited_color_range = 2464 intel_dp_limited_color_range(pipe_config, conn_state); 2465 2466 if (pipe_config->dsc.compression_enable) 2467 output_bpp = pipe_config->dsc.compressed_bpp; 2468 else 2469 output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp); 2470 2471 intel_link_compute_m_n(output_bpp, 2472 pipe_config->lane_count, 2473 adjusted_mode->crtc_clock, 2474 pipe_config->port_clock, 2475 &pipe_config->dp_m_n, 2476 constant_n, pipe_config->fec_enable); 2477 2478 if (intel_connector->panel.downclock_mode != NULL && 2479 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { 2480 pipe_config->has_drrs = true; 2481 intel_link_compute_m_n(output_bpp, 2482 pipe_config->lane_count, 2483 intel_connector->panel.downclock_mode->clock, 2484 pipe_config->port_clock, 2485 &pipe_config->dp_m2_n2, 2486 constant_n, pipe_config->fec_enable); 2487 } 2488 2489 if (!HAS_DDI(dev_priv)) 2490 intel_dp_set_clock(encoder, pipe_config); 2491 2492 intel_psr_compute_config(intel_dp, pipe_config); 2493 2494 return 0; 2495 } 2496 2497 void intel_dp_set_link_params(struct intel_dp *intel_dp, 2498 int link_rate, u8 lane_count, 2499 bool link_mst) 2500 { 2501 intel_dp->link_trained = false; 2502 intel_dp->link_rate = link_rate; 2503 intel_dp->lane_count = lane_count; 2504 intel_dp->link_mst = link_mst; 2505 } 2506 2507 static void intel_dp_prepare(struct intel_encoder *encoder, 2508 const struct intel_crtc_state *pipe_config) 2509 { 2510 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2511 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2512 enum port port = encoder->port; 2513 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 2514 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2515 2516 intel_dp_set_link_params(intel_dp, pipe_config->port_clock, 2517 pipe_config->lane_count, 2518 intel_crtc_has_type(pipe_config, 2519 INTEL_OUTPUT_DP_MST)); 2520 2521 intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port); 2522 intel_dp->regs.dp_tp_status = DP_TP_STATUS(port); 2523 2524 /* 2525 * There are four kinds of DP registers: 2526 * 2527 * IBX PCH 2528 * SNB CPU 2529 * IVB CPU 2530 * CPT PCH 2531 * 2532 * IBX PCH and CPU are the same for almost everything, 2533 * except that the CPU DP PLL is configured in this 2534 * register 2535 * 2536 * CPT PCH is quite different, having many bits moved 2537 * to the TRANS_DP_CTL register instead. That 2538 * configuration happens (oddly) in ilk_pch_enable 2539 */ 2540 2541 /* Preserve the BIOS-computed detected bit. This is 2542 * supposed to be read-only. 
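 * Only the DP_DETECTED bit is carried over from the current register
 * value below; the remaining fields of intel_dp->DP are rebuilt from
 * the new pipe config.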
2543 */ 2544 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; 2545 2546 /* Handle DP bits in common between all three register formats */ 2547 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 2548 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count); 2549 2550 /* Split out the IBX/CPU vs CPT settings */ 2551 2552 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 2553 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2554 intel_dp->DP |= DP_SYNC_HS_HIGH; 2555 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2556 intel_dp->DP |= DP_SYNC_VS_HIGH; 2557 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2558 2559 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2560 intel_dp->DP |= DP_ENHANCED_FRAMING; 2561 2562 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe); 2563 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 2564 u32 trans_dp; 2565 2566 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2567 2568 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe)); 2569 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2570 trans_dp |= TRANS_DP_ENH_FRAMING; 2571 else 2572 trans_dp &= ~TRANS_DP_ENH_FRAMING; 2573 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp); 2574 } else { 2575 if (IS_G4X(dev_priv) && pipe_config->limited_color_range) 2576 intel_dp->DP |= DP_COLOR_RANGE_16_235; 2577 2578 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2579 intel_dp->DP |= DP_SYNC_HS_HIGH; 2580 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2581 intel_dp->DP |= DP_SYNC_VS_HIGH; 2582 intel_dp->DP |= DP_LINK_TRAIN_OFF; 2583 2584 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2585 intel_dp->DP |= DP_ENHANCED_FRAMING; 2586 2587 if (IS_CHERRYVIEW(dev_priv)) 2588 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe); 2589 else 2590 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe); 2591 } 2592 } 2593 2594 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 2595 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 2596 2597 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0) 2598 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0) 2599 2600 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 2601 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 2602 2603 static void intel_pps_verify_state(struct intel_dp *intel_dp); 2604 2605 static void wait_panel_status(struct intel_dp *intel_dp, 2606 u32 mask, 2607 u32 value) 2608 { 2609 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2610 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2611 2612 lockdep_assert_held(&dev_priv->pps_mutex); 2613 2614 intel_pps_verify_state(intel_dp); 2615 2616 pp_stat_reg = _pp_stat_reg(intel_dp); 2617 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2618 2619 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", 2620 mask, value, 2621 I915_READ(pp_stat_reg), 2622 I915_READ(pp_ctrl_reg)); 2623 2624 if (intel_de_wait_for_register(dev_priv, pp_stat_reg, 2625 mask, value, 5000)) 2626 DRM_ERROR("Panel status timeout: status %08x control %08x\n", 2627 I915_READ(pp_stat_reg), 2628 I915_READ(pp_ctrl_reg)); 2629 2630 DRM_DEBUG_KMS("Wait complete\n"); 2631 } 2632 2633 static void wait_panel_on(struct intel_dp *intel_dp) 2634 { 2635 DRM_DEBUG_KMS("Wait for panel power on\n"); 2636 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 2637 } 2638 2639 static void wait_panel_off(struct intel_dp *intel_dp) 2640 { 2641 DRM_DEBUG_KMS("Wait for panel power off time\n"); 2642 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 2643 } 2644 2645 static void 
wait_panel_power_cycle(struct intel_dp *intel_dp) 2646 { 2647 ktime_t panel_power_on_time; 2648 s64 panel_power_off_duration; 2649 2650 DRM_DEBUG_KMS("Wait for panel power cycle\n"); 2651 2652 /* Take the difference of the current time and the panel power off time 2653 * and then make the panel wait for t11_t12 if needed. */ 2654 panel_power_on_time = ktime_get_boottime(); 2655 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time); 2656 2657 /* When we disable the VDD override bit last, we have to do the manual 2658 * wait. */ 2659 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay) 2660 wait_remaining_ms_from_jiffies(jiffies, 2661 intel_dp->panel_power_cycle_delay - panel_power_off_duration); 2662 2663 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 2664 } 2665 2666 static void wait_backlight_on(struct intel_dp *intel_dp) 2667 { 2668 wait_remaining_ms_from_jiffies(intel_dp->last_power_on, 2669 intel_dp->backlight_on_delay); 2670 } 2671 2672 static void edp_wait_backlight_off(struct intel_dp *intel_dp) 2673 { 2674 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off, 2675 intel_dp->backlight_off_delay); 2676 } 2677 2678 /* Read the current pp_control value, unlocking the register if it 2679 * is locked. 2680 */ 2681 2682 static u32 ilk_get_pp_control(struct intel_dp *intel_dp) 2683 { 2684 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2685 u32 control; 2686 2687 lockdep_assert_held(&dev_priv->pps_mutex); 2688 2689 control = I915_READ(_pp_ctrl_reg(intel_dp)); 2690 if (WARN_ON(!HAS_DDI(dev_priv) && 2691 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) { 2692 control &= ~PANEL_UNLOCK_MASK; 2693 control |= PANEL_UNLOCK_REGS; 2694 } 2695 return control; 2696 } 2697 2698 /* 2699 * Must be paired with edp_panel_vdd_off(). 2700 * Must hold pps_mutex around the whole on/off sequence. 2701 * Can be nested with intel_edp_panel_vdd_{on,off}() calls. 
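 * One caller that follows this pattern is intel_enable_dp() below, which
 * does edp_panel_vdd_on(), edp_panel_on() and then edp_panel_vdd_off(.., true)
 * inside a single with_pps_lock() section.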
2702 */ 2703 static bool edp_panel_vdd_on(struct intel_dp *intel_dp) 2704 { 2705 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2706 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2707 u32 pp; 2708 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2709 bool need_to_disable = !intel_dp->want_panel_vdd; 2710 2711 lockdep_assert_held(&dev_priv->pps_mutex); 2712 2713 if (!intel_dp_is_edp(intel_dp)) 2714 return false; 2715 2716 cancel_delayed_work(&intel_dp->panel_vdd_work); 2717 intel_dp->want_panel_vdd = true; 2718 2719 if (edp_have_panel_vdd(intel_dp)) 2720 return need_to_disable; 2721 2722 intel_display_power_get(dev_priv, 2723 intel_aux_power_domain(intel_dig_port)); 2724 2725 DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD on\n", 2726 intel_dig_port->base.base.base.id, 2727 intel_dig_port->base.base.name); 2728 2729 if (!edp_have_panel_power(intel_dp)) 2730 wait_panel_power_cycle(intel_dp); 2731 2732 pp = ilk_get_pp_control(intel_dp); 2733 pp |= EDP_FORCE_VDD; 2734 2735 pp_stat_reg = _pp_stat_reg(intel_dp); 2736 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2737 2738 I915_WRITE(pp_ctrl_reg, pp); 2739 POSTING_READ(pp_ctrl_reg); 2740 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2741 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); 2742 /* 2743 * If the panel wasn't on, delay before accessing aux channel 2744 */ 2745 if (!edp_have_panel_power(intel_dp)) { 2746 DRM_DEBUG_KMS("[ENCODER:%d:%s] panel power wasn't enabled\n", 2747 intel_dig_port->base.base.base.id, 2748 intel_dig_port->base.base.name); 2749 msleep(intel_dp->panel_power_up_delay); 2750 } 2751 2752 return need_to_disable; 2753 } 2754 2755 /* 2756 * Must be paired with intel_edp_panel_vdd_off() or 2757 * intel_edp_panel_off(). 2758 * Nested calls to these functions are not allowed since 2759 * we drop the lock. Caller must use some higher level 2760 * locking to prevent nested calls from other threads. 
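 * intel_disable_dp() below is one such caller: it takes VDD here before
 * switching off the backlight and panel, and the reference is released
 * again when intel_edp_panel_off() powers the panel down.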
2761 */ 2762 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) 2763 { 2764 intel_wakeref_t wakeref; 2765 bool vdd; 2766 2767 if (!intel_dp_is_edp(intel_dp)) 2768 return; 2769 2770 vdd = false; 2771 with_pps_lock(intel_dp, wakeref) 2772 vdd = edp_panel_vdd_on(intel_dp); 2773 I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n", 2774 dp_to_dig_port(intel_dp)->base.base.base.id, 2775 dp_to_dig_port(intel_dp)->base.base.name); 2776 } 2777 2778 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) 2779 { 2780 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2781 struct intel_digital_port *intel_dig_port = 2782 dp_to_dig_port(intel_dp); 2783 u32 pp; 2784 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2785 2786 lockdep_assert_held(&dev_priv->pps_mutex); 2787 2788 WARN_ON(intel_dp->want_panel_vdd); 2789 2790 if (!edp_have_panel_vdd(intel_dp)) 2791 return; 2792 2793 DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD off\n", 2794 intel_dig_port->base.base.base.id, 2795 intel_dig_port->base.base.name); 2796 2797 pp = ilk_get_pp_control(intel_dp); 2798 pp &= ~EDP_FORCE_VDD; 2799 2800 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2801 pp_stat_reg = _pp_stat_reg(intel_dp); 2802 2803 I915_WRITE(pp_ctrl_reg, pp); 2804 POSTING_READ(pp_ctrl_reg); 2805 2806 /* Make sure sequencer is idle before allowing subsequent activity */ 2807 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2808 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); 2809 2810 if ((pp & PANEL_POWER_ON) == 0) 2811 intel_dp->panel_power_off_time = ktime_get_boottime(); 2812 2813 intel_display_power_put_unchecked(dev_priv, 2814 intel_aux_power_domain(intel_dig_port)); 2815 } 2816 2817 static void edp_panel_vdd_work(struct work_struct *__work) 2818 { 2819 struct intel_dp *intel_dp = 2820 container_of(to_delayed_work(__work), 2821 struct intel_dp, panel_vdd_work); 2822 intel_wakeref_t wakeref; 2823 2824 with_pps_lock(intel_dp, wakeref) { 2825 if (!intel_dp->want_panel_vdd) 2826 edp_panel_vdd_off_sync(intel_dp); 2827 } 2828 } 2829 2830 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) 2831 { 2832 unsigned long delay; 2833 2834 /* 2835 * Queue the timer to fire a long time from now (relative to the power 2836 * down delay) to keep the panel power up across a sequence of 2837 * operations. 2838 */ 2839 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5); 2840 schedule_delayed_work(&intel_dp->panel_vdd_work, delay); 2841 } 2842 2843 /* 2844 * Must be paired with edp_panel_vdd_on(). 2845 * Must hold pps_mutex around the whole on/off sequence. 2846 * Can be nested with intel_edp_panel_vdd_{on,off}() calls. 
2847 */ 2848 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 2849 { 2850 struct drm_i915_private *dev_priv __lockdep_used = dp_to_i915(intel_dp); 2851 2852 lockdep_assert_held(&dev_priv->pps_mutex); 2853 2854 if (!intel_dp_is_edp(intel_dp)) 2855 return; 2856 2857 I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on", 2858 dp_to_dig_port(intel_dp)->base.base.base.id, 2859 dp_to_dig_port(intel_dp)->base.base.name); 2860 2861 intel_dp->want_panel_vdd = false; 2862 2863 if (sync) 2864 edp_panel_vdd_off_sync(intel_dp); 2865 else 2866 edp_panel_vdd_schedule_off(intel_dp); 2867 } 2868 2869 static void edp_panel_on(struct intel_dp *intel_dp) 2870 { 2871 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2872 u32 pp; 2873 i915_reg_t pp_ctrl_reg; 2874 2875 lockdep_assert_held(&dev_priv->pps_mutex); 2876 2877 if (!intel_dp_is_edp(intel_dp)) 2878 return; 2879 2880 DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power on\n", 2881 dp_to_dig_port(intel_dp)->base.base.base.id, 2882 dp_to_dig_port(intel_dp)->base.base.name); 2883 2884 if (WARN(edp_have_panel_power(intel_dp), 2885 "[ENCODER:%d:%s] panel power already on\n", 2886 dp_to_dig_port(intel_dp)->base.base.base.id, 2887 dp_to_dig_port(intel_dp)->base.base.name)) 2888 return; 2889 2890 wait_panel_power_cycle(intel_dp); 2891 2892 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2893 pp = ilk_get_pp_control(intel_dp); 2894 if (IS_GEN(dev_priv, 5)) { 2895 /* ILK workaround: disable reset around power sequence */ 2896 pp &= ~PANEL_POWER_RESET; 2897 I915_WRITE(pp_ctrl_reg, pp); 2898 POSTING_READ(pp_ctrl_reg); 2899 } 2900 2901 pp |= PANEL_POWER_ON; 2902 if (!IS_GEN(dev_priv, 5)) 2903 pp |= PANEL_POWER_RESET; 2904 2905 I915_WRITE(pp_ctrl_reg, pp); 2906 POSTING_READ(pp_ctrl_reg); 2907 2908 wait_panel_on(intel_dp); 2909 intel_dp->last_power_on = jiffies; 2910 2911 if (IS_GEN(dev_priv, 5)) { 2912 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 2913 I915_WRITE(pp_ctrl_reg, pp); 2914 POSTING_READ(pp_ctrl_reg); 2915 } 2916 } 2917 2918 void intel_edp_panel_on(struct intel_dp *intel_dp) 2919 { 2920 intel_wakeref_t wakeref; 2921 2922 if (!intel_dp_is_edp(intel_dp)) 2923 return; 2924 2925 with_pps_lock(intel_dp, wakeref) 2926 edp_panel_on(intel_dp); 2927 } 2928 2929 2930 static void edp_panel_off(struct intel_dp *intel_dp) 2931 { 2932 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2933 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2934 u32 pp; 2935 i915_reg_t pp_ctrl_reg; 2936 2937 lockdep_assert_held(&dev_priv->pps_mutex); 2938 2939 if (!intel_dp_is_edp(intel_dp)) 2940 return; 2941 2942 DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power off\n", 2943 dig_port->base.base.base.id, dig_port->base.base.name); 2944 2945 WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n", 2946 dig_port->base.base.base.id, dig_port->base.base.name); 2947 2948 pp = ilk_get_pp_control(intel_dp); 2949 /* We need to switch off panel power _and_ force vdd, for otherwise some 2950 * panels get very unhappy and cease to work. */ 2951 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | 2952 EDP_BLC_ENABLE); 2953 2954 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2955 2956 intel_dp->want_panel_vdd = false; 2957 2958 I915_WRITE(pp_ctrl_reg, pp); 2959 POSTING_READ(pp_ctrl_reg); 2960 2961 wait_panel_off(intel_dp); 2962 intel_dp->panel_power_off_time = ktime_get_boottime(); 2963 2964 /* We got a reference when we enabled the VDD. 
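 * (via intel_display_power_get() in edp_panel_vdd_on()), so drop it here
 * now that the panel is fully off.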
*/ 2965 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port)); 2966 } 2967 2968 void intel_edp_panel_off(struct intel_dp *intel_dp) 2969 { 2970 intel_wakeref_t wakeref; 2971 2972 if (!intel_dp_is_edp(intel_dp)) 2973 return; 2974 2975 with_pps_lock(intel_dp, wakeref) 2976 edp_panel_off(intel_dp); 2977 } 2978 2979 /* Enable backlight in the panel power control. */ 2980 static void _intel_edp_backlight_on(struct intel_dp *intel_dp) 2981 { 2982 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2983 intel_wakeref_t wakeref; 2984 2985 /* 2986 * If we enable the backlight right away following a panel power 2987 * on, we may see slight flicker as the panel syncs with the eDP 2988 * link. So delay a bit to make sure the image is solid before 2989 * allowing it to appear. 2990 */ 2991 wait_backlight_on(intel_dp); 2992 2993 with_pps_lock(intel_dp, wakeref) { 2994 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2995 u32 pp; 2996 2997 pp = ilk_get_pp_control(intel_dp); 2998 pp |= EDP_BLC_ENABLE; 2999 3000 I915_WRITE(pp_ctrl_reg, pp); 3001 POSTING_READ(pp_ctrl_reg); 3002 } 3003 } 3004 3005 /* Enable backlight PWM and backlight PP control. */ 3006 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, 3007 const struct drm_connector_state *conn_state) 3008 { 3009 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); 3010 3011 if (!intel_dp_is_edp(intel_dp)) 3012 return; 3013 3014 DRM_DEBUG_KMS("\n"); 3015 3016 intel_panel_enable_backlight(crtc_state, conn_state); 3017 _intel_edp_backlight_on(intel_dp); 3018 } 3019 3020 /* Disable backlight in the panel power control. */ 3021 static void _intel_edp_backlight_off(struct intel_dp *intel_dp) 3022 { 3023 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3024 intel_wakeref_t wakeref; 3025 3026 if (!intel_dp_is_edp(intel_dp)) 3027 return; 3028 3029 with_pps_lock(intel_dp, wakeref) { 3030 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3031 u32 pp; 3032 3033 pp = ilk_get_pp_control(intel_dp); 3034 pp &= ~EDP_BLC_ENABLE; 3035 3036 I915_WRITE(pp_ctrl_reg, pp); 3037 POSTING_READ(pp_ctrl_reg); 3038 } 3039 3040 intel_dp->last_backlight_off = jiffies; 3041 edp_wait_backlight_off(intel_dp); 3042 } 3043 3044 /* Disable backlight PP control and backlight PWM. */ 3045 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) 3046 { 3047 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); 3048 3049 if (!intel_dp_is_edp(intel_dp)) 3050 return; 3051 3052 DRM_DEBUG_KMS("\n"); 3053 3054 _intel_edp_backlight_off(intel_dp); 3055 intel_panel_disable_backlight(old_conn_state); 3056 } 3057 3058 /* 3059 * Hook for controlling the panel power control backlight through the bl_power 3060 * sysfs attribute. Take care to handle multiple calls. 3061 */ 3062 static void intel_edp_backlight_power(struct intel_connector *connector, 3063 bool enable) 3064 { 3065 struct intel_dp *intel_dp = intel_attached_dp(connector); 3066 intel_wakeref_t wakeref; 3067 bool is_enabled; 3068 3069 is_enabled = false; 3070 with_pps_lock(intel_dp, wakeref) 3071 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE; 3072 if (is_enabled == enable) 3073 return; 3074 3075 DRM_DEBUG_KMS("panel power control backlight %s\n", 3076 enable ? 
"enable" : "disable"); 3077 3078 if (enable) 3079 _intel_edp_backlight_on(intel_dp); 3080 else 3081 _intel_edp_backlight_off(intel_dp); 3082 } 3083 3084 static void assert_dp_port(struct intel_dp *intel_dp, bool state) 3085 { 3086 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3087 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 3088 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN; 3089 3090 I915_STATE_WARN(cur_state != state, 3091 "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", 3092 dig_port->base.base.base.id, dig_port->base.base.name, 3093 onoff(state), onoff(cur_state)); 3094 } 3095 #define assert_dp_port_disabled(d) assert_dp_port((d), false) 3096 3097 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) 3098 { 3099 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE; 3100 3101 I915_STATE_WARN(cur_state != state, 3102 "eDP PLL state assertion failure (expected %s, current %s)\n", 3103 onoff(state), onoff(cur_state)); 3104 } 3105 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) 3106 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) 3107 3108 static void ilk_edp_pll_on(struct intel_dp *intel_dp, 3109 const struct intel_crtc_state *pipe_config) 3110 { 3111 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3112 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3113 3114 assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); 3115 assert_dp_port_disabled(intel_dp); 3116 assert_edp_pll_disabled(dev_priv); 3117 3118 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n", 3119 pipe_config->port_clock); 3120 3121 intel_dp->DP &= ~DP_PLL_FREQ_MASK; 3122 3123 if (pipe_config->port_clock == 162000) 3124 intel_dp->DP |= DP_PLL_FREQ_162MHZ; 3125 else 3126 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 3127 3128 I915_WRITE(DP_A, intel_dp->DP); 3129 POSTING_READ(DP_A); 3130 udelay(500); 3131 3132 /* 3133 * [DevILK] Work around required when enabling DP PLL 3134 * while a pipe is enabled going to FDI: 3135 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI 3136 * 2. Program DP PLL enable 3137 */ 3138 if (IS_GEN(dev_priv, 5)) 3139 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe); 3140 3141 intel_dp->DP |= DP_PLL_ENABLE; 3142 3143 I915_WRITE(DP_A, intel_dp->DP); 3144 POSTING_READ(DP_A); 3145 udelay(200); 3146 } 3147 3148 static void ilk_edp_pll_off(struct intel_dp *intel_dp, 3149 const struct intel_crtc_state *old_crtc_state) 3150 { 3151 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 3152 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3153 3154 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); 3155 assert_dp_port_disabled(intel_dp); 3156 assert_edp_pll_enabled(dev_priv); 3157 3158 DRM_DEBUG_KMS("disabling eDP PLL\n"); 3159 3160 intel_dp->DP &= ~DP_PLL_ENABLE; 3161 3162 I915_WRITE(DP_A, intel_dp->DP); 3163 POSTING_READ(DP_A); 3164 udelay(200); 3165 } 3166 3167 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) 3168 { 3169 /* 3170 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus 3171 * be capable of signalling downstream hpd with a long pulse. 3172 * Whether or not that means D3 is safe to use is not clear, 3173 * but let's assume so until proven otherwise. 3174 * 3175 * FIXME should really check all downstream ports... 
3176 */ 3177 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && 3178 intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT && 3179 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; 3180 } 3181 3182 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, 3183 const struct intel_crtc_state *crtc_state, 3184 bool enable) 3185 { 3186 int ret; 3187 3188 if (!crtc_state->dsc.compression_enable) 3189 return; 3190 3191 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, 3192 enable ? DP_DECOMPRESSION_EN : 0); 3193 if (ret < 0) 3194 DRM_DEBUG_KMS("Failed to %s sink decompression state\n", 3195 enable ? "enable" : "disable"); 3196 } 3197 3198 /* If the sink supports it, try to set the power state appropriately */ 3199 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) 3200 { 3201 int ret, i; 3202 3203 /* Should have a valid DPCD by this point */ 3204 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 3205 return; 3206 3207 if (mode != DRM_MODE_DPMS_ON) { 3208 if (downstream_hpd_needs_d0(intel_dp)) 3209 return; 3210 3211 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 3212 DP_SET_POWER_D3); 3213 } else { 3214 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 3215 3216 /* 3217 * When turning on, we need to retry for 1ms to give the sink 3218 * time to wake up. 3219 */ 3220 for (i = 0; i < 3; i++) { 3221 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 3222 DP_SET_POWER_D0); 3223 if (ret == 1) 3224 break; 3225 msleep(1); 3226 } 3227 3228 if (ret == 1 && lspcon->active) 3229 lspcon_wait_pcon_mode(lspcon); 3230 } 3231 3232 if (ret != 1) 3233 DRM_DEBUG_KMS("failed to %s sink power state\n", 3234 mode == DRM_MODE_DPMS_ON ? "enable" : "disable"); 3235 } 3236 3237 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, 3238 enum port port, enum pipe *pipe) 3239 { 3240 enum pipe p; 3241 3242 for_each_pipe(dev_priv, p) { 3243 u32 val = I915_READ(TRANS_DP_CTL(p)); 3244 3245 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) { 3246 *pipe = p; 3247 return true; 3248 } 3249 } 3250 3251 DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port)); 3252 3253 /* must initialize pipe to something for the asserts */ 3254 *pipe = PIPE_A; 3255 3256 return false; 3257 } 3258 3259 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, 3260 i915_reg_t dp_reg, enum port port, 3261 enum pipe *pipe) 3262 { 3263 bool ret; 3264 u32 val; 3265 3266 val = I915_READ(dp_reg); 3267 3268 ret = val & DP_PORT_EN; 3269 3270 /* asserts want to know the pipe even if the port is disabled */ 3271 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 3272 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB; 3273 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) 3274 ret &= cpt_dp_port_selected(dev_priv, port, pipe); 3275 else if (IS_CHERRYVIEW(dev_priv)) 3276 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV; 3277 else 3278 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT; 3279 3280 return ret; 3281 } 3282 3283 static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 3284 enum pipe *pipe) 3285 { 3286 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3287 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3288 intel_wakeref_t wakeref; 3289 bool ret; 3290 3291 wakeref = intel_display_power_get_if_enabled(dev_priv, 3292 encoder->power_domain); 3293 if (!wakeref) 3294 return false; 3295 3296 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 3297 encoder->port, pipe); 3298 3299 intel_display_power_put(dev_priv, 
encoder->power_domain, wakeref); 3300 3301 return ret; 3302 } 3303 3304 static void intel_dp_get_config(struct intel_encoder *encoder, 3305 struct intel_crtc_state *pipe_config) 3306 { 3307 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3308 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3309 u32 tmp, flags = 0; 3310 enum port port = encoder->port; 3311 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3312 3313 if (encoder->type == INTEL_OUTPUT_EDP) 3314 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP); 3315 else 3316 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); 3317 3318 tmp = I915_READ(intel_dp->output_reg); 3319 3320 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; 3321 3322 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 3323 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe)); 3324 3325 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH) 3326 flags |= DRM_MODE_FLAG_PHSYNC; 3327 else 3328 flags |= DRM_MODE_FLAG_NHSYNC; 3329 3330 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH) 3331 flags |= DRM_MODE_FLAG_PVSYNC; 3332 else 3333 flags |= DRM_MODE_FLAG_NVSYNC; 3334 } else { 3335 if (tmp & DP_SYNC_HS_HIGH) 3336 flags |= DRM_MODE_FLAG_PHSYNC; 3337 else 3338 flags |= DRM_MODE_FLAG_NHSYNC; 3339 3340 if (tmp & DP_SYNC_VS_HIGH) 3341 flags |= DRM_MODE_FLAG_PVSYNC; 3342 else 3343 flags |= DRM_MODE_FLAG_NVSYNC; 3344 } 3345 3346 pipe_config->hw.adjusted_mode.flags |= flags; 3347 3348 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235) 3349 pipe_config->limited_color_range = true; 3350 3351 pipe_config->lane_count = 3352 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; 3353 3354 intel_dp_get_m_n(crtc, pipe_config); 3355 3356 if (port == PORT_A) { 3357 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) 3358 pipe_config->port_clock = 162000; 3359 else 3360 pipe_config->port_clock = 270000; 3361 } 3362 3363 pipe_config->hw.adjusted_mode.crtc_clock = 3364 intel_dotclock_calculate(pipe_config->port_clock, 3365 &pipe_config->dp_m_n); 3366 3367 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp && 3368 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { 3369 /* 3370 * This is a big fat ugly hack. 3371 * 3372 * Some machines in UEFI boot mode provide us a VBT that has 18 3373 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons 3374 * unknown we fail to light up. Yet the same BIOS boots up with 3375 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as 3376 * max, not what it tells us to use. 3377 * 3378 * Note: This will still be broken if the eDP panel is not lit 3379 * up by the BIOS, and thus we can't get the mode at module 3380 * load. 3381 */ 3382 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", 3383 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); 3384 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; 3385 } 3386 } 3387 3388 static void intel_disable_dp(struct intel_encoder *encoder, 3389 const struct intel_crtc_state *old_crtc_state, 3390 const struct drm_connector_state *old_conn_state) 3391 { 3392 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3393 3394 intel_dp->link_trained = false; 3395 3396 if (old_crtc_state->has_audio) 3397 intel_audio_codec_disable(encoder, 3398 old_crtc_state, old_conn_state); 3399 3400 /* Make sure the panel is off before trying to change the mode. But also 3401 * ensure that we have vdd while we switch off the panel. 
*/ 3402 intel_edp_panel_vdd_on(intel_dp); 3403 intel_edp_backlight_off(old_conn_state); 3404 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 3405 intel_edp_panel_off(intel_dp); 3406 } 3407 3408 static void g4x_disable_dp(struct intel_encoder *encoder, 3409 const struct intel_crtc_state *old_crtc_state, 3410 const struct drm_connector_state *old_conn_state) 3411 { 3412 intel_disable_dp(encoder, old_crtc_state, old_conn_state); 3413 } 3414 3415 static void vlv_disable_dp(struct intel_encoder *encoder, 3416 const struct intel_crtc_state *old_crtc_state, 3417 const struct drm_connector_state *old_conn_state) 3418 { 3419 intel_disable_dp(encoder, old_crtc_state, old_conn_state); 3420 } 3421 3422 static void g4x_post_disable_dp(struct intel_encoder *encoder, 3423 const struct intel_crtc_state *old_crtc_state, 3424 const struct drm_connector_state *old_conn_state) 3425 { 3426 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3427 enum port port = encoder->port; 3428 3429 /* 3430 * Bspec does not list a specific disable sequence for g4x DP. 3431 * Follow the ilk+ sequence (disable pipe before the port) for 3432 * g4x DP as it does not suffer from underruns like the normal 3433 * g4x modeset sequence (disable pipe after the port). 3434 */ 3435 intel_dp_link_down(encoder, old_crtc_state); 3436 3437 /* Only ilk+ has port A */ 3438 if (port == PORT_A) 3439 ilk_edp_pll_off(intel_dp, old_crtc_state); 3440 } 3441 3442 static void vlv_post_disable_dp(struct intel_encoder *encoder, 3443 const struct intel_crtc_state *old_crtc_state, 3444 const struct drm_connector_state *old_conn_state) 3445 { 3446 intel_dp_link_down(encoder, old_crtc_state); 3447 } 3448 3449 static void chv_post_disable_dp(struct intel_encoder *encoder, 3450 const struct intel_crtc_state *old_crtc_state, 3451 const struct drm_connector_state *old_conn_state) 3452 { 3453 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3454 3455 intel_dp_link_down(encoder, old_crtc_state); 3456 3457 vlv_dpio_get(dev_priv); 3458 3459 /* Assert data lane reset */ 3460 chv_data_lane_soft_reset(encoder, old_crtc_state, true); 3461 3462 vlv_dpio_put(dev_priv); 3463 } 3464 3465 static void 3466 _intel_dp_set_link_train(struct intel_dp *intel_dp, 3467 u32 *DP, 3468 u8 dp_train_pat) 3469 { 3470 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3471 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3472 enum port port = intel_dig_port->base.port; 3473 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); 3474 3475 if (dp_train_pat & train_pat_mask) 3476 DRM_DEBUG_KMS("Using DP training pattern TPS%d\n", 3477 dp_train_pat & train_pat_mask); 3478 3479 if (HAS_DDI(dev_priv)) { 3480 u32 temp = I915_READ(intel_dp->regs.dp_tp_ctl); 3481 3482 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) 3483 temp |= DP_TP_CTL_SCRAMBLE_DISABLE; 3484 else 3485 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; 3486 3487 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 3488 switch (dp_train_pat & train_pat_mask) { 3489 case DP_TRAINING_PATTERN_DISABLE: 3490 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; 3491 3492 break; 3493 case DP_TRAINING_PATTERN_1: 3494 temp |= DP_TP_CTL_LINK_TRAIN_PAT1; 3495 break; 3496 case DP_TRAINING_PATTERN_2: 3497 temp |= DP_TP_CTL_LINK_TRAIN_PAT2; 3498 break; 3499 case DP_TRAINING_PATTERN_3: 3500 temp |= DP_TP_CTL_LINK_TRAIN_PAT3; 3501 break; 3502 case DP_TRAINING_PATTERN_4: 3503 temp |= DP_TP_CTL_LINK_TRAIN_PAT4; 3504 break; 3505 } 3506 I915_WRITE(intel_dp->regs.dp_tp_ctl, temp); 3507 3508 } else if ((IS_IVYBRIDGE(dev_priv) && port == 
PORT_A) || 3509 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { 3510 *DP &= ~DP_LINK_TRAIN_MASK_CPT; 3511 3512 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3513 case DP_TRAINING_PATTERN_DISABLE: 3514 *DP |= DP_LINK_TRAIN_OFF_CPT; 3515 break; 3516 case DP_TRAINING_PATTERN_1: 3517 *DP |= DP_LINK_TRAIN_PAT_1_CPT; 3518 break; 3519 case DP_TRAINING_PATTERN_2: 3520 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3521 break; 3522 case DP_TRAINING_PATTERN_3: 3523 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n"); 3524 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3525 break; 3526 } 3527 3528 } else { 3529 *DP &= ~DP_LINK_TRAIN_MASK; 3530 3531 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3532 case DP_TRAINING_PATTERN_DISABLE: 3533 *DP |= DP_LINK_TRAIN_OFF; 3534 break; 3535 case DP_TRAINING_PATTERN_1: 3536 *DP |= DP_LINK_TRAIN_PAT_1; 3537 break; 3538 case DP_TRAINING_PATTERN_2: 3539 *DP |= DP_LINK_TRAIN_PAT_2; 3540 break; 3541 case DP_TRAINING_PATTERN_3: 3542 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n"); 3543 *DP |= DP_LINK_TRAIN_PAT_2; 3544 break; 3545 } 3546 } 3547 } 3548 3549 static void intel_dp_enable_port(struct intel_dp *intel_dp, 3550 const struct intel_crtc_state *old_crtc_state) 3551 { 3552 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3553 3554 /* enable with pattern 1 (as per spec) */ 3555 3556 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1); 3557 3558 /* 3559 * Magic for VLV/CHV. We _must_ first set up the register 3560 * without actually enabling the port, and then do another 3561 * write to enable the port. Otherwise link training will 3562 * fail when the power sequencer is freshly used for this port. 3563 */ 3564 intel_dp->DP |= DP_PORT_EN; 3565 if (old_crtc_state->has_audio) 3566 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 3567 3568 I915_WRITE(intel_dp->output_reg, intel_dp->DP); 3569 POSTING_READ(intel_dp->output_reg); 3570 } 3571 3572 static void intel_enable_dp(struct intel_encoder *encoder, 3573 const struct intel_crtc_state *pipe_config, 3574 const struct drm_connector_state *conn_state) 3575 { 3576 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3577 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3578 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3579 u32 dp_reg = I915_READ(intel_dp->output_reg); 3580 enum pipe pipe = crtc->pipe; 3581 intel_wakeref_t wakeref; 3582 3583 if (WARN_ON(dp_reg & DP_PORT_EN)) 3584 return; 3585 3586 with_pps_lock(intel_dp, wakeref) { 3587 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 3588 vlv_init_panel_power_sequencer(encoder, pipe_config); 3589 3590 intel_dp_enable_port(intel_dp, pipe_config); 3591 3592 edp_panel_vdd_on(intel_dp); 3593 edp_panel_on(intel_dp); 3594 edp_panel_vdd_off(intel_dp, true); 3595 } 3596 3597 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 3598 unsigned int lane_mask = 0x0; 3599 3600 if (IS_CHERRYVIEW(dev_priv)) 3601 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count); 3602 3603 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), 3604 lane_mask); 3605 } 3606 3607 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 3608 intel_dp_start_link_train(intel_dp); 3609 intel_dp_stop_link_train(intel_dp); 3610 3611 if (pipe_config->has_audio) { 3612 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", 3613 pipe_name(pipe)); 3614 intel_audio_codec_enable(encoder, pipe_config, conn_state); 3615 } 3616 } 3617 3618 static void g4x_enable_dp(struct intel_encoder *encoder, 3619 const struct intel_crtc_state *pipe_config, 3620 const struct 
drm_connector_state *conn_state) 3621 { 3622 intel_enable_dp(encoder, pipe_config, conn_state); 3623 intel_edp_backlight_on(pipe_config, conn_state); 3624 } 3625 3626 static void vlv_enable_dp(struct intel_encoder *encoder, 3627 const struct intel_crtc_state *pipe_config, 3628 const struct drm_connector_state *conn_state) 3629 { 3630 intel_edp_backlight_on(pipe_config, conn_state); 3631 } 3632 3633 static void g4x_pre_enable_dp(struct intel_encoder *encoder, 3634 const struct intel_crtc_state *pipe_config, 3635 const struct drm_connector_state *conn_state) 3636 { 3637 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3638 enum port port = encoder->port; 3639 3640 intel_dp_prepare(encoder, pipe_config); 3641 3642 /* Only ilk+ has port A */ 3643 if (port == PORT_A) 3644 ilk_edp_pll_on(intel_dp, pipe_config); 3645 } 3646 3647 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) 3648 { 3649 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3650 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); 3651 enum pipe pipe = intel_dp->pps_pipe; 3652 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); 3653 3654 WARN_ON(intel_dp->active_pipe != INVALID_PIPE); 3655 3656 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) 3657 return; 3658 3659 edp_panel_vdd_off_sync(intel_dp); 3660 3661 /* 3662 * VLV seems to get confused when multiple power sequencers 3663 * have the same port selected (even if only one has power/vdd 3664 * enabled). The failure manifests as vlv_wait_port_ready() failing. 3665 * CHV, on the other hand, doesn't seem to mind having the same port 3666 * selected in multiple power sequencers, but let's clear the 3667 * port select always when logically disconnecting a power sequencer 3668 * from a port. 3669 */ 3670 DRM_DEBUG_KMS("detaching pipe %c power sequencer from [ENCODER:%d:%s]\n", 3671 pipe_name(pipe), intel_dig_port->base.base.base.id, 3672 intel_dig_port->base.base.name); 3673 I915_WRITE(pp_on_reg, 0); 3674 POSTING_READ(pp_on_reg); 3675 3676 intel_dp->pps_pipe = INVALID_PIPE; 3677 } 3678 3679 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, 3680 enum pipe pipe) 3681 { 3682 struct intel_encoder *encoder; 3683 3684 lockdep_assert_held(&dev_priv->pps_mutex); 3685 3686 for_each_intel_dp(&dev_priv->drm, encoder) { 3687 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3688 3689 WARN(intel_dp->active_pipe == pipe, 3690 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n", 3691 pipe_name(pipe), encoder->base.base.id, 3692 encoder->base.name); 3693 3694 if (intel_dp->pps_pipe != pipe) 3695 continue; 3696 3697 DRM_DEBUG_KMS("stealing pipe %c power sequencer from [ENCODER:%d:%s]\n", 3698 pipe_name(pipe), encoder->base.base.id, 3699 encoder->base.name); 3700 3701 /* make sure vdd is off before we steal it */ 3702 vlv_detach_power_sequencer(intel_dp); 3703 } 3704 } 3705 3706 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, 3707 const struct intel_crtc_state *crtc_state) 3708 { 3709 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3710 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3711 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3712 3713 lockdep_assert_held(&dev_priv->pps_mutex); 3714 3715 WARN_ON(intel_dp->active_pipe != INVALID_PIPE); 3716 3717 if (intel_dp->pps_pipe != INVALID_PIPE && 3718 intel_dp->pps_pipe != crtc->pipe) { 3719 /* 3720 * If another power sequencer was being used on this 3721 * port previously, make sure to turn off vdd 
there while 3722 * we still have control of it. 3723 */ 3724 vlv_detach_power_sequencer(intel_dp); 3725 } 3726 3727 /* 3728 * We may be stealing the power 3729 * sequencer from another port. 3730 */ 3731 vlv_steal_power_sequencer(dev_priv, crtc->pipe); 3732 3733 intel_dp->active_pipe = crtc->pipe; 3734 3735 if (!intel_dp_is_edp(intel_dp)) 3736 return; 3737 3738 /* now it's all ours */ 3739 intel_dp->pps_pipe = crtc->pipe; 3740 3741 DRM_DEBUG_KMS("initializing pipe %c power sequencer for [ENCODER:%d:%s]\n", 3742 pipe_name(intel_dp->pps_pipe), encoder->base.base.id, 3743 encoder->base.name); 3744 3745 /* init power sequencer on this pipe and port */ 3746 intel_dp_init_panel_power_sequencer(intel_dp); 3747 intel_dp_init_panel_power_sequencer_registers(intel_dp, true); 3748 } 3749 3750 static void vlv_pre_enable_dp(struct intel_encoder *encoder, 3751 const struct intel_crtc_state *pipe_config, 3752 const struct drm_connector_state *conn_state) 3753 { 3754 vlv_phy_pre_encoder_enable(encoder, pipe_config); 3755 3756 intel_enable_dp(encoder, pipe_config, conn_state); 3757 } 3758 3759 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder, 3760 const struct intel_crtc_state *pipe_config, 3761 const struct drm_connector_state *conn_state) 3762 { 3763 intel_dp_prepare(encoder, pipe_config); 3764 3765 vlv_phy_pre_pll_enable(encoder, pipe_config); 3766 } 3767 3768 static void chv_pre_enable_dp(struct intel_encoder *encoder, 3769 const struct intel_crtc_state *pipe_config, 3770 const struct drm_connector_state *conn_state) 3771 { 3772 chv_phy_pre_encoder_enable(encoder, pipe_config); 3773 3774 intel_enable_dp(encoder, pipe_config, conn_state); 3775 3776 /* Second common lane will stay alive on its own now */ 3777 chv_phy_release_cl2_override(encoder); 3778 } 3779 3780 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder, 3781 const struct intel_crtc_state *pipe_config, 3782 const struct drm_connector_state *conn_state) 3783 { 3784 intel_dp_prepare(encoder, pipe_config); 3785 3786 chv_phy_pre_pll_enable(encoder, pipe_config); 3787 } 3788 3789 static void chv_dp_post_pll_disable(struct intel_encoder *encoder, 3790 const struct intel_crtc_state *old_crtc_state, 3791 const struct drm_connector_state *old_conn_state) 3792 { 3793 chv_phy_post_pll_disable(encoder, old_crtc_state); 3794 } 3795 3796 /* 3797 * Fetch AUX CH registers 0x202 - 0x207 which contain 3798 * link status information 3799 */ 3800 bool 3801 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]) 3802 { 3803 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status, 3804 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; 3805 } 3806 3807 /* These are source-specific values. 
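 * The two helpers below report the maximum voltage swing and pre-emphasis
 * levels the source (platform/port) side can drive, which the link-training
 * code uses as an upper bound on the sink's request.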
*/ 3808 u8 3809 intel_dp_voltage_max(struct intel_dp *intel_dp) 3810 { 3811 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3812 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3813 enum port port = encoder->port; 3814 3815 if (HAS_DDI(dev_priv)) 3816 return intel_ddi_dp_voltage_max(encoder); 3817 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 3818 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 3819 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 3820 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 3821 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) 3822 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 3823 else 3824 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 3825 } 3826 3827 u8 3828 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing) 3829 { 3830 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3831 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3832 enum port port = encoder->port; 3833 3834 if (HAS_DDI(dev_priv)) { 3835 return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing); 3836 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 3837 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 3838 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3839 return DP_TRAIN_PRE_EMPH_LEVEL_3; 3840 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3841 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3842 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3843 return DP_TRAIN_PRE_EMPH_LEVEL_1; 3844 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3845 default: 3846 return DP_TRAIN_PRE_EMPH_LEVEL_0; 3847 } 3848 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 3849 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 3850 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3851 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3852 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3853 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3854 return DP_TRAIN_PRE_EMPH_LEVEL_1; 3855 default: 3856 return DP_TRAIN_PRE_EMPH_LEVEL_0; 3857 } 3858 } else { 3859 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 3860 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3861 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3862 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3863 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3864 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3865 return DP_TRAIN_PRE_EMPH_LEVEL_1; 3866 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3867 default: 3868 return DP_TRAIN_PRE_EMPH_LEVEL_0; 3869 } 3870 } 3871 } 3872 3873 static u32 vlv_signal_levels(struct intel_dp *intel_dp) 3874 { 3875 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3876 unsigned long demph_reg_value, preemph_reg_value, 3877 uniqtranscale_reg_value; 3878 u8 train_set = intel_dp->train_set[0]; 3879 3880 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 3881 case DP_TRAIN_PRE_EMPH_LEVEL_0: 3882 preemph_reg_value = 0x0004000; 3883 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3884 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3885 demph_reg_value = 0x2B405555; 3886 uniqtranscale_reg_value = 0x552AB83A; 3887 break; 3888 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3889 demph_reg_value = 0x2B404040; 3890 uniqtranscale_reg_value = 0x5548B83A; 3891 break; 3892 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3893 demph_reg_value = 0x2B245555; 3894 uniqtranscale_reg_value = 0x5560B83A; 3895 break; 3896 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3897 demph_reg_value = 0x2B405555; 3898 uniqtranscale_reg_value = 0x5598DA3A; 3899 break; 3900 default: 3901 return 0; 3902 } 3903 break; 3904 case DP_TRAIN_PRE_EMPH_LEVEL_1: 3905 preemph_reg_value = 0x0002000; 3906 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3907 case 
DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3908 demph_reg_value = 0x2B404040; 3909 uniqtranscale_reg_value = 0x5552B83A; 3910 break; 3911 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3912 demph_reg_value = 0x2B404848; 3913 uniqtranscale_reg_value = 0x5580B83A; 3914 break; 3915 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3916 demph_reg_value = 0x2B404040; 3917 uniqtranscale_reg_value = 0x55ADDA3A; 3918 break; 3919 default: 3920 return 0; 3921 } 3922 break; 3923 case DP_TRAIN_PRE_EMPH_LEVEL_2: 3924 preemph_reg_value = 0x0000000; 3925 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3926 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3927 demph_reg_value = 0x2B305555; 3928 uniqtranscale_reg_value = 0x5570B83A; 3929 break; 3930 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3931 demph_reg_value = 0x2B2B4040; 3932 uniqtranscale_reg_value = 0x55ADDA3A; 3933 break; 3934 default: 3935 return 0; 3936 } 3937 break; 3938 case DP_TRAIN_PRE_EMPH_LEVEL_3: 3939 preemph_reg_value = 0x0006000; 3940 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3941 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3942 demph_reg_value = 0x1B405555; 3943 uniqtranscale_reg_value = 0x55ADDA3A; 3944 break; 3945 default: 3946 return 0; 3947 } 3948 break; 3949 default: 3950 return 0; 3951 } 3952 3953 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value, 3954 uniqtranscale_reg_value, 0); 3955 3956 return 0; 3957 } 3958 3959 static u32 chv_signal_levels(struct intel_dp *intel_dp) 3960 { 3961 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3962 u32 deemph_reg_value, margin_reg_value; 3963 bool uniq_trans_scale = false; 3964 u8 train_set = intel_dp->train_set[0]; 3965 3966 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 3967 case DP_TRAIN_PRE_EMPH_LEVEL_0: 3968 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3969 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3970 deemph_reg_value = 128; 3971 margin_reg_value = 52; 3972 break; 3973 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3974 deemph_reg_value = 128; 3975 margin_reg_value = 77; 3976 break; 3977 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3978 deemph_reg_value = 128; 3979 margin_reg_value = 102; 3980 break; 3981 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3982 deemph_reg_value = 128; 3983 margin_reg_value = 154; 3984 uniq_trans_scale = true; 3985 break; 3986 default: 3987 return 0; 3988 } 3989 break; 3990 case DP_TRAIN_PRE_EMPH_LEVEL_1: 3991 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3992 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3993 deemph_reg_value = 85; 3994 margin_reg_value = 78; 3995 break; 3996 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3997 deemph_reg_value = 85; 3998 margin_reg_value = 116; 3999 break; 4000 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4001 deemph_reg_value = 85; 4002 margin_reg_value = 154; 4003 break; 4004 default: 4005 return 0; 4006 } 4007 break; 4008 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4009 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4010 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4011 deemph_reg_value = 64; 4012 margin_reg_value = 104; 4013 break; 4014 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4015 deemph_reg_value = 64; 4016 margin_reg_value = 154; 4017 break; 4018 default: 4019 return 0; 4020 } 4021 break; 4022 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4023 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4024 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4025 deemph_reg_value = 43; 4026 margin_reg_value = 154; 4027 break; 4028 default: 4029 return 0; 4030 } 4031 break; 4032 default: 4033 return 0; 4034 } 4035 4036 chv_set_phy_signal_level(encoder, deemph_reg_value, 4037 margin_reg_value, uniq_trans_scale); 4038 4039 return 0; 
4040 } 4041 4042 static u32 4043 g4x_signal_levels(u8 train_set) 4044 { 4045 u32 signal_levels = 0; 4046 4047 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4048 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4049 default: 4050 signal_levels |= DP_VOLTAGE_0_4; 4051 break; 4052 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4053 signal_levels |= DP_VOLTAGE_0_6; 4054 break; 4055 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4056 signal_levels |= DP_VOLTAGE_0_8; 4057 break; 4058 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4059 signal_levels |= DP_VOLTAGE_1_2; 4060 break; 4061 } 4062 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4063 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4064 default: 4065 signal_levels |= DP_PRE_EMPHASIS_0; 4066 break; 4067 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4068 signal_levels |= DP_PRE_EMPHASIS_3_5; 4069 break; 4070 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4071 signal_levels |= DP_PRE_EMPHASIS_6; 4072 break; 4073 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4074 signal_levels |= DP_PRE_EMPHASIS_9_5; 4075 break; 4076 } 4077 return signal_levels; 4078 } 4079 4080 /* SNB CPU eDP voltage swing and pre-emphasis control */ 4081 static u32 4082 snb_cpu_edp_signal_levels(u8 train_set) 4083 { 4084 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4085 DP_TRAIN_PRE_EMPHASIS_MASK); 4086 switch (signal_levels) { 4087 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4088 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4089 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4090 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4091 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 4092 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4093 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4094 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 4095 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4096 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4097 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 4098 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4099 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4100 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 4101 default: 4102 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 4103 "0x%x\n", signal_levels); 4104 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4105 } 4106 } 4107 4108 /* IVB CPU eDP voltage swing and pre-emphasis control */ 4109 static u32 4110 ivb_cpu_edp_signal_levels(u8 train_set) 4111 { 4112 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4113 DP_TRAIN_PRE_EMPHASIS_MASK); 4114 switch (signal_levels) { 4115 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4116 return EDP_LINK_TRAIN_400MV_0DB_IVB; 4117 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4118 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 4119 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4120 return EDP_LINK_TRAIN_400MV_6DB_IVB; 4121 4122 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4123 return EDP_LINK_TRAIN_600MV_0DB_IVB; 4124 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4125 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 4126 4127 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4128 return EDP_LINK_TRAIN_800MV_0DB_IVB; 4129 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4130 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 4131 4132 default: 4133 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 4134 "0x%x\n", signal_levels); 4135 return 
EDP_LINK_TRAIN_500MV_0DB_IVB; 4136 } 4137 } 4138 4139 void 4140 intel_dp_set_signal_levels(struct intel_dp *intel_dp) 4141 { 4142 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4143 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4144 enum port port = intel_dig_port->base.port; 4145 u32 signal_levels, mask = 0; 4146 u8 train_set = intel_dp->train_set[0]; 4147 4148 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) { 4149 signal_levels = bxt_signal_levels(intel_dp); 4150 } else if (HAS_DDI(dev_priv)) { 4151 signal_levels = ddi_signal_levels(intel_dp); 4152 mask = DDI_BUF_EMP_MASK; 4153 } else if (IS_CHERRYVIEW(dev_priv)) { 4154 signal_levels = chv_signal_levels(intel_dp); 4155 } else if (IS_VALLEYVIEW(dev_priv)) { 4156 signal_levels = vlv_signal_levels(intel_dp); 4157 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 4158 signal_levels = ivb_cpu_edp_signal_levels(train_set); 4159 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; 4160 } else if (IS_GEN(dev_priv, 6) && port == PORT_A) { 4161 signal_levels = snb_cpu_edp_signal_levels(train_set); 4162 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; 4163 } else { 4164 signal_levels = g4x_signal_levels(train_set); 4165 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK; 4166 } 4167 4168 if (mask) 4169 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels); 4170 4171 DRM_DEBUG_KMS("Using vswing level %d\n", 4172 train_set & DP_TRAIN_VOLTAGE_SWING_MASK); 4173 DRM_DEBUG_KMS("Using pre-emphasis level %d\n", 4174 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> 4175 DP_TRAIN_PRE_EMPHASIS_SHIFT); 4176 4177 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels; 4178 4179 I915_WRITE(intel_dp->output_reg, intel_dp->DP); 4180 POSTING_READ(intel_dp->output_reg); 4181 } 4182 4183 void 4184 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, 4185 u8 dp_train_pat) 4186 { 4187 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4188 struct drm_i915_private *dev_priv = 4189 to_i915(intel_dig_port->base.base.dev); 4190 4191 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat); 4192 4193 I915_WRITE(intel_dp->output_reg, intel_dp->DP); 4194 POSTING_READ(intel_dp->output_reg); 4195 } 4196 4197 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) 4198 { 4199 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4200 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4201 enum port port = intel_dig_port->base.port; 4202 u32 val; 4203 4204 if (!HAS_DDI(dev_priv)) 4205 return; 4206 4207 val = I915_READ(intel_dp->regs.dp_tp_ctl); 4208 val &= ~DP_TP_CTL_LINK_TRAIN_MASK; 4209 val |= DP_TP_CTL_LINK_TRAIN_IDLE; 4210 I915_WRITE(intel_dp->regs.dp_tp_ctl, val); 4211 4212 /* 4213 * Until TGL on PORT_A we can have only eDP in SST mode. There the only 4214 * reason we need to set idle transmission mode is to work around a HW 4215 * issue where we enable the pipe while not in idle link-training mode. 4216 * In this case there is requirement to wait for a minimum number of 4217 * idle patterns to be sent. 
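 * (Hence the early return just below: on pre-gen12 PORT_A we do not poll
 * DP_TP_STATUS for the idle-done bit.)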
4218 */ 4219 if (port == PORT_A && INTEL_GEN(dev_priv) < 12) 4220 return; 4221 4222 if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, 4223 DP_TP_STATUS_IDLE_DONE, 1)) 4224 DRM_ERROR("Timed out waiting for DP idle patterns\n"); 4225 } 4226 4227 static void 4228 intel_dp_link_down(struct intel_encoder *encoder, 4229 const struct intel_crtc_state *old_crtc_state) 4230 { 4231 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4232 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4233 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 4234 enum port port = encoder->port; 4235 u32 DP = intel_dp->DP; 4236 4237 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) 4238 return; 4239 4240 DRM_DEBUG_KMS("\n"); 4241 4242 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 4243 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { 4244 DP &= ~DP_LINK_TRAIN_MASK_CPT; 4245 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; 4246 } else { 4247 DP &= ~DP_LINK_TRAIN_MASK; 4248 DP |= DP_LINK_TRAIN_PAT_IDLE; 4249 } 4250 I915_WRITE(intel_dp->output_reg, DP); 4251 POSTING_READ(intel_dp->output_reg); 4252 4253 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); 4254 I915_WRITE(intel_dp->output_reg, DP); 4255 POSTING_READ(intel_dp->output_reg); 4256 4257 /* 4258 * HW workaround for IBX, we need to move the port 4259 * to transcoder A after disabling it to allow the 4260 * matching HDMI port to be enabled on transcoder A. 4261 */ 4262 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) { 4263 /* 4264 * We get CPU/PCH FIFO underruns on the other pipe when 4265 * doing the workaround. Sweep them under the rug. 4266 */ 4267 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); 4268 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); 4269 4270 /* always enable with pattern 1 (as per spec) */ 4271 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK); 4272 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) | 4273 DP_LINK_TRAIN_PAT_1; 4274 I915_WRITE(intel_dp->output_reg, DP); 4275 POSTING_READ(intel_dp->output_reg); 4276 4277 DP &= ~DP_PORT_EN; 4278 I915_WRITE(intel_dp->output_reg, DP); 4279 POSTING_READ(intel_dp->output_reg); 4280 4281 intel_wait_for_vblank_if_active(dev_priv, PIPE_A); 4282 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); 4283 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 4284 } 4285 4286 msleep(intel_dp->panel_power_down_delay); 4287 4288 intel_dp->DP = DP; 4289 4290 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 4291 intel_wakeref_t wakeref; 4292 4293 with_pps_lock(intel_dp, wakeref) 4294 intel_dp->active_pipe = INVALID_PIPE; 4295 } 4296 } 4297 4298 static void 4299 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp) 4300 { 4301 u8 dpcd_ext[6]; 4302 4303 /* 4304 * Prior to DP1.3 the bit represented by 4305 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved. 4306 * if it is set DP_DPCD_REV at 0000h could be at a value less than 4307 * the true capability of the panel. The only way to check is to 4308 * then compare 0000h and 2200h. 
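 * (DP_DP13_DPCD_REV read below is that extended receiver capability field
 * at DPCD offset 2200h, defined from DP 1.3 onwards.)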
4309 */ 4310 if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] & 4311 DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)) 4312 return; 4313 4314 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV, 4315 &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) { 4316 DRM_ERROR("DPCD failed read at extended capabilities\n"); 4317 return; 4318 } 4319 4320 if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) { 4321 DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n"); 4322 return; 4323 } 4324 4325 if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext))) 4326 return; 4327 4328 DRM_DEBUG_KMS("Base DPCD: %*ph\n", 4329 (int)sizeof(intel_dp->dpcd), intel_dp->dpcd); 4330 4331 memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)); 4332 } 4333 4334 bool 4335 intel_dp_read_dpcd(struct intel_dp *intel_dp) 4336 { 4337 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, 4338 sizeof(intel_dp->dpcd)) < 0) 4339 return false; /* aux transfer failed */ 4340 4341 intel_dp_extended_receiver_capabilities(intel_dp); 4342 4343 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd); 4344 4345 return intel_dp->dpcd[DP_DPCD_REV] != 0; 4346 } 4347 4348 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 4349 { 4350 u8 dprx = 0; 4351 4352 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 4353 &dprx) != 1) 4354 return false; 4355 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 4356 } 4357 4358 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) 4359 { 4360 /* 4361 * Clear the cached register set to avoid using stale values 4362 * for the sinks that do not support DSC. 4363 */ 4364 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 4365 4366 /* Clear fec_capable to avoid using stale values */ 4367 intel_dp->fec_capable = 0; 4368 4369 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ 4370 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || 4371 intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4372 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, 4373 intel_dp->dsc_dpcd, 4374 sizeof(intel_dp->dsc_dpcd)) < 0) 4375 DRM_ERROR("Failed to read DPCD register 0x%x\n", 4376 DP_DSC_SUPPORT); 4377 4378 DRM_DEBUG_KMS("DSC DPCD: %*ph\n", 4379 (int)sizeof(intel_dp->dsc_dpcd), 4380 intel_dp->dsc_dpcd); 4381 4382 /* FEC is supported only on DP 1.4 */ 4383 if (!intel_dp_is_edp(intel_dp) && 4384 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, 4385 &intel_dp->fec_capable) < 0) 4386 DRM_ERROR("Failed to read FEC DPCD register\n"); 4387 4388 DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable); 4389 } 4390 } 4391 4392 static bool 4393 intel_edp_init_dpcd(struct intel_dp *intel_dp) 4394 { 4395 struct drm_i915_private *dev_priv = 4396 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 4397 4398 /* this function is meant to be called only once */ 4399 WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0); 4400 4401 if (!intel_dp_read_dpcd(intel_dp)) 4402 return false; 4403 4404 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4405 drm_dp_is_branch(intel_dp->dpcd)); 4406 4407 /* 4408 * Read the eDP display control registers. 4409 * 4410 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 4411 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 4412 * set, but require eDP 1.4+ detection (e.g. for supported link rates 4413 * method). The display control registers should read zero if they're 4414 * not supported anyway. 
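 * (DP_EDP_DPCD_REV below is the first of the eDP display control
 * registers, at DPCD offset 700h.)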
4415 */ 4416 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 4417 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == 4418 sizeof(intel_dp->edp_dpcd)) 4419 DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd), 4420 intel_dp->edp_dpcd); 4421 4422 /* 4423 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks 4424 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] 4425 */ 4426 intel_psr_init_dpcd(intel_dp); 4427 4428 /* Read the eDP 1.4+ supported link rates. */ 4429 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4430 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 4431 int i; 4432 4433 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, 4434 sink_rates, sizeof(sink_rates)); 4435 4436 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { 4437 int val = le16_to_cpu(sink_rates[i]); 4438 4439 if (val == 0) 4440 break; 4441 4442 /* Value read multiplied by 200kHz gives the per-lane 4443 * link rate in kHz. The source rates are, however, 4444 * stored in terms of LS_Clk kHz. The full conversion 4445 * back to symbols is 4446 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) 4447 */ 4448 intel_dp->sink_rates[i] = (val * 200) / 10; 4449 } 4450 intel_dp->num_sink_rates = i; 4451 } 4452 4453 /* 4454 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, 4455 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. 4456 */ 4457 if (intel_dp->num_sink_rates) 4458 intel_dp->use_rate_select = true; 4459 else 4460 intel_dp_set_sink_rates(intel_dp); 4461 4462 intel_dp_set_common_rates(intel_dp); 4463 4464 /* Read the eDP DSC DPCD registers */ 4465 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 4466 intel_dp_get_dsc_sink_cap(intel_dp); 4467 4468 return true; 4469 } 4470 4471 4472 static bool 4473 intel_dp_get_dpcd(struct intel_dp *intel_dp) 4474 { 4475 if (!intel_dp_read_dpcd(intel_dp)) 4476 return false; 4477 4478 /* 4479 * Don't clobber cached eDP rates. Also skip re-reading 4480 * the OUI/ID since we know it won't change. 4481 */ 4482 if (!intel_dp_is_edp(intel_dp)) { 4483 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4484 drm_dp_is_branch(intel_dp->dpcd)); 4485 4486 intel_dp_set_sink_rates(intel_dp); 4487 intel_dp_set_common_rates(intel_dp); 4488 } 4489 4490 /* 4491 * Some eDP panels do not set a valid value for sink count, that is why 4492 * it don't care about read it here and in intel_edp_init_dpcd(). 4493 */ 4494 if (!intel_dp_is_edp(intel_dp) && 4495 !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) { 4496 u8 count; 4497 ssize_t r; 4498 4499 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count); 4500 if (r < 1) 4501 return false; 4502 4503 /* 4504 * Sink count can change between short pulse hpd hence 4505 * a member variable in intel_dp will track any changes 4506 * between short pulse interrupts. 4507 */ 4508 intel_dp->sink_count = DP_GET_SINK_COUNT(count); 4509 4510 /* 4511 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that 4512 * a dongle is present but no display. Unless we require to know 4513 * if a dongle is present or not, we don't need to update 4514 * downstream port information. So, an early return here saves 4515 * time from performing other operations which are not required. 
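 * (SINK_COUNT was read above from DPCD 200h; DP_GET_SINK_COUNT() strips
 * the CP_READY bit so only the actual device count is kept.)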
4516 */ 4517 if (!intel_dp->sink_count) 4518 return false; 4519 } 4520 4521 if (!drm_dp_is_branch(intel_dp->dpcd)) 4522 return true; /* native DP sink */ 4523 4524 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) 4525 return true; /* no per-port downstream info */ 4526 4527 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0, 4528 intel_dp->downstream_ports, 4529 DP_MAX_DOWNSTREAM_PORTS) < 0) 4530 return false; /* downstream port status fetch failed */ 4531 4532 return true; 4533 } 4534 4535 static bool 4536 intel_dp_sink_can_mst(struct intel_dp *intel_dp) 4537 { 4538 u8 mstm_cap; 4539 4540 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) 4541 return false; 4542 4543 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1) 4544 return false; 4545 4546 return mstm_cap & DP_MST_CAP; 4547 } 4548 4549 static bool 4550 intel_dp_can_mst(struct intel_dp *intel_dp) 4551 { 4552 return i915_modparams.enable_dp_mst && 4553 intel_dp->can_mst && 4554 intel_dp_sink_can_mst(intel_dp); 4555 } 4556 4557 static void 4558 intel_dp_configure_mst(struct intel_dp *intel_dp) 4559 { 4560 struct intel_encoder *encoder = 4561 &dp_to_dig_port(intel_dp)->base; 4562 bool sink_can_mst = intel_dp_sink_can_mst(intel_dp); 4563 4564 DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", 4565 encoder->base.base.id, encoder->base.name, 4566 yesno(intel_dp->can_mst), yesno(sink_can_mst), 4567 yesno(i915_modparams.enable_dp_mst)); 4568 4569 if (!intel_dp->can_mst) 4570 return; 4571 4572 intel_dp->is_mst = sink_can_mst && 4573 i915_modparams.enable_dp_mst; 4574 4575 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 4576 intel_dp->is_mst); 4577 } 4578 4579 static bool 4580 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) 4581 { 4582 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, 4583 sink_irq_vector, DP_DPRX_ESI_LEN) == 4584 DP_DPRX_ESI_LEN; 4585 } 4586 4587 bool 4588 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, 4589 const struct drm_connector_state *conn_state) 4590 { 4591 /* 4592 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication 4593 * of Color Encoding Format and Content Color Gamut], in order to 4594 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP. 4595 */ 4596 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 4597 return true; 4598 4599 switch (conn_state->colorspace) { 4600 case DRM_MODE_COLORIMETRY_SYCC_601: 4601 case DRM_MODE_COLORIMETRY_OPYCC_601: 4602 case DRM_MODE_COLORIMETRY_BT2020_YCC: 4603 case DRM_MODE_COLORIMETRY_BT2020_RGB: 4604 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 4605 return true; 4606 default: 4607 break; 4608 } 4609 4610 return false; 4611 } 4612 4613 static void 4614 intel_dp_setup_vsc_sdp(struct intel_dp *intel_dp, 4615 const struct intel_crtc_state *crtc_state, 4616 const struct drm_connector_state *conn_state) 4617 { 4618 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4619 struct dp_sdp vsc_sdp = {}; 4620 4621 /* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */ 4622 vsc_sdp.sdp_header.HB0 = 0; 4623 vsc_sdp.sdp_header.HB1 = 0x7; 4624 4625 /* 4626 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ 4627 * Colorimetry Format indication. 4628 */ 4629 vsc_sdp.sdp_header.HB2 = 0x5; 4630 4631 /* 4632 * VSC SDP supporting 3D stereo, + PSR2, + Pixel Encoding/ 4633 * Colorimetry Format indication (HB2 = 05h). 
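 * (HB3 = 13h set below is the number of valid data bytes for this SDP
 * revision, per the same DP 1.4a Table 2-119.)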
4634 */ 4635 vsc_sdp.sdp_header.HB3 = 0x13; 4636 4637 /* DP 1.4a spec, Table 2-120 */ 4638 switch (crtc_state->output_format) { 4639 case INTEL_OUTPUT_FORMAT_YCBCR444: 4640 vsc_sdp.db[16] = 0x1 << 4; /* YCbCr 444 : DB16[7:4] = 1h */ 4641 break; 4642 case INTEL_OUTPUT_FORMAT_YCBCR420: 4643 vsc_sdp.db[16] = 0x3 << 4; /* YCbCr 420 : DB16[7:4] = 3h */ 4644 break; 4645 case INTEL_OUTPUT_FORMAT_RGB: 4646 default: 4647 /* RGB: DB16[7:4] = 0h */ 4648 break; 4649 } 4650 4651 switch (conn_state->colorspace) { 4652 case DRM_MODE_COLORIMETRY_BT709_YCC: 4653 vsc_sdp.db[16] |= 0x1; 4654 break; 4655 case DRM_MODE_COLORIMETRY_XVYCC_601: 4656 vsc_sdp.db[16] |= 0x2; 4657 break; 4658 case DRM_MODE_COLORIMETRY_XVYCC_709: 4659 vsc_sdp.db[16] |= 0x3; 4660 break; 4661 case DRM_MODE_COLORIMETRY_SYCC_601: 4662 vsc_sdp.db[16] |= 0x4; 4663 break; 4664 case DRM_MODE_COLORIMETRY_OPYCC_601: 4665 vsc_sdp.db[16] |= 0x5; 4666 break; 4667 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 4668 case DRM_MODE_COLORIMETRY_BT2020_RGB: 4669 vsc_sdp.db[16] |= 0x6; 4670 break; 4671 case DRM_MODE_COLORIMETRY_BT2020_YCC: 4672 vsc_sdp.db[16] |= 0x7; 4673 break; 4674 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: 4675 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: 4676 vsc_sdp.db[16] |= 0x4; /* DCI-P3 (SMPTE RP 431-2) */ 4677 break; 4678 default: 4679 /* sRGB (IEC 61966-2-1) / ITU-R BT.601: DB16[0:3] = 0h */ 4680 4681 /* RGB->YCBCR color conversion uses the BT.709 color space. */ 4682 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 4683 vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */ 4684 break; 4685 } 4686 4687 /* 4688 * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only, 4689 * the following Component Bit Depth values are defined: 4690 * 001b = 8bpc. 4691 * 010b = 10bpc. 4692 * 011b = 12bpc. 4693 * 100b = 16bpc. 4694 */ 4695 switch (crtc_state->pipe_bpp) { 4696 case 24: /* 8bpc */ 4697 vsc_sdp.db[17] = 0x1; 4698 break; 4699 case 30: /* 10bpc */ 4700 vsc_sdp.db[17] = 0x2; 4701 break; 4702 case 36: /* 12bpc */ 4703 vsc_sdp.db[17] = 0x3; 4704 break; 4705 case 48: /* 16bpc */ 4706 vsc_sdp.db[17] = 0x4; 4707 break; 4708 default: 4709 MISSING_CASE(crtc_state->pipe_bpp); 4710 break; 4711 } 4712 4713 /* 4714 * Dynamic Range (Bit 7) 4715 * 0 = VESA range, 1 = CTA range. 4716 * all YCbCr are always limited range 4717 */ 4718 vsc_sdp.db[17] |= 0x80; 4719 4720 /* 4721 * Content Type (Bits 2:0) 4722 * 000b = Not defined. 4723 * 001b = Graphics. 4724 * 010b = Photo. 4725 * 011b = Video. 4726 * 100b = Game 4727 * All other values are RESERVED. 4728 * Note: See CTA-861-G for the definition and expected 4729 * processing by a stream sink for the above contect types. 
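 * (DB18 is left at zero below, i.e. content type "Not defined".)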
4730 */ 4731 vsc_sdp.db[18] = 0; 4732 4733 intel_dig_port->write_infoframe(&intel_dig_port->base, 4734 crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp)); 4735 } 4736 4737 static void 4738 intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, 4739 const struct intel_crtc_state *crtc_state, 4740 const struct drm_connector_state *conn_state) 4741 { 4742 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4743 struct dp_sdp infoframe_sdp = {}; 4744 struct hdmi_drm_infoframe drm_infoframe = {}; 4745 const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE; 4746 unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE]; 4747 ssize_t len; 4748 int ret; 4749 4750 ret = drm_hdmi_infoframe_set_hdr_metadata(&drm_infoframe, conn_state); 4751 if (ret) { 4752 DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n"); 4753 return; 4754 } 4755 4756 len = hdmi_drm_infoframe_pack_only(&drm_infoframe, buf, sizeof(buf)); 4757 if (len < 0) { 4758 DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n"); 4759 return; 4760 } 4761 4762 if (len != infoframe_size) { 4763 DRM_DEBUG_KMS("wrong static hdr metadata size\n"); 4764 return; 4765 } 4766 4767 /* 4768 * Set up the infoframe sdp packet for HDR static metadata. 4769 * Prepare VSC Header for SU as per DP 1.4a spec, 4770 * Table 2-100 and Table 2-101 4771 */ 4772 4773 /* Packet ID, 00h for non-Audio INFOFRAME */ 4774 infoframe_sdp.sdp_header.HB0 = 0; 4775 /* 4776 * Packet Type 80h + Non-audio INFOFRAME Type value 4777 * HDMI_INFOFRAME_TYPE_DRM: 0x87, 4778 */ 4779 infoframe_sdp.sdp_header.HB1 = drm_infoframe.header.type; 4780 /* 4781 * Least Significant Eight Bits of (Data Byte Count – 1) 4782 * infoframe_size - 1, 4783 */ 4784 infoframe_sdp.sdp_header.HB2 = 0x1D; 4785 /* INFOFRAME SDP Version Number */ 4786 infoframe_sdp.sdp_header.HB3 = (0x13 << 2); 4787 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 4788 infoframe_sdp.db[0] = drm_infoframe.header.version; 4789 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 4790 infoframe_sdp.db[1] = drm_infoframe.header.length; 4791 /* 4792 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after 4793 * HDMI_INFOFRAME_HEADER_SIZE 4794 */ 4795 BUILD_BUG_ON(sizeof(infoframe_sdp.db) < HDMI_DRM_INFOFRAME_SIZE + 2); 4796 memcpy(&infoframe_sdp.db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE], 4797 HDMI_DRM_INFOFRAME_SIZE); 4798 4799 /* 4800 * Size of DP infoframe sdp packet for HDR static metadata is consist of 4801 * - DP SDP Header(struct dp_sdp_header): 4 bytes 4802 * - Two Data Blocks: 2 bytes 4803 * CTA Header Byte2 (INFOFRAME Version Number) 4804 * CTA Header Byte3 (Length of INFOFRAME) 4805 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes 4806 * 4807 * Prior to GEN11's GMP register size is identical to DP HDR static metadata 4808 * infoframe size. But GEN11+ has larger than that size, write_infoframe 4809 * will pad rest of the size. 
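 * (With the sizes listed above, the length passed to write_infoframe()
 * below works out to 4 + 2 + 26 = 32 bytes.)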
4810 */ 4811 intel_dig_port->write_infoframe(&intel_dig_port->base, crtc_state, 4812 HDMI_PACKET_TYPE_GAMUT_METADATA, 4813 &infoframe_sdp, 4814 sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE); 4815 } 4816 4817 void intel_dp_vsc_enable(struct intel_dp *intel_dp, 4818 const struct intel_crtc_state *crtc_state, 4819 const struct drm_connector_state *conn_state) 4820 { 4821 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) 4822 return; 4823 4824 intel_dp_setup_vsc_sdp(intel_dp, crtc_state, conn_state); 4825 } 4826 4827 void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp, 4828 const struct intel_crtc_state *crtc_state, 4829 const struct drm_connector_state *conn_state) 4830 { 4831 if (!conn_state->hdr_output_metadata) 4832 return; 4833 4834 intel_dp_setup_hdr_metadata_infoframe_sdp(intel_dp, 4835 crtc_state, 4836 conn_state); 4837 } 4838 4839 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) 4840 { 4841 int status = 0; 4842 int test_link_rate; 4843 u8 test_lane_count, test_link_bw; 4844 /* (DP CTS 1.2) 4845 * 4.3.1.11 4846 */ 4847 /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */ 4848 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, 4849 &test_lane_count); 4850 4851 if (status <= 0) { 4852 DRM_DEBUG_KMS("Lane count read failed\n"); 4853 return DP_TEST_NAK; 4854 } 4855 test_lane_count &= DP_MAX_LANE_COUNT_MASK; 4856 4857 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, 4858 &test_link_bw); 4859 if (status <= 0) { 4860 DRM_DEBUG_KMS("Link Rate read failed\n"); 4861 return DP_TEST_NAK; 4862 } 4863 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); 4864 4865 /* Validate the requested link rate and lane count */ 4866 if (!intel_dp_link_params_valid(intel_dp, test_link_rate, 4867 test_lane_count)) 4868 return DP_TEST_NAK; 4869 4870 intel_dp->compliance.test_lane_count = test_lane_count; 4871 intel_dp->compliance.test_link_rate = test_link_rate; 4872 4873 return DP_TEST_ACK; 4874 } 4875 4876 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) 4877 { 4878 u8 test_pattern; 4879 u8 test_misc; 4880 __be16 h_width, v_height; 4881 int status = 0; 4882 4883 /* Read the TEST_PATTERN (DP CTS 3.1.5) */ 4884 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, 4885 &test_pattern); 4886 if (status <= 0) { 4887 DRM_DEBUG_KMS("Test pattern read failed\n"); 4888 return DP_TEST_NAK; 4889 } 4890 if (test_pattern != DP_COLOR_RAMP) 4891 return DP_TEST_NAK; 4892 4893 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, 4894 &h_width, 2); 4895 if (status <= 0) { 4896 DRM_DEBUG_KMS("H Width read failed\n"); 4897 return DP_TEST_NAK; 4898 } 4899 4900 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, 4901 &v_height, 2); 4902 if (status <= 0) { 4903 DRM_DEBUG_KMS("V Height read failed\n"); 4904 return DP_TEST_NAK; 4905 } 4906 4907 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, 4908 &test_misc); 4909 if (status <= 0) { 4910 DRM_DEBUG_KMS("TEST MISC read failed\n"); 4911 return DP_TEST_NAK; 4912 } 4913 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) 4914 return DP_TEST_NAK; 4915 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) 4916 return DP_TEST_NAK; 4917 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { 4918 case DP_TEST_BIT_DEPTH_6: 4919 intel_dp->compliance.test_data.bpc = 6; 4920 break; 4921 case DP_TEST_BIT_DEPTH_8: 4922 intel_dp->compliance.test_data.bpc = 8; 4923 break; 4924 default: 4925 return DP_TEST_NAK; 4926 } 4927 4928 
intel_dp->compliance.test_data.video_pattern = test_pattern; 4929 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 4930 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 4931 /* Set test active flag here so userspace doesn't interrupt things */ 4932 intel_dp->compliance.test_active = true; 4933 4934 return DP_TEST_ACK; 4935 } 4936 4937 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) 4938 { 4939 u8 test_result = DP_TEST_ACK; 4940 struct intel_connector *intel_connector = intel_dp->attached_connector; 4941 struct drm_connector *connector = &intel_connector->base; 4942 4943 if (intel_connector->detect_edid == NULL || 4944 connector->edid_corrupt || 4945 intel_dp->aux.i2c_defer_count > 6) { 4946 /* Check EDID read for NACKs, DEFERs and corruption 4947 * (DP CTS 1.2 Core r1.1) 4948 * 4.2.2.4 : Failed EDID read, I2C_NAK 4949 * 4.2.2.5 : Failed EDID read, I2C_DEFER 4950 * 4.2.2.6 : EDID corruption detected 4951 * Use failsafe mode for all cases 4952 */ 4953 if (intel_dp->aux.i2c_nack_count > 0 || 4954 intel_dp->aux.i2c_defer_count > 0) 4955 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n", 4956 intel_dp->aux.i2c_nack_count, 4957 intel_dp->aux.i2c_defer_count); 4958 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 4959 } else { 4960 struct edid *block = intel_connector->detect_edid; 4961 4962 /* We have to write the checksum 4963 * of the last block read 4964 */ 4965 block += intel_connector->detect_edid->extensions; 4966 4967 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 4968 block->checksum) <= 0) 4969 DRM_DEBUG_KMS("Failed to write EDID checksum\n"); 4970 4971 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 4972 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 4973 } 4974 4975 /* Set test active flag here so userspace doesn't interrupt things */ 4976 intel_dp->compliance.test_active = true; 4977 4978 return test_result; 4979 } 4980 4981 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 4982 { 4983 u8 test_result = DP_TEST_NAK; 4984 return test_result; 4985 } 4986 4987 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 4988 { 4989 u8 response = DP_TEST_NAK; 4990 u8 request = 0; 4991 int status; 4992 4993 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 4994 if (status <= 0) { 4995 DRM_DEBUG_KMS("Could not read test request from sink\n"); 4996 goto update_status; 4997 } 4998 4999 switch (request) { 5000 case DP_TEST_LINK_TRAINING: 5001 DRM_DEBUG_KMS("LINK_TRAINING test requested\n"); 5002 response = intel_dp_autotest_link_training(intel_dp); 5003 break; 5004 case DP_TEST_LINK_VIDEO_PATTERN: 5005 DRM_DEBUG_KMS("TEST_PATTERN test requested\n"); 5006 response = intel_dp_autotest_video_pattern(intel_dp); 5007 break; 5008 case DP_TEST_LINK_EDID_READ: 5009 DRM_DEBUG_KMS("EDID test requested\n"); 5010 response = intel_dp_autotest_edid(intel_dp); 5011 break; 5012 case DP_TEST_LINK_PHY_TEST_PATTERN: 5013 DRM_DEBUG_KMS("PHY_PATTERN test requested\n"); 5014 response = intel_dp_autotest_phy_pattern(intel_dp); 5015 break; 5016 default: 5017 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request); 5018 break; 5019 } 5020 5021 if (response & DP_TEST_ACK) 5022 intel_dp->compliance.test_type = request; 5023 5024 update_status: 5025 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 5026 if (status <= 0) 5027 DRM_DEBUG_KMS("Could not write test response to sink\n"); 5028 } 5029 5030 static int 5031 intel_dp_check_mst_status(struct 
intel_dp *intel_dp) 5032 { 5033 bool bret; 5034 5035 if (intel_dp->is_mst) { 5036 u8 esi[DP_DPRX_ESI_LEN] = { 0 }; 5037 int ret = 0; 5038 int retry; 5039 bool handled; 5040 5041 WARN_ON_ONCE(intel_dp->active_mst_links < 0); 5042 bret = intel_dp_get_sink_irq_esi(intel_dp, esi); 5043 go_again: 5044 if (bret == true) { 5045 5046 /* check link status - esi[10] = 0x200c */ 5047 if (intel_dp->active_mst_links > 0 && 5048 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { 5049 DRM_DEBUG_KMS("channel EQ not ok, retraining\n"); 5050 intel_dp_start_link_train(intel_dp); 5051 intel_dp_stop_link_train(intel_dp); 5052 } 5053 5054 DRM_DEBUG_KMS("got esi %3ph\n", esi); 5055 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); 5056 5057 if (handled) { 5058 for (retry = 0; retry < 3; retry++) { 5059 int wret; 5060 wret = drm_dp_dpcd_write(&intel_dp->aux, 5061 DP_SINK_COUNT_ESI+1, 5062 &esi[1], 3); 5063 if (wret == 3) { 5064 break; 5065 } 5066 } 5067 5068 bret = intel_dp_get_sink_irq_esi(intel_dp, esi); 5069 if (bret == true) { 5070 DRM_DEBUG_KMS("got esi2 %3ph\n", esi); 5071 goto go_again; 5072 } 5073 } else 5074 ret = 0; 5075 5076 return ret; 5077 } else { 5078 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n"); 5079 intel_dp->is_mst = false; 5080 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 5081 intel_dp->is_mst); 5082 } 5083 } 5084 return -EINVAL; 5085 } 5086 5087 static bool 5088 intel_dp_needs_link_retrain(struct intel_dp *intel_dp) 5089 { 5090 u8 link_status[DP_LINK_STATUS_SIZE]; 5091 5092 if (!intel_dp->link_trained) 5093 return false; 5094 5095 /* 5096 * While PSR source HW is enabled, it will control main-link sending 5097 * frames, enabling and disabling it so trying to do a retrain will fail 5098 * as the link would or not be on or it could mix training patterns 5099 * and frame data at the same time causing retrain to fail. 5100 * Also when exiting PSR, HW will retrain the link anyways fixing 5101 * any link status error. 5102 */ 5103 if (intel_psr_enabled(intel_dp)) 5104 return false; 5105 5106 if (!intel_dp_get_link_status(intel_dp, link_status)) 5107 return false; 5108 5109 /* 5110 * Validate the cached values of intel_dp->link_rate and 5111 * intel_dp->lane_count before attempting to retrain. 
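 * (A zero or out-of-range cached rate/lane count leaves nothing sensible
 * to retrain with, so the check below bails out early.)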
5112 */ 5113 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 5114 intel_dp->lane_count)) 5115 return false; 5116 5117 /* Retrain if Channel EQ or CR not ok */ 5118 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 5119 } 5120 5121 int intel_dp_retrain_link(struct intel_encoder *encoder, 5122 struct drm_modeset_acquire_ctx *ctx) 5123 { 5124 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5125 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5126 struct intel_connector *connector = intel_dp->attached_connector; 5127 struct drm_connector_state *conn_state; 5128 struct intel_crtc_state *crtc_state; 5129 struct intel_crtc *crtc; 5130 int ret; 5131 5132 /* FIXME handle the MST connectors as well */ 5133 5134 if (!connector || connector->base.status != connector_status_connected) 5135 return 0; 5136 5137 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 5138 ctx); 5139 if (ret) 5140 return ret; 5141 5142 conn_state = connector->base.state; 5143 5144 crtc = to_intel_crtc(conn_state->crtc); 5145 if (!crtc) 5146 return 0; 5147 5148 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 5149 if (ret) 5150 return ret; 5151 5152 crtc_state = to_intel_crtc_state(crtc->base.state); 5153 5154 WARN_ON(!intel_crtc_has_dp_encoder(crtc_state)); 5155 5156 if (!crtc_state->hw.active) 5157 return 0; 5158 5159 if (conn_state->commit && 5160 !try_wait_for_completion(&conn_state->commit->hw_done)) 5161 return 0; 5162 5163 if (!intel_dp_needs_link_retrain(intel_dp)) 5164 return 0; 5165 5166 /* Suppress underruns caused by re-training */ 5167 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 5168 if (crtc_state->has_pch_encoder) 5169 intel_set_pch_fifo_underrun_reporting(dev_priv, 5170 intel_crtc_pch_transcoder(crtc), false); 5171 5172 intel_dp_start_link_train(intel_dp); 5173 intel_dp_stop_link_train(intel_dp); 5174 5175 /* Keep underrun reporting disabled until things are stable */ 5176 intel_wait_for_vblank(dev_priv, crtc->pipe); 5177 5178 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 5179 if (crtc_state->has_pch_encoder) 5180 intel_set_pch_fifo_underrun_reporting(dev_priv, 5181 intel_crtc_pch_transcoder(crtc), true); 5182 5183 return 0; 5184 } 5185 5186 /* 5187 * If display is now connected check links status, 5188 * there has been known issues of link loss triggering 5189 * long pulse. 5190 * 5191 * Some sinks (eg. ASUS PB287Q) seem to perform some 5192 * weird HPD ping pong during modesets. So we can apparently 5193 * end up with HPD going low during a modeset, and then 5194 * going back up soon after. And once that happens we must 5195 * retrain the link to get a picture. That's in case no 5196 * userspace component reacted to intermittent HPD dip. 
5197 */ 5198 static enum intel_hotplug_state 5199 intel_dp_hotplug(struct intel_encoder *encoder, 5200 struct intel_connector *connector, 5201 bool irq_received) 5202 { 5203 struct drm_modeset_acquire_ctx ctx; 5204 enum intel_hotplug_state state; 5205 int ret; 5206 5207 state = intel_encoder_hotplug(encoder, connector, irq_received); 5208 5209 drm_modeset_acquire_init(&ctx, 0); 5210 5211 for (;;) { 5212 ret = intel_dp_retrain_link(encoder, &ctx); 5213 5214 if (ret == -EDEADLK) { 5215 drm_modeset_backoff(&ctx); 5216 continue; 5217 } 5218 5219 break; 5220 } 5221 5222 drm_modeset_drop_locks(&ctx); 5223 drm_modeset_acquire_fini(&ctx); 5224 WARN(ret, "Acquiring modeset locks failed with %i\n", ret); 5225 5226 /* 5227 * Keeping it consistent with intel_ddi_hotplug() and 5228 * intel_hdmi_hotplug(). 5229 */ 5230 if (state == INTEL_HOTPLUG_UNCHANGED && irq_received) 5231 state = INTEL_HOTPLUG_RETRY; 5232 5233 return state; 5234 } 5235 5236 static void intel_dp_check_service_irq(struct intel_dp *intel_dp) 5237 { 5238 u8 val; 5239 5240 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 5241 return; 5242 5243 if (drm_dp_dpcd_readb(&intel_dp->aux, 5244 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val) 5245 return; 5246 5247 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); 5248 5249 if (val & DP_AUTOMATED_TEST_REQUEST) 5250 intel_dp_handle_test_request(intel_dp); 5251 5252 if (val & DP_CP_IRQ) 5253 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 5254 5255 if (val & DP_SINK_SPECIFIC_IRQ) 5256 DRM_DEBUG_DRIVER("Sink specific irq unhandled\n"); 5257 } 5258 5259 /* 5260 * According to DP spec 5261 * 5.1.2: 5262 * 1. Read DPCD 5263 * 2. Configure link according to Receiver Capabilities 5264 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 5265 * 4. Check link status on receipt of hot-plug interrupt 5266 * 5267 * intel_dp_short_pulse - handles short pulse interrupts 5268 * when full detection is not required. 5269 * Returns %true if short pulse is handled and full detection 5270 * is NOT required and %false otherwise. 5271 */ 5272 static bool 5273 intel_dp_short_pulse(struct intel_dp *intel_dp) 5274 { 5275 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 5276 u8 old_sink_count = intel_dp->sink_count; 5277 bool ret; 5278 5279 /* 5280 * Clearing compliance test variables to allow capturing 5281 * of values for next automated test request. 
5282 */ 5283 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 5284 5285 /* 5286 * Now read the DPCD to see if it's actually running 5287 * If the current value of sink count doesn't match with 5288 * the value that was stored earlier or dpcd read failed 5289 * we need to do full detection 5290 */ 5291 ret = intel_dp_get_dpcd(intel_dp); 5292 5293 if ((old_sink_count != intel_dp->sink_count) || !ret) { 5294 /* No need to proceed if we are going to do full detect */ 5295 return false; 5296 } 5297 5298 intel_dp_check_service_irq(intel_dp); 5299 5300 /* Handle CEC interrupts, if any */ 5301 drm_dp_cec_irq(&intel_dp->aux); 5302 5303 /* defer to the hotplug work for link retraining if needed */ 5304 if (intel_dp_needs_link_retrain(intel_dp)) 5305 return false; 5306 5307 intel_psr_short_pulse(intel_dp); 5308 5309 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 5310 DRM_DEBUG_KMS("Link Training Compliance Test requested\n"); 5311 /* Send a Hotplug Uevent to userspace to start modeset */ 5312 drm_kms_helper_hotplug_event(&dev_priv->drm); 5313 } 5314 5315 return true; 5316 } 5317 5318 /* XXX this is probably wrong for multiple downstream ports */ 5319 static enum drm_connector_status 5320 intel_dp_detect_dpcd(struct intel_dp *intel_dp) 5321 { 5322 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 5323 u8 *dpcd = intel_dp->dpcd; 5324 u8 type; 5325 5326 if (WARN_ON(intel_dp_is_edp(intel_dp))) 5327 return connector_status_connected; 5328 5329 if (lspcon->active) 5330 lspcon_resume(lspcon); 5331 5332 if (!intel_dp_get_dpcd(intel_dp)) 5333 return connector_status_disconnected; 5334 5335 /* if there's no downstream port, we're done */ 5336 if (!drm_dp_is_branch(dpcd)) 5337 return connector_status_connected; 5338 5339 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 5340 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 5341 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { 5342 5343 return intel_dp->sink_count ? 
5344 connector_status_connected : connector_status_disconnected; 5345 } 5346 5347 if (intel_dp_can_mst(intel_dp)) 5348 return connector_status_connected; 5349 5350 /* If no HPD, poke DDC gently */ 5351 if (drm_probe_ddc(&intel_dp->aux.ddc)) 5352 return connector_status_connected; 5353 5354 /* Well we tried, say unknown for unreliable port types */ 5355 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 5356 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 5357 if (type == DP_DS_PORT_TYPE_VGA || 5358 type == DP_DS_PORT_TYPE_NON_EDID) 5359 return connector_status_unknown; 5360 } else { 5361 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 5362 DP_DWN_STRM_PORT_TYPE_MASK; 5363 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || 5364 type == DP_DWN_STRM_PORT_TYPE_OTHER) 5365 return connector_status_unknown; 5366 } 5367 5368 /* Anything else is out of spec, warn and ignore */ 5369 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); 5370 return connector_status_disconnected; 5371 } 5372 5373 static enum drm_connector_status 5374 edp_detect(struct intel_dp *intel_dp) 5375 { 5376 return connector_status_connected; 5377 } 5378 5379 static bool ibx_digital_port_connected(struct intel_encoder *encoder) 5380 { 5381 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5382 u32 bit; 5383 5384 switch (encoder->hpd_pin) { 5385 case HPD_PORT_B: 5386 bit = SDE_PORTB_HOTPLUG; 5387 break; 5388 case HPD_PORT_C: 5389 bit = SDE_PORTC_HOTPLUG; 5390 break; 5391 case HPD_PORT_D: 5392 bit = SDE_PORTD_HOTPLUG; 5393 break; 5394 default: 5395 MISSING_CASE(encoder->hpd_pin); 5396 return false; 5397 } 5398 5399 return I915_READ(SDEISR) & bit; 5400 } 5401 5402 static bool cpt_digital_port_connected(struct intel_encoder *encoder) 5403 { 5404 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5405 u32 bit; 5406 5407 switch (encoder->hpd_pin) { 5408 case HPD_PORT_B: 5409 bit = SDE_PORTB_HOTPLUG_CPT; 5410 break; 5411 case HPD_PORT_C: 5412 bit = SDE_PORTC_HOTPLUG_CPT; 5413 break; 5414 case HPD_PORT_D: 5415 bit = SDE_PORTD_HOTPLUG_CPT; 5416 break; 5417 default: 5418 MISSING_CASE(encoder->hpd_pin); 5419 return false; 5420 } 5421 5422 return I915_READ(SDEISR) & bit; 5423 } 5424 5425 static bool spt_digital_port_connected(struct intel_encoder *encoder) 5426 { 5427 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5428 u32 bit; 5429 5430 switch (encoder->hpd_pin) { 5431 case HPD_PORT_A: 5432 bit = SDE_PORTA_HOTPLUG_SPT; 5433 break; 5434 case HPD_PORT_E: 5435 bit = SDE_PORTE_HOTPLUG_SPT; 5436 break; 5437 default: 5438 return cpt_digital_port_connected(encoder); 5439 } 5440 5441 return I915_READ(SDEISR) & bit; 5442 } 5443 5444 static bool g4x_digital_port_connected(struct intel_encoder *encoder) 5445 { 5446 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5447 u32 bit; 5448 5449 switch (encoder->hpd_pin) { 5450 case HPD_PORT_B: 5451 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; 5452 break; 5453 case HPD_PORT_C: 5454 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; 5455 break; 5456 case HPD_PORT_D: 5457 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; 5458 break; 5459 default: 5460 MISSING_CASE(encoder->hpd_pin); 5461 return false; 5462 } 5463 5464 return I915_READ(PORT_HOTPLUG_STAT) & bit; 5465 } 5466 5467 static bool gm45_digital_port_connected(struct intel_encoder *encoder) 5468 { 5469 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5470 u32 bit; 5471 5472 switch (encoder->hpd_pin) { 5473 case HPD_PORT_B: 5474 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45; 5475 break; 5476 case HPD_PORT_C: 5477 bit = 
PORTC_HOTPLUG_LIVE_STATUS_GM45; 5478 break; 5479 case HPD_PORT_D: 5480 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45; 5481 break; 5482 default: 5483 MISSING_CASE(encoder->hpd_pin); 5484 return false; 5485 } 5486 5487 return I915_READ(PORT_HOTPLUG_STAT) & bit; 5488 } 5489 5490 static bool ilk_digital_port_connected(struct intel_encoder *encoder) 5491 { 5492 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5493 5494 if (encoder->hpd_pin == HPD_PORT_A) 5495 return I915_READ(DEISR) & DE_DP_A_HOTPLUG; 5496 else 5497 return ibx_digital_port_connected(encoder); 5498 } 5499 5500 static bool snb_digital_port_connected(struct intel_encoder *encoder) 5501 { 5502 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5503 5504 if (encoder->hpd_pin == HPD_PORT_A) 5505 return I915_READ(DEISR) & DE_DP_A_HOTPLUG; 5506 else 5507 return cpt_digital_port_connected(encoder); 5508 } 5509 5510 static bool ivb_digital_port_connected(struct intel_encoder *encoder) 5511 { 5512 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5513 5514 if (encoder->hpd_pin == HPD_PORT_A) 5515 return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB; 5516 else 5517 return cpt_digital_port_connected(encoder); 5518 } 5519 5520 static bool bdw_digital_port_connected(struct intel_encoder *encoder) 5521 { 5522 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5523 5524 if (encoder->hpd_pin == HPD_PORT_A) 5525 return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG; 5526 else 5527 return cpt_digital_port_connected(encoder); 5528 } 5529 5530 static bool bxt_digital_port_connected(struct intel_encoder *encoder) 5531 { 5532 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5533 u32 bit; 5534 5535 switch (encoder->hpd_pin) { 5536 case HPD_PORT_A: 5537 bit = BXT_DE_PORT_HP_DDIA; 5538 break; 5539 case HPD_PORT_B: 5540 bit = BXT_DE_PORT_HP_DDIB; 5541 break; 5542 case HPD_PORT_C: 5543 bit = BXT_DE_PORT_HP_DDIC; 5544 break; 5545 default: 5546 MISSING_CASE(encoder->hpd_pin); 5547 return false; 5548 } 5549 5550 return I915_READ(GEN8_DE_PORT_ISR) & bit; 5551 } 5552 5553 static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv, 5554 enum phy phy) 5555 { 5556 if (HAS_PCH_MCC(dev_priv) && phy == PHY_C) 5557 return I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1); 5558 5559 return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(phy); 5560 } 5561 5562 static bool icp_digital_port_connected(struct intel_encoder *encoder) 5563 { 5564 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5565 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 5566 enum phy phy = intel_port_to_phy(dev_priv, encoder->port); 5567 5568 if (intel_phy_is_combo(dev_priv, phy)) 5569 return intel_combo_phy_connected(dev_priv, phy); 5570 else if (intel_phy_is_tc(dev_priv, phy)) 5571 return intel_tc_port_connected(dig_port); 5572 else 5573 MISSING_CASE(encoder->hpd_pin); 5574 5575 return false; 5576 } 5577 5578 /* 5579 * intel_digital_port_connected - is the specified port connected? 5580 * @encoder: intel_encoder 5581 * 5582 * In cases where there's a connector physically connected but it can't be used 5583 * by our hardware we also return false, since the rest of the driver should 5584 * pretty much treat the port as disconnected. This is relevant for type-C 5585 * (starting on ICL) where there's ownership involved. 5586 * 5587 * Return %true if port is connected, %false otherwise. 
5588 */ 5589 static bool __intel_digital_port_connected(struct intel_encoder *encoder) 5590 { 5591 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5592 5593 if (HAS_GMCH(dev_priv)) { 5594 if (IS_GM45(dev_priv)) 5595 return gm45_digital_port_connected(encoder); 5596 else 5597 return g4x_digital_port_connected(encoder); 5598 } 5599 5600 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 5601 return icp_digital_port_connected(encoder); 5602 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) 5603 return spt_digital_port_connected(encoder); 5604 else if (IS_GEN9_LP(dev_priv)) 5605 return bxt_digital_port_connected(encoder); 5606 else if (IS_GEN(dev_priv, 8)) 5607 return bdw_digital_port_connected(encoder); 5608 else if (IS_GEN(dev_priv, 7)) 5609 return ivb_digital_port_connected(encoder); 5610 else if (IS_GEN(dev_priv, 6)) 5611 return snb_digital_port_connected(encoder); 5612 else if (IS_GEN(dev_priv, 5)) 5613 return ilk_digital_port_connected(encoder); 5614 5615 MISSING_CASE(INTEL_GEN(dev_priv)); 5616 return false; 5617 } 5618 5619 bool intel_digital_port_connected(struct intel_encoder *encoder) 5620 { 5621 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5622 bool is_connected = false; 5623 intel_wakeref_t wakeref; 5624 5625 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) 5626 is_connected = __intel_digital_port_connected(encoder); 5627 5628 return is_connected; 5629 } 5630 5631 static struct edid * 5632 intel_dp_get_edid(struct intel_dp *intel_dp) 5633 { 5634 struct intel_connector *intel_connector = intel_dp->attached_connector; 5635 5636 /* use cached edid if we have one */ 5637 if (intel_connector->edid) { 5638 /* invalid edid */ 5639 if (IS_ERR(intel_connector->edid)) 5640 return NULL; 5641 5642 return drm_edid_duplicate(intel_connector->edid); 5643 } else 5644 return drm_get_edid(&intel_connector->base, 5645 &intel_dp->aux.ddc); 5646 } 5647 5648 static void 5649 intel_dp_set_edid(struct intel_dp *intel_dp) 5650 { 5651 struct intel_connector *intel_connector = intel_dp->attached_connector; 5652 struct edid *edid; 5653 5654 intel_dp_unset_edid(intel_dp); 5655 edid = intel_dp_get_edid(intel_dp); 5656 intel_connector->detect_edid = edid; 5657 5658 intel_dp->has_audio = drm_detect_monitor_audio(edid); 5659 drm_dp_cec_set_edid(&intel_dp->aux, edid); 5660 } 5661 5662 static void 5663 intel_dp_unset_edid(struct intel_dp *intel_dp) 5664 { 5665 struct intel_connector *intel_connector = intel_dp->attached_connector; 5666 5667 drm_dp_cec_unset_edid(&intel_dp->aux); 5668 kfree(intel_connector->detect_edid); 5669 intel_connector->detect_edid = NULL; 5670 5671 intel_dp->has_audio = false; 5672 } 5673 5674 static int 5675 intel_dp_detect(struct drm_connector *connector, 5676 struct drm_modeset_acquire_ctx *ctx, 5677 bool force) 5678 { 5679 struct drm_i915_private *dev_priv = to_i915(connector->dev); 5680 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 5681 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5682 struct intel_encoder *encoder = &dig_port->base; 5683 enum drm_connector_status status; 5684 5685 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 5686 connector->base.id, connector->name); 5687 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 5688 5689 /* Can't disconnect eDP */ 5690 if (intel_dp_is_edp(intel_dp)) 5691 status = edp_detect(intel_dp); 5692 else if (intel_digital_port_connected(encoder)) 5693 status = intel_dp_detect_dpcd(intel_dp); 5694 else 5695 status = 
connector_status_disconnected; 5696 5697 if (status == connector_status_disconnected) { 5698 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 5699 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 5700 5701 if (intel_dp->is_mst) { 5702 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", 5703 intel_dp->is_mst, 5704 intel_dp->mst_mgr.mst_state); 5705 intel_dp->is_mst = false; 5706 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 5707 intel_dp->is_mst); 5708 } 5709 5710 goto out; 5711 } 5712 5713 if (intel_dp->reset_link_params) { 5714 /* Initial max link lane count */ 5715 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 5716 5717 /* Initial max link rate */ 5718 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 5719 5720 intel_dp->reset_link_params = false; 5721 } 5722 5723 intel_dp_print_rates(intel_dp); 5724 5725 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ 5726 if (INTEL_GEN(dev_priv) >= 11) 5727 intel_dp_get_dsc_sink_cap(intel_dp); 5728 5729 intel_dp_configure_mst(intel_dp); 5730 5731 if (intel_dp->is_mst) { 5732 /* 5733 * If we are in MST mode then this connector 5734 * won't appear connected or have anything 5735 * with EDID on it 5736 */ 5737 status = connector_status_disconnected; 5738 goto out; 5739 } 5740 5741 /* 5742 * Some external monitors do not signal loss of link synchronization 5743 * with an IRQ_HPD, so force a link status check. 5744 */ 5745 if (!intel_dp_is_edp(intel_dp)) { 5746 int ret; 5747 5748 ret = intel_dp_retrain_link(encoder, ctx); 5749 if (ret) 5750 return ret; 5751 } 5752 5753 /* 5754 * Clearing NACK and defer counts to get their exact values 5755 * while reading EDID which are required by Compliance tests 5756 * 4.2.2.4 and 4.2.2.5 5757 */ 5758 intel_dp->aux.i2c_nack_count = 0; 5759 intel_dp->aux.i2c_defer_count = 0; 5760 5761 intel_dp_set_edid(intel_dp); 5762 if (intel_dp_is_edp(intel_dp) || 5763 to_intel_connector(connector)->detect_edid) 5764 status = connector_status_connected; 5765 5766 intel_dp_check_service_irq(intel_dp); 5767 5768 out: 5769 if (status != connector_status_connected && !intel_dp->is_mst) 5770 intel_dp_unset_edid(intel_dp); 5771 5772 /* 5773 * Make sure the refs for power wells enabled during detect are 5774 * dropped to avoid a new detect cycle triggered by HPD polling. 
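	 *
	 * intel_display_power_flush_work() below flushes any queued
	 * asynchronous power-domain puts, so the references taken during
	 * detection are dropped before we return.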
5775 */ 5776 intel_display_power_flush_work(dev_priv); 5777 5778 return status; 5779 } 5780 5781 static void 5782 intel_dp_force(struct drm_connector *connector) 5783 { 5784 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 5785 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5786 struct intel_encoder *intel_encoder = &dig_port->base; 5787 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 5788 enum intel_display_power_domain aux_domain = 5789 intel_aux_power_domain(dig_port); 5790 intel_wakeref_t wakeref; 5791 5792 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 5793 connector->base.id, connector->name); 5794 intel_dp_unset_edid(intel_dp); 5795 5796 if (connector->status != connector_status_connected) 5797 return; 5798 5799 wakeref = intel_display_power_get(dev_priv, aux_domain); 5800 5801 intel_dp_set_edid(intel_dp); 5802 5803 intel_display_power_put(dev_priv, aux_domain, wakeref); 5804 } 5805 5806 static int intel_dp_get_modes(struct drm_connector *connector) 5807 { 5808 struct intel_connector *intel_connector = to_intel_connector(connector); 5809 struct edid *edid; 5810 5811 edid = intel_connector->detect_edid; 5812 if (edid) { 5813 int ret = intel_connector_update_modes(connector, edid); 5814 if (ret) 5815 return ret; 5816 } 5817 5818 /* if eDP has no EDID, fall back to fixed mode */ 5819 if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) && 5820 intel_connector->panel.fixed_mode) { 5821 struct drm_display_mode *mode; 5822 5823 mode = drm_mode_duplicate(connector->dev, 5824 intel_connector->panel.fixed_mode); 5825 if (mode) { 5826 drm_mode_probed_add(connector, mode); 5827 return 1; 5828 } 5829 } 5830 5831 return 0; 5832 } 5833 5834 static int 5835 intel_dp_connector_register(struct drm_connector *connector) 5836 { 5837 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 5838 int ret; 5839 5840 ret = intel_connector_register(connector); 5841 if (ret) 5842 return ret; 5843 5844 i915_debugfs_connector_add(connector); 5845 5846 #ifdef __NetBSD__ 5847 DRM_DEBUG_KMS("registering %s bus for %s\n", 5848 intel_dp->aux.name, connector->name); 5849 #else 5850 DRM_DEBUG_KMS("registering %s bus for %s\n", 5851 intel_dp->aux.name, connector->kdev->kobj.name); 5852 #endif 5853 5854 intel_dp->aux.dev = connector->kdev; 5855 ret = drm_dp_aux_register(&intel_dp->aux); 5856 if (!ret) 5857 drm_dp_cec_register_connector(&intel_dp->aux, connector); 5858 return ret; 5859 } 5860 5861 static void 5862 intel_dp_connector_unregister(struct drm_connector *connector) 5863 { 5864 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 5865 5866 drm_dp_cec_unregister_connector(&intel_dp->aux); 5867 drm_dp_aux_unregister(&intel_dp->aux); 5868 intel_connector_unregister(connector); 5869 } 5870 5871 void intel_dp_encoder_flush_work(struct drm_encoder *encoder) 5872 { 5873 struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder)); 5874 struct intel_dp *intel_dp = &intel_dig_port->dp; 5875 5876 intel_dp_mst_encoder_cleanup(intel_dig_port); 5877 if (intel_dp_is_edp(intel_dp)) { 5878 intel_wakeref_t wakeref; 5879 5880 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 5881 /* 5882 * vdd might still be enabled do to the delayed vdd off. 5883 * Make sure vdd is actually turned off here. 
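		 *
		 * edp_panel_vdd_off_sync() must be called with the pps
		 * lock held, hence the with_pps_lock() wrapper below.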
5884		 */
5885		with_pps_lock(intel_dp, wakeref)
5886			edp_panel_vdd_off_sync(intel_dp);
5887
5888		if (intel_dp->edp_notifier.notifier_call) {
5889			unregister_reboot_notifier(&intel_dp->edp_notifier);
5890			intel_dp->edp_notifier.notifier_call = NULL;
5891		}
5892	}
5893
5894	intel_dp_aux_fini(intel_dp);
5895 }
5896
5897 static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5898 {
5899	intel_dp_encoder_flush_work(encoder);
5900
5901	drm_encoder_cleanup(encoder);
5902	kfree(enc_to_dig_port(to_intel_encoder(encoder)));
5903 }
5904
5905 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5906 {
5907	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
5908	intel_wakeref_t wakeref;
5909
5910	if (!intel_dp_is_edp(intel_dp))
5911		return;
5912
5913	/*
5914	 * vdd might still be enabled due to the delayed vdd off.
5915	 * Make sure vdd is actually turned off here.
5916	 */
5917	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5918	with_pps_lock(intel_dp, wakeref)
5919		edp_panel_vdd_off_sync(intel_dp);
5920 }
5921
5922 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
5923 {
5924	long ret;
5925
5926 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
5927	unsigned long irqflags;
5928	spin_lock_irqsave(&hdcp->cp_irq_lock, irqflags);
5929	DRM_SPIN_TIMED_WAIT_UNTIL(ret, &hdcp->cp_irq_queue,
5930	    &hdcp->cp_irq_lock,
5931	    msecs_to_jiffies(timeout),
5932	    C);
5933	if (!ret)
5934		DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n");
5935	spin_unlock_irqrestore(&hdcp->cp_irq_lock, irqflags);
5936 }
5937
5938 static
5939 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5940				 u8 *an)
5941 {
5942	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base));
5943	static const struct drm_dp_aux_msg msg = {
5944		.request = DP_AUX_NATIVE_WRITE,
5945		.address = DP_AUX_HDCP_AKSV,
5946		.size = DRM_HDCP_KSV_LEN,
5947	};
5948	u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
5949	ssize_t dpcd_ret;
5950	int ret;
5951
5952	/* Output An first, that's easy */
5953	dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
5954				     an, DRM_HDCP_AN_LEN);
5955	if (dpcd_ret != DRM_HDCP_AN_LEN) {
5956		DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
5957			      dpcd_ret);
5958		return dpcd_ret >= 0 ? -EIO : dpcd_ret;
5959	}
5960
5961	/*
5962	 * Since Aksv is Oh-So-Secret, we can't access it in software. So in
5963	 * order to get it on the wire, we need to create the AUX header as if
5964	 * we were writing the data, and then tickle the hardware to output the
5965	 * data once the header is sent out.
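	 *
	 * The DP_AUX_CH_CTL_AUX_AKSV_SELECT flag passed to
	 * intel_dp_aux_xfer() below is what makes the hardware append the
	 * Aksv after the header we build here.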
5966 */ 5967 intel_dp_aux_header(txbuf, &msg); 5968 5969 ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size, 5970 rxbuf, sizeof(rxbuf), 5971 DP_AUX_CH_CTL_AUX_AKSV_SELECT); 5972 if (ret < 0) { 5973 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret); 5974 return ret; 5975 } else if (ret == 0) { 5976 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n"); 5977 return -EIO; 5978 } 5979 5980 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK; 5981 if (reply != DP_AUX_NATIVE_REPLY_ACK) { 5982 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n", 5983 reply); 5984 return -EIO; 5985 } 5986 return 0; 5987 } 5988 5989 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, 5990 u8 *bksv) 5991 { 5992 ssize_t ret; 5993 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, 5994 DRM_HDCP_KSV_LEN); 5995 if (ret != DRM_HDCP_KSV_LEN) { 5996 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret); 5997 return ret >= 0 ? -EIO : ret; 5998 } 5999 return 0; 6000 } 6001 6002 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, 6003 u8 *bstatus) 6004 { 6005 ssize_t ret; 6006 /* 6007 * For some reason the HDMI and DP HDCP specs call this register 6008 * definition by different names. In the HDMI spec, it's called BSTATUS, 6009 * but in DP it's called BINFO. 6010 */ 6011 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO, 6012 bstatus, DRM_HDCP_BSTATUS_LEN); 6013 if (ret != DRM_HDCP_BSTATUS_LEN) { 6014 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); 6015 return ret >= 0 ? -EIO : ret; 6016 } 6017 return 0; 6018 } 6019 6020 static 6021 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port, 6022 u8 *bcaps) 6023 { 6024 ssize_t ret; 6025 6026 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS, 6027 bcaps, 1); 6028 if (ret != 1) { 6029 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret); 6030 return ret >= 0 ? -EIO : ret; 6031 } 6032 6033 return 0; 6034 } 6035 6036 static 6037 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port, 6038 bool *repeater_present) 6039 { 6040 ssize_t ret; 6041 u8 bcaps; 6042 6043 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); 6044 if (ret) 6045 return ret; 6046 6047 *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT; 6048 return 0; 6049 } 6050 6051 static 6052 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, 6053 u8 *ri_prime) 6054 { 6055 ssize_t ret; 6056 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, 6057 ri_prime, DRM_HDCP_RI_LEN); 6058 if (ret != DRM_HDCP_RI_LEN) { 6059 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret); 6060 return ret >= 0 ? -EIO : ret; 6061 } 6062 return 0; 6063 } 6064 6065 static 6066 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, 6067 bool *ksv_ready) 6068 { 6069 ssize_t ret; 6070 u8 bstatus; 6071 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 6072 &bstatus, 1); 6073 if (ret != 1) { 6074 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); 6075 return ret >= 0 ? 
-EIO : ret; 6076 } 6077 *ksv_ready = bstatus & DP_BSTATUS_READY; 6078 return 0; 6079 } 6080 6081 static 6082 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, 6083 int num_downstream, u8 *ksv_fifo) 6084 { 6085 ssize_t ret; 6086 int i; 6087 6088 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */ 6089 for (i = 0; i < num_downstream; i += 3) { 6090 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN; 6091 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6092 DP_AUX_HDCP_KSV_FIFO, 6093 ksv_fifo + i * DRM_HDCP_KSV_LEN, 6094 len); 6095 if (ret != len) { 6096 DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n", 6097 i, ret); 6098 return ret >= 0 ? -EIO : ret; 6099 } 6100 } 6101 return 0; 6102 } 6103 6104 static 6105 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, 6106 int i, u32 *part) 6107 { 6108 ssize_t ret; 6109 6110 if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) 6111 return -EINVAL; 6112 6113 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6114 DP_AUX_HDCP_V_PRIME(i), part, 6115 DRM_HDCP_V_PRIME_PART_LEN); 6116 if (ret != DRM_HDCP_V_PRIME_PART_LEN) { 6117 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); 6118 return ret >= 0 ? -EIO : ret; 6119 } 6120 return 0; 6121 } 6122 6123 static 6124 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, 6125 bool enable) 6126 { 6127 /* Not used for single stream DisplayPort setups */ 6128 return 0; 6129 } 6130 6131 static 6132 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port) 6133 { 6134 ssize_t ret; 6135 u8 bstatus; 6136 6137 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 6138 &bstatus, 1); 6139 if (ret != 1) { 6140 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); 6141 return false; 6142 } 6143 6144 return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ)); 6145 } 6146 6147 static 6148 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port, 6149 bool *hdcp_capable) 6150 { 6151 ssize_t ret; 6152 u8 bcaps; 6153 6154 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); 6155 if (ret) 6156 return ret; 6157 6158 *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE; 6159 return 0; 6160 } 6161 6162 struct hdcp2_dp_errata_stream_type { 6163 u8 msg_id; 6164 u8 stream_type; 6165 } __packed; 6166 6167 struct hdcp2_dp_msg_data { 6168 u8 msg_id; 6169 u32 offset; 6170 bool msg_detectable; 6171 u32 timeout; 6172 u32 timeout2; /* Added for non_paired situation */ 6173 }; 6174 6175 static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { 6176 { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 }, 6177 { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET, 6178 false, HDCP_2_2_CERT_TIMEOUT_MS, 0 }, 6179 { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET, 6180 false, 0, 0 }, 6181 { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET, 6182 false, 0, 0 }, 6183 { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET, 6184 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, 6185 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS }, 6186 { HDCP_2_2_AKE_SEND_PAIRING_INFO, 6187 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true, 6188 HDCP_2_2_PAIRING_TIMEOUT_MS, 0 }, 6189 { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 }, 6190 { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET, 6191 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 }, 6192 { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false, 6193 0, 0 }, 6194 { HDCP_2_2_REP_SEND_RECVID_LIST, 6195 
DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true, 6196 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 }, 6197 { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false, 6198 0, 0 }, 6199 { HDCP_2_2_REP_STREAM_MANAGE, 6200 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false, 6201 0, 0 }, 6202 { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET, 6203 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 }, 6204 /* local define to shovel this through the write_2_2 interface */ 6205 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50 6206 { HDCP_2_2_ERRATA_DP_STREAM_TYPE, 6207 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false, 6208 0, 0 }, 6209 }; 6210 6211 static inline 6212 int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port, 6213 u8 *rx_status) 6214 { 6215 ssize_t ret; 6216 6217 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6218 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, 6219 HDCP_2_2_DP_RXSTATUS_LEN); 6220 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { 6221 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); 6222 return ret >= 0 ? -EIO : ret; 6223 } 6224 6225 return 0; 6226 } 6227 6228 static 6229 int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port, 6230 u8 msg_id, bool *msg_ready) 6231 { 6232 u8 rx_status; 6233 int ret; 6234 6235 *msg_ready = false; 6236 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); 6237 if (ret < 0) 6238 return ret; 6239 6240 switch (msg_id) { 6241 case HDCP_2_2_AKE_SEND_HPRIME: 6242 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status)) 6243 *msg_ready = true; 6244 break; 6245 case HDCP_2_2_AKE_SEND_PAIRING_INFO: 6246 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status)) 6247 *msg_ready = true; 6248 break; 6249 case HDCP_2_2_REP_SEND_RECVID_LIST: 6250 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) 6251 *msg_ready = true; 6252 break; 6253 default: 6254 DRM_ERROR("Unidentified msg_id: %d\n", msg_id); 6255 return -EINVAL; 6256 } 6257 6258 return 0; 6259 } 6260 6261 static ssize_t 6262 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, 6263 const struct hdcp2_dp_msg_data *hdcp2_msg_data) 6264 { 6265 struct intel_dp *dp = &intel_dig_port->dp; 6266 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; 6267 u8 msg_id = hdcp2_msg_data->msg_id; 6268 int ret, timeout; 6269 bool msg_ready = false; 6270 6271 if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired) 6272 timeout = hdcp2_msg_data->timeout2; 6273 else 6274 timeout = hdcp2_msg_data->timeout; 6275 6276 /* 6277 * There is no way to detect the CERT, LPRIME and STREAM_READY 6278 * availability. So Wait for timeout and read the msg. 6279 */ 6280 if (!hdcp2_msg_data->msg_detectable) { 6281 mdelay(timeout); 6282 ret = 0; 6283 } else { 6284 /* 6285 * As we want to check the msg availability at timeout, Ignoring 6286 * the timeout at wait for CP_IRQ. 
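		 *
		 * In other words: sleep until CP_IRQ fires or the timeout
		 * expires, then poll RxStatus once below to decide whether
		 * the message actually arrived.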
6287 */ 6288 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout); 6289 ret = hdcp2_detect_msg_availability(intel_dig_port, 6290 msg_id, &msg_ready); 6291 if (!msg_ready) 6292 ret = -ETIMEDOUT; 6293 } 6294 6295 if (ret) 6296 DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n", 6297 hdcp2_msg_data->msg_id, ret, timeout); 6298 6299 return ret; 6300 } 6301 6302 static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id) 6303 { 6304 int i; 6305 6306 for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++) 6307 if (hdcp2_dp_msg_data[i].msg_id == msg_id) 6308 return &hdcp2_dp_msg_data[i]; 6309 6310 return NULL; 6311 } 6312 6313 static 6314 int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port, 6315 void *buf, size_t size) 6316 { 6317 struct intel_dp *dp = &intel_dig_port->dp; 6318 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; 6319 unsigned int offset; 6320 u8 *byte = buf; 6321 ssize_t ret, bytes_to_write, len; 6322 const struct hdcp2_dp_msg_data *hdcp2_msg_data; 6323 6324 hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); 6325 if (!hdcp2_msg_data) 6326 return -EINVAL; 6327 6328 offset = hdcp2_msg_data->offset; 6329 6330 /* No msg_id in DP HDCP2.2 msgs */ 6331 bytes_to_write = size - 1; 6332 byte++; 6333 6334 hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count); 6335 6336 while (bytes_to_write) { 6337 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ? 6338 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write; 6339 6340 ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, 6341 offset, (void *)byte, len); 6342 if (ret < 0) 6343 return ret; 6344 6345 bytes_to_write -= ret; 6346 byte += ret; 6347 offset += ret; 6348 } 6349 6350 return size; 6351 } 6352 6353 static 6354 ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port) 6355 { 6356 u8 rx_info[HDCP_2_2_RXINFO_LEN]; 6357 u32 dev_cnt; 6358 ssize_t ret; 6359 6360 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6361 DP_HDCP_2_2_REG_RXINFO_OFFSET, 6362 (void *)rx_info, HDCP_2_2_RXINFO_LEN); 6363 if (ret != HDCP_2_2_RXINFO_LEN) 6364 return ret >= 0 ? -EIO : ret; 6365 6366 dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | 6367 HDCP_2_2_DEV_COUNT_LO(rx_info[1])); 6368 6369 if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT) 6370 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT; 6371 6372 ret = sizeof(struct hdcp2_rep_send_receiverid_list) - 6373 HDCP_2_2_RECEIVER_IDS_MAX_LEN + 6374 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN); 6375 6376 return ret; 6377 } 6378 6379 static 6380 int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, 6381 u8 msg_id, void *buf, size_t size) 6382 { 6383 unsigned int offset; 6384 u8 *byte = buf; 6385 ssize_t ret, bytes_to_recv, len; 6386 const struct hdcp2_dp_msg_data *hdcp2_msg_data; 6387 6388 hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id); 6389 if (!hdcp2_msg_data) 6390 return -EINVAL; 6391 offset = hdcp2_msg_data->offset; 6392 6393 ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data); 6394 if (ret < 0) 6395 return ret; 6396 6397 if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) { 6398 ret = get_receiver_id_list_size(intel_dig_port); 6399 if (ret < 0) 6400 return ret; 6401 6402 size = ret; 6403 } 6404 bytes_to_recv = size - 1; 6405 6406 /* DP adaptation msgs has no msg_id */ 6407 byte++; 6408 6409 while (bytes_to_recv) { 6410 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ? 
6411			DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
6412
6413		ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
6414				       (void *)byte, len);
6415		if (ret < 0) {
6416			DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
6417			return ret;
6418		}
6419
6420		bytes_to_recv -= ret;
6421		byte += ret;
6422		offset += ret;
6423	}
6424	byte = buf;
6425	*byte = msg_id;
6426
6427	return size;
6428 }
6429
6430 static
6431 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
6432				       bool is_repeater, u8 content_type)
6433 {
6434	struct hdcp2_dp_errata_stream_type stream_type_msg;
6435
6436	if (is_repeater)
6437		return 0;
6438
6439	/*
6440	 * Errata for DP: since the stream type is used for encryption, the
6441	 * receiver has to be told the stream type so that it can decrypt
6442	 * the content.
6443	 * A repeater is told the stream type as part of its
6444	 * authentication later on.
6445	 */
6446	stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
6447	stream_type_msg.stream_type = content_type;
6448
6449	return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
6450					sizeof(stream_type_msg));
6451 }
6452
6453 static
6454 int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
6455 {
6456	u8 rx_status;
6457	int ret;
6458
6459	ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6460	if (ret)
6461		return ret;
6462
6463	if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
6464		ret = HDCP_REAUTH_REQUEST;
6465	else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
6466		ret = HDCP_LINK_INTEGRITY_FAILURE;
6467	else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6468		ret = HDCP_TOPOLOGY_CHANGE;
6469
6470	return ret;
6471 }
6472
6473 static
6474 int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
6475			    bool *capable)
6476 {
6477	u8 rx_caps[3];
6478	int ret;
6479
6480	*capable = false;
6481	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6482			       DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
6483			       rx_caps, HDCP_2_2_RXCAPS_LEN);
6484	if (ret != HDCP_2_2_RXCAPS_LEN)
6485		return ret >= 0 ?
-EIO : ret; 6486 6487 if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && 6488 HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) 6489 *capable = true; 6490 6491 return 0; 6492 } 6493 6494 static const struct intel_hdcp_shim intel_dp_hdcp_shim = { 6495 .write_an_aksv = intel_dp_hdcp_write_an_aksv, 6496 .read_bksv = intel_dp_hdcp_read_bksv, 6497 .read_bstatus = intel_dp_hdcp_read_bstatus, 6498 .repeater_present = intel_dp_hdcp_repeater_present, 6499 .read_ri_prime = intel_dp_hdcp_read_ri_prime, 6500 .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, 6501 .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, 6502 .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, 6503 .toggle_signalling = intel_dp_hdcp_toggle_signalling, 6504 .check_link = intel_dp_hdcp_check_link, 6505 .hdcp_capable = intel_dp_hdcp_capable, 6506 .write_2_2_msg = intel_dp_hdcp2_write_msg, 6507 .read_2_2_msg = intel_dp_hdcp2_read_msg, 6508 .config_stream_type = intel_dp_hdcp2_config_stream_type, 6509 .check_2_2_link = intel_dp_hdcp2_check_link, 6510 .hdcp_2_2_capable = intel_dp_hdcp2_capable, 6511 .protocol = HDCP_PROTOCOL_DP, 6512 }; 6513 6514 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) 6515 { 6516 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6517 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6518 6519 lockdep_assert_held(&dev_priv->pps_mutex); 6520 6521 if (!edp_have_panel_vdd(intel_dp)) 6522 return; 6523 6524 /* 6525 * The VDD bit needs a power domain reference, so if the bit is 6526 * already enabled when we boot or resume, grab this reference and 6527 * schedule a vdd off, so we don't hold on to the reference 6528 * indefinitely. 6529 */ 6530 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n"); 6531 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); 6532 6533 edp_panel_vdd_schedule_off(intel_dp); 6534 } 6535 6536 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) 6537 { 6538 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6539 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 6540 enum pipe pipe; 6541 6542 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 6543 encoder->port, &pipe)) 6544 return pipe; 6545 6546 return INVALID_PIPE; 6547 } 6548 6549 void intel_dp_encoder_reset(struct drm_encoder *encoder) 6550 { 6551 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 6552 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); 6553 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 6554 intel_wakeref_t wakeref; 6555 6556 if (!HAS_DDI(dev_priv)) 6557 intel_dp->DP = I915_READ(intel_dp->output_reg); 6558 6559 if (lspcon->active) 6560 lspcon_resume(lspcon); 6561 6562 intel_dp->reset_link_params = true; 6563 6564 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 6565 !intel_dp_is_edp(intel_dp)) 6566 return; 6567 6568 with_pps_lock(intel_dp, wakeref) { 6569 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 6570 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 6571 6572 if (intel_dp_is_edp(intel_dp)) { 6573 /* 6574 * Reinit the power sequencer, in case BIOS did 6575 * something nasty with it. 
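			 *
			 * intel_dp_pps_init() re-reads and re-programs the
			 * panel power sequencer delays, and
			 * intel_edp_panel_vdd_sanitize() picks up a power
			 * reference if the BIOS left VDD enabled.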
6576 */ 6577 intel_dp_pps_init(intel_dp); 6578 intel_edp_panel_vdd_sanitize(intel_dp); 6579 } 6580 } 6581 } 6582 6583 static const struct drm_connector_funcs intel_dp_connector_funcs = { 6584 .force = intel_dp_force, 6585 .fill_modes = drm_helper_probe_single_connector_modes, 6586 .atomic_get_property = intel_digital_connector_atomic_get_property, 6587 .atomic_set_property = intel_digital_connector_atomic_set_property, 6588 .late_register = intel_dp_connector_register, 6589 .early_unregister = intel_dp_connector_unregister, 6590 .destroy = intel_connector_destroy, 6591 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 6592 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 6593 }; 6594 6595 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 6596 .detect_ctx = intel_dp_detect, 6597 .get_modes = intel_dp_get_modes, 6598 .mode_valid = intel_dp_mode_valid, 6599 .atomic_check = intel_digital_connector_atomic_check, 6600 }; 6601 6602 static const struct drm_encoder_funcs intel_dp_enc_funcs = { 6603 .reset = intel_dp_encoder_reset, 6604 .destroy = intel_dp_encoder_destroy, 6605 }; 6606 6607 enum irqreturn 6608 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) 6609 { 6610 struct intel_dp *intel_dp = &intel_dig_port->dp; 6611 6612 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { 6613 /* 6614 * vdd off can generate a long pulse on eDP which 6615 * would require vdd on to handle it, and thus we 6616 * would end up in an endless cycle of 6617 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..." 6618 */ 6619 DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n", 6620 intel_dig_port->base.base.base.id, 6621 intel_dig_port->base.base.name); 6622 return IRQ_HANDLED; 6623 } 6624 6625 DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n", 6626 intel_dig_port->base.base.base.id, 6627 intel_dig_port->base.base.name, 6628 long_hpd ? "long" : "short"); 6629 6630 if (long_hpd) { 6631 intel_dp->reset_link_params = true; 6632 return IRQ_NONE; 6633 } 6634 6635 if (intel_dp->is_mst) { 6636 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) { 6637 /* 6638 * If we were in MST mode, and device is not 6639 * there, get out of MST mode 6640 */ 6641 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", 6642 intel_dp->is_mst, intel_dp->mst_mgr.mst_state); 6643 intel_dp->is_mst = false; 6644 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 6645 intel_dp->is_mst); 6646 6647 return IRQ_NONE; 6648 } 6649 } 6650 6651 if (!intel_dp->is_mst) { 6652 bool handled; 6653 6654 handled = intel_dp_short_pulse(intel_dp); 6655 6656 if (!handled) 6657 return IRQ_NONE; 6658 } 6659 6660 return IRQ_HANDLED; 6661 } 6662 6663 /* check the VBT to see whether the eDP is on another port */ 6664 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) 6665 { 6666 /* 6667 * eDP not supported on g4x. so bail out early just 6668 * for a bit extra safety in case the VBT is bonkers. 
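	 *
	 * Below: anything before gen5 is rejected outright, port A is
	 * assumed to be eDP on gen5-8, and everything else defers to
	 * the VBT.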
6669 */ 6670 if (INTEL_GEN(dev_priv) < 5) 6671 return false; 6672 6673 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A) 6674 return true; 6675 6676 return intel_bios_is_port_edp(dev_priv, port); 6677 } 6678 6679 static void 6680 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 6681 { 6682 struct drm_i915_private *dev_priv = to_i915(connector->dev); 6683 enum port port = dp_to_dig_port(intel_dp)->base.port; 6684 6685 if (!IS_G4X(dev_priv) && port != PORT_A) 6686 intel_attach_force_audio_property(connector); 6687 6688 intel_attach_broadcast_rgb_property(connector); 6689 if (HAS_GMCH(dev_priv)) 6690 drm_connector_attach_max_bpc_property(connector, 6, 10); 6691 else if (INTEL_GEN(dev_priv) >= 5) 6692 drm_connector_attach_max_bpc_property(connector, 6, 12); 6693 6694 intel_attach_colorspace_property(connector); 6695 6696 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11) 6697 drm_object_attach_property(&connector->base, 6698 connector->dev->mode_config.hdr_output_metadata_property, 6699 0); 6700 6701 if (intel_dp_is_edp(intel_dp)) { 6702 u32 allowed_scalers; 6703 6704 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); 6705 if (!HAS_GMCH(dev_priv)) 6706 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); 6707 6708 drm_connector_attach_scaling_mode_property(connector, allowed_scalers); 6709 6710 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT; 6711 6712 } 6713 } 6714 6715 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) 6716 { 6717 intel_dp->panel_power_off_time = ktime_get_boottime(); 6718 intel_dp->last_power_on = jiffies; 6719 intel_dp->last_backlight_off = jiffies; 6720 } 6721 6722 static void 6723 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) 6724 { 6725 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6726 u32 pp_on, pp_off, pp_ctl; 6727 struct pps_registers regs; 6728 6729 intel_pps_get_registers(intel_dp, ®s); 6730 6731 pp_ctl = ilk_get_pp_control(intel_dp); 6732 6733 /* Ensure PPS is unlocked */ 6734 if (!HAS_DDI(dev_priv)) 6735 I915_WRITE(regs.pp_ctrl, pp_ctl); 6736 6737 pp_on = I915_READ(regs.pp_on); 6738 pp_off = I915_READ(regs.pp_off); 6739 6740 /* Pull timing values out of registers */ 6741 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on); 6742 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on); 6743 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off); 6744 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off); 6745 6746 if (i915_mmio_reg_valid(regs.pp_div)) { 6747 u32 pp_div; 6748 6749 pp_div = I915_READ(regs.pp_div); 6750 6751 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000; 6752 } else { 6753 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000; 6754 } 6755 } 6756 6757 static void 6758 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq) 6759 { 6760 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 6761 state_name, 6762 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); 6763 } 6764 6765 static void 6766 intel_pps_verify_state(struct intel_dp *intel_dp) 6767 { 6768 struct edp_power_seq hw; 6769 struct edp_power_seq *sw = &intel_dp->pps_delays; 6770 6771 intel_pps_readout_hw_state(intel_dp, &hw); 6772 6773 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 || 6774 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) { 6775 DRM_ERROR("PPS state mismatch\n"); 6776 intel_pps_dump_state("sw", sw); 6777 
intel_pps_dump_state("hw", &hw); 6778 } 6779 } 6780 6781 static void 6782 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp) 6783 { 6784 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6785 struct edp_power_seq cur, vbt, spec, 6786 *final = &intel_dp->pps_delays; 6787 6788 lockdep_assert_held(&dev_priv->pps_mutex); 6789 6790 /* already initialized? */ 6791 if (final->t11_t12 != 0) 6792 return; 6793 6794 intel_pps_readout_hw_state(intel_dp, &cur); 6795 6796 intel_pps_dump_state("cur", &cur); 6797 6798 vbt = dev_priv->vbt.edp.pps; 6799 /* On Toshiba Satellite P50-C-18C system the VBT T12 delay 6800 * of 500ms appears to be too short. Ocassionally the panel 6801 * just fails to power back on. Increasing the delay to 800ms 6802 * seems sufficient to avoid this problem. 6803 */ 6804 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { 6805 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10); 6806 DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", 6807 vbt.t11_t12); 6808 } 6809 /* T11_T12 delay is special and actually in units of 100ms, but zero 6810 * based in the hw (so we need to add 100 ms). But the sw vbt 6811 * table multiplies it with 1000 to make it in units of 100usec, 6812 * too. */ 6813 vbt.t11_t12 += 100 * 10; 6814 6815 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of 6816 * our hw here, which are all in 100usec. */ 6817 spec.t1_t3 = 210 * 10; 6818 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ 6819 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ 6820 spec.t10 = 500 * 10; 6821 /* This one is special and actually in units of 100ms, but zero 6822 * based in the hw (so we need to add 100 ms). But the sw vbt 6823 * table multiplies it with 1000 to make it in units of 100usec, 6824 * too. */ 6825 spec.t11_t12 = (510 + 100) * 10; 6826 6827 intel_pps_dump_state("vbt", &vbt); 6828 6829 /* Use the max of the register settings and vbt. If both are 6830 * unset, fall back to the spec limits. */ 6831 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \ 6832 spec.field : \ 6833 max(cur.field, vbt.field)) 6834 assign_final(t1_t3); 6835 assign_final(t8); 6836 assign_final(t9); 6837 assign_final(t10); 6838 assign_final(t11_t12); 6839 #undef assign_final 6840 6841 #define get_delay(field) (DIV_ROUND_UP(final->field, 10)) 6842 intel_dp->panel_power_up_delay = get_delay(t1_t3); 6843 intel_dp->backlight_on_delay = get_delay(t8); 6844 intel_dp->backlight_off_delay = get_delay(t9); 6845 intel_dp->panel_power_down_delay = get_delay(t10); 6846 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 6847 #undef get_delay 6848 6849 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", 6850 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, 6851 intel_dp->panel_power_cycle_delay); 6852 6853 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 6854 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 6855 6856 /* 6857 * We override the HW backlight delays to 1 because we do manual waits 6858 * on them. For T8, even BSpec recommends doing it. For T9, if we 6859 * don't do this, we'll end up waiting for the backlight off delay 6860 * twice: once when we do the manual sleep, and once when we disable 6861 * the panel and wait for the PP_STATUS bit to become zero. 6862 */ 6863 final->t8 = 1; 6864 final->t9 = 1; 6865 6866 /* 6867 * HW has only a 100msec granularity for t11_t12 so round it up 6868 * accordingly. 
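	 *
	 * The value is kept in units of 100 usec, so this means rounding up
	 * to a multiple of 1000; e.g. a computed 1300 (130 ms) becomes
	 * 2000 (200 ms).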
6869 */ 6870 final->t11_t12 = roundup(final->t11_t12, 100 * 10); 6871 } 6872 6873 static void 6874 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, 6875 bool force_disable_vdd) 6876 { 6877 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6878 u32 pp_on, pp_off, port_sel = 0; 6879 int div = dev_priv->rawclk_freq / 1000; 6880 struct pps_registers regs; 6881 enum port port = dp_to_dig_port(intel_dp)->base.port; 6882 const struct edp_power_seq *seq = &intel_dp->pps_delays; 6883 6884 lockdep_assert_held(&dev_priv->pps_mutex); 6885 6886 intel_pps_get_registers(intel_dp, ®s); 6887 6888 /* 6889 * On some VLV machines the BIOS can leave the VDD 6890 * enabled even on power sequencers which aren't 6891 * hooked up to any port. This would mess up the 6892 * power domain tracking the first time we pick 6893 * one of these power sequencers for use since 6894 * edp_panel_vdd_on() would notice that the VDD was 6895 * already on and therefore wouldn't grab the power 6896 * domain reference. Disable VDD first to avoid this. 6897 * This also avoids spuriously turning the VDD on as 6898 * soon as the new power sequencer gets initialized. 6899 */ 6900 if (force_disable_vdd) { 6901 u32 pp = ilk_get_pp_control(intel_dp); 6902 6903 WARN(pp & PANEL_POWER_ON, "Panel power already on\n"); 6904 6905 if (pp & EDP_FORCE_VDD) 6906 DRM_DEBUG_KMS("VDD already on, disabling first\n"); 6907 6908 pp &= ~EDP_FORCE_VDD; 6909 6910 I915_WRITE(regs.pp_ctrl, pp); 6911 } 6912 6913 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) | 6914 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8); 6915 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) | 6916 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10); 6917 6918 /* Haswell doesn't have any port selection bits for the panel 6919 * power sequencer any more. */ 6920 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 6921 port_sel = PANEL_PORT_SELECT_VLV(port); 6922 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 6923 switch (port) { 6924 case PORT_A: 6925 port_sel = PANEL_PORT_SELECT_DPA; 6926 break; 6927 case PORT_C: 6928 port_sel = PANEL_PORT_SELECT_DPC; 6929 break; 6930 case PORT_D: 6931 port_sel = PANEL_PORT_SELECT_DPD; 6932 break; 6933 default: 6934 MISSING_CASE(port); 6935 break; 6936 } 6937 } 6938 6939 pp_on |= port_sel; 6940 6941 I915_WRITE(regs.pp_on, pp_on); 6942 I915_WRITE(regs.pp_off, pp_off); 6943 6944 /* 6945 * Compute the divisor for the pp clock, simply match the Bspec formula. 6946 */ 6947 if (i915_mmio_reg_valid(regs.pp_div)) { 6948 I915_WRITE(regs.pp_div, 6949 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | 6950 REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); 6951 } else { 6952 u32 pp_ctl; 6953 6954 pp_ctl = I915_READ(regs.pp_ctrl); 6955 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK; 6956 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)); 6957 I915_WRITE(regs.pp_ctrl, pp_ctl); 6958 } 6959 6960 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 6961 I915_READ(regs.pp_on), 6962 I915_READ(regs.pp_off), 6963 i915_mmio_reg_valid(regs.pp_div) ? 
6964 I915_READ(regs.pp_div) : 6965 (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK)); 6966 } 6967 6968 static void intel_dp_pps_init(struct intel_dp *intel_dp) 6969 { 6970 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6971 6972 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 6973 vlv_initial_power_sequencer_setup(intel_dp); 6974 } else { 6975 intel_dp_init_panel_power_sequencer(intel_dp); 6976 intel_dp_init_panel_power_sequencer_registers(intel_dp, false); 6977 } 6978 } 6979 6980 /** 6981 * intel_dp_set_drrs_state - program registers for RR switch to take effect 6982 * @dev_priv: i915 device 6983 * @crtc_state: a pointer to the active intel_crtc_state 6984 * @refresh_rate: RR to be programmed 6985 * 6986 * This function gets called when refresh rate (RR) has to be changed from 6987 * one frequency to another. Switches can be between high and low RR 6988 * supported by the panel or to any other RR based on media playback (in 6989 * this case, RR value needs to be passed from user space). 6990 * 6991 * The caller of this function needs to take a lock on dev_priv->drrs. 6992 */ 6993 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, 6994 const struct intel_crtc_state *crtc_state, 6995 int refresh_rate) 6996 { 6997 struct intel_dp *intel_dp = dev_priv->drrs.dp; 6998 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); 6999 enum drrs_refresh_rate_type index = DRRS_HIGH_RR; 7000 7001 if (refresh_rate <= 0) { 7002 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n"); 7003 return; 7004 } 7005 7006 if (intel_dp == NULL) { 7007 DRM_DEBUG_KMS("DRRS not supported.\n"); 7008 return; 7009 } 7010 7011 if (!intel_crtc) { 7012 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n"); 7013 return; 7014 } 7015 7016 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { 7017 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n"); 7018 return; 7019 } 7020 7021 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh == 7022 refresh_rate) 7023 index = DRRS_LOW_RR; 7024 7025 if (index == dev_priv->drrs.refresh_rate_type) { 7026 DRM_DEBUG_KMS( 7027 "DRRS requested for previously set RR...ignoring\n"); 7028 return; 7029 } 7030 7031 if (!crtc_state->hw.active) { 7032 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n"); 7033 return; 7034 } 7035 7036 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { 7037 switch (index) { 7038 case DRRS_HIGH_RR: 7039 intel_dp_set_m_n(crtc_state, M1_N1); 7040 break; 7041 case DRRS_LOW_RR: 7042 intel_dp_set_m_n(crtc_state, M2_N2); 7043 break; 7044 case DRRS_MAX_RR: 7045 default: 7046 DRM_ERROR("Unsupported refreshrate type\n"); 7047 } 7048 } else if (INTEL_GEN(dev_priv) > 6) { 7049 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); 7050 u32 val; 7051 7052 val = I915_READ(reg); 7053 if (index > DRRS_HIGH_RR) { 7054 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7055 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; 7056 else 7057 val |= PIPECONF_EDP_RR_MODE_SWITCH; 7058 } else { 7059 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7060 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; 7061 else 7062 val &= ~PIPECONF_EDP_RR_MODE_SWITCH; 7063 } 7064 I915_WRITE(reg, val); 7065 } 7066 7067 dev_priv->drrs.refresh_rate_type = index; 7068 7069 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate); 7070 } 7071 7072 /** 7073 * intel_edp_drrs_enable - init drrs struct if supported 7074 * @intel_dp: DP struct 7075 * @crtc_state: A pointer to the active crtc state. 
7076 * 7077 * Initializes frontbuffer_bits and drrs.dp 7078 */ 7079 void intel_edp_drrs_enable(struct intel_dp *intel_dp, 7080 const struct intel_crtc_state *crtc_state) 7081 { 7082 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7083 7084 if (!crtc_state->has_drrs) { 7085 DRM_DEBUG_KMS("Panel doesn't support DRRS\n"); 7086 return; 7087 } 7088 7089 if (dev_priv->psr.enabled) { 7090 DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n"); 7091 return; 7092 } 7093 7094 mutex_lock(&dev_priv->drrs.mutex); 7095 if (dev_priv->drrs.dp) { 7096 DRM_DEBUG_KMS("DRRS already enabled\n"); 7097 goto unlock; 7098 } 7099 7100 dev_priv->drrs.busy_frontbuffer_bits = 0; 7101 7102 dev_priv->drrs.dp = intel_dp; 7103 7104 unlock: 7105 mutex_unlock(&dev_priv->drrs.mutex); 7106 } 7107 7108 /** 7109 * intel_edp_drrs_disable - Disable DRRS 7110 * @intel_dp: DP struct 7111 * @old_crtc_state: Pointer to old crtc_state. 7112 * 7113 */ 7114 void intel_edp_drrs_disable(struct intel_dp *intel_dp, 7115 const struct intel_crtc_state *old_crtc_state) 7116 { 7117 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7118 7119 if (!old_crtc_state->has_drrs) 7120 return; 7121 7122 mutex_lock(&dev_priv->drrs.mutex); 7123 if (!dev_priv->drrs.dp) { 7124 mutex_unlock(&dev_priv->drrs.mutex); 7125 return; 7126 } 7127 7128 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7129 intel_dp_set_drrs_state(dev_priv, old_crtc_state, 7130 intel_dp->attached_connector->panel.fixed_mode->vrefresh); 7131 7132 dev_priv->drrs.dp = NULL; 7133 mutex_unlock(&dev_priv->drrs.mutex); 7134 7135 cancel_delayed_work_sync(&dev_priv->drrs.work); 7136 } 7137 7138 static void intel_edp_drrs_downclock_work(struct work_struct *work) 7139 { 7140 struct drm_i915_private *dev_priv = 7141 container_of(work, typeof(*dev_priv), drrs.work.work); 7142 struct intel_dp *intel_dp; 7143 7144 mutex_lock(&dev_priv->drrs.mutex); 7145 7146 intel_dp = dev_priv->drrs.dp; 7147 7148 if (!intel_dp) 7149 goto unlock; 7150 7151 /* 7152 * The delayed work can race with an invalidate hence we need to 7153 * recheck. 7154 */ 7155 7156 if (dev_priv->drrs.busy_frontbuffer_bits) 7157 goto unlock; 7158 7159 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) { 7160 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 7161 7162 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7163 intel_dp->attached_connector->panel.downclock_mode->vrefresh); 7164 } 7165 7166 unlock: 7167 mutex_unlock(&dev_priv->drrs.mutex); 7168 } 7169 7170 /** 7171 * intel_edp_drrs_invalidate - Disable Idleness DRRS 7172 * @dev_priv: i915 device 7173 * @frontbuffer_bits: frontbuffer plane tracking bits 7174 * 7175 * This function gets called everytime rendering on the given planes start. 7176 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR). 7177 * 7178 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 
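 *
 * Note that the upclock happens synchronously here, while the downclock
 * only happens from the delayed work scheduled by intel_edp_drrs_flush().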
7179 */ 7180 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, 7181 unsigned int frontbuffer_bits) 7182 { 7183 struct drm_crtc *crtc; 7184 enum pipe pipe; 7185 7186 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 7187 return; 7188 7189 cancel_delayed_work(&dev_priv->drrs.work); 7190 7191 mutex_lock(&dev_priv->drrs.mutex); 7192 if (!dev_priv->drrs.dp) { 7193 mutex_unlock(&dev_priv->drrs.mutex); 7194 return; 7195 } 7196 7197 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; 7198 pipe = to_intel_crtc(crtc)->pipe; 7199 7200 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 7201 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; 7202 7203 /* invalidate means busy screen hence upclock */ 7204 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7205 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7206 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); 7207 7208 mutex_unlock(&dev_priv->drrs.mutex); 7209 } 7210 7211 /** 7212 * intel_edp_drrs_flush - Restart Idleness DRRS 7213 * @dev_priv: i915 device 7214 * @frontbuffer_bits: frontbuffer plane tracking bits 7215 * 7216 * This function gets called every time rendering on the given planes has 7217 * completed or flip on a crtc is completed. So DRRS should be upclocked 7218 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again, 7219 * if no other planes are dirty. 7220 * 7221 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 7222 */ 7223 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, 7224 unsigned int frontbuffer_bits) 7225 { 7226 struct drm_crtc *crtc; 7227 enum pipe pipe; 7228 7229 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 7230 return; 7231 7232 cancel_delayed_work(&dev_priv->drrs.work); 7233 7234 mutex_lock(&dev_priv->drrs.mutex); 7235 if (!dev_priv->drrs.dp) { 7236 mutex_unlock(&dev_priv->drrs.mutex); 7237 return; 7238 } 7239 7240 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; 7241 pipe = to_intel_crtc(crtc)->pipe; 7242 7243 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 7244 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; 7245 7246 /* flush means busy screen hence upclock */ 7247 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7248 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7249 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); 7250 7251 /* 7252 * flush also means no more activity hence schedule downclock, if all 7253 * other fbs are quiescent too 7254 */ 7255 if (!dev_priv->drrs.busy_frontbuffer_bits) 7256 schedule_delayed_work(&dev_priv->drrs.work, 7257 msecs_to_jiffies(1000)); 7258 mutex_unlock(&dev_priv->drrs.mutex); 7259 } 7260 7261 /** 7262 * DOC: Display Refresh Rate Switching (DRRS) 7263 * 7264 * Display Refresh Rate Switching (DRRS) is a power conservation feature 7265 * which enables swtching between low and high refresh rates, 7266 * dynamically, based on the usage scenario. This feature is applicable 7267 * for internal panels. 7268 * 7269 * Indication that the panel supports DRRS is given by the panel EDID, which 7270 * would list multiple refresh rates for one resolution. 7271 * 7272 * DRRS is of 2 types - static and seamless. 7273 * Static DRRS involves changing refresh rate (RR) by doing a full modeset 7274 * (may appear as a blink on screen) and is used in dock-undock scenario. 
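 * (Only the seamless flavour is handled by the helpers in this file;
 * intel_dp_set_drrs_state() above bails out unless the VBT-reported DRRS
 * type is SEAMLESS_DRRS_SUPPORT.)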
7275 * Seamless DRRS involves changing RR without any visual effect to the user 7276 * and can be used during normal system usage. This is done by programming 7277 * certain registers. 7278 * 7279 * Support for static/seamless DRRS may be indicated in the VBT based on 7280 * inputs from the panel spec. 7281 * 7282 * DRRS saves power by switching to low RR based on usage scenarios. 7283 * 7284 * The implementation is based on frontbuffer tracking implementation. When 7285 * there is a disturbance on the screen triggered by user activity or a periodic 7286 * system activity, DRRS is disabled (RR is changed to high RR). When there is 7287 * no movement on screen, after a timeout of 1 second, a switch to low RR is 7288 * made. 7289 * 7290 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() 7291 * and intel_edp_drrs_flush() are called. 7292 * 7293 * DRRS can be further extended to support other internal panels and also 7294 * the scenario of video playback wherein RR is set based on the rate 7295 * requested by userspace. 7296 */ 7297 7298 /** 7299 * intel_dp_drrs_init - Init basic DRRS work and mutex. 7300 * @connector: eDP connector 7301 * @fixed_mode: preferred mode of panel 7302 * 7303 * This function is called only once at driver load to initialize basic 7304 * DRRS stuff. 7305 * 7306 * Returns: 7307 * Downclock mode if panel supports it, else return NULL. 7308 * DRRS support is determined by the presence of downclock mode (apart 7309 * from VBT setting). 7310 */ 7311 static struct drm_display_mode * 7312 intel_dp_drrs_init(struct intel_connector *connector, 7313 struct drm_display_mode *fixed_mode) 7314 { 7315 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 7316 struct drm_display_mode *downclock_mode = NULL; 7317 7318 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); 7319 7320 if (INTEL_GEN(dev_priv) <= 6) { 7321 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n"); 7322 return NULL; 7323 } 7324 7325 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { 7326 DRM_DEBUG_KMS("VBT doesn't support DRRS\n"); 7327 return NULL; 7328 } 7329 7330 downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode); 7331 if (!downclock_mode) { 7332 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n"); 7333 return NULL; 7334 } 7335 7336 dev_priv->drrs.type = dev_priv->vbt.drrs_type; 7337 7338 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR; 7339 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n"); 7340 return downclock_mode; 7341 } 7342 7343 static bool intel_edp_init_connector(struct intel_dp *intel_dp, 7344 struct intel_connector *intel_connector) 7345 { 7346 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7347 struct drm_device *dev = &dev_priv->drm; 7348 struct drm_connector *connector = &intel_connector->base; 7349 struct drm_display_mode *fixed_mode = NULL; 7350 struct drm_display_mode *downclock_mode = NULL; 7351 bool has_dpcd; 7352 enum pipe pipe = INVALID_PIPE; 7353 intel_wakeref_t wakeref; 7354 struct edid *edid; 7355 7356 if (!intel_dp_is_edp(intel_dp)) 7357 return true; 7358 7359 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work); 7360 7361 /* 7362 * On IBX/CPT we may get here with LVDS already registered. Since the 7363 * driver uses the only internal power sequencer available for both 7364 * eDP and LVDS bail out early in this case to prevent interfering 7365 * with an already powered-on LVDS power sequencer. 
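	 *
	 * The WARN_ON below is there to catch this ever happening on a PCH
	 * other than IBX or CPT.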
7366 */ 7367 if (intel_get_lvds_encoder(dev_priv)) { 7368 WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); 7369 DRM_INFO("LVDS was detected, not registering eDP\n"); 7370 7371 return false; 7372 } 7373 7374 with_pps_lock(intel_dp, wakeref) { 7375 intel_dp_init_panel_power_timestamps(intel_dp); 7376 intel_dp_pps_init(intel_dp); 7377 intel_edp_panel_vdd_sanitize(intel_dp); 7378 } 7379 7380 /* Cache DPCD and EDID for edp. */ 7381 has_dpcd = intel_edp_init_dpcd(intel_dp); 7382 7383 if (!has_dpcd) { 7384 /* if this fails, presume the device is a ghost */ 7385 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 7386 goto out_vdd_off; 7387 } 7388 7389 mutex_lock(&dev->mode_config.mutex); 7390 edid = drm_get_edid(connector, &intel_dp->aux.ddc); 7391 if (edid) { 7392 if (drm_add_edid_modes(connector, edid)) { 7393 drm_connector_update_edid_property(connector, 7394 edid); 7395 } else { 7396 kfree(edid); 7397 edid = ERR_PTR(-EINVAL); 7398 } 7399 } else { 7400 edid = ERR_PTR(-ENOENT); 7401 } 7402 intel_connector->edid = edid; 7403 7404 fixed_mode = intel_panel_edid_fixed_mode(intel_connector); 7405 if (fixed_mode) 7406 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode); 7407 7408 /* fallback to VBT if available for eDP */ 7409 if (!fixed_mode) 7410 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); 7411 mutex_unlock(&dev->mode_config.mutex); 7412 7413 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7414 intel_dp->edp_notifier.notifier_call = edp_notify_handler; 7415 register_reboot_notifier(&intel_dp->edp_notifier); 7416 7417 /* 7418 * Figure out the current pipe for the initial backlight setup. 7419 * If the current pipe isn't valid, try the PPS pipe, and if that 7420 * fails just assume pipe A. 7421 */ 7422 pipe = vlv_active_pipe(intel_dp); 7423 7424 if (pipe != PIPE_A && pipe != PIPE_B) 7425 pipe = intel_dp->pps_pipe; 7426 7427 if (pipe != PIPE_A && pipe != PIPE_B) 7428 pipe = PIPE_A; 7429 7430 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n", 7431 pipe_name(pipe)); 7432 } 7433 7434 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 7435 intel_connector->panel.backlight.power = intel_edp_backlight_power; 7436 intel_panel_setup_backlight(connector, pipe); 7437 7438 if (fixed_mode) 7439 drm_connector_init_panel_orientation_property( 7440 connector, fixed_mode->hdisplay, fixed_mode->vdisplay); 7441 7442 return true; 7443 7444 out_vdd_off: 7445 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 7446 /* 7447 * vdd might still be enabled do to the delayed vdd off. 7448 * Make sure vdd is actually turned off here. 7449 */ 7450 with_pps_lock(intel_dp, wakeref) 7451 edp_panel_vdd_off_sync(intel_dp); 7452 7453 return false; 7454 } 7455 7456 static void intel_dp_modeset_retry_work_fn(struct work_struct *work) 7457 { 7458 struct intel_connector *intel_connector; 7459 struct drm_connector *connector; 7460 7461 intel_connector = container_of(work, typeof(*intel_connector), 7462 modeset_retry_work); 7463 connector = &intel_connector->base; 7464 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 7465 connector->name); 7466 7467 /* Grab the locks before changing connector property*/ 7468 mutex_lock(&connector->dev->mode_config.mutex); 7469 /* Set connector link status to BAD and send a Uevent to notify 7470 * userspace to do a modeset. 
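	 *
	 * The hotplug uevent itself is sent further down, after the
	 * mode_config mutex has been dropped.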
         */
        drm_connector_set_link_status_property(connector,
                                               DRM_MODE_LINK_STATUS_BAD);
        mutex_unlock(&connector->dev->mode_config.mutex);
        /* Send Hotplug uevent so userspace can reprobe */
        drm_kms_helper_hotplug_event(connector->dev);
}

bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                        struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_encoder->port;
        enum phy phy = intel_port_to_phy(dev_priv, port);
        int type;

        /* Initialize the work for modeset in case of link train failure */
        INIT_WORK(&intel_connector->modeset_retry_work,
                  intel_dp_modeset_retry_work_fn);

        if (WARN(intel_dig_port->max_lanes < 1,
                 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
                 intel_dig_port->max_lanes, intel_encoder->base.base.id,
                 intel_encoder->base.name))
                return false;

        intel_dp_set_source_rates(intel_dp);

        intel_dp->reset_link_params = true;
        intel_dp->pps_pipe = INVALID_PIPE;
        intel_dp->active_pipe = INVALID_PIPE;

        /* Preserve the current hw state. */
        intel_dp->DP = I915_READ(intel_dp->output_reg);
        intel_dp->attached_connector = intel_connector;

        if (intel_dp_is_port_edp(dev_priv, port)) {
                /*
                 * Currently we don't support eDP on TypeC ports, although in
                 * theory it could work on TypeC legacy ports.
                 */
                WARN_ON(intel_phy_is_tc(dev_priv, phy));
                type = DRM_MODE_CONNECTOR_eDP;
        } else {
                type = DRM_MODE_CONNECTOR_DisplayPort;
        }

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                intel_dp->active_pipe = vlv_active_pipe(intel_dp);

        /*
         * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
         * for DP the encoder type can be set by the caller to
         * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
         */
        if (type == DRM_MODE_CONNECTOR_eDP)
                intel_encoder->type = INTEL_OUTPUT_EDP;

        /* eDP only on port B and/or C on vlv/chv */
        if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
                    intel_dp_is_edp(intel_dp) &&
                    port != PORT_B && port != PORT_C))
                return false;

        DRM_DEBUG_KMS("Adding %s connector on [ENCODER:%d:%s]\n",
                      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
                      intel_encoder->base.base.id, intel_encoder->base.name);

        drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

        if (!HAS_GMCH(dev_priv))
                connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;

        if (INTEL_GEN(dev_priv) >= 11)
                connector->ycbcr_420_allowed = true;

        intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

        intel_dp_aux_init(intel_dp);

        intel_connector_attach_encoder(intel_connector, intel_encoder);

        if (HAS_DDI(dev_priv))
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;

        /* init MST on ports that can support it */
        intel_dp_mst_encoder_init(intel_dig_port,
                                  intel_connector->base.base.id);

        if (!intel_edp_init_connector(intel_dp, intel_connector)) {
                intel_dp_aux_fini(intel_dp);
                intel_dp_mst_encoder_cleanup(intel_dig_port);
                goto fail;
        }

        intel_dp_add_properties(intel_dp, connector);

        if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
                int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
                if (ret)
                        DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
        }

        /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
         * 0xd. Failure to do so will result in spurious interrupts being
         * generated on the port when a cable is not attached.
         */
        if (IS_G45(dev_priv)) {
                u32 temp = I915_READ(PEG_BAND_GAP_DATA);
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }

        return true;

fail:
        drm_connector_cleanup(connector);

        return false;
}

bool intel_dp_init(struct drm_i915_private *dev_priv,
                   i915_reg_t output_reg,
                   enum port port)
{
        struct intel_digital_port *intel_dig_port;
        struct intel_encoder *intel_encoder;
        struct drm_encoder *encoder;
        struct intel_connector *intel_connector;

        intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
        if (!intel_dig_port)
                return false;

        intel_connector = intel_connector_alloc();
        if (!intel_connector)
                goto err_connector_alloc;

        intel_encoder = &intel_dig_port->base;
        encoder = &intel_encoder->base;

        if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
                             &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
                             "DP %c", port_name(port)))
                goto err_encoder_init;

        intel_encoder->hotplug = intel_dp_hotplug;
        intel_encoder->compute_config = intel_dp_compute_config;
        intel_encoder->get_hw_state = intel_dp_get_hw_state;
        intel_encoder->get_config = intel_dp_get_config;
        intel_encoder->update_pipe = intel_panel_update_backlight;
        intel_encoder->suspend = intel_dp_encoder_suspend;
        if (IS_CHERRYVIEW(dev_priv)) {
                intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
                intel_encoder->pre_enable = chv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->disable = vlv_disable_dp;
                intel_encoder->post_disable = chv_post_disable_dp;
                intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
        } else if (IS_VALLEYVIEW(dev_priv)) {
                intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
                intel_encoder->pre_enable = vlv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->disable = vlv_disable_dp;
                intel_encoder->post_disable = vlv_post_disable_dp;
        } else {
                intel_encoder->pre_enable = g4x_pre_enable_dp;
                intel_encoder->enable = g4x_enable_dp;
                intel_encoder->disable = g4x_disable_dp;
                intel_encoder->post_disable = g4x_post_disable_dp;
        }

        intel_dig_port->dp.output_reg = output_reg;
        intel_dig_port->max_lanes = 4;

        intel_encoder->type = INTEL_OUTPUT_DP;
        intel_encoder->power_domain = intel_port_to_power_domain(port);
        if (IS_CHERRYVIEW(dev_priv)) {
                if (port == PORT_D)
                        intel_encoder->pipe_mask = BIT(PIPE_C);
                else
                        intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
        } else {
                intel_encoder->pipe_mask = ~0;
        }
        intel_encoder->cloneable = 0;
        intel_encoder->port = port;

        intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;

        if (port != PORT_A)
                intel_infoframe_init(intel_dig_port);

        intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
        if (!intel_dp_init_connector(intel_dig_port, intel_connector))
                goto err_init_connector;

        return true;

err_init_connector:
        drm_encoder_cleanup(encoder);
err_encoder_init:
        kfree(intel_connector);
err_connector_alloc:
        kfree(intel_dig_port);
        return false;
}

void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp;

                if (encoder->type != INTEL_OUTPUT_DDI)
                        continue;

                intel_dp = enc_to_intel_dp(encoder);

                if (!intel_dp->can_mst)
                        continue;

                if (intel_dp->is_mst)
                        drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
        }
}

void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp;
                int ret;

                if (encoder->type != INTEL_OUTPUT_DDI)
                        continue;

                intel_dp = enc_to_intel_dp(encoder);

                if (!intel_dp->can_mst)
                        continue;

                ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
                                                     true);
                if (ret) {
                        intel_dp->is_mst = false;
                        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
                                                        false);
                }
        }
}
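
/*
 * Illustrative sketch only (not part of this file): intel_dp_mst_suspend()
 * and intel_dp_mst_resume() are meant to bracket the driver's system
 * suspend/resume of the display, roughly as below. The surrounding call
 * sites are an assumption about the rest of the driver and may differ.
 *
 *      // on suspend: quiesce every MST topology manager first
 *      intel_dp_mst_suspend(dev_priv);
 *      ...power down the display...
 *
 *      // on resume: re-probe topologies; on failure the encoder drops
 *      // back to SST (is_mst is cleared in intel_dp_mst_resume() above)
 *      intel_dp_mst_resume(dev_priv);
 */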