/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations, KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
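/*
 * For reference (added note): each dp_link_dpll table maps a DP port clock
 * in kHz to that platform's DPLL divider preset for it; 162000 corresponds
 * to RBR (1.62 Gbps per lane) and 270000 to HBR (2.7 Gbps per lane).
 * intel_dp_set_clock() further below picks the entry whose clock matches
 * the configured port clock.
 */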
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below only provides the fixed rates, not the variable ones.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/*
 * With a single pipe configuration, the HW is capable of supporting a
 * maximum of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	return dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
{
	intel_dp->sink_rates[0] = 162000;
	intel_dp->num_sink_rates = 1;
}

/* Update sink rates from the DPCD */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, 0,
			     DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}
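/*
 * Illustrative example (added): for the ascending array
 * rates = { 162000, 270000, 540000 } and max_rate = 270000, the scan
 * above hits rates[1] == 270000 on its second iteration and returns 2,
 * so only the first two rates remain usable.
 */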
/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	int source_max = dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(dig_port);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/*
	 * max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account
	 * for the channel encoding that is done in the PHY layer here.
	 */
	return max_link_clock * max_lanes;
}
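/*
 * Illustrative arithmetic (added): a 1920x1080@60 mode with a 148500 kHz
 * pixel clock at 24 bpp needs intel_dp_link_required(148500, 24) = 445500
 * kBytes/sec, while two lanes at HBR2 offer
 * intel_dp_max_data_rate(540000, 2) = 1080000, so the mode easily fits.
 */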
static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !IS_ELKHARTLAKE(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_WARN_ON(&i915->drm,
		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}

static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
		intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;
	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP is in U6.4 format, so multiply by 16; for Gen 11
	 * the fractional part is 0.
	 */
	return bits_per_pixel << 4;
}

static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = max_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		if (valid_dsc_slicecount[i] >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						    false))
			break;
		if (min_slice_count <= valid_dsc_slicecount[i])
			return valid_dsc_slicecount[i];
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
			       const struct drm_display_mode *mode,
			       int target_clock)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	const struct drm_display_info *info = &connector->base.display_info;
	int tmds_clock;

	if (intel_dp->dfp.max_dotclock &&
	    target_clock > intel_dp->dfp.max_dotclock)
		return MODE_CLOCK_HIGH;

	/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
	tmds_clock = target_clock;
	if (drm_mode_is_420_only(info, mode))
		tmds_clock /= 2;

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return MODE_CLOCK_LOW;
	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = dev_priv->max_dotclk_freq;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;
	enum drm_mode_status status;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	status = intel_dp_mode_valid_downstream(intel_connector,
						mode, target_clock);
	if (status != MODE_OK)
		return status;

	return intel_mode_valid_max_plane_size(dev_priv, mode);
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}

#define with_pps_lock(dp, wf) \
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), dig_port->base.base.base.id,
		     dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/*
	 * Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by another port.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->active_pipe != INVALID_PIPE &&
				    intel_dp->active_pipe !=
				    intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
		    intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe),
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been set up during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum port port = dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should always use them.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

/*
 * Reboot notifier handler to shut down panel power so we can guarantee T12
 * timing. Only applicable when the panel PM state is not being tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg = PP_DIVISOR(pipe);
			pp_div = intel_de_read(dev_priv, pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
			intel_de_write(dev_priv, pp_ctrl_reg,
				       PANEL_UNLOCK_REGS);
			drm_msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		drm_WARN(&dev_priv->drm, 1,
			 "eDP powered off while attempting aux channel communication.\n");
		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
	}
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2000 and use that.
	 */
	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that.
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = dev_priv->cdclk.hw.cdclk;
	else
		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}
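/*
 * Illustrative arithmetic (added): with, say, a 200 MHz hrawclk,
 * rawclk_freq is 200000 (kHz), so the divider above becomes
 * DIV_ROUND_CLOSEST(200000, 2000) = 100, which gives the desired
 * ~2 MHz AUX bit clock.
 */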
static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_phy_is_tc(i915, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(dig_port);

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/*
	 * DP AUX is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		drm_msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2:
			 * a 400us delay is required for errors and timeouts.
			 * Timeout errors from the HW already meet this
			 * requirement so skip to the next iteration.
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/*
	 * Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/*
	 * Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these.
	 */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so we return -EBUSY so
	 * the drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}

#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}

static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
{
	/*
	 * If we're trying to send the HDCP Aksv, we need to set the Aksv
	 * select bit to inform the hardware to send the Aksv after our header
	 * since we can't access that data from software.
	 */
	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
	    msg->address == DP_AUX_HDCP_AKSV)
		return DP_AUX_CH_CTL_AUX_AKSV_SELECT;

	return 0;
}

static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(&i915->drm, txsize > 20))
			return -E2BIG;

		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(&i915->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
	case AUX_CH_G:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
	case AUX_CH_G:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
		cpu_latency_qos_remove_request(&intel_dp->pm_qos);
	kfree(intel_dp->aux.name);
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
				       aux_ch_name(dig_port->aux_ch),
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}
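/*
 * Illustrative example (added): snprintf_int_array(str, sizeof(str),
 * (const int []){ 162000, 270000 }, 2) produces "162000, 270000";
 * output is silently truncated once the buffer runs out.
 */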
*/ 1900 1901 if (!drm_debug_enabled(DRM_UT_KMS)) 1902 return; 1903 1904 snprintf_int_array(str, sizeof(str), 1905 intel_dp->source_rates, intel_dp->num_source_rates); 1906 drm_dbg_kms(&i915->drm, "source rates: %s\n", str); 1907 1908 snprintf_int_array(str, sizeof(str), 1909 intel_dp->sink_rates, intel_dp->num_sink_rates); 1910 drm_dbg_kms(&i915->drm, "sink rates: %s\n", str); 1911 1912 snprintf_int_array(str, sizeof(str), 1913 intel_dp->common_rates, intel_dp->num_common_rates); 1914 drm_dbg_kms(&i915->drm, "common rates: %s\n", str); 1915 } 1916 1917 int 1918 intel_dp_max_link_rate(struct intel_dp *intel_dp) 1919 { 1920 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1921 int len; 1922 1923 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); 1924 if (drm_WARN_ON(&i915->drm, len <= 0)) 1925 return 162000; 1926 1927 return intel_dp->common_rates[len - 1]; 1928 } 1929 1930 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) 1931 { 1932 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1933 int i = intel_dp_rate_index(intel_dp->sink_rates, 1934 intel_dp->num_sink_rates, rate); 1935 1936 if (drm_WARN_ON(&i915->drm, i < 0)) 1937 i = 0; 1938 1939 return i; 1940 } 1941 1942 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, 1943 u8 *link_bw, u8 *rate_select) 1944 { 1945 /* eDP 1.4 rate select method. */ 1946 if (intel_dp->use_rate_select) { 1947 *link_bw = 0; 1948 *rate_select = 1949 intel_dp_rate_select(intel_dp, port_clock); 1950 } else { 1951 *link_bw = drm_dp_link_rate_to_bw_code(port_clock); 1952 *rate_select = 0; 1953 } 1954 } 1955 1956 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, 1957 const struct intel_crtc_state *pipe_config) 1958 { 1959 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1960 1961 /* On TGL, FEC is supported on all Pipes */ 1962 if (INTEL_GEN(dev_priv) >= 12) 1963 return true; 1964 1965 if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A) 1966 return true; 1967 1968 return false; 1969 } 1970 1971 static bool intel_dp_supports_fec(struct intel_dp *intel_dp, 1972 const struct intel_crtc_state *pipe_config) 1973 { 1974 return intel_dp_source_supports_fec(intel_dp, pipe_config) && 1975 drm_dp_sink_supports_fec(intel_dp->fec_capable); 1976 } 1977 1978 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, 1979 const struct intel_crtc_state *crtc_state) 1980 { 1981 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 1982 1983 if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable) 1984 return false; 1985 1986 return intel_dsc_source_support(encoder, crtc_state) && 1987 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); 1988 } 1989 1990 static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp, 1991 const struct intel_crtc_state *crtc_state) 1992 { 1993 return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 1994 (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 && 1995 intel_dp->dfp.ycbcr_444_to_420); 1996 } 1997 1998 static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp, 1999 const struct intel_crtc_state *crtc_state, int bpc) 2000 { 2001 int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8; 2002 2003 if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) 2004 clock /= 2; 2005 2006 return clock; 2007 } 2008 2009 static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp, 2010 const struct intel_crtc_state *crtc_state, int bpc) 2011 { 2012 int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, 
bpc); 2013 2014 if (intel_dp->dfp.min_tmds_clock && 2015 tmds_clock < intel_dp->dfp.min_tmds_clock) 2016 return false; 2017 2018 if (intel_dp->dfp.max_tmds_clock && 2019 tmds_clock > intel_dp->dfp.max_tmds_clock) 2020 return false; 2021 2022 return true; 2023 } 2024 2025 static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp, 2026 const struct intel_crtc_state *crtc_state, 2027 int bpc) 2028 { 2029 2030 return intel_hdmi_deep_color_possible(crtc_state, bpc, 2031 intel_dp->has_hdmi_sink, 2032 intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) && 2033 intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc); 2034 } 2035 2036 static int intel_dp_max_bpp(struct intel_dp *intel_dp, 2037 const struct intel_crtc_state *crtc_state) 2038 { 2039 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2040 struct intel_connector *intel_connector = intel_dp->attached_connector; 2041 int bpp, bpc; 2042 2043 bpc = crtc_state->pipe_bpp / 3; 2044 2045 if (intel_dp->dfp.max_bpc) 2046 bpc = min_t(int, bpc, intel_dp->dfp.max_bpc); 2047 2048 if (intel_dp->dfp.min_tmds_clock) { 2049 for (; bpc >= 10; bpc -= 2) { 2050 if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc)) 2051 break; 2052 } 2053 } 2054 2055 bpp = bpc * 3; 2056 if (intel_dp_is_edp(intel_dp)) { 2057 /* Get bpp from vbt only for panels that don't have bpp in edid */ 2058 if (intel_connector->base.display_info.bpc == 0 && 2059 dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) { 2060 drm_dbg_kms(&dev_priv->drm, 2061 "clamping bpp for eDP panel to BIOS-provided %i\n", 2062 dev_priv->vbt.edp.bpp); 2063 bpp = dev_priv->vbt.edp.bpp; 2064 } 2065 } 2066 2067 return bpp; 2068 } 2069 2070 /* Adjust link config limits based on compliance test requests. */ 2071 void 2072 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, 2073 struct intel_crtc_state *pipe_config, 2074 struct link_config_limits *limits) 2075 { 2076 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2077 2078 /* For DP Compliance we override the computed bpp for the pipe */ 2079 if (intel_dp->compliance.test_data.bpc != 0) { 2080 int bpp = 3 * intel_dp->compliance.test_data.bpc; 2081 2082 limits->min_bpp = limits->max_bpp = bpp; 2083 pipe_config->dither_force_disable = bpp == 6 * 3; 2084 2085 drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp); 2086 } 2087 2088 /* Use values requested by Compliance Test Request */ 2089 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 2090 int index; 2091 2092 /* Validate the compliance test data since max values 2093 * might have changed due to link train fallback. 2094 */ 2095 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, 2096 intel_dp->compliance.test_lane_count)) { 2097 index = intel_dp_rate_index(intel_dp->common_rates, 2098 intel_dp->num_common_rates, 2099 intel_dp->compliance.test_link_rate); 2100 if (index >= 0) 2101 limits->min_clock = limits->max_clock = index; 2102 limits->min_lane_count = limits->max_lane_count = 2103 intel_dp->compliance.test_lane_count; 2104 } 2105 } 2106 } 2107 2108 static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp) 2109 { 2110 /* 2111 * The bpp value is assumed to be for the RGB format, while for the 2112 * YCbCr 4:2:0 output format the effective number of bits per pixel 2113 * on the link is half that of RGB.
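 *
 * Worked example (illustrative numbers only): an 8 bpc RGB pipe
 * needs 24 bpp on the link, while the same pipe driving YCbCr 4:2:0
 * only needs 24 / 2 = 12 bpp, since chroma is subsampled by two in
 * both directions.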
2114 */ 2115 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2116 bpp /= 2; 2117 2118 return bpp; 2119 } 2120 2121 /* Optimize link config in order: max bpp, min clock, min lanes */ 2122 static int 2123 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, 2124 struct intel_crtc_state *pipe_config, 2125 const struct link_config_limits *limits) 2126 { 2127 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2128 int bpp, clock, lane_count; 2129 int mode_rate, link_clock, link_avail; 2130 2131 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { 2132 int output_bpp = intel_dp_output_bpp(pipe_config, bpp); 2133 2134 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 2135 output_bpp); 2136 2137 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { 2138 for (lane_count = limits->min_lane_count; 2139 lane_count <= limits->max_lane_count; 2140 lane_count <<= 1) { 2141 link_clock = intel_dp->common_rates[clock]; 2142 link_avail = intel_dp_max_data_rate(link_clock, 2143 lane_count); 2144 2145 if (mode_rate <= link_avail) { 2146 pipe_config->lane_count = lane_count; 2147 pipe_config->pipe_bpp = bpp; 2148 pipe_config->port_clock = link_clock; 2149 2150 return 0; 2151 } 2152 } 2153 } 2154 } 2155 2156 return -EINVAL; 2157 } 2158 2159 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) 2160 { 2161 int i, num_bpc; 2162 u8 dsc_bpc[3] = {0}; 2163 2164 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, 2165 dsc_bpc); 2166 for (i = 0; i < num_bpc; i++) { 2167 if (dsc_max_bpc >= dsc_bpc[i]) 2168 return dsc_bpc[i] * 3; 2169 } 2170 2171 return 0; 2172 } 2173 2174 #define DSC_SUPPORTED_VERSION_MIN 1 2175 2176 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, 2177 struct intel_crtc_state *crtc_state) 2178 { 2179 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2180 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2181 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 2182 u8 line_buf_depth; 2183 int ret; 2184 2185 ret = intel_dsc_compute_params(encoder, crtc_state); 2186 if (ret) 2187 return ret; 2188 2189 /* 2190 * Slice Height of 8 works for all currently available panels. So start 2191 * with that if pic_height is an integral multiple of 8. Eventually add 2192 * logic to try multiple slice heights. 2193 */ 2194 if (vdsc_cfg->pic_height % 8 == 0) 2195 vdsc_cfg->slice_height = 8; 2196 else if (vdsc_cfg->pic_height % 4 == 0) 2197 vdsc_cfg->slice_height = 4; 2198 else 2199 vdsc_cfg->slice_height = 2; 2200 2201 vdsc_cfg->dsc_version_major = 2202 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2203 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 2204 vdsc_cfg->dsc_version_minor = 2205 min(DSC_SUPPORTED_VERSION_MIN, 2206 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2207 DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); 2208 2209 vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 2210 DP_DSC_RGB; 2211 2212 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); 2213 if (!line_buf_depth) { 2214 drm_dbg_kms(&i915->drm, 2215 "DSC Sink Line Buffer Depth invalid\n"); 2216 return -EINVAL; 2217 } 2218 2219 if (vdsc_cfg->dsc_version_minor == 2) 2220 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? 2221 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; 2222 else 2223 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? 
2224 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; 2225 2226 vdsc_cfg->block_pred_enable = 2227 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 2228 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 2229 2230 return drm_dsc_compute_rc_parameters(vdsc_cfg); 2231 } 2232 2233 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, 2234 struct intel_crtc_state *pipe_config, 2235 struct drm_connector_state *conn_state, 2236 struct link_config_limits *limits) 2237 { 2238 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2239 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 2240 const struct drm_display_mode *adjusted_mode = 2241 &pipe_config->hw.adjusted_mode; 2242 u8 dsc_max_bpc; 2243 int pipe_bpp; 2244 int ret; 2245 2246 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && 2247 intel_dp_supports_fec(intel_dp, pipe_config); 2248 2249 if (!intel_dp_supports_dsc(intel_dp, pipe_config)) 2250 return -EINVAL; 2251 2252 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 2253 if (INTEL_GEN(dev_priv) >= 12) 2254 dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc); 2255 else 2256 dsc_max_bpc = min_t(u8, 10, 2257 conn_state->max_requested_bpc); 2258 2259 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); 2260 2261 /* Min Input BPC for ICL+ is 8 */ 2262 if (pipe_bpp < 8 * 3) { 2263 drm_dbg_kms(&dev_priv->drm, 2264 "No DSC support for less than 8bpc\n"); 2265 return -EINVAL; 2266 } 2267 2268 /* 2269 * For now enable DSC for max bpp, max link rate, max lane count. 2270 * Optimize this later for the minimum possible link rate/lane count 2271 * with DSC enabled for the requested mode. 2272 */ 2273 pipe_config->pipe_bpp = pipe_bpp; 2274 pipe_config->port_clock = intel_dp->common_rates[limits->max_clock]; 2275 pipe_config->lane_count = limits->max_lane_count; 2276 2277 if (intel_dp_is_edp(intel_dp)) { 2278 pipe_config->dsc.compressed_bpp = 2279 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, 2280 pipe_config->pipe_bpp); 2281 pipe_config->dsc.slice_count = 2282 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 2283 true); 2284 } else { 2285 u16 dsc_max_output_bpp; 2286 u8 dsc_dp_slice_count; 2287 2288 dsc_max_output_bpp = 2289 intel_dp_dsc_get_output_bpp(dev_priv, 2290 pipe_config->port_clock, 2291 pipe_config->lane_count, 2292 adjusted_mode->crtc_clock, 2293 adjusted_mode->crtc_hdisplay); 2294 dsc_dp_slice_count = 2295 intel_dp_dsc_get_slice_count(intel_dp, 2296 adjusted_mode->crtc_clock, 2297 adjusted_mode->crtc_hdisplay); 2298 if (!dsc_max_output_bpp || !dsc_dp_slice_count) { 2299 drm_dbg_kms(&dev_priv->drm, 2300 "Compressed BPP/Slice Count not supported\n"); 2301 return -EINVAL; 2302 } 2303 pipe_config->dsc.compressed_bpp = min_t(u16, 2304 dsc_max_output_bpp >> 4, 2305 pipe_config->pipe_bpp); 2306 pipe_config->dsc.slice_count = dsc_dp_slice_count; 2307 } 2308 /* 2309 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate 2310 * is greater than the maximum Cdclock and if slice count is even 2311 * then we need to use 2 VDSC instances. 
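 *
 * For example (hypothetical numbers): a mode with a 1200 MHz pixel
 * clock on a part whose max CDCLK is 650 MHz cannot be fed through a
 * single VDSC engine at 1 pixel/clock, so with a slice count of at
 * least 2 the slices are distributed across two engines instead.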
2312 */ 2313 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) { 2314 if (pipe_config->dsc.slice_count > 1) { 2315 pipe_config->dsc.dsc_split = true; 2316 } else { 2317 drm_dbg_kms(&dev_priv->drm, 2318 "Cannot split stream to use 2 VDSC instances\n"); 2319 return -EINVAL; 2320 } 2321 } 2322 2323 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); 2324 if (ret < 0) { 2325 drm_dbg_kms(&dev_priv->drm, 2326 "Cannot compute valid DSC parameters for Input Bpp = %d " 2327 "Compressed BPP = %d\n", 2328 pipe_config->pipe_bpp, 2329 pipe_config->dsc.compressed_bpp); 2330 return ret; 2331 } 2332 2333 pipe_config->dsc.compression_enable = true; 2334 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " 2335 "Compressed Bpp = %d Slice Count = %d\n", 2336 pipe_config->pipe_bpp, 2337 pipe_config->dsc.compressed_bpp, 2338 pipe_config->dsc.slice_count); 2339 2340 return 0; 2341 } 2342 2343 int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state) 2344 { 2345 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) 2346 return 6 * 3; 2347 else 2348 return 8 * 3; 2349 } 2350 2351 static int 2352 intel_dp_compute_link_config(struct intel_encoder *encoder, 2353 struct intel_crtc_state *pipe_config, 2354 struct drm_connector_state *conn_state) 2355 { 2356 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2357 const struct drm_display_mode *adjusted_mode = 2358 &pipe_config->hw.adjusted_mode; 2359 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2360 struct link_config_limits limits; 2361 int common_len; 2362 int ret; 2363 2364 common_len = intel_dp_common_len_rate_limit(intel_dp, 2365 intel_dp->max_link_rate); 2366 2367 /* No common link rates between source and sink */ 2368 drm_WARN_ON(encoder->base.dev, common_len <= 0); 2369 2370 limits.min_clock = 0; 2371 limits.max_clock = common_len - 1; 2372 2373 limits.min_lane_count = 1; 2374 limits.max_lane_count = intel_dp_max_lane_count(intel_dp); 2375 2376 limits.min_bpp = intel_dp_min_bpp(pipe_config); 2377 limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config); 2378 2379 if (intel_dp_is_edp(intel_dp)) { 2380 /* 2381 * Use the maximum clock and number of lanes the eDP panel 2382 * advertizes being capable of. The panels are generally 2383 * designed to support only a single clock and lane 2384 * configuration, and typically these values correspond to the 2385 * native resolution of the panel. 2386 */ 2387 limits.min_lane_count = limits.max_lane_count; 2388 limits.min_clock = limits.max_clock; 2389 } 2390 2391 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); 2392 2393 drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i " 2394 "max rate %d max bpp %d pixel clock %iKHz\n", 2395 limits.max_lane_count, 2396 intel_dp->common_rates[limits.max_clock], 2397 limits.max_bpp, adjusted_mode->crtc_clock); 2398 2399 /* 2400 * Optimize for slow and wide. This is the place to add alternative 2401 * optimization policy. 
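 *
 * Concretely, intel_dp_compute_link_config_wide() walks bpp from
 * max to min in steps of 6 (2 bits per component), and for each bpp
 * tries link rates from lowest to highest and lane counts 1 -> 2 -> 4,
 * returning the first combination whose available bandwidth fits the
 * mode's required data rate.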
2402 */ 2403 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); 2404 2405 /* enable compression if the mode doesn't fit available BW */ 2406 drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en); 2407 if (ret || intel_dp->force_dsc_en) { 2408 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, 2409 conn_state, &limits); 2410 if (ret < 0) 2411 return ret; 2412 } 2413 2414 if (pipe_config->dsc.compression_enable) { 2415 drm_dbg_kms(&i915->drm, 2416 "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", 2417 pipe_config->lane_count, pipe_config->port_clock, 2418 pipe_config->pipe_bpp, 2419 pipe_config->dsc.compressed_bpp); 2420 2421 drm_dbg_kms(&i915->drm, 2422 "DP link rate required %i available %i\n", 2423 intel_dp_link_required(adjusted_mode->crtc_clock, 2424 pipe_config->dsc.compressed_bpp), 2425 intel_dp_max_data_rate(pipe_config->port_clock, 2426 pipe_config->lane_count)); 2427 } else { 2428 drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n", 2429 pipe_config->lane_count, pipe_config->port_clock, 2430 pipe_config->pipe_bpp); 2431 2432 drm_dbg_kms(&i915->drm, 2433 "DP link rate required %i available %i\n", 2434 intel_dp_link_required(adjusted_mode->crtc_clock, 2435 pipe_config->pipe_bpp), 2436 intel_dp_max_data_rate(pipe_config->port_clock, 2437 pipe_config->lane_count)); 2438 } 2439 return 0; 2440 } 2441 2442 static int 2443 intel_dp_ycbcr420_config(struct intel_dp *intel_dp, 2444 struct intel_crtc_state *crtc_state, 2445 const struct drm_connector_state *conn_state) 2446 { 2447 struct drm_connector *connector = conn_state->connector; 2448 const struct drm_display_info *info = &connector->display_info; 2449 const struct drm_display_mode *adjusted_mode = 2450 &crtc_state->hw.adjusted_mode; 2451 2452 if (!connector->ycbcr_420_allowed) 2453 return 0; 2454 2455 if (!drm_mode_is_420_only(info, adjusted_mode)) 2456 return 0; 2457 2458 if (intel_dp->dfp.ycbcr_444_to_420) { 2459 crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 2460 return 0; 2461 } 2462 2463 crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; 2464 2465 return intel_pch_panel_fitting(crtc_state, conn_state); 2466 } 2467 2468 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, 2469 const struct drm_connector_state *conn_state) 2470 { 2471 const struct intel_digital_connector_state *intel_conn_state = 2472 to_intel_digital_connector_state(conn_state); 2473 const struct drm_display_mode *adjusted_mode = 2474 &crtc_state->hw.adjusted_mode; 2475 2476 /* 2477 * Our YCbCr output is always limited range. 2478 * crtc_state->limited_color_range only applies to RGB, 2479 * and it must never be set for YCbCr or we risk setting 2480 * some conflicting bits in PIPECONF which will mess up 2481 * the colors on the monitor. 
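 *
 * Example: with broadcast_rgb left at its "Automatic" default, a CEA
 * mode such as 1920x1080@60 resolves to limited range RGB (16-235),
 * while typical VESA/panel timings resolve to full range; 6 bpc
 * (18 bpp) pipes are kept at full range regardless.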
2482 */ 2483 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 2484 return false; 2485 2486 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 2487 /* 2488 * See: 2489 * CEA-861-E - 5.1 Default Encoding Parameters 2490 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 2491 */ 2492 return crtc_state->pipe_bpp != 18 && 2493 drm_default_rgb_quant_range(adjusted_mode) == 2494 HDMI_QUANTIZATION_RANGE_LIMITED; 2495 } else { 2496 return intel_conn_state->broadcast_rgb == 2497 INTEL_BROADCAST_RGB_LIMITED; 2498 } 2499 } 2500 2501 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, 2502 enum port port) 2503 { 2504 if (IS_G4X(dev_priv)) 2505 return false; 2506 if (INTEL_GEN(dev_priv) < 12 && port == PORT_A) 2507 return false; 2508 2509 return true; 2510 } 2511 2512 static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state, 2513 const struct drm_connector_state *conn_state, 2514 struct drm_dp_vsc_sdp *vsc) 2515 { 2516 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2517 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2518 2519 /* 2520 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2521 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ 2522 * Colorimetry Format indication. 2523 */ 2524 vsc->revision = 0x5; 2525 vsc->length = 0x13; 2526 2527 /* DP 1.4a spec, Table 2-120 */ 2528 switch (crtc_state->output_format) { 2529 case INTEL_OUTPUT_FORMAT_YCBCR444: 2530 vsc->pixelformat = DP_PIXELFORMAT_YUV444; 2531 break; 2532 case INTEL_OUTPUT_FORMAT_YCBCR420: 2533 vsc->pixelformat = DP_PIXELFORMAT_YUV420; 2534 break; 2535 case INTEL_OUTPUT_FORMAT_RGB: 2536 default: 2537 vsc->pixelformat = DP_PIXELFORMAT_RGB; 2538 } 2539 2540 switch (conn_state->colorspace) { 2541 case DRM_MODE_COLORIMETRY_BT709_YCC: 2542 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 2543 break; 2544 case DRM_MODE_COLORIMETRY_XVYCC_601: 2545 vsc->colorimetry = DP_COLORIMETRY_XVYCC_601; 2546 break; 2547 case DRM_MODE_COLORIMETRY_XVYCC_709: 2548 vsc->colorimetry = DP_COLORIMETRY_XVYCC_709; 2549 break; 2550 case DRM_MODE_COLORIMETRY_SYCC_601: 2551 vsc->colorimetry = DP_COLORIMETRY_SYCC_601; 2552 break; 2553 case DRM_MODE_COLORIMETRY_OPYCC_601: 2554 vsc->colorimetry = DP_COLORIMETRY_OPYCC_601; 2555 break; 2556 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 2557 vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC; 2558 break; 2559 case DRM_MODE_COLORIMETRY_BT2020_RGB: 2560 vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB; 2561 break; 2562 case DRM_MODE_COLORIMETRY_BT2020_YCC: 2563 vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC; 2564 break; 2565 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: 2566 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: 2567 vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB; 2568 break; 2569 default: 2570 /* 2571 * RGB->YCBCR color conversion uses the BT.709 2572 * color space. 
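 *
 * i.e. when the pipe itself converts RGB to 4:2:0 (no explicit
 * Colorimetry property set), the VSC SDP advertises BT.709 YCC so
 * the sink applies matching decode coefficients; plain RGB output
 * falls back to DP_COLORIMETRY_DEFAULT.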
2573 */ 2574 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2575 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 2576 else 2577 vsc->colorimetry = DP_COLORIMETRY_DEFAULT; 2578 break; 2579 } 2580 2581 vsc->bpc = crtc_state->pipe_bpp / 3; 2582 2583 /* only RGB pixelformat supports 6 bpc */ 2584 drm_WARN_ON(&dev_priv->drm, 2585 vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB); 2586 2587 /* all YCbCr are always limited range */ 2588 vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA; 2589 vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; 2590 } 2591 2592 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, 2593 struct intel_crtc_state *crtc_state, 2594 const struct drm_connector_state *conn_state) 2595 { 2596 struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc; 2597 2598 /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */ 2599 if (crtc_state->has_psr) 2600 return; 2601 2602 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) 2603 return; 2604 2605 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 2606 vsc->sdp_type = DP_SDP_VSC; 2607 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 2608 &crtc_state->infoframes.vsc); 2609 } 2610 2611 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, 2612 const struct intel_crtc_state *crtc_state, 2613 const struct drm_connector_state *conn_state, 2614 struct drm_dp_vsc_sdp *vsc) 2615 { 2616 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2617 2618 vsc->sdp_type = DP_SDP_VSC; 2619 2620 if (dev_priv->psr.psr2_enabled) { 2621 if (dev_priv->psr.colorimetry_support && 2622 intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { 2623 /* [PSR2, +Colorimetry] */ 2624 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 2625 vsc); 2626 } else { 2627 /* 2628 * [PSR2, -Colorimetry] 2629 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 2630 * 3D stereo + PSR/PSR2 + Y-coordinate. 2631 */ 2632 vsc->revision = 0x4; 2633 vsc->length = 0xe; 2634 } 2635 } else { 2636 /* 2637 * [PSR1] 2638 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2639 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or 2640 * higher). 
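 *
 * Summary of the VSC SDP headers this function and
 * intel_dp_compute_vsc_colorimetry() produce:
 *   PSR1:                   revision 0x2, length 0x8
 *   PSR2 w/o colorimetry:   revision 0x4, length 0xe
 *   PSR2 with colorimetry:  revision 0x5, length 0x13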
2641 */ 2642 vsc->revision = 0x2; 2643 vsc->length = 0x8; 2644 } 2645 } 2646 2647 static void 2648 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, 2649 struct intel_crtc_state *crtc_state, 2650 const struct drm_connector_state *conn_state) 2651 { 2652 int ret; 2653 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2654 struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm; 2655 2656 if (!conn_state->hdr_output_metadata) 2657 return; 2658 2659 ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state); 2660 2661 if (ret) { 2662 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n"); 2663 return; 2664 } 2665 2666 crtc_state->infoframes.enable |= 2667 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); 2668 } 2669 2670 static void 2671 intel_dp_drrs_compute_config(struct intel_dp *intel_dp, 2672 struct intel_crtc_state *pipe_config, 2673 int output_bpp, bool constant_n) 2674 { 2675 struct intel_connector *intel_connector = intel_dp->attached_connector; 2676 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2677 2678 /* 2679 * DRRS and PSR can't be enabled together, so PSR is given preference 2680 * as it allows more power savings by shutting the display down 2681 * completely. To guarantee this, intel_dp_drrs_compute_config() must 2682 * be called after intel_psr_compute_config(). 2683 */ 2684 if (pipe_config->has_psr) 2685 return; 2686 2687 if (!intel_connector->panel.downclock_mode || 2688 dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) 2689 return; 2690 2691 pipe_config->has_drrs = true; 2692 intel_link_compute_m_n(output_bpp, pipe_config->lane_count, 2693 intel_connector->panel.downclock_mode->clock, 2694 pipe_config->port_clock, &pipe_config->dp_m2_n2, 2695 constant_n, pipe_config->fec_enable); 2696 } 2697 2698 int 2699 intel_dp_compute_config(struct intel_encoder *encoder, 2700 struct intel_crtc_state *pipe_config, 2701 struct drm_connector_state *conn_state) 2702 { 2703 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2704 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2705 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2706 struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); 2707 enum port port = encoder->port; 2708 struct intel_connector *intel_connector = intel_dp->attached_connector; 2709 struct intel_digital_connector_state *intel_conn_state = 2710 to_intel_digital_connector_state(conn_state); 2711 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0, 2712 DP_DPCD_QUIRK_CONSTANT_N); 2713 int ret = 0, output_bpp; 2714 2715 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) 2716 pipe_config->has_pch_encoder = true; 2717 2718 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 2719 2720 if (lspcon->active) 2721 lspcon_ycbcr420_config(&intel_connector->base, pipe_config); 2722 else 2723 ret = intel_dp_ycbcr420_config(intel_dp, pipe_config, 2724 conn_state); 2725 if (ret) 2726 return ret; 2727 2728 if (!intel_dp_port_has_audio(dev_priv, port)) 2729 pipe_config->has_audio = false; 2730 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 2731 pipe_config->has_audio = intel_dp->has_audio; 2732 else 2733 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; 2734 2735 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 2736 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 2737 adjusted_mode); 2738 2739 if (HAS_GMCH(dev_priv)) 2740 ret =
intel_gmch_panel_fitting(pipe_config, conn_state); 2741 else 2742 ret = intel_pch_panel_fitting(pipe_config, conn_state); 2743 if (ret) 2744 return ret; 2745 } 2746 2747 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 2748 return -EINVAL; 2749 2750 if (HAS_GMCH(dev_priv) && 2751 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 2752 return -EINVAL; 2753 2754 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 2755 return -EINVAL; 2756 2757 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay)) 2758 return -EINVAL; 2759 2760 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); 2761 if (ret < 0) 2762 return ret; 2763 2764 pipe_config->limited_color_range = 2765 intel_dp_limited_color_range(pipe_config, conn_state); 2766 2767 if (pipe_config->dsc.compression_enable) 2768 output_bpp = pipe_config->dsc.compressed_bpp; 2769 else 2770 output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp); 2771 2772 intel_link_compute_m_n(output_bpp, 2773 pipe_config->lane_count, 2774 adjusted_mode->crtc_clock, 2775 pipe_config->port_clock, 2776 &pipe_config->dp_m_n, 2777 constant_n, pipe_config->fec_enable); 2778 2779 if (!HAS_DDI(dev_priv)) 2780 intel_dp_set_clock(encoder, pipe_config); 2781 2782 intel_psr_compute_config(intel_dp, pipe_config); 2783 intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp, 2784 constant_n); 2785 intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); 2786 intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); 2787 2788 return 0; 2789 } 2790 2791 void intel_dp_set_link_params(struct intel_dp *intel_dp, 2792 int link_rate, u8 lane_count, 2793 bool link_mst) 2794 { 2795 intel_dp->link_trained = false; 2796 intel_dp->link_rate = link_rate; 2797 intel_dp->lane_count = lane_count; 2798 intel_dp->link_mst = link_mst; 2799 } 2800 2801 static void intel_dp_prepare(struct intel_encoder *encoder, 2802 const struct intel_crtc_state *pipe_config) 2803 { 2804 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2805 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2806 enum port port = encoder->port; 2807 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 2808 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2809 2810 intel_dp_set_link_params(intel_dp, pipe_config->port_clock, 2811 pipe_config->lane_count, 2812 intel_crtc_has_type(pipe_config, 2813 INTEL_OUTPUT_DP_MST)); 2814 2815 /* 2816 * There are four kinds of DP registers: 2817 * 2818 * IBX PCH 2819 * SNB CPU 2820 * IVB CPU 2821 * CPT PCH 2822 * 2823 * IBX PCH and CPU are the same for almost everything, 2824 * except that the CPU DP PLL is configured in this 2825 * register 2826 * 2827 * CPT PCH is quite different, having many bits moved 2828 * to the TRANS_DP_CTL register instead. That 2829 * configuration happens (oddly) in ilk_pch_enable 2830 */ 2831 2832 /* Preserve the BIOS-computed detected bit. This is 2833 * supposed to be read-only. 
2834 */ 2835 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED; 2836 2837 /* Handle DP bits in common between all three register formats */ 2838 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 2839 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count); 2840 2841 /* Split out the IBX/CPU vs CPT settings */ 2842 2843 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 2844 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2845 intel_dp->DP |= DP_SYNC_HS_HIGH; 2846 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2847 intel_dp->DP |= DP_SYNC_VS_HIGH; 2848 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2849 2850 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2851 intel_dp->DP |= DP_ENHANCED_FRAMING; 2852 2853 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe); 2854 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 2855 u32 trans_dp; 2856 2857 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2858 2859 trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe)); 2860 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2861 trans_dp |= TRANS_DP_ENH_FRAMING; 2862 else 2863 trans_dp &= ~TRANS_DP_ENH_FRAMING; 2864 intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp); 2865 } else { 2866 if (IS_G4X(dev_priv) && pipe_config->limited_color_range) 2867 intel_dp->DP |= DP_COLOR_RANGE_16_235; 2868 2869 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2870 intel_dp->DP |= DP_SYNC_HS_HIGH; 2871 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2872 intel_dp->DP |= DP_SYNC_VS_HIGH; 2873 intel_dp->DP |= DP_LINK_TRAIN_OFF; 2874 2875 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2876 intel_dp->DP |= DP_ENHANCED_FRAMING; 2877 2878 if (IS_CHERRYVIEW(dev_priv)) 2879 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe); 2880 else 2881 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe); 2882 } 2883 } 2884 2885 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 2886 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 2887 2888 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0) 2889 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0) 2890 2891 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 2892 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 2893 2894 static void intel_pps_verify_state(struct intel_dp *intel_dp); 2895 2896 static void wait_panel_status(struct intel_dp *intel_dp, 2897 u32 mask, 2898 u32 value) 2899 { 2900 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2901 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2902 2903 lockdep_assert_held(&dev_priv->pps_mutex); 2904 2905 intel_pps_verify_state(intel_dp); 2906 2907 pp_stat_reg = _pp_stat_reg(intel_dp); 2908 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2909 2910 drm_dbg_kms(&dev_priv->drm, 2911 "mask %08x value %08x status %08x control %08x\n", 2912 mask, value, 2913 intel_de_read(dev_priv, pp_stat_reg), 2914 intel_de_read(dev_priv, pp_ctrl_reg)); 2915 2916 if (intel_de_wait_for_register(dev_priv, pp_stat_reg, 2917 mask, value, 5000)) 2918 drm_err(&dev_priv->drm, 2919 "Panel status timeout: status %08x control %08x\n", 2920 intel_de_read(dev_priv, pp_stat_reg), 2921 intel_de_read(dev_priv, pp_ctrl_reg)); 2922 2923 drm_dbg_kms(&dev_priv->drm, "Wait complete\n"); 2924 } 2925 2926 static void wait_panel_on(struct intel_dp *intel_dp) 2927 { 2928 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2929 2930 drm_dbg_kms(&i915->drm, "Wait for panel power on\n"); 2931 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 2932 } 2933 
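/*
 * Worked example for wait_panel_power_cycle() below (illustrative
 * numbers, not from any particular VBT): with a panel_power_cycle_delay
 * (T11+T12) of 500 ms and a panel that was powered off 120 ms ago, the
 * code sleeps for the remaining ~380 ms before polling PP_STATUS for
 * the off/idle state.
 */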
2934 static void wait_panel_off(struct intel_dp *intel_dp) 2935 { 2936 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2937 2938 drm_dbg_kms(&i915->drm, "Wait for panel power off time\n"); 2939 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 2940 } 2941 2942 static void wait_panel_power_cycle(struct intel_dp *intel_dp) 2943 { 2944 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2945 ktime_t panel_power_on_time; 2946 s64 panel_power_off_duration; 2947 2948 drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n"); 2949 2950 /* take the difference of the current time and the panel power off time 2951 * and then make the panel wait for t11_t12 if needed. */ 2952 panel_power_on_time = ktime_get_boottime(); 2953 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time); 2954 2955 /* When we disable the VDD override bit last we have to do the manual 2956 * wait. */ 2957 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay) 2958 wait_remaining_ms_from_jiffies(jiffies, 2959 intel_dp->panel_power_cycle_delay - panel_power_off_duration); 2960 2961 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 2962 } 2963 2964 static void wait_backlight_on(struct intel_dp *intel_dp) 2965 { 2966 wait_remaining_ms_from_jiffies(intel_dp->last_power_on, 2967 intel_dp->backlight_on_delay); 2968 } 2969 2970 static void edp_wait_backlight_off(struct intel_dp *intel_dp) 2971 { 2972 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off, 2973 intel_dp->backlight_off_delay); 2974 } 2975 2976 /* Read the current pp_control value, unlocking the register if it 2977 * is locked 2978 */ 2979 2980 static u32 ilk_get_pp_control(struct intel_dp *intel_dp) 2981 { 2982 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2983 u32 control; 2984 2985 lockdep_assert_held(&dev_priv->pps_mutex); 2986 2987 control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)); 2988 if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) && 2989 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) { 2990 control &= ~PANEL_UNLOCK_MASK; 2991 control |= PANEL_UNLOCK_REGS; 2992 } 2993 return control; 2994 } 2995 2996 /* 2997 * Must be paired with edp_panel_vdd_off(). 2998 * Must hold pps_mutex around the whole on/off sequence. 2999 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
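 *
 * Minimal usage sketch (hypothetical caller, pps_mutex already held):
 *
 *	vdd = edp_panel_vdd_on(intel_dp);
 *	... AUX transfers or PPS register accesses ...
 *	edp_panel_vdd_off(intel_dp, false);
 *
 * where the "false" defers the actual VDD disable to panel_vdd_work
 * rather than dropping it synchronously.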
3000 */ 3001 static bool edp_panel_vdd_on(struct intel_dp *intel_dp) 3002 { 3003 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3004 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3005 u32 pp; 3006 i915_reg_t pp_stat_reg, pp_ctrl_reg; 3007 bool need_to_disable = !intel_dp->want_panel_vdd; 3008 3009 lockdep_assert_held(&dev_priv->pps_mutex); 3010 3011 if (!intel_dp_is_edp(intel_dp)) 3012 return false; 3013 3014 cancel_delayed_work(&intel_dp->panel_vdd_work); 3015 intel_dp->want_panel_vdd = true; 3016 3017 if (edp_have_panel_vdd(intel_dp)) 3018 return need_to_disable; 3019 3020 intel_display_power_get(dev_priv, 3021 intel_aux_power_domain(dig_port)); 3022 3023 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n", 3024 dig_port->base.base.base.id, 3025 dig_port->base.base.name); 3026 3027 if (!edp_have_panel_power(intel_dp)) 3028 wait_panel_power_cycle(intel_dp); 3029 3030 pp = ilk_get_pp_control(intel_dp); 3031 pp |= EDP_FORCE_VDD; 3032 3033 pp_stat_reg = _pp_stat_reg(intel_dp); 3034 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3035 3036 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3037 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3038 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 3039 intel_de_read(dev_priv, pp_stat_reg), 3040 intel_de_read(dev_priv, pp_ctrl_reg)); 3041 /* 3042 * If the panel wasn't on, delay before accessing aux channel 3043 */ 3044 if (!edp_have_panel_power(intel_dp)) { 3045 drm_dbg_kms(&dev_priv->drm, 3046 "[ENCODER:%d:%s] panel power wasn't enabled\n", 3047 dig_port->base.base.base.id, 3048 dig_port->base.base.name); 3049 drm_msleep(intel_dp->panel_power_up_delay); 3050 } 3051 3052 return need_to_disable; 3053 } 3054 3055 /* 3056 * Must be paired with intel_edp_panel_vdd_off() or 3057 * intel_edp_panel_off(). 3058 * Nested calls to these functions are not allowed since 3059 * we drop the lock. Caller must use some higher level 3060 * locking to prevent nested calls from other threads. 
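 *
 * For example, the disable path in intel_disable_dp() takes VDD with
 * intel_edp_panel_vdd_on() and never calls an _off() variant: the
 * reference is consumed by intel_edp_panel_off(), which clears
 * want_panel_vdd and drops the AUX power domain reference itself.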
3061 */ 3062 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) 3063 { 3064 intel_wakeref_t wakeref; 3065 bool vdd; 3066 3067 if (!intel_dp_is_edp(intel_dp)) 3068 return; 3069 3070 vdd = false; 3071 with_pps_lock(intel_dp, wakeref) 3072 vdd = edp_panel_vdd_on(intel_dp); 3073 I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n", 3074 dp_to_dig_port(intel_dp)->base.base.base.id, 3075 dp_to_dig_port(intel_dp)->base.base.name); 3076 } 3077 3078 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) 3079 { 3080 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3081 struct intel_digital_port *dig_port = 3082 dp_to_dig_port(intel_dp); 3083 u32 pp; 3084 i915_reg_t pp_stat_reg, pp_ctrl_reg; 3085 3086 lockdep_assert_held(&dev_priv->pps_mutex); 3087 3088 drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd); 3089 3090 if (!edp_have_panel_vdd(intel_dp)) 3091 return; 3092 3093 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n", 3094 dig_port->base.base.base.id, 3095 dig_port->base.base.name); 3096 3097 pp = ilk_get_pp_control(intel_dp); 3098 pp &= ~EDP_FORCE_VDD; 3099 3100 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3101 pp_stat_reg = _pp_stat_reg(intel_dp); 3102 3103 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3104 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3105 3106 /* Make sure sequencer is idle before allowing subsequent activity */ 3107 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 3108 intel_de_read(dev_priv, pp_stat_reg), 3109 intel_de_read(dev_priv, pp_ctrl_reg)); 3110 3111 if ((pp & PANEL_POWER_ON) == 0) 3112 intel_dp->panel_power_off_time = ktime_get_boottime(); 3113 3114 intel_display_power_put_unchecked(dev_priv, 3115 intel_aux_power_domain(dig_port)); 3116 } 3117 3118 static void edp_panel_vdd_work(struct work_struct *__work) 3119 { 3120 struct intel_dp *intel_dp = 3121 container_of(to_delayed_work(__work), 3122 struct intel_dp, panel_vdd_work); 3123 intel_wakeref_t wakeref; 3124 3125 with_pps_lock(intel_dp, wakeref) { 3126 if (!intel_dp->want_panel_vdd) 3127 edp_panel_vdd_off_sync(intel_dp); 3128 } 3129 } 3130 3131 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) 3132 { 3133 unsigned long delay; 3134 3135 /* 3136 * Queue the timer to fire a long time from now (relative to the power 3137 * down delay) to keep the panel power up across a sequence of 3138 * operations. 3139 */ 3140 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5); 3141 schedule_delayed_work(&intel_dp->panel_vdd_work, delay); 3142 } 3143 3144 /* 3145 * Must be paired with edp_panel_vdd_on(). 3146 * Must hold pps_mutex around the whole on/off sequence. 3147 * Can be nested with intel_edp_panel_vdd_{on,off}() calls. 
3148 */ 3149 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 3150 { 3151 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3152 3153 lockdep_assert_held(&dev_priv->pps_mutex); 3154 3155 if (!intel_dp_is_edp(intel_dp)) 3156 return; 3157 3158 I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on", 3159 dp_to_dig_port(intel_dp)->base.base.base.id, 3160 dp_to_dig_port(intel_dp)->base.base.name); 3161 3162 intel_dp->want_panel_vdd = false; 3163 3164 if (sync) 3165 edp_panel_vdd_off_sync(intel_dp); 3166 else 3167 edp_panel_vdd_schedule_off(intel_dp); 3168 } 3169 3170 static void edp_panel_on(struct intel_dp *intel_dp) 3171 { 3172 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3173 u32 pp; 3174 i915_reg_t pp_ctrl_reg; 3175 3176 lockdep_assert_held(&dev_priv->pps_mutex); 3177 3178 if (!intel_dp_is_edp(intel_dp)) 3179 return; 3180 3181 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n", 3182 dp_to_dig_port(intel_dp)->base.base.base.id, 3183 dp_to_dig_port(intel_dp)->base.base.name); 3184 3185 if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp), 3186 "[ENCODER:%d:%s] panel power already on\n", 3187 dp_to_dig_port(intel_dp)->base.base.base.id, 3188 dp_to_dig_port(intel_dp)->base.base.name)) 3189 return; 3190 3191 wait_panel_power_cycle(intel_dp); 3192 3193 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3194 pp = ilk_get_pp_control(intel_dp); 3195 if (IS_GEN(dev_priv, 5)) { 3196 /* ILK workaround: disable reset around power sequence */ 3197 pp &= ~PANEL_POWER_RESET; 3198 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3199 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3200 } 3201 3202 pp |= PANEL_POWER_ON; 3203 if (!IS_GEN(dev_priv, 5)) 3204 pp |= PANEL_POWER_RESET; 3205 3206 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3207 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3208 3209 wait_panel_on(intel_dp); 3210 intel_dp->last_power_on = jiffies; 3211 3212 if (IS_GEN(dev_priv, 5)) { 3213 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 3214 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3215 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3216 } 3217 } 3218 3219 void intel_edp_panel_on(struct intel_dp *intel_dp) 3220 { 3221 intel_wakeref_t wakeref; 3222 3223 if (!intel_dp_is_edp(intel_dp)) 3224 return; 3225 3226 with_pps_lock(intel_dp, wakeref) 3227 edp_panel_on(intel_dp); 3228 } 3229 3230 3231 static void edp_panel_off(struct intel_dp *intel_dp) 3232 { 3233 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3234 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3235 u32 pp; 3236 i915_reg_t pp_ctrl_reg; 3237 3238 lockdep_assert_held(&dev_priv->pps_mutex); 3239 3240 if (!intel_dp_is_edp(intel_dp)) 3241 return; 3242 3243 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n", 3244 dig_port->base.base.base.id, dig_port->base.base.name); 3245 3246 drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd, 3247 "Need [ENCODER:%d:%s] VDD to turn off panel\n", 3248 dig_port->base.base.base.id, dig_port->base.base.name); 3249 3250 pp = ilk_get_pp_control(intel_dp); 3251 /* We need to switch off panel power _and_ force vdd, for otherwise some 3252 * panels get very unhappy and cease to work. 
*/ 3253 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | 3254 EDP_BLC_ENABLE); 3255 3256 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3257 3258 intel_dp->want_panel_vdd = false; 3259 3260 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3261 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3262 3263 wait_panel_off(intel_dp); 3264 intel_dp->panel_power_off_time = ktime_get_boottime(); 3265 3266 /* We got a reference when we enabled the VDD. */ 3267 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port)); 3268 } 3269 3270 void intel_edp_panel_off(struct intel_dp *intel_dp) 3271 { 3272 intel_wakeref_t wakeref; 3273 3274 if (!intel_dp_is_edp(intel_dp)) 3275 return; 3276 3277 with_pps_lock(intel_dp, wakeref) 3278 edp_panel_off(intel_dp); 3279 } 3280 3281 /* Enable backlight in the panel power control. */ 3282 static void _intel_edp_backlight_on(struct intel_dp *intel_dp) 3283 { 3284 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3285 intel_wakeref_t wakeref; 3286 3287 /* 3288 * If we enable the backlight right away following a panel power 3289 * on, we may see slight flicker as the panel syncs with the eDP 3290 * link. So delay a bit to make sure the image is solid before 3291 * allowing it to appear. 3292 */ 3293 wait_backlight_on(intel_dp); 3294 3295 with_pps_lock(intel_dp, wakeref) { 3296 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3297 u32 pp; 3298 3299 pp = ilk_get_pp_control(intel_dp); 3300 pp |= EDP_BLC_ENABLE; 3301 3302 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3303 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3304 } 3305 } 3306 3307 /* Enable backlight PWM and backlight PP control. */ 3308 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, 3309 const struct drm_connector_state *conn_state) 3310 { 3311 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); 3312 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3313 3314 if (!intel_dp_is_edp(intel_dp)) 3315 return; 3316 3317 drm_dbg_kms(&i915->drm, "\n"); 3318 3319 intel_panel_enable_backlight(crtc_state, conn_state); 3320 _intel_edp_backlight_on(intel_dp); 3321 } 3322 3323 /* Disable backlight in the panel power control. */ 3324 static void _intel_edp_backlight_off(struct intel_dp *intel_dp) 3325 { 3326 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3327 intel_wakeref_t wakeref; 3328 3329 if (!intel_dp_is_edp(intel_dp)) 3330 return; 3331 3332 with_pps_lock(intel_dp, wakeref) { 3333 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3334 u32 pp; 3335 3336 pp = ilk_get_pp_control(intel_dp); 3337 pp &= ~EDP_BLC_ENABLE; 3338 3339 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3340 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3341 } 3342 3343 intel_dp->last_backlight_off = jiffies; 3344 edp_wait_backlight_off(intel_dp); 3345 } 3346 3347 /* Disable backlight PP control and backlight PWM. */ 3348 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) 3349 { 3350 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); 3351 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3352 3353 if (!intel_dp_is_edp(intel_dp)) 3354 return; 3355 3356 drm_dbg_kms(&i915->drm, "\n"); 3357 3358 _intel_edp_backlight_off(intel_dp); 3359 intel_panel_disable_backlight(old_conn_state); 3360 } 3361 3362 /* 3363 * Hook for controlling the panel power control backlight through the bl_power 3364 * sysfs attribute. Take care to handle multiple calls. 
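 *
 * For example, userspace toggling the backlight device's bl_power
 * attribute ends up here; only the EDP_BLC_ENABLE bit in PP_CONTROL
 * is touched, and repeated requests for the current state are
 * filtered out by the is_enabled check below.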
3365 */ 3366 static void intel_edp_backlight_power(struct intel_connector *connector, 3367 bool enable) 3368 { 3369 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3370 struct intel_dp *intel_dp = intel_attached_dp(connector); 3371 intel_wakeref_t wakeref; 3372 bool is_enabled; 3373 3374 is_enabled = false; 3375 with_pps_lock(intel_dp, wakeref) 3376 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE; 3377 if (is_enabled == enable) 3378 return; 3379 3380 drm_dbg_kms(&i915->drm, "panel power control backlight %s\n", 3381 enable ? "enable" : "disable"); 3382 3383 if (enable) 3384 _intel_edp_backlight_on(intel_dp); 3385 else 3386 _intel_edp_backlight_off(intel_dp); 3387 } 3388 3389 static void assert_dp_port(struct intel_dp *intel_dp, bool state) 3390 { 3391 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3392 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 3393 bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN; 3394 3395 I915_STATE_WARN(cur_state != state, 3396 "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", 3397 dig_port->base.base.base.id, dig_port->base.base.name, 3398 onoff(state), onoff(cur_state)); 3399 } 3400 #define assert_dp_port_disabled(d) assert_dp_port((d), false) 3401 3402 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) 3403 { 3404 bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE; 3405 3406 I915_STATE_WARN(cur_state != state, 3407 "eDP PLL state assertion failure (expected %s, current %s)\n", 3408 onoff(state), onoff(cur_state)); 3409 } 3410 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) 3411 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) 3412 3413 static void ilk_edp_pll_on(struct intel_dp *intel_dp, 3414 const struct intel_crtc_state *pipe_config) 3415 { 3416 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3417 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3418 3419 assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); 3420 assert_dp_port_disabled(intel_dp); 3421 assert_edp_pll_disabled(dev_priv); 3422 3423 drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n", 3424 pipe_config->port_clock); 3425 3426 intel_dp->DP &= ~DP_PLL_FREQ_MASK; 3427 3428 if (pipe_config->port_clock == 162000) 3429 intel_dp->DP |= DP_PLL_FREQ_162MHZ; 3430 else 3431 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 3432 3433 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3434 intel_de_posting_read(dev_priv, DP_A); 3435 udelay(500); 3436 3437 /* 3438 * [DevILK] Work around required when enabling DP PLL 3439 * while a pipe is enabled going to FDI: 3440 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI 3441 * 2. 
Program DP PLL enable 3442 */ 3443 if (IS_GEN(dev_priv, 5)) 3444 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe); 3445 3446 intel_dp->DP |= DP_PLL_ENABLE; 3447 3448 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3449 intel_de_posting_read(dev_priv, DP_A); 3450 udelay(200); 3451 } 3452 3453 static void ilk_edp_pll_off(struct intel_dp *intel_dp, 3454 const struct intel_crtc_state *old_crtc_state) 3455 { 3456 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 3457 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3458 3459 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); 3460 assert_dp_port_disabled(intel_dp); 3461 assert_edp_pll_enabled(dev_priv); 3462 3463 drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n"); 3464 3465 intel_dp->DP &= ~DP_PLL_ENABLE; 3466 3467 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3468 intel_de_posting_read(dev_priv, DP_A); 3469 udelay(200); 3470 } 3471 3472 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) 3473 { 3474 /* 3475 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus 3476 * be capable of signalling downstream hpd with a long pulse. 3477 * Whether or not that means D3 is safe to use is not clear, 3478 * but let's assume so until proven otherwise. 3479 * 3480 * FIXME should really check all downstream ports... 3481 */ 3482 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && 3483 drm_dp_is_branch(intel_dp->dpcd) && 3484 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; 3485 } 3486 3487 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, 3488 const struct intel_crtc_state *crtc_state, 3489 bool enable) 3490 { 3491 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3492 int ret; 3493 3494 if (!crtc_state->dsc.compression_enable) 3495 return; 3496 3497 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, 3498 enable ? DP_DECOMPRESSION_EN : 0); 3499 if (ret < 0) 3500 drm_dbg_kms(&i915->drm, 3501 "Failed to %s sink decompression state\n", 3502 enable ? "enable" : "disable"); 3503 } 3504 3505 /* If the device supports it, try to set the power state appropriately */ 3506 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode) 3507 { 3508 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3509 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3510 int ret, i; 3511 3512 /* Should have a valid DPCD by this point */ 3513 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 3514 return; 3515 3516 if (mode != DP_SET_POWER_D0) { 3517 if (downstream_hpd_needs_d0(intel_dp)) 3518 return; 3519 3520 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); 3521 } else { 3522 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 3523 3524 /* 3525 * When turning on, we need to retry for 1ms to give the sink 3526 * time to wake up. 3527 */ 3528 for (i = 0; i < 3; i++) { 3529 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); 3530 if (ret == 1) 3531 break; 3532 drm_msleep(1); 3533 } 3534 3535 if (ret == 1 && lspcon->active) 3536 lspcon_wait_pcon_mode(lspcon); 3537 } 3538 3539 if (ret != 1) 3540 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n", 3541 encoder->base.base.id, encoder->base.name, 3542 mode == DP_SET_POWER_D0 ? 
"D0" : "D3"); 3543 } 3544 3545 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, 3546 enum port port, enum pipe *pipe) 3547 { 3548 enum pipe p; 3549 3550 for_each_pipe(dev_priv, p) { 3551 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p)); 3552 3553 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) { 3554 *pipe = p; 3555 return true; 3556 } 3557 } 3558 3559 drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n", 3560 port_name(port)); 3561 3562 /* must initialize pipe to something for the asserts */ 3563 *pipe = PIPE_A; 3564 3565 return false; 3566 } 3567 3568 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, 3569 i915_reg_t dp_reg, enum port port, 3570 enum pipe *pipe) 3571 { 3572 bool ret; 3573 u32 val; 3574 3575 val = intel_de_read(dev_priv, dp_reg); 3576 3577 ret = val & DP_PORT_EN; 3578 3579 /* asserts want to know the pipe even if the port is disabled */ 3580 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 3581 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB; 3582 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) 3583 ret &= cpt_dp_port_selected(dev_priv, port, pipe); 3584 else if (IS_CHERRYVIEW(dev_priv)) 3585 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV; 3586 else 3587 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT; 3588 3589 return ret; 3590 } 3591 3592 static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 3593 enum pipe *pipe) 3594 { 3595 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3596 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3597 intel_wakeref_t wakeref; 3598 bool ret; 3599 3600 wakeref = intel_display_power_get_if_enabled(dev_priv, 3601 encoder->power_domain); 3602 if (!wakeref) 3603 return false; 3604 3605 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 3606 encoder->port, pipe); 3607 3608 intel_display_power_put(dev_priv, encoder->power_domain, wakeref); 3609 3610 return ret; 3611 } 3612 3613 static void intel_dp_get_config(struct intel_encoder *encoder, 3614 struct intel_crtc_state *pipe_config) 3615 { 3616 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3617 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3618 u32 tmp, flags = 0; 3619 enum port port = encoder->port; 3620 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3621 3622 if (encoder->type == INTEL_OUTPUT_EDP) 3623 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP); 3624 else 3625 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); 3626 3627 tmp = intel_de_read(dev_priv, intel_dp->output_reg); 3628 3629 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; 3630 3631 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 3632 u32 trans_dp = intel_de_read(dev_priv, 3633 TRANS_DP_CTL(crtc->pipe)); 3634 3635 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH) 3636 flags |= DRM_MODE_FLAG_PHSYNC; 3637 else 3638 flags |= DRM_MODE_FLAG_NHSYNC; 3639 3640 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH) 3641 flags |= DRM_MODE_FLAG_PVSYNC; 3642 else 3643 flags |= DRM_MODE_FLAG_NVSYNC; 3644 } else { 3645 if (tmp & DP_SYNC_HS_HIGH) 3646 flags |= DRM_MODE_FLAG_PHSYNC; 3647 else 3648 flags |= DRM_MODE_FLAG_NHSYNC; 3649 3650 if (tmp & DP_SYNC_VS_HIGH) 3651 flags |= DRM_MODE_FLAG_PVSYNC; 3652 else 3653 flags |= DRM_MODE_FLAG_NVSYNC; 3654 } 3655 3656 pipe_config->hw.adjusted_mode.flags |= flags; 3657 3658 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235) 3659 pipe_config->limited_color_range = true; 3660 3661 pipe_config->lane_count = 3662 ((tmp & 
DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; 3663 3664 intel_dp_get_m_n(crtc, pipe_config); 3665 3666 if (port == PORT_A) { 3667 if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) 3668 pipe_config->port_clock = 162000; 3669 else 3670 pipe_config->port_clock = 270000; 3671 } 3672 3673 pipe_config->hw.adjusted_mode.crtc_clock = 3674 intel_dotclock_calculate(pipe_config->port_clock, 3675 &pipe_config->dp_m_n); 3676 3677 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp && 3678 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { 3679 /* 3680 * This is a big fat ugly hack. 3681 * 3682 * Some machines in UEFI boot mode provide us a VBT that has 18 3683 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons 3684 * unknown we fail to light up. Yet the same BIOS boots up with 3685 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as 3686 * max, not what it tells us to use. 3687 * 3688 * Note: This will still be broken if the eDP panel is not lit 3689 * up by the BIOS, and thus we can't get the mode at module 3690 * load. 3691 */ 3692 drm_dbg_kms(&dev_priv->drm, 3693 "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", 3694 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); 3695 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; 3696 } 3697 } 3698 3699 static void intel_disable_dp(struct intel_atomic_state *state, 3700 struct intel_encoder *encoder, 3701 const struct intel_crtc_state *old_crtc_state, 3702 const struct drm_connector_state *old_conn_state) 3703 { 3704 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3705 3706 intel_dp->link_trained = false; 3707 3708 if (old_crtc_state->has_audio) 3709 intel_audio_codec_disable(encoder, 3710 old_crtc_state, old_conn_state); 3711 3712 /* Make sure the panel is off before trying to change the mode. But also 3713 * ensure that we have vdd while we switch off the panel. */ 3714 intel_edp_panel_vdd_on(intel_dp); 3715 intel_edp_backlight_off(old_conn_state); 3716 intel_dp_set_power(intel_dp, DP_SET_POWER_D3); 3717 intel_edp_panel_off(intel_dp); 3718 } 3719 3720 static void g4x_disable_dp(struct intel_atomic_state *state, 3721 struct intel_encoder *encoder, 3722 const struct intel_crtc_state *old_crtc_state, 3723 const struct drm_connector_state *old_conn_state) 3724 { 3725 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); 3726 } 3727 3728 static void vlv_disable_dp(struct intel_atomic_state *state, 3729 struct intel_encoder *encoder, 3730 const struct intel_crtc_state *old_crtc_state, 3731 const struct drm_connector_state *old_conn_state) 3732 { 3733 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); 3734 } 3735 3736 static void g4x_post_disable_dp(struct intel_atomic_state *state, 3737 struct intel_encoder *encoder, 3738 const struct intel_crtc_state *old_crtc_state, 3739 const struct drm_connector_state *old_conn_state) 3740 { 3741 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3742 enum port port = encoder->port; 3743 3744 /* 3745 * Bspec does not list a specific disable sequence for g4x DP. 3746 * Follow the ilk+ sequence (disable pipe before the port) for 3747 * g4x DP as it does not suffer from underruns like the normal 3748 * g4x modeset sequence (disable pipe after the port). 
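	 * In other words, by the time this post-disable hook runs the pipe
	 * has already been shut down, so only the port itself (and, for
	 * port A, i.e. ilk+ eDP, the PLL) remains to be turned off below.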
3749 */ 3750 intel_dp_link_down(encoder, old_crtc_state); 3751 3752 /* Only ilk+ has port A */ 3753 if (port == PORT_A) 3754 ilk_edp_pll_off(intel_dp, old_crtc_state); 3755 } 3756 3757 static void vlv_post_disable_dp(struct intel_atomic_state *state, 3758 struct intel_encoder *encoder, 3759 const struct intel_crtc_state *old_crtc_state, 3760 const struct drm_connector_state *old_conn_state) 3761 { 3762 intel_dp_link_down(encoder, old_crtc_state); 3763 } 3764 3765 static void chv_post_disable_dp(struct intel_atomic_state *state, 3766 struct intel_encoder *encoder, 3767 const struct intel_crtc_state *old_crtc_state, 3768 const struct drm_connector_state *old_conn_state) 3769 { 3770 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3771 3772 intel_dp_link_down(encoder, old_crtc_state); 3773 3774 vlv_dpio_get(dev_priv); 3775 3776 /* Assert data lane reset */ 3777 chv_data_lane_soft_reset(encoder, old_crtc_state, true); 3778 3779 vlv_dpio_put(dev_priv); 3780 } 3781 3782 static void 3783 cpt_set_link_train(struct intel_dp *intel_dp, 3784 u8 dp_train_pat) 3785 { 3786 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3787 u32 *DP = &intel_dp->DP; 3788 3789 *DP &= ~DP_LINK_TRAIN_MASK_CPT; 3790 3791 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3792 case DP_TRAINING_PATTERN_DISABLE: 3793 *DP |= DP_LINK_TRAIN_OFF_CPT; 3794 break; 3795 case DP_TRAINING_PATTERN_1: 3796 *DP |= DP_LINK_TRAIN_PAT_1_CPT; 3797 break; 3798 case DP_TRAINING_PATTERN_2: 3799 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3800 break; 3801 case DP_TRAINING_PATTERN_3: 3802 drm_dbg_kms(&dev_priv->drm, 3803 "TPS3 not supported, using TPS2 instead\n"); 3804 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3805 break; 3806 } 3807 3808 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3809 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3810 } 3811 3812 static void 3813 g4x_set_link_train(struct intel_dp *intel_dp, 3814 u8 dp_train_pat) 3815 { 3816 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3817 u32 *DP = &intel_dp->DP; 3818 3819 *DP &= ~DP_LINK_TRAIN_MASK; 3820 3821 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3822 case DP_TRAINING_PATTERN_DISABLE: 3823 *DP |= DP_LINK_TRAIN_OFF; 3824 break; 3825 case DP_TRAINING_PATTERN_1: 3826 *DP |= DP_LINK_TRAIN_PAT_1; 3827 break; 3828 case DP_TRAINING_PATTERN_2: 3829 *DP |= DP_LINK_TRAIN_PAT_2; 3830 break; 3831 case DP_TRAINING_PATTERN_3: 3832 drm_dbg_kms(&dev_priv->drm, 3833 "TPS3 not supported, using TPS2 instead\n"); 3834 *DP |= DP_LINK_TRAIN_PAT_2; 3835 break; 3836 } 3837 3838 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3839 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3840 } 3841 3842 static void intel_dp_enable_port(struct intel_dp *intel_dp, 3843 const struct intel_crtc_state *old_crtc_state) 3844 { 3845 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3846 3847 /* enable with pattern 1 (as per spec) */ 3848 3849 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1); 3850 3851 /* 3852 * Magic for VLV/CHV. We _must_ first set up the register 3853 * without actually enabling the port, and then do another 3854 * write to enable the port. Otherwise link training will 3855 * fail when the power sequencer is freshly used for this port. 
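	 *
	 * A minimal sketch of the resulting two-step sequence (illustrative
	 * only; the set-up write without DP_PORT_EN happens earlier, from
	 * intel_dp_prepare()):
	 *
	 *	intel_de_write(dev_priv, output_reg, DP & ~DP_PORT_EN);
	 *	...
	 *	intel_de_write(dev_priv, output_reg, DP | DP_PORT_EN);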
3856 */ 3857 intel_dp->DP |= DP_PORT_EN; 3858 if (old_crtc_state->has_audio) 3859 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 3860 3861 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3862 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3863 } 3864 3865 void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, 3866 const struct intel_crtc_state *crtc_state) 3867 { 3868 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3869 u8 tmp; 3870 3871 if (intel_dp->dpcd[DP_DPCD_REV] < 0x13) 3872 return; 3873 3874 if (!drm_dp_is_branch(intel_dp->dpcd)) 3875 return; 3876 3877 tmp = intel_dp->has_hdmi_sink ? 3878 DP_HDMI_DVI_OUTPUT_CONFIG : 0; 3879 3880 if (drm_dp_dpcd_writeb(&intel_dp->aux, 3881 DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1) 3882 drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n", 3883 enableddisabled(intel_dp->has_hdmi_sink)); 3884 3885 tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 && 3886 intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0; 3887 3888 if (drm_dp_dpcd_writeb(&intel_dp->aux, 3889 DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1) 3890 drm_dbg_kms(&i915->drm, 3891 "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n", 3892 enableddisabled(intel_dp->dfp.ycbcr_444_to_420)); 3893 3894 tmp = 0; 3895 3896 if (drm_dp_dpcd_writeb(&intel_dp->aux, 3897 DP_PROTOCOL_CONVERTER_CONTROL_2, tmp) <= 0) 3898 drm_dbg_kms(&i915->drm, 3899 "Failed to set protocol converter YCbCr 4:2:2 conversion mode to %s\n", 3900 enableddisabled(false)); 3901 } 3902 3903 static void intel_enable_dp(struct intel_atomic_state *state, 3904 struct intel_encoder *encoder, 3905 const struct intel_crtc_state *pipe_config, 3906 const struct drm_connector_state *conn_state) 3907 { 3908 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3909 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3910 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3911 u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg); 3912 enum pipe pipe = crtc->pipe; 3913 intel_wakeref_t wakeref; 3914 3915 if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN)) 3916 return; 3917 3918 with_pps_lock(intel_dp, wakeref) { 3919 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 3920 vlv_init_panel_power_sequencer(encoder, pipe_config); 3921 3922 intel_dp_enable_port(intel_dp, pipe_config); 3923 3924 edp_panel_vdd_on(intel_dp); 3925 edp_panel_on(intel_dp); 3926 edp_panel_vdd_off(intel_dp, true); 3927 } 3928 3929 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 3930 unsigned int lane_mask = 0x0; 3931 3932 if (IS_CHERRYVIEW(dev_priv)) 3933 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count); 3934 3935 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), 3936 lane_mask); 3937 } 3938 3939 intel_dp_set_power(intel_dp, DP_SET_POWER_D0); 3940 intel_dp_configure_protocol_converter(intel_dp, pipe_config); 3941 intel_dp_start_link_train(intel_dp); 3942 intel_dp_stop_link_train(intel_dp); 3943 3944 if (pipe_config->has_audio) { 3945 drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n", 3946 pipe_name(pipe)); 3947 intel_audio_codec_enable(encoder, pipe_config, conn_state); 3948 } 3949 } 3950 3951 static void g4x_enable_dp(struct intel_atomic_state *state, 3952 struct intel_encoder *encoder, 3953 const struct intel_crtc_state *pipe_config, 3954 const struct drm_connector_state *conn_state) 3955 { 3956 intel_enable_dp(state, encoder, pipe_config, conn_state); 3957 intel_edp_backlight_on(pipe_config, 
conn_state);
}

static void vlv_enable_dp(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_edp_backlight_on(pipe_config, conn_state);
}

static void g4x_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;

	intel_dp_prepare(encoder, pipe_config);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ilk_edp_pll_on(intel_dp, pipe_config);
}

static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
		return;

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV, on the other hand, doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);
	intel_de_write(dev_priv, pp_on_reg, 0);
	intel_de_posting_read(dev_priv, pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}

static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
			 pipe_name(pipe), encoder->base.base.id,
			 encoder->base.name);

		if (intel_dp->pps_pipe != pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
			    pipe_name(pipe), encoder->base.base.id,
			    encoder->base.name);

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}

static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was
being used on this 4059 * port previously make sure to turn off vdd there while 4060 * we still have control of it. 4061 */ 4062 vlv_detach_power_sequencer(intel_dp); 4063 } 4064 4065 /* 4066 * We may be stealing the power 4067 * sequencer from another port. 4068 */ 4069 vlv_steal_power_sequencer(dev_priv, crtc->pipe); 4070 4071 intel_dp->active_pipe = crtc->pipe; 4072 4073 if (!intel_dp_is_edp(intel_dp)) 4074 return; 4075 4076 /* now it's all ours */ 4077 intel_dp->pps_pipe = crtc->pipe; 4078 4079 drm_dbg_kms(&dev_priv->drm, 4080 "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n", 4081 pipe_name(intel_dp->pps_pipe), encoder->base.base.id, 4082 encoder->base.name); 4083 4084 /* init power sequencer on this pipe and port */ 4085 intel_dp_init_panel_power_sequencer(intel_dp); 4086 intel_dp_init_panel_power_sequencer_registers(intel_dp, true); 4087 } 4088 4089 static void vlv_pre_enable_dp(struct intel_atomic_state *state, 4090 struct intel_encoder *encoder, 4091 const struct intel_crtc_state *pipe_config, 4092 const struct drm_connector_state *conn_state) 4093 { 4094 vlv_phy_pre_encoder_enable(encoder, pipe_config); 4095 4096 intel_enable_dp(state, encoder, pipe_config, conn_state); 4097 } 4098 4099 static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state, 4100 struct intel_encoder *encoder, 4101 const struct intel_crtc_state *pipe_config, 4102 const struct drm_connector_state *conn_state) 4103 { 4104 intel_dp_prepare(encoder, pipe_config); 4105 4106 vlv_phy_pre_pll_enable(encoder, pipe_config); 4107 } 4108 4109 static void chv_pre_enable_dp(struct intel_atomic_state *state, 4110 struct intel_encoder *encoder, 4111 const struct intel_crtc_state *pipe_config, 4112 const struct drm_connector_state *conn_state) 4113 { 4114 chv_phy_pre_encoder_enable(encoder, pipe_config); 4115 4116 intel_enable_dp(state, encoder, pipe_config, conn_state); 4117 4118 /* Second common lane will stay alive on its own now */ 4119 chv_phy_release_cl2_override(encoder); 4120 } 4121 4122 static void chv_dp_pre_pll_enable(struct intel_atomic_state *state, 4123 struct intel_encoder *encoder, 4124 const struct intel_crtc_state *pipe_config, 4125 const struct drm_connector_state *conn_state) 4126 { 4127 intel_dp_prepare(encoder, pipe_config); 4128 4129 chv_phy_pre_pll_enable(encoder, pipe_config); 4130 } 4131 4132 static void chv_dp_post_pll_disable(struct intel_atomic_state *state, 4133 struct intel_encoder *encoder, 4134 const struct intel_crtc_state *old_crtc_state, 4135 const struct drm_connector_state *old_conn_state) 4136 { 4137 chv_phy_post_pll_disable(encoder, old_crtc_state); 4138 } 4139 4140 /* 4141 * Fetch AUX CH registers 0x202 - 0x207 which contain 4142 * link status information 4143 */ 4144 bool 4145 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status) 4146 { 4147 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status, 4148 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; 4149 } 4150 4151 static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp) 4152 { 4153 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 4154 } 4155 4156 static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp) 4157 { 4158 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 4159 } 4160 4161 static u8 intel_dp_pre_empemph_max_2(struct intel_dp *intel_dp) 4162 { 4163 return DP_TRAIN_PRE_EMPH_LEVEL_2; 4164 } 4165 4166 static u8 intel_dp_pre_empemph_max_3(struct intel_dp *intel_dp) 4167 { 4168 return DP_TRAIN_PRE_EMPH_LEVEL_3; 4169 } 4170 4171 static void vlv_set_signal_levels(struct intel_dp *intel_dp) 4172 { 4173 
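	/*
	 * Translate the requested vswing/pre-emphasis levels from
	 * train_set into the VLV PHY's demph/preemph/uniqtranscale
	 * register values. Note that only combinations where the
	 * voltage swing and pre-emphasis levels sum to at most 3 are
	 * provided, which is why the higher swing entries disappear
	 * as the pre-emphasis level grows in the cases below.
	 */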
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4174 unsigned long demph_reg_value, preemph_reg_value, 4175 uniqtranscale_reg_value; 4176 u8 train_set = intel_dp->train_set[0]; 4177 4178 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4179 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4180 preemph_reg_value = 0x0004000; 4181 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4182 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4183 demph_reg_value = 0x2B405555; 4184 uniqtranscale_reg_value = 0x552AB83A; 4185 break; 4186 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4187 demph_reg_value = 0x2B404040; 4188 uniqtranscale_reg_value = 0x5548B83A; 4189 break; 4190 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4191 demph_reg_value = 0x2B245555; 4192 uniqtranscale_reg_value = 0x5560B83A; 4193 break; 4194 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4195 demph_reg_value = 0x2B405555; 4196 uniqtranscale_reg_value = 0x5598DA3A; 4197 break; 4198 default: 4199 return; 4200 } 4201 break; 4202 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4203 preemph_reg_value = 0x0002000; 4204 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4205 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4206 demph_reg_value = 0x2B404040; 4207 uniqtranscale_reg_value = 0x5552B83A; 4208 break; 4209 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4210 demph_reg_value = 0x2B404848; 4211 uniqtranscale_reg_value = 0x5580B83A; 4212 break; 4213 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4214 demph_reg_value = 0x2B404040; 4215 uniqtranscale_reg_value = 0x55ADDA3A; 4216 break; 4217 default: 4218 return; 4219 } 4220 break; 4221 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4222 preemph_reg_value = 0x0000000; 4223 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4224 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4225 demph_reg_value = 0x2B305555; 4226 uniqtranscale_reg_value = 0x5570B83A; 4227 break; 4228 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4229 demph_reg_value = 0x2B2B4040; 4230 uniqtranscale_reg_value = 0x55ADDA3A; 4231 break; 4232 default: 4233 return; 4234 } 4235 break; 4236 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4237 preemph_reg_value = 0x0006000; 4238 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4239 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4240 demph_reg_value = 0x1B405555; 4241 uniqtranscale_reg_value = 0x55ADDA3A; 4242 break; 4243 default: 4244 return; 4245 } 4246 break; 4247 default: 4248 return; 4249 } 4250 4251 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value, 4252 uniqtranscale_reg_value, 0); 4253 } 4254 4255 static void chv_set_signal_levels(struct intel_dp *intel_dp) 4256 { 4257 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4258 u32 deemph_reg_value, margin_reg_value; 4259 bool uniq_trans_scale = false; 4260 u8 train_set = intel_dp->train_set[0]; 4261 4262 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4263 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4264 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4265 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4266 deemph_reg_value = 128; 4267 margin_reg_value = 52; 4268 break; 4269 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4270 deemph_reg_value = 128; 4271 margin_reg_value = 77; 4272 break; 4273 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4274 deemph_reg_value = 128; 4275 margin_reg_value = 102; 4276 break; 4277 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4278 deemph_reg_value = 128; 4279 margin_reg_value = 154; 4280 uniq_trans_scale = true; 4281 break; 4282 default: 4283 return; 4284 } 4285 break; 4286 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4287 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4288 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4289 deemph_reg_value = 85; 4290 
margin_reg_value = 78; 4291 break; 4292 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4293 deemph_reg_value = 85; 4294 margin_reg_value = 116; 4295 break; 4296 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4297 deemph_reg_value = 85; 4298 margin_reg_value = 154; 4299 break; 4300 default: 4301 return; 4302 } 4303 break; 4304 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4305 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4306 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4307 deemph_reg_value = 64; 4308 margin_reg_value = 104; 4309 break; 4310 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4311 deemph_reg_value = 64; 4312 margin_reg_value = 154; 4313 break; 4314 default: 4315 return; 4316 } 4317 break; 4318 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4319 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4320 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4321 deemph_reg_value = 43; 4322 margin_reg_value = 154; 4323 break; 4324 default: 4325 return; 4326 } 4327 break; 4328 default: 4329 return; 4330 } 4331 4332 chv_set_phy_signal_level(encoder, deemph_reg_value, 4333 margin_reg_value, uniq_trans_scale); 4334 } 4335 4336 static u32 g4x_signal_levels(u8 train_set) 4337 { 4338 u32 signal_levels = 0; 4339 4340 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4341 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4342 default: 4343 signal_levels |= DP_VOLTAGE_0_4; 4344 break; 4345 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4346 signal_levels |= DP_VOLTAGE_0_6; 4347 break; 4348 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4349 signal_levels |= DP_VOLTAGE_0_8; 4350 break; 4351 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4352 signal_levels |= DP_VOLTAGE_1_2; 4353 break; 4354 } 4355 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4356 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4357 default: 4358 signal_levels |= DP_PRE_EMPHASIS_0; 4359 break; 4360 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4361 signal_levels |= DP_PRE_EMPHASIS_3_5; 4362 break; 4363 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4364 signal_levels |= DP_PRE_EMPHASIS_6; 4365 break; 4366 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4367 signal_levels |= DP_PRE_EMPHASIS_9_5; 4368 break; 4369 } 4370 return signal_levels; 4371 } 4372 4373 static void 4374 g4x_set_signal_levels(struct intel_dp *intel_dp) 4375 { 4376 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4377 u8 train_set = intel_dp->train_set[0]; 4378 u32 signal_levels; 4379 4380 signal_levels = g4x_signal_levels(train_set); 4381 4382 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4383 signal_levels); 4384 4385 intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK); 4386 intel_dp->DP |= signal_levels; 4387 4388 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4389 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4390 } 4391 4392 /* SNB CPU eDP voltage swing and pre-emphasis control */ 4393 static u32 snb_cpu_edp_signal_levels(u8 train_set) 4394 { 4395 u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4396 DP_TRAIN_PRE_EMPHASIS_MASK); 4397 4398 switch (signal_levels) { 4399 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4400 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4401 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4402 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4403 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 4404 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4405 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4406 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 4407 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4408 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | 
DP_TRAIN_PRE_EMPH_LEVEL_1: 4409 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 4410 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4411 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4412 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 4413 default: 4414 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 4415 "0x%x\n", signal_levels); 4416 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4417 } 4418 } 4419 4420 static void 4421 snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp) 4422 { 4423 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4424 u8 train_set = intel_dp->train_set[0]; 4425 u32 signal_levels; 4426 4427 signal_levels = snb_cpu_edp_signal_levels(train_set); 4428 4429 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4430 signal_levels); 4431 4432 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; 4433 intel_dp->DP |= signal_levels; 4434 4435 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4436 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4437 } 4438 4439 /* IVB CPU eDP voltage swing and pre-emphasis control */ 4440 static u32 ivb_cpu_edp_signal_levels(u8 train_set) 4441 { 4442 u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4443 DP_TRAIN_PRE_EMPHASIS_MASK); 4444 4445 switch (signal_levels) { 4446 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4447 return EDP_LINK_TRAIN_400MV_0DB_IVB; 4448 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4449 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 4450 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4451 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4452 return EDP_LINK_TRAIN_400MV_6DB_IVB; 4453 4454 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4455 return EDP_LINK_TRAIN_600MV_0DB_IVB; 4456 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4457 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 4458 4459 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4460 return EDP_LINK_TRAIN_800MV_0DB_IVB; 4461 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4462 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 4463 4464 default: 4465 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 4466 "0x%x\n", signal_levels); 4467 return EDP_LINK_TRAIN_500MV_0DB_IVB; 4468 } 4469 } 4470 4471 static void 4472 ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp) 4473 { 4474 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4475 u8 train_set = intel_dp->train_set[0]; 4476 u32 signal_levels; 4477 4478 signal_levels = ivb_cpu_edp_signal_levels(train_set); 4479 4480 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4481 signal_levels); 4482 4483 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; 4484 intel_dp->DP |= signal_levels; 4485 4486 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4487 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4488 } 4489 4490 void intel_dp_set_signal_levels(struct intel_dp *intel_dp) 4491 { 4492 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4493 u8 train_set = intel_dp->train_set[0]; 4494 4495 drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n", 4496 train_set & DP_TRAIN_VOLTAGE_SWING_MASK, 4497 train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : ""); 4498 drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n", 4499 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> 4500 DP_TRAIN_PRE_EMPHASIS_SHIFT, 4501 train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? 
4502 " (max)" : ""); 4503 4504 intel_dp->set_signal_levels(intel_dp); 4505 } 4506 4507 void 4508 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, 4509 u8 dp_train_pat) 4510 { 4511 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4512 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); 4513 4514 if (dp_train_pat & train_pat_mask) 4515 drm_dbg_kms(&dev_priv->drm, 4516 "Using DP training pattern TPS%d\n", 4517 dp_train_pat & train_pat_mask); 4518 4519 intel_dp->set_link_train(intel_dp, dp_train_pat); 4520 } 4521 4522 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) 4523 { 4524 if (intel_dp->set_idle_link_train) 4525 intel_dp->set_idle_link_train(intel_dp); 4526 } 4527 4528 static void 4529 intel_dp_link_down(struct intel_encoder *encoder, 4530 const struct intel_crtc_state *old_crtc_state) 4531 { 4532 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4533 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4534 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 4535 enum port port = encoder->port; 4536 u32 DP = intel_dp->DP; 4537 4538 if (drm_WARN_ON(&dev_priv->drm, 4539 (intel_de_read(dev_priv, intel_dp->output_reg) & 4540 DP_PORT_EN) == 0)) 4541 return; 4542 4543 drm_dbg_kms(&dev_priv->drm, "\n"); 4544 4545 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 4546 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { 4547 DP &= ~DP_LINK_TRAIN_MASK_CPT; 4548 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; 4549 } else { 4550 DP &= ~DP_LINK_TRAIN_MASK; 4551 DP |= DP_LINK_TRAIN_PAT_IDLE; 4552 } 4553 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4554 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4555 4556 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); 4557 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4558 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4559 4560 /* 4561 * HW workaround for IBX, we need to move the port 4562 * to transcoder A after disabling it to allow the 4563 * matching HDMI port to be enabled on transcoder A. 4564 */ 4565 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) { 4566 /* 4567 * We get CPU/PCH FIFO underruns on the other pipe when 4568 * doing the workaround. Sweep them under the rug. 
4569 */ 4570 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); 4571 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); 4572 4573 /* always enable with pattern 1 (as per spec) */ 4574 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK); 4575 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) | 4576 DP_LINK_TRAIN_PAT_1; 4577 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4578 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4579 4580 DP &= ~DP_PORT_EN; 4581 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4582 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4583 4584 intel_wait_for_vblank_if_active(dev_priv, PIPE_A); 4585 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); 4586 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 4587 } 4588 4589 drm_msleep(intel_dp->panel_power_down_delay); 4590 4591 intel_dp->DP = DP; 4592 4593 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 4594 intel_wakeref_t wakeref; 4595 4596 with_pps_lock(intel_dp, wakeref) 4597 intel_dp->active_pipe = INVALID_PIPE; 4598 } 4599 } 4600 4601 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 4602 { 4603 u8 dprx = 0; 4604 4605 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 4606 &dprx) != 1) 4607 return false; 4608 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 4609 } 4610 4611 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) 4612 { 4613 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4614 4615 /* 4616 * Clear the cached register set to avoid using stale values 4617 * for the sinks that do not support DSC. 4618 */ 4619 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 4620 4621 /* Clear fec_capable to avoid using stale values */ 4622 intel_dp->fec_capable = 0; 4623 4624 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ 4625 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || 4626 intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4627 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, 4628 intel_dp->dsc_dpcd, 4629 sizeof(intel_dp->dsc_dpcd)) < 0) 4630 drm_err(&i915->drm, 4631 "Failed to read DPCD register 0x%x\n", 4632 DP_DSC_SUPPORT); 4633 4634 drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n", 4635 (int)sizeof(intel_dp->dsc_dpcd), 4636 intel_dp->dsc_dpcd); 4637 4638 /* FEC is supported only on DP 1.4 */ 4639 if (!intel_dp_is_edp(intel_dp) && 4640 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, 4641 &intel_dp->fec_capable) < 0) 4642 drm_err(&i915->drm, 4643 "Failed to read FEC DPCD register\n"); 4644 4645 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n", 4646 intel_dp->fec_capable); 4647 } 4648 } 4649 4650 static bool 4651 intel_edp_init_dpcd(struct intel_dp *intel_dp) 4652 { 4653 struct drm_i915_private *dev_priv = 4654 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 4655 4656 /* this function is meant to be called only once */ 4657 drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); 4658 4659 if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) 4660 return false; 4661 4662 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4663 drm_dp_is_branch(intel_dp->dpcd)); 4664 4665 /* 4666 * Read the eDP display control registers. 4667 * 4668 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 4669 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 4670 * set, but require eDP 1.4+ detection (e.g. for supported link rates 4671 * method). The display control registers should read zero if they're 4672 * not supported anyway. 
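	 *
	 * (For reference: the DP_SUPPORTED_LINK_RATES values parsed
	 * further below are in 200 kHz units, e.g. a raw value of 10800
	 * means 10800 * 200 kHz = 2.16 GHz on the wire, which the
	 * conversion (val * 200) / 10 stores as the 216000 LS_Clk kHz
	 * link rate.)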
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
	    sizeof(intel_dp->edp_dpcd))
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled; PSR
	 * checks for the SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1].
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Clear the default sink rates */
	intel_dp->num_sink_rates = 0;

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));

		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	return true;
}

static bool
intel_dp_has_sink_count(struct intel_dp *intel_dp)
{
	if (!intel_dp->attached_connector)
		return false;

	return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
					  intel_dp->dpcd,
					  &intel_dp->desc);
}

static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	int ret;

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd))
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (intel_dp_has_sink_count(intel_dp)) {
		ret = drm_dp_read_sink_count(&intel_dp->aux);
		if (ret < 0)
			return false;

		/*
		 * Sink count can change between short HPD pulses, hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = ret;

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we need to know
		 * whether a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time by skipping other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
					   intel_dp->downstream_ports) == 0;
}

static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	return i915->params.enable_dp_mst &&
		intel_dp->can_mst &&
		drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
}

static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
		    encoder->base.base.id, encoder->base.name,
		    yesno(intel_dp->can_mst), yesno(sink_can_mst),
		    yesno(i915->params.enable_dp_mst));

	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915->params.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}

static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
				sink_irq_vector, DP_DPRX_ESI_LEN) ==
		DP_DPRX_ESI_LEN;
}

bool
intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
		       const struct drm_connector_state *conn_state)
{
	/*
	 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
	 * of Color Encoding Format and Content Color Gamut], in order to
	 * send YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		return true;

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_SYCC_601:
	case DRM_MODE_COLORIMETRY_OPYCC_601:
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		return true;
	default:
		break;
	}

	return false;
}

static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
				     struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/*
	 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
	 * VSC SDP Header Bytes
	 */
	sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
	sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
	sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
	sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */

	/*
	 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
	 * per DP 1.4a spec.
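	 *
	 * A worked example of the packing below (field values as defined
	 * for the DP 1.4a VSC SDP payload; treat them as illustrative):
	 * 8 bpc YCbCr 4:2:0 with BT.709 colorimetry would pack as
	 *	db[16] = (0x3 << 4) | 0x1 = 0x31
	 *	db[17] = 0x1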
	 */
	if (vsc->revision != 0x5)
		goto out;

	/* VSC SDP Payload for DB16 through DB18 */
	/* Pixel Encoding and Colorimetry Formats */
	sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
	sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */

	switch (vsc->bpc) {
	case 6:
		/* 6bpc: 0x0 */
		break;
	case 8:
		sdp->db[17] = 0x1; /* DB17[3:0] */
		break;
	case 10:
		sdp->db[17] = 0x2;
		break;
	case 12:
		sdp->db[17] = 0x3;
		break;
	case 16:
		sdp->db[17] = 0x4;
		break;
	default:
		MISSING_CASE(vsc->bpc);
		break;
	}
	/* Dynamic Range and Component Bit Depth */
	if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
		sdp->db[17] |= 0x80; /* DB17[7] */

	/* Content Type */
	sdp->db[18] = vsc->content_type & 0x7;

out:
	return length;
}

static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		DRM_DEBUG_KMS("wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count - 1),
	 * i.e. infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE bytes from the packed buffer,
	 * skipping the HDMI_INFOFRAME_HEADER_SIZE header bytes.
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11, the GMP register size is identical to the DP HDR
	 * static metadata infoframe size. GEN11+ has a larger GMP register;
	 * write_infoframe will pad the rest of the size.
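	 *
	 * That is, the return value below works out to 4 + 2 + 26 = 32
	 * bytes, while HB2 = 0x1D above encodes (Data Byte Count - 1)
	 * for the full 30-byte CTA infoframe (4-byte CTA header plus
	 * 26-byte payload).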
4993 */ 4994 return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE; 4995 } 4996 4997 static void intel_write_dp_sdp(struct intel_encoder *encoder, 4998 const struct intel_crtc_state *crtc_state, 4999 unsigned int type) 5000 { 5001 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 5002 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5003 struct dp_sdp sdp = {}; 5004 ssize_t len; 5005 5006 if ((crtc_state->infoframes.enable & 5007 intel_hdmi_infoframe_enable(type)) == 0) 5008 return; 5009 5010 switch (type) { 5011 case DP_SDP_VSC: 5012 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp, 5013 sizeof(sdp)); 5014 break; 5015 case HDMI_PACKET_TYPE_GAMUT_METADATA: 5016 len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm, 5017 &sdp, sizeof(sdp)); 5018 break; 5019 default: 5020 MISSING_CASE(type); 5021 return; 5022 } 5023 5024 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 5025 return; 5026 5027 dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); 5028 } 5029 5030 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder, 5031 const struct intel_crtc_state *crtc_state, 5032 struct drm_dp_vsc_sdp *vsc) 5033 { 5034 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 5035 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5036 struct dp_sdp sdp = {}; 5037 ssize_t len; 5038 5039 len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp)); 5040 5041 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 5042 return; 5043 5044 dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC, 5045 &sdp, len); 5046 } 5047 5048 void intel_dp_set_infoframes(struct intel_encoder *encoder, 5049 bool enable, 5050 const struct intel_crtc_state *crtc_state, 5051 const struct drm_connector_state *conn_state) 5052 { 5053 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5054 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5055 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); 5056 u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | 5057 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | 5058 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; 5059 u32 val = intel_de_read(dev_priv, reg); 5060 5061 /* TODO: Add DSC case (DIP_ENABLE_PPS) */ 5062 /* When PSR is enabled, this routine doesn't disable VSC DIP */ 5063 if (intel_psr_enabled(intel_dp)) 5064 val &= ~dip_enable; 5065 else 5066 val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW); 5067 5068 if (!enable) { 5069 intel_de_write(dev_priv, reg, val); 5070 intel_de_posting_read(dev_priv, reg); 5071 return; 5072 } 5073 5074 intel_de_write(dev_priv, reg, val); 5075 intel_de_posting_read(dev_priv, reg); 5076 5077 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 5078 if (!intel_psr_enabled(intel_dp)) 5079 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); 5080 5081 intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); 5082 } 5083 5084 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc, 5085 const void *buffer, size_t size) 5086 { 5087 const struct dp_sdp *sdp = buffer; 5088 5089 if (size < sizeof(struct dp_sdp)) 5090 return -EINVAL; 5091 5092 memset(vsc, 0, sizeof(*vsc)); 5093 5094 if (sdp->sdp_header.HB0 != 0) 5095 return -EINVAL; 5096 5097 if (sdp->sdp_header.HB1 != DP_SDP_VSC) 5098 return -EINVAL; 5099 5100 vsc->sdp_type = sdp->sdp_header.HB1; 5101 vsc->revision = sdp->sdp_header.HB2; 5102 vsc->length = sdp->sdp_header.HB3; 5103 5104 if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 
0x8) || 5105 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) { 5106 /* 5107 * - HB2 = 0x2, HB3 = 0x8 5108 * VSC SDP supporting 3D stereo + PSR 5109 * - HB2 = 0x4, HB3 = 0xe 5110 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of 5111 * first scan line of the SU region (applies to eDP v1.4b 5112 * and higher). 5113 */ 5114 return 0; 5115 } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) { 5116 /* 5117 * - HB2 = 0x5, HB3 = 0x13 5118 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry 5119 * Format. 5120 */ 5121 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf; 5122 vsc->colorimetry = sdp->db[16] & 0xf; 5123 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1; 5124 5125 switch (sdp->db[17] & 0x7) { 5126 case 0x0: 5127 vsc->bpc = 6; 5128 break; 5129 case 0x1: 5130 vsc->bpc = 8; 5131 break; 5132 case 0x2: 5133 vsc->bpc = 10; 5134 break; 5135 case 0x3: 5136 vsc->bpc = 12; 5137 break; 5138 case 0x4: 5139 vsc->bpc = 16; 5140 break; 5141 default: 5142 MISSING_CASE(sdp->db[17] & 0x7); 5143 return -EINVAL; 5144 } 5145 5146 vsc->content_type = sdp->db[18] & 0x7; 5147 } else { 5148 return -EINVAL; 5149 } 5150 5151 return 0; 5152 } 5153 5154 static int 5155 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe, 5156 const void *buffer, size_t size) 5157 { 5158 int ret; 5159 5160 const struct dp_sdp *sdp = buffer; 5161 5162 if (size < sizeof(struct dp_sdp)) 5163 return -EINVAL; 5164 5165 if (sdp->sdp_header.HB0 != 0) 5166 return -EINVAL; 5167 5168 if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM) 5169 return -EINVAL; 5170 5171 /* 5172 * Least Significant Eight Bits of (Data Byte Count – 1) 5173 * 1Dh (i.e., Data Byte Count = 30 bytes). 5174 */ 5175 if (sdp->sdp_header.HB2 != 0x1D) 5176 return -EINVAL; 5177 5178 /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. 
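	 * A well-formed header therefore reads HB2 = 0x1D and
	 * HB3 = (0x13 << 2) | 0x0 = 0x4c, mirroring what
	 * intel_dp_hdr_metadata_infoframe_sdp_pack() writes above.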
	 */
	if ((sdp->sdp_header.HB3 & 0x3) != 0)
		return -EINVAL;

	/* INFOFRAME SDP Version Number */
	if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
		return -EINVAL;

	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	if (sdp->db[0] != 1)
		return -EINVAL;

	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
		return -EINVAL;

	ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
					     HDMI_DRM_INFOFRAME_SIZE);

	return ret;
}

static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
				  struct intel_crtc_state *crtc_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = DP_SDP_VSC;
	struct dp_sdp sdp = {};
	int ret;

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (intel_psr_enabled(intel_dp))
		return;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));

	ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
}

static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
						     struct intel_crtc_state *crtc_state,
						     struct hdmi_drm_infoframe *drm_infoframe)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
	struct dp_sdp sdp = {};
	int ret;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
				 sizeof(sdp));

	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
							 sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to unpack DP HDR Metadata Infoframe SDP\n");
}

void intel_read_dp_sdp(struct intel_encoder *encoder,
		       struct intel_crtc_state *crtc_state,
		       unsigned int type)
{
	if (encoder->type != INTEL_OUTPUT_DDI)
		return;

	switch (type) {
	case DP_SDP_VSC:
		intel_read_dp_vsc_sdp(encoder, crtc_state,
				      &crtc_state->infoframes.vsc);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
							 &crtc_state->infoframes.drm.drm);
		break;
	default:
		MISSING_CASE(type);
		break;
	}
}

static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_LINK_RATE, 5294 &test_link_bw); 5295 if (status <= 0) { 5296 drm_dbg_kms(&i915->drm, "Link Rate read failed\n"); 5297 return DP_TEST_NAK; 5298 } 5299 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); 5300 5301 /* Validate the requested link rate and lane count */ 5302 if (!intel_dp_link_params_valid(intel_dp, test_link_rate, 5303 test_lane_count)) 5304 return DP_TEST_NAK; 5305 5306 intel_dp->compliance.test_lane_count = test_lane_count; 5307 intel_dp->compliance.test_link_rate = test_link_rate; 5308 5309 return DP_TEST_ACK; 5310 } 5311 5312 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) 5313 { 5314 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5315 u8 test_pattern; 5316 u8 test_misc; 5317 __be16 h_width, v_height; 5318 int status = 0; 5319 5320 /* Read the TEST_PATTERN (DP CTS 3.1.5) */ 5321 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, 5322 &test_pattern); 5323 if (status <= 0) { 5324 drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); 5325 return DP_TEST_NAK; 5326 } 5327 if (test_pattern != DP_COLOR_RAMP) 5328 return DP_TEST_NAK; 5329 5330 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, 5331 &h_width, 2); 5332 if (status <= 0) { 5333 drm_dbg_kms(&i915->drm, "H Width read failed\n"); 5334 return DP_TEST_NAK; 5335 } 5336 5337 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, 5338 &v_height, 2); 5339 if (status <= 0) { 5340 drm_dbg_kms(&i915->drm, "V Height read failed\n"); 5341 return DP_TEST_NAK; 5342 } 5343 5344 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, 5345 &test_misc); 5346 if (status <= 0) { 5347 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); 5348 return DP_TEST_NAK; 5349 } 5350 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) 5351 return DP_TEST_NAK; 5352 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) 5353 return DP_TEST_NAK; 5354 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { 5355 case DP_TEST_BIT_DEPTH_6: 5356 intel_dp->compliance.test_data.bpc = 6; 5357 break; 5358 case DP_TEST_BIT_DEPTH_8: 5359 intel_dp->compliance.test_data.bpc = 8; 5360 break; 5361 default: 5362 return DP_TEST_NAK; 5363 } 5364 5365 intel_dp->compliance.test_data.video_pattern = test_pattern; 5366 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 5367 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 5368 /* Set test active flag here so userspace doesn't interrupt things */ 5369 intel_dp->compliance.test_active = true; 5370 5371 return DP_TEST_ACK; 5372 } 5373 5374 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) 5375 { 5376 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5377 u8 test_result = DP_TEST_ACK; 5378 struct intel_connector *intel_connector = intel_dp->attached_connector; 5379 struct drm_connector *connector = &intel_connector->base; 5380 5381 if (intel_connector->detect_edid == NULL || 5382 connector->edid_corrupt || 5383 intel_dp->aux.i2c_defer_count > 6) { 5384 /* Check EDID read for NACKs, DEFERs and corruption 5385 * (DP CTS 1.2 Core r1.1) 5386 * 4.2.2.4 : Failed EDID read, I2C_NAK 5387 * 4.2.2.5 : Failed EDID read, I2C_DEFER 5388 * 4.2.2.6 : EDID corruption detected 5389 * Use failsafe mode for all cases 5390 */ 5391 if (intel_dp->aux.i2c_nack_count > 0 || 5392 intel_dp->aux.i2c_defer_count > 0) 5393 drm_dbg_kms(&i915->drm, 5394 "EDID read had %d NACKs, %d DEFERs\n", 5395 intel_dp->aux.i2c_nack_count, 5396 intel_dp->aux.i2c_defer_count); 5397 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 5398 } else { 
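		/*
		 * A concrete example (assuming an EDID with one extension
		 * block): detect_edid points at the 128-byte base block and
		 * ->extensions == 1, so "block" below ends up pointing at
		 * the extension block, whose final byte is the checksum we
		 * report back via DP_TEST_EDID_CHECKSUM.
		 */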
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}

static u8 intel_dp_prepare_phytest(struct intel_dp *intel_dp)
{
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;

	if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
		DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
		return DP_TEST_NAK;
	}

	/*
	 * link_mst is set to false to avoid executing mst related code
	 * during compliance testing.
	 */
	intel_dp->link_mst = false;

	return DP_TEST_ACK;
}

static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 pattern_val;

	switch (data->phy_pattern) {
	case DP_PHY_TEST_PATTERN_NONE:
		DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		break;
	case DP_PHY_TEST_PATTERN_D10_2:
		DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_PHY_TEST_PATTERN_ERROR_COUNT:
		DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_PHY_TEST_PATTERN_PRBS7:
		DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally the pattern should come from DPCD 0x250.
		 * Since current DPR-100 firmware cannot set it, hardcode it
		 * for the compliance test for now.
		 */
		DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
		pattern_val = 0x3e0f83e0;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_PHY_TEST_PATTERN_CP2520:
		/*
		 * FIXME: Ideally the pattern should come from DPCD 0x24A.
		 * Since current DPR-100 firmware cannot set it, hardcode it
		 * for the compliance test for now.
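		 * (CP2520 refers to the HBR2 compliance EYE pattern family
		 * from the DP CTS; the 0xFB value programmed below is
		 * assumed to be the hardware's pattern count for
		 * generating it.)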
5494 */ 5495 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n"); 5496 pattern_val = 0xFB; 5497 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5498 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | 5499 pattern_val); 5500 break; 5501 default: 5502 WARN(1, "Invalid Phy Test Pattern\n"); 5503 } 5504 } 5505 5506 static void 5507 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp) 5508 { 5509 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5510 struct drm_device *dev = dig_port->base.base.dev; 5511 struct drm_i915_private *dev_priv = to_i915(dev); 5512 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 5513 enum pipe pipe = crtc->pipe; 5514 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 5515 5516 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 5517 TRANS_DDI_FUNC_CTL(pipe)); 5518 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 5519 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 5520 5521 trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | 5522 TGL_TRANS_DDI_PORT_MASK); 5523 trans_conf_value &= ~PIPECONF_ENABLE; 5524 dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; 5525 5526 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 5527 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 5528 trans_ddi_func_ctl_value); 5529 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 5530 } 5531 5532 static void 5533 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt) 5534 { 5535 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5536 struct drm_device *dev = dig_port->base.base.dev; 5537 struct drm_i915_private *dev_priv = to_i915(dev); 5538 enum port port = dig_port->base.port; 5539 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 5540 enum pipe pipe = crtc->pipe; 5541 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 5542 5543 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 5544 TRANS_DDI_FUNC_CTL(pipe)); 5545 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 5546 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 5547 5548 trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | 5549 TGL_TRANS_DDI_SELECT_PORT(port); 5550 trans_conf_value |= PIPECONF_ENABLE; 5551 dp_tp_ctl_value |= DP_TP_CTL_ENABLE; 5552 5553 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 5554 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 5555 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 5556 trans_ddi_func_ctl_value); 5557 } 5558 5559 void intel_dp_process_phy_request(struct intel_dp *intel_dp) 5560 { 5561 struct drm_dp_phy_test_params *data = 5562 &intel_dp->compliance.test_data.phytest; 5563 u8 link_status[DP_LINK_STATUS_SIZE]; 5564 5565 if (!intel_dp_get_link_status(intel_dp, link_status)) { 5566 DRM_DEBUG_KMS("failed to get link status\n"); 5567 return; 5568 } 5569 5570 /* retrieve vswing & pre-emphasis setting */ 5571 intel_dp_get_adjust_train(intel_dp, link_status); 5572 5573 intel_dp_autotest_phy_ddi_disable(intel_dp); 5574 5575 intel_dp_set_signal_levels(intel_dp); 5576 5577 intel_dp_phy_pattern_update(intel_dp); 5578 5579 intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes); 5580 5581 drm_dp_set_phy_test_pattern(&intel_dp->aux, data, 5582 link_status[DP_DPCD_REV]); 5583 } 5584 5585 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 5586 { 5587 u8 test_result; 5588 5589 test_result = intel_dp_prepare_phytest(intel_dp); 5590 if (test_result != DP_TEST_ACK) 5591 DRM_ERROR("Phy 
static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
	u8 test_result;

	test_result = intel_dp_prepare_phytest(intel_dp);
	if (test_result != DP_TEST_ACK)
		DRM_ERROR("Phy test preparation failed\n");

	intel_dp_process_phy_request(intel_dp);

	return test_result;
}

static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm,
			    "Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		drm_dbg_kms(&i915->drm, "EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
			    request);
		break;
	}

	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		drm_dbg_kms(&i915->drm,
			    "Could not write test response to sink\n");
}

/**
 * intel_dp_check_mst_status - service any pending MST interrupts, check link status
 * @intel_dp: Intel DP struct
 *
 * Read any pending MST interrupts, call MST core to handle these and ack the
 * interrupts. Check if the main and AUX link state is ok.
 *
 * Returns:
 * - %true if pending interrupts were serviced (or no interrupts were
 *   pending) w/o detecting an error condition.
 * - %false if an error condition - like AUX failure or a loss of link - is
 *   detected, which needs servicing from the hotplug work.
 */
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool link_ok = true;

	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);

	for (;;) {
		/*
		 * The +2 is because DP_DPRX_ESI_LEN is 14, but we then
		 * pass in "esi+10" to drm_dp_channel_eq_ok(), which
		 * takes a 6-byte array. So we actually need 16 bytes
		 * here.
		 *
		 * Somebody who knows what the limits actually are
		 * should check this, but for now this is at least
		 * harmless and avoids a valid compiler warning about
		 * using more of the array than we have allocated.
		 */
5677 */ 5678 u8 esi[DP_DPRX_ESI_LEN+2] = {}; 5679 bool handled; 5680 int retry; 5681 5682 if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) { 5683 drm_dbg_kms(&i915->drm, 5684 "failed to get ESI - device may have failed\n"); 5685 link_ok = false; 5686 5687 break; 5688 } 5689 5690 /* check link status - esi[10] = 0x200c */ 5691 if (intel_dp->active_mst_links > 0 && link_ok && 5692 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { 5693 drm_dbg_kms(&i915->drm, 5694 "channel EQ not ok, retraining\n"); 5695 link_ok = false; 5696 } 5697 5698 drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); 5699 5700 drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); 5701 if (!handled) 5702 break; 5703 5704 for (retry = 0; retry < 3; retry++) { 5705 int wret; 5706 5707 wret = drm_dp_dpcd_write(&intel_dp->aux, 5708 DP_SINK_COUNT_ESI+1, 5709 &esi[1], 3); 5710 if (wret == 3) 5711 break; 5712 } 5713 } 5714 5715 return link_ok; 5716 } 5717 5718 static bool 5719 intel_dp_needs_link_retrain(struct intel_dp *intel_dp) 5720 { 5721 u8 link_status[DP_LINK_STATUS_SIZE]; 5722 5723 if (!intel_dp->link_trained) 5724 return false; 5725 5726 /* 5727 * While PSR source HW is enabled, it will control main-link sending 5728 * frames, enabling and disabling it so trying to do a retrain will fail 5729 * as the link would or not be on or it could mix training patterns 5730 * and frame data at the same time causing retrain to fail. 5731 * Also when exiting PSR, HW will retrain the link anyways fixing 5732 * any link status error. 5733 */ 5734 if (intel_psr_enabled(intel_dp)) 5735 return false; 5736 5737 if (!intel_dp_get_link_status(intel_dp, link_status)) 5738 return false; 5739 5740 /* 5741 * Validate the cached values of intel_dp->link_rate and 5742 * intel_dp->lane_count before attempting to retrain. 
5743 */ 5744 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 5745 intel_dp->lane_count)) 5746 return false; 5747 5748 /* Retrain if Channel EQ or CR not ok */ 5749 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 5750 } 5751 5752 static bool intel_dp_has_connector(struct intel_dp *intel_dp, 5753 const struct drm_connector_state *conn_state) 5754 { 5755 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5756 struct intel_encoder *encoder; 5757 enum pipe pipe; 5758 5759 if (!conn_state->best_encoder) 5760 return false; 5761 5762 /* SST */ 5763 encoder = &dp_to_dig_port(intel_dp)->base; 5764 if (conn_state->best_encoder == &encoder->base) 5765 return true; 5766 5767 /* MST */ 5768 for_each_pipe(i915, pipe) { 5769 encoder = &intel_dp->mst_encoders[pipe]->base; 5770 if (conn_state->best_encoder == &encoder->base) 5771 return true; 5772 } 5773 5774 return false; 5775 } 5776 5777 static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, 5778 struct drm_modeset_acquire_ctx *ctx, 5779 u32 *crtc_mask) 5780 { 5781 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5782 struct drm_connector_list_iter conn_iter; 5783 struct intel_connector *connector; 5784 int ret = 0; 5785 5786 *crtc_mask = 0; 5787 5788 if (!intel_dp_needs_link_retrain(intel_dp)) 5789 return 0; 5790 5791 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 5792 for_each_intel_connector_iter(connector, &conn_iter) { 5793 struct drm_connector_state *conn_state = 5794 connector->base.state; 5795 struct intel_crtc_state *crtc_state; 5796 struct intel_crtc *crtc; 5797 5798 if (!intel_dp_has_connector(intel_dp, conn_state)) 5799 continue; 5800 5801 crtc = to_intel_crtc(conn_state->crtc); 5802 if (!crtc) 5803 continue; 5804 5805 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 5806 if (ret) 5807 break; 5808 5809 crtc_state = to_intel_crtc_state(crtc->base.state); 5810 5811 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 5812 5813 if (!crtc_state->hw.active) 5814 continue; 5815 5816 if (conn_state->commit && 5817 !try_wait_for_completion(&conn_state->commit->hw_done)) 5818 continue; 5819 5820 *crtc_mask |= drm_crtc_mask(&crtc->base); 5821 } 5822 drm_connector_list_iter_end(&conn_iter); 5823 5824 if (!intel_dp_needs_link_retrain(intel_dp)) 5825 *crtc_mask = 0; 5826 5827 return ret; 5828 } 5829 5830 static bool intel_dp_is_connected(struct intel_dp *intel_dp) 5831 { 5832 struct intel_connector *connector = intel_dp->attached_connector; 5833 5834 return connector->base.status == connector_status_connected || 5835 intel_dp->is_mst; 5836 } 5837 5838 int intel_dp_retrain_link(struct intel_encoder *encoder, 5839 struct drm_modeset_acquire_ctx *ctx) 5840 { 5841 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5842 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5843 struct intel_crtc *crtc; 5844 u32 crtc_mask; 5845 int ret; 5846 5847 if (!intel_dp_is_connected(intel_dp)) 5848 return 0; 5849 5850 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 5851 ctx); 5852 if (ret) 5853 return ret; 5854 5855 ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask); 5856 if (ret) 5857 return ret; 5858 5859 if (crtc_mask == 0) 5860 return 0; 5861 5862 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n", 5863 encoder->base.base.id, encoder->base.name); 5864 5865 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 5866 const struct intel_crtc_state *crtc_state = 5867 to_intel_crtc_state(crtc->base.state); 5868 5869 /* Suppress underruns caused 
	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Suppress underruns caused by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), false);
	}

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_wait_for_vblank(dev_priv, crtc->pipe);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

	return 0;
}

/*
 * If the display is now connected, check the link status; there have been
 * known issues of link loss triggering a long pulse.
 *
 * Some sinks (e.g. ASUS PB287Q) seem to perform some weird HPD ping pong
 * during modesets. So we can apparently end up with HPD going low during
 * a modeset, and then going back up soon after. And once that happens we
 * must retrain the link to get a picture. That's in case no userspace
 * component reacted to the intermittent HPD dip.
 */
static enum intel_hotplug_state
intel_dp_hotplug(struct intel_encoder *encoder,
		 struct intel_connector *connector)
{
	struct drm_modeset_acquire_ctx ctx;
	enum intel_hotplug_state state;
	int ret;

	state = intel_encoder_hotplug(encoder, connector);

	drm_modeset_acquire_init(&ctx, 0);

	for (;;) {
		ret = intel_dp_retrain_link(encoder, &ctx);

		/* on lock contention, back off and retry the whole sequence */
		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);

	/*
	 * Keeping it consistent with intel_ddi_hotplug() and
	 * intel_hdmi_hotplug().
	 */
	if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
		state = INTEL_HOTPLUG_RETRY;

	return state;
}

static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* ack the IRQ vector before handling the individual events */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse - handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
5981 */ 5982 static bool 5983 intel_dp_short_pulse(struct intel_dp *intel_dp) 5984 { 5985 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 5986 u8 old_sink_count = intel_dp->sink_count; 5987 bool ret; 5988 5989 /* 5990 * Clearing compliance test variables to allow capturing 5991 * of values for next automated test request. 5992 */ 5993 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 5994 5995 /* 5996 * Now read the DPCD to see if it's actually running 5997 * If the current value of sink count doesn't match with 5998 * the value that was stored earlier or dpcd read failed 5999 * we need to do full detection 6000 */ 6001 ret = intel_dp_get_dpcd(intel_dp); 6002 6003 if ((old_sink_count != intel_dp->sink_count) || !ret) { 6004 /* No need to proceed if we are going to do full detect */ 6005 return false; 6006 } 6007 6008 intel_dp_check_service_irq(intel_dp); 6009 6010 /* Handle CEC interrupts, if any */ 6011 drm_dp_cec_irq(&intel_dp->aux); 6012 6013 /* defer to the hotplug work for link retraining if needed */ 6014 if (intel_dp_needs_link_retrain(intel_dp)) 6015 return false; 6016 6017 intel_psr_short_pulse(intel_dp); 6018 6019 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 6020 drm_dbg_kms(&dev_priv->drm, 6021 "Link Training Compliance Test requested\n"); 6022 /* Send a Hotplug Uevent to userspace to start modeset */ 6023 drm_kms_helper_hotplug_event(&dev_priv->drm); 6024 } 6025 6026 return true; 6027 } 6028 6029 /* XXX this is probably wrong for multiple downstream ports */ 6030 static enum drm_connector_status 6031 intel_dp_detect_dpcd(struct intel_dp *intel_dp) 6032 { 6033 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6034 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 6035 u8 *dpcd = intel_dp->dpcd; 6036 u8 type; 6037 6038 if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp))) 6039 return connector_status_connected; 6040 6041 if (lspcon->active) 6042 lspcon_resume(lspcon); 6043 6044 if (!intel_dp_get_dpcd(intel_dp)) 6045 return connector_status_disconnected; 6046 6047 /* if there's no downstream port, we're done */ 6048 if (!drm_dp_is_branch(dpcd)) 6049 return connector_status_connected; 6050 6051 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 6052 if (intel_dp_has_sink_count(intel_dp) && 6053 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { 6054 return intel_dp->sink_count ? 
		return intel_dp->sink_count ?
			connector_status_connected :
			connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}

static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];

	return intel_de_read(dev_priv, SDEISR) & bit;
}

static bool g4x_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}

static bool gm45_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}

static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];

	return intel_de_read(dev_priv, DEISR) & bit;
}

/*
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
6162 */ 6163 bool intel_digital_port_connected(struct intel_encoder *encoder) 6164 { 6165 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6166 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 6167 bool is_connected = false; 6168 intel_wakeref_t wakeref; 6169 6170 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) 6171 is_connected = dig_port->connected(encoder); 6172 6173 return is_connected; 6174 } 6175 6176 static struct edid * 6177 intel_dp_get_edid(struct intel_dp *intel_dp) 6178 { 6179 struct intel_connector *intel_connector = intel_dp->attached_connector; 6180 6181 /* use cached edid if we have one */ 6182 if (intel_connector->edid) { 6183 /* invalid edid */ 6184 if (IS_ERR(intel_connector->edid)) 6185 return NULL; 6186 6187 return drm_edid_duplicate(intel_connector->edid); 6188 } else 6189 return drm_get_edid(&intel_connector->base, 6190 &intel_dp->aux.ddc); 6191 } 6192 6193 static void 6194 intel_dp_update_dfp(struct intel_dp *intel_dp, 6195 const struct edid *edid) 6196 { 6197 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6198 struct intel_connector *connector = intel_dp->attached_connector; 6199 6200 intel_dp->dfp.max_bpc = 6201 drm_dp_downstream_max_bpc(intel_dp->dpcd, 6202 intel_dp->downstream_ports, edid); 6203 6204 intel_dp->dfp.max_dotclock = 6205 drm_dp_downstream_max_dotclock(intel_dp->dpcd, 6206 intel_dp->downstream_ports); 6207 6208 intel_dp->dfp.min_tmds_clock = 6209 drm_dp_downstream_min_tmds_clock(intel_dp->dpcd, 6210 intel_dp->downstream_ports, 6211 edid); 6212 intel_dp->dfp.max_tmds_clock = 6213 drm_dp_downstream_max_tmds_clock(intel_dp->dpcd, 6214 intel_dp->downstream_ports, 6215 edid); 6216 6217 drm_dbg_kms(&i915->drm, 6218 "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d\n", 6219 connector->base.base.id, connector->base.name, 6220 intel_dp->dfp.max_bpc, 6221 intel_dp->dfp.max_dotclock, 6222 intel_dp->dfp.min_tmds_clock, 6223 intel_dp->dfp.max_tmds_clock); 6224 } 6225 6226 static void 6227 intel_dp_update_420(struct intel_dp *intel_dp) 6228 { 6229 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6230 struct intel_connector *connector = intel_dp->attached_connector; 6231 bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420; 6232 6233 /* No YCbCr output support on gmch platforms */ 6234 if (HAS_GMCH(i915)) 6235 return; 6236 6237 /* 6238 * ILK doesn't seem capable of DP YCbCr output. The 6239 * displayed image is severly corrupted. SNB+ is fine. 6240 */ 6241 if (IS_GEN(i915, 5)) 6242 return; 6243 6244 is_branch = drm_dp_is_branch(intel_dp->dpcd); 6245 ycbcr_420_passthrough = 6246 drm_dp_downstream_420_passthrough(intel_dp->dpcd, 6247 intel_dp->downstream_ports); 6248 ycbcr_444_to_420 = 6249 drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd, 6250 intel_dp->downstream_ports); 6251 6252 if (INTEL_GEN(i915) >= 11) { 6253 /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */ 6254 intel_dp->dfp.ycbcr_444_to_420 = 6255 ycbcr_444_to_420 && !ycbcr_420_passthrough; 6256 6257 connector->base.ycbcr_420_allowed = 6258 !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough; 6259 } else { 6260 /* 4:4:4->4:2:0 conversion is the only way */ 6261 intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420; 6262 6263 connector->base.ycbcr_420_allowed = ycbcr_444_to_420; 6264 } 6265 6266 drm_dbg_kms(&i915->drm, 6267 "[CONNECTOR:%d:%s] YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? 
%s\n", 6268 connector->base.base.id, connector->base.name, 6269 yesno(connector->base.ycbcr_420_allowed), 6270 yesno(intel_dp->dfp.ycbcr_444_to_420)); 6271 } 6272 6273 static void 6274 intel_dp_set_edid(struct intel_dp *intel_dp) 6275 { 6276 struct intel_connector *connector = intel_dp->attached_connector; 6277 struct edid *edid; 6278 6279 intel_dp_unset_edid(intel_dp); 6280 edid = intel_dp_get_edid(intel_dp); 6281 connector->detect_edid = edid; 6282 6283 intel_dp_update_dfp(intel_dp, edid); 6284 intel_dp_update_420(intel_dp); 6285 6286 if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { 6287 intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid); 6288 intel_dp->has_audio = drm_detect_monitor_audio(edid); 6289 } 6290 6291 drm_dp_cec_set_edid(&intel_dp->aux, edid); 6292 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); 6293 } 6294 6295 static void 6296 intel_dp_unset_edid(struct intel_dp *intel_dp) 6297 { 6298 struct intel_connector *connector = intel_dp->attached_connector; 6299 6300 drm_dp_cec_unset_edid(&intel_dp->aux); 6301 kfree(connector->detect_edid); 6302 connector->detect_edid = NULL; 6303 6304 intel_dp->has_hdmi_sink = false; 6305 intel_dp->has_audio = false; 6306 intel_dp->edid_quirks = 0; 6307 6308 intel_dp->dfp.max_bpc = 0; 6309 intel_dp->dfp.max_dotclock = 0; 6310 intel_dp->dfp.min_tmds_clock = 0; 6311 intel_dp->dfp.max_tmds_clock = 0; 6312 6313 intel_dp->dfp.ycbcr_444_to_420 = false; 6314 connector->base.ycbcr_420_allowed = false; 6315 } 6316 6317 static int 6318 intel_dp_detect(struct drm_connector *connector, 6319 struct drm_modeset_acquire_ctx *ctx, 6320 bool force) 6321 { 6322 struct drm_i915_private *dev_priv = to_i915(connector->dev); 6323 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6324 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6325 struct intel_encoder *encoder = &dig_port->base; 6326 enum drm_connector_status status; 6327 6328 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 6329 connector->base.id, connector->name); 6330 drm_WARN_ON(&dev_priv->drm, 6331 !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 6332 6333 if (!INTEL_DISPLAY_ENABLED(dev_priv)) 6334 return connector_status_disconnected; 6335 6336 /* Can't disconnect eDP */ 6337 if (intel_dp_is_edp(intel_dp)) 6338 status = edp_detect(intel_dp); 6339 else if (intel_digital_port_connected(encoder)) 6340 status = intel_dp_detect_dpcd(intel_dp); 6341 else 6342 status = connector_status_disconnected; 6343 6344 if (status == connector_status_disconnected) { 6345 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 6346 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 6347 6348 if (intel_dp->is_mst) { 6349 drm_dbg_kms(&dev_priv->drm, 6350 "MST device may have disappeared %d vs %d\n", 6351 intel_dp->is_mst, 6352 intel_dp->mst_mgr.mst_state); 6353 intel_dp->is_mst = false; 6354 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 6355 intel_dp->is_mst); 6356 } 6357 6358 goto out; 6359 } 6360 6361 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ 6362 if (INTEL_GEN(dev_priv) >= 11) 6363 intel_dp_get_dsc_sink_cap(intel_dp); 6364 6365 intel_dp_configure_mst(intel_dp); 6366 6367 /* 6368 * TODO: Reset link params when switching to MST mode, until MST 6369 * supports link training fallback params. 
6370 */ 6371 if (intel_dp->reset_link_params || intel_dp->is_mst) { 6372 /* Initial max link lane count */ 6373 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 6374 6375 /* Initial max link rate */ 6376 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 6377 6378 intel_dp->reset_link_params = false; 6379 } 6380 6381 intel_dp_print_rates(intel_dp); 6382 6383 if (intel_dp->is_mst) { 6384 /* 6385 * If we are in MST mode then this connector 6386 * won't appear connected or have anything 6387 * with EDID on it 6388 */ 6389 status = connector_status_disconnected; 6390 goto out; 6391 } 6392 6393 /* 6394 * Some external monitors do not signal loss of link synchronization 6395 * with an IRQ_HPD, so force a link status check. 6396 */ 6397 if (!intel_dp_is_edp(intel_dp)) { 6398 int ret; 6399 6400 ret = intel_dp_retrain_link(encoder, ctx); 6401 if (ret) 6402 return ret; 6403 } 6404 6405 /* 6406 * Clearing NACK and defer counts to get their exact values 6407 * while reading EDID which are required by Compliance tests 6408 * 4.2.2.4 and 4.2.2.5 6409 */ 6410 intel_dp->aux.i2c_nack_count = 0; 6411 intel_dp->aux.i2c_defer_count = 0; 6412 6413 intel_dp_set_edid(intel_dp); 6414 if (intel_dp_is_edp(intel_dp) || 6415 to_intel_connector(connector)->detect_edid) 6416 status = connector_status_connected; 6417 6418 intel_dp_check_service_irq(intel_dp); 6419 6420 out: 6421 if (status != connector_status_connected && !intel_dp->is_mst) 6422 intel_dp_unset_edid(intel_dp); 6423 6424 /* 6425 * Make sure the refs for power wells enabled during detect are 6426 * dropped to avoid a new detect cycle triggered by HPD polling. 6427 */ 6428 intel_display_power_flush_work(dev_priv); 6429 6430 if (!intel_dp_is_edp(intel_dp)) 6431 drm_dp_set_subconnector_property(connector, 6432 status, 6433 intel_dp->dpcd, 6434 intel_dp->downstream_ports); 6435 return status; 6436 } 6437 6438 static void 6439 intel_dp_force(struct drm_connector *connector) 6440 { 6441 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6442 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6443 struct intel_encoder *intel_encoder = &dig_port->base; 6444 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 6445 enum intel_display_power_domain aux_domain = 6446 intel_aux_power_domain(dig_port); 6447 intel_wakeref_t wakeref; 6448 6449 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 6450 connector->base.id, connector->name); 6451 intel_dp_unset_edid(intel_dp); 6452 6453 if (connector->status != connector_status_connected) 6454 return; 6455 6456 wakeref = intel_display_power_get(dev_priv, aux_domain); 6457 6458 intel_dp_set_edid(intel_dp); 6459 6460 intel_display_power_put(dev_priv, aux_domain, wakeref); 6461 } 6462 6463 static int intel_dp_get_modes(struct drm_connector *connector) 6464 { 6465 struct intel_connector *intel_connector = to_intel_connector(connector); 6466 struct edid *edid; 6467 6468 edid = intel_connector->detect_edid; 6469 if (edid) { 6470 int ret = intel_connector_update_modes(connector, edid); 6471 if (ret) 6472 return ret; 6473 } 6474 6475 /* if eDP has no EDID, fall back to fixed mode */ 6476 if (intel_dp_is_edp(intel_attached_dp(intel_connector)) && 6477 intel_connector->panel.fixed_mode) { 6478 struct drm_display_mode *mode; 6479 6480 mode = drm_mode_duplicate(connector->dev, 6481 intel_connector->panel.fixed_mode); 6482 if (mode) { 6483 drm_mode_probed_add(connector, mode); 6484 return 1; 6485 } 6486 } 6487 6488 if (!edid) { 6489 struct intel_dp 
	if (!edid) {
		struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
		struct drm_display_mode *mode;

		mode = drm_dp_downstream_mode(connector->dev,
					      intel_dp->dpcd,
					      intel_dp->downstream_ports);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}

	return 0;
}

static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

#ifdef notyet
	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);
#endif

	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);
	return ret;
}

static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}

void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);
	if (intel_dp_is_edp(intel_dp)) {
		intel_wakeref_t wakeref;

		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		with_pps_lock(intel_dp, wakeref)
			edp_panel_vdd_off_sync(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}

	intel_dp_aux_fini(intel_dp);
}

static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	intel_dp_encoder_flush_work(encoder);

	drm_encoder_cleanup(encoder);
	kfree(enc_to_dig_port(to_intel_encoder(encoder)));
}

void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);
}

static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
6603 */ 6604 drm_dbg_kms(&dev_priv->drm, 6605 "VDD left on by BIOS, adjusting state tracking\n"); 6606 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); 6607 6608 edp_panel_vdd_schedule_off(intel_dp); 6609 } 6610 6611 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) 6612 { 6613 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6614 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 6615 enum pipe pipe; 6616 6617 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 6618 encoder->port, &pipe)) 6619 return pipe; 6620 6621 return INVALID_PIPE; 6622 } 6623 6624 void intel_dp_encoder_reset(struct drm_encoder *encoder) 6625 { 6626 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 6627 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); 6628 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 6629 intel_wakeref_t wakeref; 6630 6631 if (!HAS_DDI(dev_priv)) 6632 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 6633 6634 if (lspcon->active) 6635 lspcon_resume(lspcon); 6636 6637 intel_dp->reset_link_params = true; 6638 6639 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 6640 !intel_dp_is_edp(intel_dp)) 6641 return; 6642 6643 with_pps_lock(intel_dp, wakeref) { 6644 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 6645 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 6646 6647 if (intel_dp_is_edp(intel_dp)) { 6648 /* 6649 * Reinit the power sequencer, in case BIOS did 6650 * something nasty with it. 6651 */ 6652 intel_dp_pps_init(intel_dp); 6653 intel_edp_panel_vdd_sanitize(intel_dp); 6654 } 6655 } 6656 } 6657 6658 static int intel_modeset_tile_group(struct intel_atomic_state *state, 6659 int tile_group_id) 6660 { 6661 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6662 struct drm_connector_list_iter conn_iter; 6663 struct drm_connector *connector; 6664 int ret = 0; 6665 6666 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 6667 drm_for_each_connector_iter(connector, &conn_iter) { 6668 struct drm_connector_state *conn_state; 6669 struct intel_crtc_state *crtc_state; 6670 struct intel_crtc *crtc; 6671 6672 if (!connector->has_tile || 6673 connector->tile_group->id != tile_group_id) 6674 continue; 6675 6676 conn_state = drm_atomic_get_connector_state(&state->base, 6677 connector); 6678 if (IS_ERR(conn_state)) { 6679 ret = PTR_ERR(conn_state); 6680 break; 6681 } 6682 6683 crtc = to_intel_crtc(conn_state->crtc); 6684 6685 if (!crtc) 6686 continue; 6687 6688 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 6689 crtc_state->uapi.mode_changed = true; 6690 6691 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 6692 if (ret) 6693 break; 6694 } 6695 drm_connector_list_iter_end(&conn_iter); 6696 6697 return ret; 6698 } 6699 6700 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders) 6701 { 6702 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6703 struct intel_crtc *crtc; 6704 6705 if (transcoders == 0) 6706 return 0; 6707 6708 for_each_intel_crtc(&dev_priv->drm, crtc) { 6709 struct intel_crtc_state *crtc_state; 6710 int ret; 6711 6712 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 6713 if (IS_ERR(crtc_state)) 6714 return PTR_ERR(crtc_state); 6715 6716 if (!crtc_state->hw.enable) 6717 continue; 6718 6719 if (!(transcoders & BIT(crtc_state->cpu_transcoder))) 6720 continue; 6721 6722 crtc_state->uapi.mode_changed = true; 6723 6724 ret = 
		ret = drm_atomic_add_affected_connectors(&state->base,
							 &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}

static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, connector);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}

static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};

static bool intel_edp_have_power(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool have_power = false;

	with_pps_lock(intel_dp, wakeref) {
		have_power = edp_have_panel_power(intel_dp) &&
			     edp_have_panel_vdd(intel_dp);
	}

	return have_power;
}

enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd || !intel_edp_have_power(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(&i915->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ? "long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ? "long" : "short");

	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	if (intel_dp->is_mst) {
		if (!intel_dp_check_mst_status(intel_dp))
			return IRQ_NONE;
	} else if (!intel_dp_short_pulse(intel_dp)) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
	/*
	 * eDP is not supported on g4x, so bail out early just
	 * for a bit of extra safety in case the VBT is bonkers.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		return false;

	if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
		return true;

	return intel_bios_is_port_edp(dev_priv, port);
}

static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!intel_dp_is_edp(intel_dp))
		drm_connector_attach_dp_subconnector_property(connector);

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (INTEL_GEN(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	intel_attach_colorspace_property(connector);

	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		drm_object_attach_property(&connector->base,
					   connector->dev->mode_config.hdr_output_metadata_property,
					   0);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
	}
}

static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}

static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);
	pp_ctl = ilk_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(dev_priv))
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);

	pp_on = intel_de_read(dev_priv, regs.pp_on);
	pp_off = intel_de_read(dev_priv, regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = intel_de_read(dev_priv, regs.pp_div);

		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}

static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}

static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
	struct edp_power_seq hw;
	struct edp_power_seq *sw = &intel_dp->pps_delays;

	intel_pps_readout_hw_state(intel_dp, &hw);

	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
		DRM_ERROR("PPS state mismatch\n");
		intel_pps_dump_state("sw", sw);
		intel_pps_dump_state("hw", &hw);
	}
}

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;
	/*
	 * On Toshiba Satellite P50-C-18C systems the VBT T12 delay of 500ms
	 * appears to be too short. Occasionally the panel just fails to
	 * power back on. Increasing the delay to 800ms seems sufficient to
	 * avoid this problem.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		drm_dbg_kms(&dev_priv->drm,
			    "Increasing T12 panel delay as per the quirk to %d\n",
			    vbt.t11_t12);
	}
	/*
	 * T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too.
	 */
	vbt.t11_t12 += 100 * 10;

	/*
	 * Upper limits from the eDP 1.3 spec. Note that we use the clunky
	 * units of our hw here, which are all in 100usec.
	 */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/*
	 * This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too.
	 */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/*
	 * Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits.
	 */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	drm_dbg_kms(&dev_priv->drm,
		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
		    intel_dp->panel_power_up_delay,
		    intel_dp->panel_power_down_delay,
		    intel_dp->panel_power_cycle_delay);

	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
		    intel_dp->backlight_on_delay,
		    intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
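/*
 * A worked example of the t11_t12 unit juggling above, under the stated
 * convention that all sw delays here are kept in 100us units:
 *
 *   VBT T12 of 500ms  ->  stored as 500 * 10 = 5000 (100us units)
 *   hw is zero-based in 100ms steps, so add 100ms: 5000 + 1000 = 6000
 *   roundup(6000, 1000) leaves 6000, i.e. 600ms, so the value written
 *   into the 100ms-granular hw field ends up being 6000 / 1000 = 6.
 *
 * The numbers are illustrative only; the actual values come from the VBT,
 * the current register state and the eDP spec limits above.
 */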
7118 */ 7119 if (force_disable_vdd) { 7120 u32 pp = ilk_get_pp_control(intel_dp); 7121 7122 drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON, 7123 "Panel power already on\n"); 7124 7125 if (pp & EDP_FORCE_VDD) 7126 drm_dbg_kms(&dev_priv->drm, 7127 "VDD already on, disabling first\n"); 7128 7129 pp &= ~EDP_FORCE_VDD; 7130 7131 intel_de_write(dev_priv, regs.pp_ctrl, pp); 7132 } 7133 7134 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) | 7135 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8); 7136 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) | 7137 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10); 7138 7139 /* Haswell doesn't have any port selection bits for the panel 7140 * power sequencer any more. */ 7141 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7142 port_sel = PANEL_PORT_SELECT_VLV(port); 7143 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 7144 switch (port) { 7145 case PORT_A: 7146 port_sel = PANEL_PORT_SELECT_DPA; 7147 break; 7148 case PORT_C: 7149 port_sel = PANEL_PORT_SELECT_DPC; 7150 break; 7151 case PORT_D: 7152 port_sel = PANEL_PORT_SELECT_DPD; 7153 break; 7154 default: 7155 MISSING_CASE(port); 7156 break; 7157 } 7158 } 7159 7160 pp_on |= port_sel; 7161 7162 intel_de_write(dev_priv, regs.pp_on, pp_on); 7163 intel_de_write(dev_priv, regs.pp_off, pp_off); 7164 7165 /* 7166 * Compute the divisor for the pp clock, simply match the Bspec formula. 7167 */ 7168 if (i915_mmio_reg_valid(regs.pp_div)) { 7169 intel_de_write(dev_priv, regs.pp_div, 7170 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); 7171 } else { 7172 u32 pp_ctl; 7173 7174 pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl); 7175 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK; 7176 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)); 7177 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); 7178 } 7179 7180 drm_dbg_kms(&dev_priv->drm, 7181 "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 7182 intel_de_read(dev_priv, regs.pp_on), 7183 intel_de_read(dev_priv, regs.pp_off), 7184 i915_mmio_reg_valid(regs.pp_div) ? 7185 intel_de_read(dev_priv, regs.pp_div) : 7186 (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK)); 7187 } 7188 7189 static void intel_dp_pps_init(struct intel_dp *intel_dp) 7190 { 7191 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7192 7193 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7194 vlv_initial_power_sequencer_setup(intel_dp); 7195 } else { 7196 intel_dp_init_panel_power_sequencer(intel_dp); 7197 intel_dp_init_panel_power_sequencer_registers(intel_dp, false); 7198 } 7199 } 7200 7201 /** 7202 * intel_dp_set_drrs_state - program registers for RR switch to take effect 7203 * @dev_priv: i915 device 7204 * @crtc_state: a pointer to the active intel_crtc_state 7205 * @refresh_rate: RR to be programmed 7206 * 7207 * This function gets called when refresh rate (RR) has to be changed from 7208 * one frequency to another. Switches can be between high and low RR 7209 * supported by the panel or to any other RR based on media playback (in 7210 * this case, RR value needs to be passed from user space). 7211 * 7212 * The caller of this function needs to take a lock on dev_priv->drrs. 
7213 */ 7214 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, 7215 const struct intel_crtc_state *crtc_state, 7216 int refresh_rate) 7217 { 7218 struct intel_dp *intel_dp = dev_priv->drrs.dp; 7219 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); 7220 enum drrs_refresh_rate_type index = DRRS_HIGH_RR; 7221 7222 if (refresh_rate <= 0) { 7223 drm_dbg_kms(&dev_priv->drm, 7224 "Refresh rate should be positive non-zero.\n"); 7225 return; 7226 } 7227 7228 if (intel_dp == NULL) { 7229 drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n"); 7230 return; 7231 } 7232 7233 if (!intel_crtc) { 7234 drm_dbg_kms(&dev_priv->drm, 7235 "DRRS: intel_crtc not initialized\n"); 7236 return; 7237 } 7238 7239 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { 7240 drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n"); 7241 return; 7242 } 7243 7244 if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) == 7245 refresh_rate) 7246 index = DRRS_LOW_RR; 7247 7248 if (index == dev_priv->drrs.refresh_rate_type) { 7249 drm_dbg_kms(&dev_priv->drm, 7250 "DRRS requested for previously set RR...ignoring\n"); 7251 return; 7252 } 7253 7254 if (!crtc_state->hw.active) { 7255 drm_dbg_kms(&dev_priv->drm, 7256 "eDP encoder disabled. CRTC not Active\n"); 7257 return; 7258 } 7259 7260 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { 7261 switch (index) { 7262 case DRRS_HIGH_RR: 7263 intel_dp_set_m_n(crtc_state, M1_N1); 7264 break; 7265 case DRRS_LOW_RR: 7266 intel_dp_set_m_n(crtc_state, M2_N2); 7267 break; 7268 case DRRS_MAX_RR: 7269 default: 7270 drm_err(&dev_priv->drm, 7271 "Unsupported refreshrate type\n"); 7272 } 7273 } else if (INTEL_GEN(dev_priv) > 6) { 7274 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); 7275 u32 val; 7276 7277 val = intel_de_read(dev_priv, reg); 7278 if (index > DRRS_HIGH_RR) { 7279 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7280 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; 7281 else 7282 val |= PIPECONF_EDP_RR_MODE_SWITCH; 7283 } else { 7284 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7285 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; 7286 else 7287 val &= ~PIPECONF_EDP_RR_MODE_SWITCH; 7288 } 7289 intel_de_write(dev_priv, reg, val); 7290 } 7291 7292 dev_priv->drrs.refresh_rate_type = index; 7293 7294 drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n", 7295 refresh_rate); 7296 } 7297 7298 static void 7299 intel_edp_drrs_enable_locked(struct intel_dp *intel_dp) 7300 { 7301 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7302 7303 dev_priv->drrs.busy_frontbuffer_bits = 0; 7304 dev_priv->drrs.dp = intel_dp; 7305 } 7306 7307 /** 7308 * intel_edp_drrs_enable - init drrs struct if supported 7309 * @intel_dp: DP struct 7310 * @crtc_state: A pointer to the active crtc state. 
/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 * @crtc_state: A pointer to the active crtc state.
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_drrs)
		return;

	drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");

	mutex_lock(&dev_priv->drrs.mutex);

	if (dev_priv->drrs.dp) {
		drm_warn(&dev_priv->drm, "DRRS already enabled\n");
		goto unlock;
	}

	intel_edp_drrs_enable_locked(intel_dp);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

static void
intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
		int refresh;

		refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
		intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
	}

	dev_priv->drrs.dp = NULL;
}

/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 * @old_crtc_state: Pointer to old crtc_state.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
	mutex_unlock(&dev_priv->drrs.mutex);

	cancel_delayed_work_sync(&dev_priv->drrs.work);
}

/**
 * intel_edp_drrs_update - Update DRRS state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function will update the DRRS state, disabling or enabling DRRS when
 * executing fastsets. For full modesets, intel_edp_drrs_disable() and
 * intel_edp_drrs_enable() should be called instead.
 */
void
intel_edp_drrs_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
		return;

	mutex_lock(&dev_priv->drrs.mutex);

	/* New state matches current one? */
	if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
		goto unlock;

	if (crtc_state->has_drrs)
		intel_edp_drrs_enable_locked(intel_dp);
	else
		intel_edp_drrs_disable_locked(intel_dp, crtc_state);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
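/*
 * Example (not compiled): how the three entry points above divide the work.
 * A hypothetical commit helper, purely illustrative; the real call sites
 * live in the atomic modeset code, not in this file.
 */
#if 0
static void example_commit_drrs(struct intel_dp *intel_dp,
				const struct intel_crtc_state *old_crtc_state,
				const struct intel_crtc_state *new_crtc_state,
				bool is_full_modeset)
{
	if (is_full_modeset) {
		/* Full modeset: tear down and rebuild the DRRS state. */
		intel_edp_drrs_disable(intel_dp, old_crtc_state);
		intel_edp_drrs_enable(intel_dp, new_crtc_state);
	} else {
		/* Fastset: reconcile DRRS with the new state in place. */
		intel_edp_drrs_update(intel_dp, new_crtc_state);
	}
}
#endif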
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate, hence we need to
	 * recheck.
	 */
	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen, hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	mutex_unlock(&dev_priv->drrs.mutex);
}
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed, or a flip on a crtc has completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted if no
 * other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen, hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	/*
	 * flush also means no more activity, hence schedule the downclock
	 * if all other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing the refresh rate (RR) by doing a full
 * modeset (may appear as a blink on screen) and is used in dock-undock
 * scenarios. Seamless DRRS involves changing the RR without any visual
 * effect to the user and can be used during normal system usage. This is
 * done by programming certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to a low RR based on usage scenarios.
 *
 * The implementation is based on the frontbuffer tracking implementation.
 * When there is a disturbance on the screen triggered by user activity or
 * a periodic system activity, DRRS is disabled (RR is changed to high RR).
 * When there is no movement on screen, after a timeout of 1 second, a
 * switch to low RR is made.
 *
 * For integration with frontbuffer tracking code,
 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called; see
 * the sketch following this comment.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */
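/*
 * Example (not compiled): a minimal sketch of the frontbuffer-tracking
 * integration described above. The helper and its @bits parameter are
 * hypothetical; the real hooks live in the frontbuffer tracking code.
 */
#if 0
static void example_frontbuffer_render(struct drm_i915_private *dev_priv,
				       unsigned int bits)
{
	/* Rendering starts: upclock (LOW_RR -> HIGH_RR) and mark busy. */
	intel_edp_drrs_invalidate(dev_priv, bits);

	/* ... drawing happens here ... */

	/*
	 * Rendering done: stay at high RR, and if no other frontbuffers
	 * are busy, the 1 second idleness timer is re-armed to downclock.
	 */
	intel_edp_drrs_flush(dev_priv, bits);
}
#endif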
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of a downclock mode (apart
 * from the VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct drm_display_mode *downclock_mode = NULL;

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	rw_init(&dev_priv->drrs.mutex, "drrs");

	if (INTEL_GEN(dev_priv) <= 6) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS is only supported on Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
		return NULL;
	}

	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
	if (!downclock_mode) {
		drm_dbg_kms(&dev_priv->drm,
			    "Downclock mode not found, DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	drm_dbg_kms(&dev_priv->drm,
		    "seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
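/*
 * Example (not compiled): intel_dp_drrs_init() feeds the returned downclock
 * mode into panel init. intel_edp_init_connector() below does exactly this;
 * the fragment here is just that call pattern pulled out in isolation.
 */
#if 0
	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector,
						    fixed_mode);
	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
#endif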
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	intel_wakeref_t wakeref;
	struct edid *edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS, bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(dev,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	with_pps_lock(intel_dp, wakeref) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	/* Cache the DPCD and EDID for eDP. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector, edid);
			intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* fall back to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if
		 * that fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		drm_dbg_kms(&dev_priv->drm,
			    "using pipe %c for initial backlight setup\n",
			    pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode) {
		drm_connector_set_panel_orientation_with_quirk(connector,
							       dev_priv->vbt.orientation,
							       fixed_mode->hdisplay,
							       fixed_mode->vdisplay);
	}

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);

	return false;
}

static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);

	/* Grab the locks before changing the connector property */
	mutex_lock(&connector->dev->mode_config.mutex);
	/*
	 * Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(connector->dev);
}
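/*
 * Example (not compiled): the retry work above is meant to be scheduled
 * when link training fails, so the handler can flag the link as bad and
 * poke userspace. A hypothetical failure path would simply queue it; the
 * condition name below is illustrative only.
 */
#if 0
	if (link_training_failed)	/* hypothetical condition */
		schedule_work(&intel_connector->modeset_retry_work);
#endif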
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp_set_source_rates(intel_dp);
	intel_dp_set_default_sink_rates(intel_dp);
	intel_dp_set_common_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
			      IS_CHERRYVIEW(dev_priv)) &&
			intel_dp_is_edp(intel_dp) &&
			port != PORT_B && port != PORT_C))
		return false;

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);
"eDP" : "DP", 7836 intel_encoder->base.base.id, intel_encoder->base.name); 7837 7838 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 7839 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 7840 7841 if (!HAS_GMCH(dev_priv)) 7842 connector->interlace_allowed = true; 7843 connector->doublescan_allowed = 0; 7844 7845 intel_connector->polled = DRM_CONNECTOR_POLL_HPD; 7846 7847 intel_dp_aux_init(intel_dp); 7848 7849 intel_connector_attach_encoder(intel_connector, intel_encoder); 7850 7851 if (HAS_DDI(dev_priv)) 7852 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 7853 else 7854 intel_connector->get_hw_state = intel_connector_get_hw_state; 7855 7856 /* init MST on ports that can support it */ 7857 intel_dp_mst_encoder_init(dig_port, 7858 intel_connector->base.base.id); 7859 7860 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 7861 intel_dp_aux_fini(intel_dp); 7862 intel_dp_mst_encoder_cleanup(dig_port); 7863 goto fail; 7864 } 7865 7866 intel_dp_add_properties(intel_dp, connector); 7867 7868 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { 7869 int ret = intel_dp_init_hdcp(dig_port, intel_connector); 7870 if (ret) 7871 drm_dbg_kms(&dev_priv->drm, 7872 "HDCP init failed, skipping.\n"); 7873 } 7874 7875 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 7876 * 0xd. Failure to do so will result in spurious interrupts being 7877 * generated on the port when a cable is not attached. 7878 */ 7879 if (IS_G45(dev_priv)) { 7880 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); 7881 intel_de_write(dev_priv, PEG_BAND_GAP_DATA, 7882 (temp & ~0xf) | 0xd); 7883 } 7884 7885 return true; 7886 7887 fail: 7888 drm_connector_cleanup(connector); 7889 7890 return false; 7891 } 7892 7893 bool intel_dp_init(struct drm_i915_private *dev_priv, 7894 i915_reg_t output_reg, 7895 enum port port) 7896 { 7897 struct intel_digital_port *dig_port; 7898 struct intel_encoder *intel_encoder; 7899 struct drm_encoder *encoder; 7900 struct intel_connector *intel_connector; 7901 7902 dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); 7903 if (!dig_port) 7904 return false; 7905 7906 intel_connector = intel_connector_alloc(); 7907 if (!intel_connector) 7908 goto err_connector_alloc; 7909 7910 intel_encoder = &dig_port->base; 7911 encoder = &intel_encoder->base; 7912 7913 rw_init(&dig_port->hdcp_mutex, "dphdcp"); 7914 7915 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, 7916 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, 7917 "DP %c", port_name(port))) 7918 goto err_encoder_init; 7919 7920 intel_encoder->hotplug = intel_dp_hotplug; 7921 intel_encoder->compute_config = intel_dp_compute_config; 7922 intel_encoder->get_hw_state = intel_dp_get_hw_state; 7923 intel_encoder->get_config = intel_dp_get_config; 7924 intel_encoder->update_pipe = intel_panel_update_backlight; 7925 intel_encoder->suspend = intel_dp_encoder_suspend; 7926 if (IS_CHERRYVIEW(dev_priv)) { 7927 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; 7928 intel_encoder->pre_enable = chv_pre_enable_dp; 7929 intel_encoder->enable = vlv_enable_dp; 7930 intel_encoder->disable = vlv_disable_dp; 7931 intel_encoder->post_disable = chv_post_disable_dp; 7932 intel_encoder->post_pll_disable = chv_dp_post_pll_disable; 7933 } else if (IS_VALLEYVIEW(dev_priv)) { 7934 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; 7935 intel_encoder->pre_enable = vlv_pre_enable_dp; 7936 intel_encoder->enable = vlv_enable_dp; 7937 intel_encoder->disable = vlv_disable_dp; 7938 
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A))
		dig_port->dp.set_link_train = cpt_set_link_train;
	else
		dig_port->dp.set_link_train = g4x_set_link_train;

	if (IS_CHERRYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = chv_set_signal_levels;
	else if (IS_VALLEYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = vlv_set_signal_levels;
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
	else if (IS_GEN(dev_priv, 6) && port == PORT_A)
		dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
	else
		dig_port->dp.set_signal_levels = g4x_set_signal_levels;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
	    (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
		dig_port->dp.preemph_max = intel_dp_preemph_max_3;
		dig_port->dp.voltage_max = intel_dp_voltage_max_3;
	} else {
		dig_port->dp.preemph_max = intel_dp_preemph_max_2;
		dig_port->dp.voltage_max = intel_dp_voltage_max_2;
	}

	dig_port->dp.output_reg = output_reg;
	dig_port->max_lanes = 4;
	dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
	dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->pipe_mask = BIT(PIPE_C);
		else
			intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
	} else {
		intel_encoder->pipe_mask = ~0;
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;
	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	dig_port->hpd_pulse = intel_dp_hpd_pulse;

	if (HAS_GMCH(dev_priv)) {
		if (IS_GM45(dev_priv))
			dig_port->connected = gm45_digital_port_connected;
		else
			dig_port->connected = g4x_digital_port_connected;
	} else {
		if (port == PORT_A)
			dig_port->connected = ilk_digital_port_connected;
		else
			dig_port->connected = ibx_digital_port_connected;
	}

	if (port != PORT_A)
		intel_infoframe_init(dig_port);

	dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(dig_port);
	return false;
}

void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}
void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
						     true);
		if (ret) {
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}
}
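/*
 * Example (not compiled): intel_dp_init() is the entry point the display
 * setup code uses to register a DP port on pre-DDI platforms. A minimal
 * sketch; DP_B/PORT_B here are placeholders for whichever port register
 * and port the platform actually exposes.
 */
#if 0
	if (!intel_dp_init(dev_priv, DP_B, PORT_B))
		drm_dbg_kms(&dev_priv->drm, "DP B init failed\n");
#endif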