1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28 #include <linux/i2c.h> 29 #include <linux/slab.h> 30 #include <linux/export.h> 31 #include <linux/types.h> 32 #include <linux/notifier.h> 33 #include <linux/reboot.h> 34 #include <asm/byteorder.h> 35 #include <drm/drmP.h> 36 #include <drm/drm_atomic_helper.h> 37 #include <drm/drm_crtc.h> 38 #include <drm/drm_crtc_helper.h> 39 #include <drm/drm_edid.h> 40 #include "intel_drv.h" 41 #include <drm/i915_drm.h> 42 #include "i915_drv.h" 43 44 #define DP_LINK_CHECK_TIMEOUT (10 * 1000) 45 #define DP_DPRX_ESI_LEN 14 46 47 /* Compliance test status bits */ 48 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0 49 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) 50 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK) 51 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK) 52 53 struct dp_link_dpll { 54 int clock; 55 struct dpll dpll; 56 }; 57 58 static const struct dp_link_dpll gen4_dpll[] = { 59 { 162000, 60 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } }, 61 { 270000, 62 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } } 63 }; 64 65 static const struct dp_link_dpll pch_dpll[] = { 66 { 162000, 67 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } }, 68 { 270000, 69 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } } 70 }; 71 72 static const struct dp_link_dpll vlv_dpll[] = { 73 { 162000, 74 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } }, 75 { 270000, 76 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } } 77 }; 78 79 /* 80 * CHV supports eDP 1.4 that have more link rates. 81 * Below only provides the fixed rate but exclude variable rate. 82 */ 83 static const struct dp_link_dpll chv_dpll[] = { 84 /* 85 * CHV requires to program fractional division for m2. 86 * m2 is stored in fixed point format using formula below 87 * (m2_int << 22) | m2_fraction 88 */ 89 { 162000, /* m2_int = 32, m2_fraction = 1677722 */ 90 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } }, 91 { 270000, /* m2_int = 27, m2_fraction = 0 */ 92 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }, 93 { 540000, /* m2_int = 27, m2_fraction = 0 */ 94 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } } 95 }; 96 97 static const int bxt_rates[] = { 162000, 216000, 243000, 270000, 98 324000, 432000, 540000 }; 99 static const int skl_rates[] = { 162000, 216000, 270000, 100 324000, 432000, 540000 }; 101 static const int cnl_rates[] = { 162000, 216000, 270000, 102 324000, 432000, 540000, 103 648000, 810000 }; 104 static const int default_rates[] = { 162000, 270000, 540000 }; 105 106 /** 107 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH) 108 * @intel_dp: DP struct 109 * 110 * If a CPU or PCH DP output is attached to an eDP panel, this function 111 * will return true, and false otherwise. 112 */ 113 bool intel_dp_is_edp(struct intel_dp *intel_dp) 114 { 115 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 116 117 return intel_dig_port->base.type == INTEL_OUTPUT_EDP; 118 } 119 120 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp) 121 { 122 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 123 124 return intel_dig_port->base.base.dev; 125 } 126 127 static struct intel_dp *intel_attached_dp(struct drm_connector *connector) 128 { 129 return enc_to_intel_dp(&intel_attached_encoder(connector)->base); 130 } 131 132 static void intel_dp_link_down(struct intel_dp *intel_dp); 133 static bool edp_panel_vdd_on(struct intel_dp *intel_dp); 134 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); 135 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp); 136 static void vlv_steal_power_sequencer(struct drm_device *dev, 137 enum i915_pipe pipe); 138 static void intel_dp_unset_edid(struct intel_dp *intel_dp); 139 140 /* update sink rates from dpcd */ 141 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) 142 { 143 int i, max_rate; 144 145 max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); 146 147 for (i = 0; i < ARRAY_SIZE(default_rates); i++) { 148 if (default_rates[i] > max_rate) 149 break; 150 intel_dp->sink_rates[i] = default_rates[i]; 151 } 152 153 intel_dp->num_sink_rates = i; 154 } 155 156 /* Theoretical max between source and sink */ 157 static int intel_dp_max_common_rate(struct intel_dp *intel_dp) 158 { 159 return intel_dp->common_rates[intel_dp->num_common_rates - 1]; 160 } 161 162 /* Theoretical max between source and sink */ 163 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) 164 { 165 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 166 int source_max = intel_dig_port->max_lanes; 167 int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); 168 169 return min(source_max, sink_max); 170 } 171 172 int intel_dp_max_lane_count(struct intel_dp *intel_dp) 173 { 174 return intel_dp->max_link_lane_count; 175 } 176 177 int 178 intel_dp_link_required(int pixel_clock, int bpp) 179 { 180 /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */ 181 return DIV_ROUND_UP(pixel_clock * bpp, 8); 182 } 183 184 int 185 intel_dp_max_data_rate(int max_link_clock, int max_lanes) 186 { 187 /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the 188 * link rate that is generally expressed in Gbps. Since, 8 bits of data 189 * is transmitted every LS_Clk per lane, there is no need to account for 190 * the channel encoding that is done in the PHY layer here. 191 */ 192 193 return max_link_clock * max_lanes; 194 } 195 196 static int 197 intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp) 198 { 199 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 200 struct intel_encoder *encoder = &intel_dig_port->base; 201 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 202 int max_dotclk = dev_priv->max_dotclk_freq; 203 int ds_max_dotclk; 204 205 int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 206 207 if (type != DP_DS_PORT_TYPE_VGA) 208 return max_dotclk; 209 210 ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd, 211 intel_dp->downstream_ports); 212 213 if (ds_max_dotclk != 0) 214 max_dotclk = min(max_dotclk, ds_max_dotclk); 215 216 return max_dotclk; 217 } 218 219 static void 220 intel_dp_set_source_rates(struct intel_dp *intel_dp) 221 { 222 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 223 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 224 enum port port = dig_port->port; 225 const int *source_rates; 226 int size; 227 u32 voltage; 228 229 /* This should only be done once */ 230 WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates); 231 232 if (IS_GEN9_LP(dev_priv)) { 233 source_rates = bxt_rates; 234 size = ARRAY_SIZE(bxt_rates); 235 } else if (IS_CANNONLAKE(dev_priv)) { 236 source_rates = cnl_rates; 237 size = ARRAY_SIZE(cnl_rates); 238 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; 239 if (port == PORT_A || port == PORT_D || 240 voltage == VOLTAGE_INFO_0_85V) 241 size -= 2; 242 } else if (IS_GEN9_BC(dev_priv)) { 243 source_rates = skl_rates; 244 size = ARRAY_SIZE(skl_rates); 245 } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) || 246 IS_BROADWELL(dev_priv)) { 247 source_rates = default_rates; 248 size = ARRAY_SIZE(default_rates); 249 } else { 250 source_rates = default_rates; 251 size = ARRAY_SIZE(default_rates) - 1; 252 } 253 254 intel_dp->source_rates = source_rates; 255 intel_dp->num_source_rates = size; 256 } 257 258 static int intersect_rates(const int *source_rates, int source_len, 259 const int *sink_rates, int sink_len, 260 int *common_rates) 261 { 262 int i = 0, j = 0, k = 0; 263 264 while (i < source_len && j < sink_len) { 265 if (source_rates[i] == sink_rates[j]) { 266 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) 267 return k; 268 common_rates[k] = source_rates[i]; 269 ++k; 270 ++i; 271 ++j; 272 } else if (source_rates[i] < sink_rates[j]) { 273 ++i; 274 } else { 275 ++j; 276 } 277 } 278 return k; 279 } 280 281 /* return index of rate in rates array, or -1 if not found */ 282 static int intel_dp_rate_index(const int *rates, int len, int rate) 283 { 284 int i; 285 286 for (i = 0; i < len; i++) 287 if (rate == rates[i]) 288 return i; 289 290 return -1; 291 } 292 293 static void intel_dp_set_common_rates(struct intel_dp *intel_dp) 294 { 295 WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates); 296 297 intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates, 298 intel_dp->num_source_rates, 299 intel_dp->sink_rates, 300 intel_dp->num_sink_rates, 301 intel_dp->common_rates); 302 303 /* Paranoia, there should always be something in common. */ 304 if (WARN_ON(intel_dp->num_common_rates == 0)) { 305 intel_dp->common_rates[0] = default_rates[0]; 306 intel_dp->num_common_rates = 1; 307 } 308 } 309 310 /* get length of common rates potentially limited by max_rate */ 311 static int intel_dp_common_len_rate_limit(struct intel_dp *intel_dp, 312 int max_rate) 313 { 314 const int *common_rates = intel_dp->common_rates; 315 int i, common_len = intel_dp->num_common_rates; 316 317 /* Limit results by potentially reduced max rate */ 318 for (i = 0; i < common_len; i++) { 319 if (common_rates[common_len - i - 1] <= max_rate) 320 return common_len - i; 321 } 322 323 return 0; 324 } 325 326 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, 327 uint8_t lane_count) 328 { 329 /* 330 * FIXME: we need to synchronize the current link parameters with 331 * hardware readout. Currently fast link training doesn't work on 332 * boot-up. 333 */ 334 if (link_rate == 0 || 335 link_rate > intel_dp->max_link_rate) 336 return false; 337 338 if (lane_count == 0 || 339 lane_count > intel_dp_max_lane_count(intel_dp)) 340 return false; 341 342 return true; 343 } 344 345 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, 346 int link_rate, uint8_t lane_count) 347 { 348 int index; 349 350 index = intel_dp_rate_index(intel_dp->common_rates, 351 intel_dp->num_common_rates, 352 link_rate); 353 if (index > 0) { 354 intel_dp->max_link_rate = intel_dp->common_rates[index - 1]; 355 intel_dp->max_link_lane_count = lane_count; 356 } else if (lane_count > 1) { 357 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 358 intel_dp->max_link_lane_count = lane_count >> 1; 359 } else { 360 DRM_ERROR("Link Training Unsuccessful\n"); 361 return -1; 362 } 363 364 return 0; 365 } 366 367 static enum drm_mode_status 368 intel_dp_mode_valid(struct drm_connector *connector, 369 struct drm_display_mode *mode) 370 { 371 struct intel_dp *intel_dp = intel_attached_dp(connector); 372 struct intel_connector *intel_connector = to_intel_connector(connector); 373 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 374 int target_clock = mode->clock; 375 int max_rate, mode_rate, max_lanes, max_link_clock; 376 int max_dotclk; 377 378 max_dotclk = intel_dp_downstream_max_dotclock(intel_dp); 379 380 if (intel_dp_is_edp(intel_dp) && fixed_mode) { 381 if (mode->hdisplay > fixed_mode->hdisplay) 382 return MODE_PANEL; 383 384 if (mode->vdisplay > fixed_mode->vdisplay) 385 return MODE_PANEL; 386 387 target_clock = fixed_mode->clock; 388 } 389 390 max_link_clock = intel_dp_max_link_rate(intel_dp); 391 max_lanes = intel_dp_max_lane_count(intel_dp); 392 393 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 394 mode_rate = intel_dp_link_required(target_clock, 18); 395 396 if (mode_rate > max_rate || target_clock > max_dotclk) 397 return MODE_CLOCK_HIGH; 398 399 if (mode->clock < 10000) 400 return MODE_CLOCK_LOW; 401 402 if (mode->flags & DRM_MODE_FLAG_DBLCLK) 403 return MODE_H_ILLEGAL; 404 405 return MODE_OK; 406 } 407 408 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes) 409 { 410 int i; 411 uint32_t v = 0; 412 413 if (src_bytes > 4) 414 src_bytes = 4; 415 for (i = 0; i < src_bytes; i++) 416 v |= ((uint32_t) src[i]) << ((3-i) * 8); 417 return v; 418 } 419 420 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) 421 { 422 int i; 423 if (dst_bytes > 4) 424 dst_bytes = 4; 425 for (i = 0; i < dst_bytes; i++) 426 dst[i] = src >> ((3-i) * 8); 427 } 428 429 static void 430 intel_dp_init_panel_power_sequencer(struct drm_device *dev, 431 struct intel_dp *intel_dp); 432 static void 433 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, 434 struct intel_dp *intel_dp, 435 bool force_disable_vdd); 436 static void 437 intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp); 438 439 static void pps_lock(struct intel_dp *intel_dp) 440 { 441 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 442 struct intel_encoder *encoder = &intel_dig_port->base; 443 struct drm_device *dev = encoder->base.dev; 444 struct drm_i915_private *dev_priv = to_i915(dev); 445 446 /* 447 * See vlv_power_sequencer_reset() why we need 448 * a power domain reference here. 449 */ 450 intel_display_power_get(dev_priv, intel_dp->aux_power_domain); 451 452 mutex_lock(&dev_priv->pps_mutex); 453 } 454 455 static void pps_unlock(struct intel_dp *intel_dp) 456 { 457 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 458 struct intel_encoder *encoder = &intel_dig_port->base; 459 struct drm_device *dev = encoder->base.dev; 460 struct drm_i915_private *dev_priv = to_i915(dev); 461 462 mutex_unlock(&dev_priv->pps_mutex); 463 464 intel_display_power_put(dev_priv, intel_dp->aux_power_domain); 465 } 466 467 static void 468 vlv_power_sequencer_kick(struct intel_dp *intel_dp) 469 { 470 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 471 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); 472 enum i915_pipe pipe = intel_dp->pps_pipe; 473 bool pll_enabled, release_cl_override = false; 474 enum dpio_phy phy = DPIO_PHY(pipe); 475 enum dpio_channel ch = vlv_pipe_to_channel(pipe); 476 uint32_t DP; 477 478 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN, 479 "skipping pipe %c power seqeuncer kick due to port %c being active\n", 480 pipe_name(pipe), port_name(intel_dig_port->port))) 481 return; 482 483 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n", 484 pipe_name(pipe), port_name(intel_dig_port->port)); 485 486 /* Preserve the BIOS-computed detected bit. This is 487 * supposed to be read-only. 488 */ 489 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; 490 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 491 DP |= DP_PORT_WIDTH(1); 492 DP |= DP_LINK_TRAIN_PAT_1; 493 494 if (IS_CHERRYVIEW(dev_priv)) 495 DP |= DP_PIPE_SELECT_CHV(pipe); 496 else if (pipe == PIPE_B) 497 DP |= DP_PIPEB_SELECT; 498 499 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE; 500 501 /* 502 * The DPLL for the pipe must be enabled for this to work. 503 * So enable temporarily it if it's not already enabled. 504 */ 505 if (!pll_enabled) { 506 release_cl_override = IS_CHERRYVIEW(dev_priv) && 507 !chv_phy_powergate_ch(dev_priv, phy, ch, true); 508 509 if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ? 510 &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) { 511 DRM_ERROR("Failed to force on pll for pipe %c!\n", 512 pipe_name(pipe)); 513 return; 514 } 515 } 516 517 /* 518 * Similar magic as in intel_dp_enable_port(). 519 * We _must_ do this port enable + disable trick 520 * to make this power seqeuencer lock onto the port. 521 * Otherwise even VDD force bit won't work. 522 */ 523 I915_WRITE(intel_dp->output_reg, DP); 524 POSTING_READ(intel_dp->output_reg); 525 526 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN); 527 POSTING_READ(intel_dp->output_reg); 528 529 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); 530 POSTING_READ(intel_dp->output_reg); 531 532 if (!pll_enabled) { 533 vlv_force_pll_off(dev_priv, pipe); 534 535 if (release_cl_override) 536 chv_phy_powergate_ch(dev_priv, phy, ch, false); 537 } 538 } 539 540 static enum i915_pipe vlv_find_free_pps(struct drm_i915_private *dev_priv) 541 { 542 struct intel_encoder *encoder; 543 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B); 544 545 /* 546 * We don't have power sequencer currently. 547 * Pick one that's not used by other ports. 548 */ 549 for_each_intel_encoder(&dev_priv->drm, encoder) { 550 struct intel_dp *intel_dp; 551 552 if (encoder->type != INTEL_OUTPUT_DP && 553 encoder->type != INTEL_OUTPUT_EDP) 554 continue; 555 556 intel_dp = enc_to_intel_dp(&encoder->base); 557 558 if (encoder->type == INTEL_OUTPUT_EDP) { 559 WARN_ON(intel_dp->active_pipe != INVALID_PIPE && 560 intel_dp->active_pipe != intel_dp->pps_pipe); 561 562 if (intel_dp->pps_pipe != INVALID_PIPE) 563 pipes &= ~(1 << intel_dp->pps_pipe); 564 } else { 565 WARN_ON(intel_dp->pps_pipe != INVALID_PIPE); 566 567 if (intel_dp->active_pipe != INVALID_PIPE) 568 pipes &= ~(1 << intel_dp->active_pipe); 569 } 570 } 571 572 if (pipes == 0) 573 return INVALID_PIPE; 574 575 return ffs(pipes) - 1; 576 } 577 578 static enum i915_pipe 579 vlv_power_sequencer_pipe(struct intel_dp *intel_dp) 580 { 581 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 582 struct drm_device *dev = intel_dig_port->base.base.dev; 583 struct drm_i915_private *dev_priv = to_i915(dev); 584 enum i915_pipe pipe; 585 586 lockdep_assert_held(&dev_priv->pps_mutex); 587 588 /* We should never land here with regular DP ports */ 589 WARN_ON(!intel_dp_is_edp(intel_dp)); 590 591 WARN_ON(intel_dp->active_pipe != INVALID_PIPE && 592 intel_dp->active_pipe != intel_dp->pps_pipe); 593 594 if (intel_dp->pps_pipe != INVALID_PIPE) 595 return intel_dp->pps_pipe; 596 597 pipe = vlv_find_free_pps(dev_priv); 598 599 /* 600 * Didn't find one. This should not happen since there 601 * are two power sequencers and up to two eDP ports. 602 */ 603 if (WARN_ON(pipe == INVALID_PIPE)) 604 pipe = PIPE_A; 605 606 vlv_steal_power_sequencer(dev, pipe); 607 intel_dp->pps_pipe = pipe; 608 609 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n", 610 pipe_name(intel_dp->pps_pipe), 611 port_name(intel_dig_port->port)); 612 613 /* init power sequencer on this pipe and port */ 614 intel_dp_init_panel_power_sequencer(dev, intel_dp); 615 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true); 616 617 /* 618 * Even vdd force doesn't work until we've made 619 * the power sequencer lock in on the port. 620 */ 621 vlv_power_sequencer_kick(intel_dp); 622 623 return intel_dp->pps_pipe; 624 } 625 626 static int 627 bxt_power_sequencer_idx(struct intel_dp *intel_dp) 628 { 629 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 630 struct drm_device *dev = intel_dig_port->base.base.dev; 631 struct drm_i915_private *dev_priv = to_i915(dev); 632 633 lockdep_assert_held(&dev_priv->pps_mutex); 634 635 /* We should never land here with regular DP ports */ 636 WARN_ON(!intel_dp_is_edp(intel_dp)); 637 638 /* 639 * TODO: BXT has 2 PPS instances. The correct port->PPS instance 640 * mapping needs to be retrieved from VBT, for now just hard-code to 641 * use instance #0 always. 642 */ 643 if (!intel_dp->pps_reset) 644 return 0; 645 646 intel_dp->pps_reset = false; 647 648 /* 649 * Only the HW needs to be reprogrammed, the SW state is fixed and 650 * has been setup during connector init. 651 */ 652 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false); 653 654 return 0; 655 } 656 657 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, 658 enum i915_pipe pipe); 659 660 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv, 661 enum i915_pipe pipe) 662 { 663 return I915_READ(PP_STATUS(pipe)) & PP_ON; 664 } 665 666 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv, 667 enum i915_pipe pipe) 668 { 669 return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD; 670 } 671 672 static bool vlv_pipe_any(struct drm_i915_private *dev_priv, 673 enum i915_pipe pipe) 674 { 675 return true; 676 } 677 678 static enum i915_pipe 679 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv, 680 enum port port, 681 vlv_pipe_check pipe_check) 682 { 683 enum i915_pipe pipe; 684 685 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) { 686 u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) & 687 PANEL_PORT_SELECT_MASK; 688 689 if (port_sel != PANEL_PORT_SELECT_VLV(port)) 690 continue; 691 692 if (!pipe_check(dev_priv, pipe)) 693 continue; 694 695 return pipe; 696 } 697 698 return INVALID_PIPE; 699 } 700 701 static void 702 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) 703 { 704 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 705 struct drm_device *dev = intel_dig_port->base.base.dev; 706 struct drm_i915_private *dev_priv = to_i915(dev); 707 enum port port = intel_dig_port->port; 708 709 lockdep_assert_held(&dev_priv->pps_mutex); 710 711 /* try to find a pipe with this port selected */ 712 /* first pick one where the panel is on */ 713 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port, 714 vlv_pipe_has_pp_on); 715 /* didn't find one? pick one where vdd is on */ 716 if (intel_dp->pps_pipe == INVALID_PIPE) 717 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port, 718 vlv_pipe_has_vdd_on); 719 /* didn't find one? pick one with just the correct port */ 720 if (intel_dp->pps_pipe == INVALID_PIPE) 721 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port, 722 vlv_pipe_any); 723 724 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */ 725 if (intel_dp->pps_pipe == INVALID_PIPE) { 726 DRM_DEBUG_KMS("no initial power sequencer for port %c\n", 727 port_name(port)); 728 return; 729 } 730 731 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n", 732 port_name(port), pipe_name(intel_dp->pps_pipe)); 733 734 intel_dp_init_panel_power_sequencer(dev, intel_dp); 735 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false); 736 } 737 738 void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) 739 { 740 struct drm_device *dev = &dev_priv->drm; 741 struct intel_encoder *encoder; 742 743 if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 744 !IS_GEN9_LP(dev_priv))) 745 return; 746 747 /* 748 * We can't grab pps_mutex here due to deadlock with power_domain 749 * mutex when power_domain functions are called while holding pps_mutex. 750 * That also means that in order to use pps_pipe the code needs to 751 * hold both a power domain reference and pps_mutex, and the power domain 752 * reference get/put must be done while _not_ holding pps_mutex. 753 * pps_{lock,unlock}() do these steps in the correct order, so one 754 * should use them always. 755 */ 756 757 for_each_intel_encoder(dev, encoder) { 758 struct intel_dp *intel_dp; 759 760 if (encoder->type != INTEL_OUTPUT_DP && 761 encoder->type != INTEL_OUTPUT_EDP) 762 continue; 763 764 intel_dp = enc_to_intel_dp(&encoder->base); 765 766 WARN_ON(intel_dp->active_pipe != INVALID_PIPE); 767 768 if (encoder->type != INTEL_OUTPUT_EDP) 769 continue; 770 771 if (IS_GEN9_LP(dev_priv)) 772 intel_dp->pps_reset = true; 773 else 774 intel_dp->pps_pipe = INVALID_PIPE; 775 } 776 } 777 778 struct pps_registers { 779 i915_reg_t pp_ctrl; 780 i915_reg_t pp_stat; 781 i915_reg_t pp_on; 782 i915_reg_t pp_off; 783 i915_reg_t pp_div; 784 }; 785 786 static void intel_pps_get_registers(struct drm_i915_private *dev_priv, 787 struct intel_dp *intel_dp, 788 struct pps_registers *regs) 789 { 790 int pps_idx = 0; 791 792 memset(regs, 0, sizeof(*regs)); 793 794 if (IS_GEN9_LP(dev_priv)) 795 pps_idx = bxt_power_sequencer_idx(intel_dp); 796 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 797 pps_idx = vlv_power_sequencer_pipe(intel_dp); 798 799 regs->pp_ctrl = PP_CONTROL(pps_idx); 800 regs->pp_stat = PP_STATUS(pps_idx); 801 regs->pp_on = PP_ON_DELAYS(pps_idx); 802 regs->pp_off = PP_OFF_DELAYS(pps_idx); 803 if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv)) 804 regs->pp_div = PP_DIVISOR(pps_idx); 805 } 806 807 static i915_reg_t 808 _pp_ctrl_reg(struct intel_dp *intel_dp) 809 { 810 struct pps_registers regs; 811 812 intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp, 813 ®s); 814 815 return regs.pp_ctrl; 816 } 817 818 static i915_reg_t 819 _pp_stat_reg(struct intel_dp *intel_dp) 820 { 821 struct pps_registers regs; 822 823 intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp, 824 ®s); 825 826 return regs.pp_stat; 827 } 828 829 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing 830 This function only applicable when panel PM state is not to be tracked */ 831 static int edp_notify_handler(struct notifier_block *this, unsigned long code, 832 void *unused) 833 { 834 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp), 835 edp_notifier); 836 struct drm_device *dev = intel_dp_to_dev(intel_dp); 837 struct drm_i915_private *dev_priv = to_i915(dev); 838 839 #if 0 840 if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART) 841 #endif 842 if (!intel_dp_is_edp(intel_dp)) 843 return 0; 844 845 pps_lock(intel_dp); 846 847 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 848 enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp); 849 i915_reg_t pp_ctrl_reg, pp_div_reg; 850 u32 pp_div; 851 852 pp_ctrl_reg = PP_CONTROL(pipe); 853 pp_div_reg = PP_DIVISOR(pipe); 854 pp_div = I915_READ(pp_div_reg); 855 pp_div &= PP_REFERENCE_DIVIDER_MASK; 856 857 /* 0x1F write to PP_DIV_REG sets max cycle delay */ 858 I915_WRITE(pp_div_reg, pp_div | 0x1F); 859 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF); 860 msleep(intel_dp->panel_power_cycle_delay); 861 } 862 863 pps_unlock(intel_dp); 864 865 return 0; 866 } 867 868 static bool edp_have_panel_power(struct intel_dp *intel_dp) 869 { 870 struct drm_device *dev = intel_dp_to_dev(intel_dp); 871 struct drm_i915_private *dev_priv = to_i915(dev); 872 873 lockdep_assert_held(&dev_priv->pps_mutex); 874 875 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 876 intel_dp->pps_pipe == INVALID_PIPE) 877 return false; 878 879 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0; 880 } 881 882 static bool edp_have_panel_vdd(struct intel_dp *intel_dp) 883 { 884 struct drm_device *dev = intel_dp_to_dev(intel_dp); 885 struct drm_i915_private *dev_priv = to_i915(dev); 886 887 lockdep_assert_held(&dev_priv->pps_mutex); 888 889 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 890 intel_dp->pps_pipe == INVALID_PIPE) 891 return false; 892 893 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD; 894 } 895 896 static void 897 intel_dp_check_edp(struct intel_dp *intel_dp) 898 { 899 struct drm_device *dev = intel_dp_to_dev(intel_dp); 900 struct drm_i915_private *dev_priv = to_i915(dev); 901 902 if (!intel_dp_is_edp(intel_dp)) 903 return; 904 905 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) { 906 WARN(1, "eDP powered off while attempting aux channel communication.\n"); 907 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", 908 I915_READ(_pp_stat_reg(intel_dp)), 909 I915_READ(_pp_ctrl_reg(intel_dp))); 910 } 911 } 912 913 static uint32_t 914 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) 915 { 916 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 917 struct drm_device *dev = intel_dig_port->base.base.dev; 918 struct drm_i915_private *dev_priv = to_i915(dev); 919 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg; 920 uint32_t status; 921 bool done; 922 923 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) 924 if (has_aux_irq) 925 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 926 msecs_to_jiffies_timeout(10)); 927 else 928 done = wait_for(C, 10) == 0; 929 if (!done) 930 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n", 931 has_aux_irq); 932 #undef C 933 934 return status; 935 } 936 937 static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 938 { 939 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 940 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); 941 942 if (index) 943 return 0; 944 945 /* 946 * The clock divider is based off the hrawclk, and would like to run at 947 * 2MHz. So, take the hrawclk value and divide by 2000 and use that 948 */ 949 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); 950 } 951 952 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 953 { 954 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 955 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); 956 957 if (index) 958 return 0; 959 960 /* 961 * The clock divider is based off the cdclk or PCH rawclk, and would 962 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and 963 * divide by 2000 and use that 964 */ 965 if (intel_dig_port->port == PORT_A) 966 return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000); 967 else 968 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); 969 } 970 971 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 972 { 973 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 974 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); 975 976 if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) { 977 /* Workaround for non-ULT HSW */ 978 switch (index) { 979 case 0: return 63; 980 case 1: return 72; 981 default: return 0; 982 } 983 } 984 985 return ilk_get_aux_clock_divider(intel_dp, index); 986 } 987 988 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 989 { 990 /* 991 * SKL doesn't need us to program the AUX clock divider (Hardware will 992 * derive the clock from CDCLK automatically). We still implement the 993 * get_aux_clock_divider vfunc to plug-in into the existing code. 994 */ 995 return index ? 0 : 1; 996 } 997 998 static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp, 999 bool has_aux_irq, 1000 int send_bytes, 1001 uint32_t aux_clock_divider) 1002 { 1003 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1004 struct drm_i915_private *dev_priv = 1005 to_i915(intel_dig_port->base.base.dev); 1006 uint32_t precharge, timeout; 1007 1008 if (IS_GEN6(dev_priv)) 1009 precharge = 3; 1010 else 1011 precharge = 5; 1012 1013 if (IS_BROADWELL(dev_priv)) 1014 timeout = DP_AUX_CH_CTL_TIME_OUT_600us; 1015 else 1016 timeout = DP_AUX_CH_CTL_TIME_OUT_400us; 1017 1018 return DP_AUX_CH_CTL_SEND_BUSY | 1019 DP_AUX_CH_CTL_DONE | 1020 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | 1021 DP_AUX_CH_CTL_TIME_OUT_ERROR | 1022 timeout | 1023 DP_AUX_CH_CTL_RECEIVE_ERROR | 1024 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 1025 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 1026 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); 1027 } 1028 1029 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, 1030 bool has_aux_irq, 1031 int send_bytes, 1032 uint32_t unused) 1033 { 1034 return DP_AUX_CH_CTL_SEND_BUSY | 1035 DP_AUX_CH_CTL_DONE | 1036 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | 1037 DP_AUX_CH_CTL_TIME_OUT_ERROR | 1038 DP_AUX_CH_CTL_TIME_OUT_MAX | 1039 DP_AUX_CH_CTL_RECEIVE_ERROR | 1040 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 1041 DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | 1042 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); 1043 } 1044 1045 static int 1046 intel_dp_aux_ch(struct intel_dp *intel_dp, 1047 const uint8_t *send, int send_bytes, 1048 uint8_t *recv, int recv_size) 1049 { 1050 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1051 struct drm_i915_private *dev_priv = 1052 to_i915(intel_dig_port->base.base.dev); 1053 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg; 1054 uint32_t aux_clock_divider; 1055 int i, ret, recv_bytes; 1056 uint32_t status; 1057 int try, clock = 0; 1058 bool has_aux_irq = HAS_AUX_IRQ(dev_priv); 1059 bool vdd; 1060 1061 pps_lock(intel_dp); 1062 1063 /* 1064 * We will be called with VDD already enabled for dpcd/edid/oui reads. 1065 * In such cases we want to leave VDD enabled and it's up to upper layers 1066 * to turn it off. But for eg. i2c-dev access we need to turn it on/off 1067 * ourselves. 1068 */ 1069 vdd = edp_panel_vdd_on(intel_dp); 1070 1071 /* dp aux is extremely sensitive to irq latency, hence request the 1072 * lowest possible wakeup latency and so prevent the cpu from going into 1073 * deep sleep states. 1074 */ 1075 pm_qos_update_request(&dev_priv->pm_qos, 0); 1076 1077 intel_dp_check_edp(intel_dp); 1078 1079 /* Try to wait for any previous AUX channel activity */ 1080 for (try = 0; try < 3; try++) { 1081 status = I915_READ_NOTRACE(ch_ctl); 1082 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 1083 break; 1084 msleep(1); 1085 } 1086 1087 if (try == 3) { 1088 static u32 last_status = -1; 1089 const u32 status = I915_READ(ch_ctl); 1090 1091 if (status != last_status) { 1092 WARN(1, "dp_aux_ch not started status 0x%08x\n", 1093 status); 1094 last_status = status; 1095 } 1096 1097 ret = -EBUSY; 1098 goto out; 1099 } 1100 1101 /* Only 5 data registers! */ 1102 if (WARN_ON(send_bytes > 20 || recv_size > 20)) { 1103 ret = -E2BIG; 1104 goto out; 1105 } 1106 1107 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) { 1108 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp, 1109 has_aux_irq, 1110 send_bytes, 1111 aux_clock_divider); 1112 1113 /* Must try at least 3 times according to DP spec */ 1114 for (try = 0; try < 5; try++) { 1115 /* Load the send data into the aux channel data registers */ 1116 for (i = 0; i < send_bytes; i += 4) 1117 I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2], 1118 intel_dp_pack_aux(send + i, 1119 send_bytes - i)); 1120 1121 /* Send the command and wait for it to complete */ 1122 I915_WRITE(ch_ctl, send_ctl); 1123 1124 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); 1125 1126 /* Clear done status and any errors */ 1127 I915_WRITE(ch_ctl, 1128 status | 1129 DP_AUX_CH_CTL_DONE | 1130 DP_AUX_CH_CTL_TIME_OUT_ERROR | 1131 DP_AUX_CH_CTL_RECEIVE_ERROR); 1132 1133 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) 1134 continue; 1135 1136 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2 1137 * 400us delay required for errors and timeouts 1138 * Timeout errors from the HW already meet this 1139 * requirement so skip to next iteration 1140 */ 1141 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { 1142 usleep_range(400, 500); 1143 continue; 1144 } 1145 if (status & DP_AUX_CH_CTL_DONE) 1146 goto done; 1147 } 1148 } 1149 1150 if ((status & DP_AUX_CH_CTL_DONE) == 0) { 1151 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); 1152 ret = -EBUSY; 1153 goto out; 1154 } 1155 1156 done: 1157 /* Check for timeout or receive error. 1158 * Timeouts occur when the sink is not connected 1159 */ 1160 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { 1161 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); 1162 ret = -EIO; 1163 goto out; 1164 } 1165 1166 /* Timeouts occur when the device isn't connected, so they're 1167 * "normal" -- don't fill the kernel log with these */ 1168 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { 1169 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); 1170 ret = -ETIMEDOUT; 1171 goto out; 1172 } 1173 1174 /* Unload any bytes sent back from the other side */ 1175 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> 1176 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); 1177 1178 /* 1179 * By BSpec: "Message sizes of 0 or >20 are not allowed." 1180 * We have no idea of what happened so we return -EBUSY so 1181 * drm layer takes care for the necessary retries. 1182 */ 1183 if (recv_bytes == 0 || recv_bytes > 20) { 1184 DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n", 1185 recv_bytes); 1186 /* 1187 * FIXME: This patch was created on top of a series that 1188 * organize the retries at drm level. There EBUSY should 1189 * also take care for 1ms wait before retrying. 1190 * That aux retries re-org is still needed and after that is 1191 * merged we remove this sleep from here. 1192 */ 1193 usleep_range(1000, 1500); 1194 ret = -EBUSY; 1195 goto out; 1196 } 1197 1198 if (recv_bytes > recv_size) 1199 recv_bytes = recv_size; 1200 1201 for (i = 0; i < recv_bytes; i += 4) 1202 intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]), 1203 recv + i, recv_bytes - i); 1204 1205 ret = recv_bytes; 1206 out: 1207 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); 1208 1209 if (vdd) 1210 edp_panel_vdd_off(intel_dp, false); 1211 1212 pps_unlock(intel_dp); 1213 1214 return ret; 1215 } 1216 1217 #define BARE_ADDRESS_SIZE 3 1218 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1) 1219 static ssize_t 1220 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) 1221 { 1222 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux); 1223 uint8_t txbuf[20], rxbuf[20]; 1224 size_t txsize, rxsize; 1225 int ret; 1226 1227 txbuf[0] = (msg->request << 4) | 1228 ((msg->address >> 16) & 0xf); 1229 txbuf[1] = (msg->address >> 8) & 0xff; 1230 txbuf[2] = msg->address & 0xff; 1231 txbuf[3] = msg->size - 1; 1232 1233 switch (msg->request & ~DP_AUX_I2C_MOT) { 1234 case DP_AUX_NATIVE_WRITE: 1235 case DP_AUX_I2C_WRITE: 1236 case DP_AUX_I2C_WRITE_STATUS_UPDATE: 1237 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE; 1238 rxsize = 2; /* 0 or 1 data bytes */ 1239 1240 if (WARN_ON(txsize > 20)) 1241 return -E2BIG; 1242 1243 WARN_ON(!msg->buffer != !msg->size); 1244 1245 if (msg->buffer) 1246 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); 1247 1248 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); 1249 if (ret > 0) { 1250 msg->reply = rxbuf[0] >> 4; 1251 1252 if (ret > 1) { 1253 /* Number of bytes written in a short write. */ 1254 ret = clamp_t(int, rxbuf[1], 0, msg->size); 1255 } else { 1256 /* Return payload size. */ 1257 ret = msg->size; 1258 } 1259 } 1260 break; 1261 1262 case DP_AUX_NATIVE_READ: 1263 case DP_AUX_I2C_READ: 1264 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE; 1265 rxsize = msg->size + 1; 1266 1267 if (WARN_ON(rxsize > 20)) 1268 return -E2BIG; 1269 1270 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); 1271 if (ret > 0) { 1272 msg->reply = rxbuf[0] >> 4; 1273 /* 1274 * Assume happy day, and copy the data. The caller is 1275 * expected to check msg->reply before touching it. 1276 * 1277 * Return payload size. 1278 */ 1279 ret--; 1280 memcpy(msg->buffer, rxbuf + 1, ret); 1281 } 1282 break; 1283 1284 default: 1285 ret = -EINVAL; 1286 break; 1287 } 1288 1289 return ret; 1290 } 1291 1292 static enum port intel_aux_port(struct drm_i915_private *dev_priv, 1293 enum port port) 1294 { 1295 const struct ddi_vbt_port_info *info = 1296 &dev_priv->vbt.ddi_port_info[port]; 1297 enum port aux_port; 1298 1299 if (!info->alternate_aux_channel) { 1300 DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n", 1301 port_name(port), port_name(port)); 1302 return port; 1303 } 1304 1305 switch (info->alternate_aux_channel) { 1306 case DP_AUX_A: 1307 aux_port = PORT_A; 1308 break; 1309 case DP_AUX_B: 1310 aux_port = PORT_B; 1311 break; 1312 case DP_AUX_C: 1313 aux_port = PORT_C; 1314 break; 1315 case DP_AUX_D: 1316 aux_port = PORT_D; 1317 break; 1318 default: 1319 MISSING_CASE(info->alternate_aux_channel); 1320 aux_port = PORT_A; 1321 break; 1322 } 1323 1324 DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n", 1325 port_name(aux_port), port_name(port)); 1326 1327 return aux_port; 1328 } 1329 1330 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv, 1331 enum port port) 1332 { 1333 switch (port) { 1334 case PORT_B: 1335 case PORT_C: 1336 case PORT_D: 1337 return DP_AUX_CH_CTL(port); 1338 default: 1339 MISSING_CASE(port); 1340 return DP_AUX_CH_CTL(PORT_B); 1341 } 1342 } 1343 1344 static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv, 1345 enum port port, int index) 1346 { 1347 switch (port) { 1348 case PORT_B: 1349 case PORT_C: 1350 case PORT_D: 1351 return DP_AUX_CH_DATA(port, index); 1352 default: 1353 MISSING_CASE(port); 1354 return DP_AUX_CH_DATA(PORT_B, index); 1355 } 1356 } 1357 1358 static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv, 1359 enum port port) 1360 { 1361 switch (port) { 1362 case PORT_A: 1363 return DP_AUX_CH_CTL(port); 1364 case PORT_B: 1365 case PORT_C: 1366 case PORT_D: 1367 return PCH_DP_AUX_CH_CTL(port); 1368 default: 1369 MISSING_CASE(port); 1370 return DP_AUX_CH_CTL(PORT_A); 1371 } 1372 } 1373 1374 static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv, 1375 enum port port, int index) 1376 { 1377 switch (port) { 1378 case PORT_A: 1379 return DP_AUX_CH_DATA(port, index); 1380 case PORT_B: 1381 case PORT_C: 1382 case PORT_D: 1383 return PCH_DP_AUX_CH_DATA(port, index); 1384 default: 1385 MISSING_CASE(port); 1386 return DP_AUX_CH_DATA(PORT_A, index); 1387 } 1388 } 1389 1390 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv, 1391 enum port port) 1392 { 1393 switch (port) { 1394 case PORT_A: 1395 case PORT_B: 1396 case PORT_C: 1397 case PORT_D: 1398 return DP_AUX_CH_CTL(port); 1399 default: 1400 MISSING_CASE(port); 1401 return DP_AUX_CH_CTL(PORT_A); 1402 } 1403 } 1404 1405 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv, 1406 enum port port, int index) 1407 { 1408 switch (port) { 1409 case PORT_A: 1410 case PORT_B: 1411 case PORT_C: 1412 case PORT_D: 1413 return DP_AUX_CH_DATA(port, index); 1414 default: 1415 MISSING_CASE(port); 1416 return DP_AUX_CH_DATA(PORT_A, index); 1417 } 1418 } 1419 1420 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv, 1421 enum port port) 1422 { 1423 if (INTEL_INFO(dev_priv)->gen >= 9) 1424 return skl_aux_ctl_reg(dev_priv, port); 1425 else if (HAS_PCH_SPLIT(dev_priv)) 1426 return ilk_aux_ctl_reg(dev_priv, port); 1427 else 1428 return g4x_aux_ctl_reg(dev_priv, port); 1429 } 1430 1431 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv, 1432 enum port port, int index) 1433 { 1434 if (INTEL_INFO(dev_priv)->gen >= 9) 1435 return skl_aux_data_reg(dev_priv, port, index); 1436 else if (HAS_PCH_SPLIT(dev_priv)) 1437 return ilk_aux_data_reg(dev_priv, port, index); 1438 else 1439 return g4x_aux_data_reg(dev_priv, port, index); 1440 } 1441 1442 static void intel_aux_reg_init(struct intel_dp *intel_dp) 1443 { 1444 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 1445 enum port port = intel_aux_port(dev_priv, 1446 dp_to_dig_port(intel_dp)->port); 1447 int i; 1448 1449 intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port); 1450 for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++) 1451 intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i); 1452 } 1453 1454 static void 1455 intel_dp_aux_fini(struct intel_dp *intel_dp) 1456 { 1457 kfree(intel_dp->aux.name); 1458 } 1459 1460 static void 1461 intel_dp_aux_init(struct intel_dp *intel_dp) 1462 { 1463 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1464 enum port port = intel_dig_port->port; 1465 1466 intel_aux_reg_init(intel_dp); 1467 drm_dp_aux_init(&intel_dp->aux); 1468 1469 /* Failure to allocate our preferred name is not critical */ 1470 intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port)); 1471 intel_dp->aux.transfer = intel_dp_aux_transfer; 1472 } 1473 1474 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp) 1475 { 1476 int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1]; 1477 1478 return max_rate >= 540000; 1479 } 1480 1481 static void 1482 intel_dp_set_clock(struct intel_encoder *encoder, 1483 struct intel_crtc_state *pipe_config) 1484 { 1485 struct drm_device *dev = encoder->base.dev; 1486 struct drm_i915_private *dev_priv = to_i915(dev); 1487 const struct dp_link_dpll *divisor = NULL; 1488 int i, count = 0; 1489 1490 if (IS_G4X(dev_priv)) { 1491 divisor = gen4_dpll; 1492 count = ARRAY_SIZE(gen4_dpll); 1493 } else if (HAS_PCH_SPLIT(dev_priv)) { 1494 divisor = pch_dpll; 1495 count = ARRAY_SIZE(pch_dpll); 1496 } else if (IS_CHERRYVIEW(dev_priv)) { 1497 divisor = chv_dpll; 1498 count = ARRAY_SIZE(chv_dpll); 1499 } else if (IS_VALLEYVIEW(dev_priv)) { 1500 divisor = vlv_dpll; 1501 count = ARRAY_SIZE(vlv_dpll); 1502 } 1503 1504 if (divisor && count) { 1505 for (i = 0; i < count; i++) { 1506 if (pipe_config->port_clock == divisor[i].clock) { 1507 pipe_config->dpll = divisor[i].dpll; 1508 pipe_config->clock_set = true; 1509 break; 1510 } 1511 } 1512 } 1513 } 1514 1515 static void snprintf_int_array(char *str, size_t len, 1516 const int *array, int nelem) 1517 { 1518 int i; 1519 1520 str[0] = '\0'; 1521 1522 for (i = 0; i < nelem; i++) { 1523 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]); 1524 if (r >= len) 1525 return; 1526 str += r; 1527 len -= r; 1528 } 1529 } 1530 1531 static void intel_dp_print_rates(struct intel_dp *intel_dp) 1532 { 1533 char str[128]; /* FIXME: too big for stack? */ 1534 1535 if ((drm_debug & DRM_UT_KMS) == 0) 1536 return; 1537 1538 snprintf_int_array(str, sizeof(str), 1539 intel_dp->source_rates, intel_dp->num_source_rates); 1540 DRM_DEBUG_KMS("source rates: %s\n", str); 1541 1542 snprintf_int_array(str, sizeof(str), 1543 intel_dp->sink_rates, intel_dp->num_sink_rates); 1544 DRM_DEBUG_KMS("sink rates: %s\n", str); 1545 1546 snprintf_int_array(str, sizeof(str), 1547 intel_dp->common_rates, intel_dp->num_common_rates); 1548 DRM_DEBUG_KMS("common rates: %s\n", str); 1549 } 1550 1551 int 1552 intel_dp_max_link_rate(struct intel_dp *intel_dp) 1553 { 1554 int len; 1555 1556 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); 1557 if (WARN_ON(len <= 0)) 1558 return 162000; 1559 1560 return intel_dp->common_rates[len - 1]; 1561 } 1562 1563 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) 1564 { 1565 int i = intel_dp_rate_index(intel_dp->sink_rates, 1566 intel_dp->num_sink_rates, rate); 1567 1568 if (WARN_ON(i < 0)) 1569 i = 0; 1570 1571 return i; 1572 } 1573 1574 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, 1575 uint8_t *link_bw, uint8_t *rate_select) 1576 { 1577 /* eDP 1.4 rate select method. */ 1578 if (intel_dp->use_rate_select) { 1579 *link_bw = 0; 1580 *rate_select = 1581 intel_dp_rate_select(intel_dp, port_clock); 1582 } else { 1583 *link_bw = drm_dp_link_rate_to_bw_code(port_clock); 1584 *rate_select = 0; 1585 } 1586 } 1587 1588 static int intel_dp_compute_bpp(struct intel_dp *intel_dp, 1589 struct intel_crtc_state *pipe_config) 1590 { 1591 int bpp, bpc; 1592 1593 bpp = pipe_config->pipe_bpp; 1594 bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports); 1595 1596 if (bpc > 0) 1597 bpp = min(bpp, 3*bpc); 1598 1599 /* For DP Compliance we override the computed bpp for the pipe */ 1600 if (intel_dp->compliance.test_data.bpc != 0) { 1601 pipe_config->pipe_bpp = 3*intel_dp->compliance.test_data.bpc; 1602 pipe_config->dither_force_disable = pipe_config->pipe_bpp == 6*3; 1603 DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", 1604 pipe_config->pipe_bpp); 1605 } 1606 return bpp; 1607 } 1608 1609 static bool intel_edp_compare_alt_mode(struct drm_display_mode *m1, 1610 struct drm_display_mode *m2) 1611 { 1612 bool bres = false; 1613 1614 if (m1 && m2) 1615 bres = (m1->hdisplay == m2->hdisplay && 1616 m1->hsync_start == m2->hsync_start && 1617 m1->hsync_end == m2->hsync_end && 1618 m1->htotal == m2->htotal && 1619 m1->vdisplay == m2->vdisplay && 1620 m1->vsync_start == m2->vsync_start && 1621 m1->vsync_end == m2->vsync_end && 1622 m1->vtotal == m2->vtotal); 1623 return bres; 1624 } 1625 1626 bool 1627 intel_dp_compute_config(struct intel_encoder *encoder, 1628 struct intel_crtc_state *pipe_config, 1629 struct drm_connector_state *conn_state) 1630 { 1631 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1632 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 1633 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1634 enum port port = dp_to_dig_port(intel_dp)->port; 1635 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); 1636 struct intel_connector *intel_connector = intel_dp->attached_connector; 1637 struct intel_digital_connector_state *intel_conn_state = 1638 to_intel_digital_connector_state(conn_state); 1639 int lane_count, clock; 1640 int min_lane_count = 1; 1641 int max_lane_count = intel_dp_max_lane_count(intel_dp); 1642 /* Conveniently, the link BW constants become indices with a shift...*/ 1643 int min_clock = 0; 1644 int max_clock; 1645 int bpp, mode_rate; 1646 int link_avail, link_clock; 1647 int common_len; 1648 uint8_t link_bw, rate_select; 1649 bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, 1650 DP_DPCD_QUIRK_LIMITED_M_N); 1651 1652 common_len = intel_dp_common_len_rate_limit(intel_dp, 1653 intel_dp->max_link_rate); 1654 1655 /* No common link rates between source and sink */ 1656 WARN_ON(common_len <= 0); 1657 1658 max_clock = common_len - 1; 1659 1660 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) 1661 pipe_config->has_pch_encoder = true; 1662 1663 pipe_config->has_drrs = false; 1664 if (port == PORT_A) 1665 pipe_config->has_audio = false; 1666 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 1667 pipe_config->has_audio = intel_dp->has_audio; 1668 else 1669 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; 1670 1671 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 1672 struct drm_display_mode *panel_mode = 1673 intel_connector->panel.alt_fixed_mode; 1674 struct drm_display_mode *req_mode = &pipe_config->base.mode; 1675 1676 if (!intel_edp_compare_alt_mode(req_mode, panel_mode)) 1677 panel_mode = intel_connector->panel.fixed_mode; 1678 1679 drm_mode_debug_printmodeline(panel_mode); 1680 1681 intel_fixed_panel_mode(panel_mode, adjusted_mode); 1682 1683 if (INTEL_GEN(dev_priv) >= 9) { 1684 int ret; 1685 ret = skl_update_scaler_crtc(pipe_config); 1686 if (ret) 1687 return ret; 1688 } 1689 1690 if (HAS_GMCH_DISPLAY(dev_priv)) 1691 intel_gmch_panel_fitting(intel_crtc, pipe_config, 1692 conn_state->scaling_mode); 1693 else 1694 intel_pch_panel_fitting(intel_crtc, pipe_config, 1695 conn_state->scaling_mode); 1696 } 1697 1698 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 1699 return false; 1700 1701 /* Use values requested by Compliance Test Request */ 1702 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 1703 int index; 1704 1705 /* Validate the compliance test data since max values 1706 * might have changed due to link train fallback. 1707 */ 1708 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, 1709 intel_dp->compliance.test_lane_count)) { 1710 index = intel_dp_rate_index(intel_dp->common_rates, 1711 intel_dp->num_common_rates, 1712 intel_dp->compliance.test_link_rate); 1713 if (index >= 0) 1714 min_clock = max_clock = index; 1715 min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count; 1716 } 1717 } 1718 DRM_DEBUG_KMS("DP link computation with max lane count %i " 1719 "max bw %d pixel clock %iKHz\n", 1720 max_lane_count, intel_dp->common_rates[max_clock], 1721 adjusted_mode->crtc_clock); 1722 1723 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 1724 * bpc in between. */ 1725 bpp = intel_dp_compute_bpp(intel_dp, pipe_config); 1726 if (intel_dp_is_edp(intel_dp)) { 1727 1728 /* Get bpp from vbt only for panels that dont have bpp in edid */ 1729 if (intel_connector->base.display_info.bpc == 0 && 1730 (dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) { 1731 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", 1732 dev_priv->vbt.edp.bpp); 1733 bpp = dev_priv->vbt.edp.bpp; 1734 } 1735 1736 /* 1737 * Use the maximum clock and number of lanes the eDP panel 1738 * advertizes being capable of. The panels are generally 1739 * designed to support only a single clock and lane 1740 * configuration, and typically these values correspond to the 1741 * native resolution of the panel. 1742 */ 1743 min_lane_count = max_lane_count; 1744 min_clock = max_clock; 1745 } 1746 1747 for (; bpp >= 6*3; bpp -= 2*3) { 1748 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 1749 bpp); 1750 1751 for (clock = min_clock; clock <= max_clock; clock++) { 1752 for (lane_count = min_lane_count; 1753 lane_count <= max_lane_count; 1754 lane_count <<= 1) { 1755 1756 link_clock = intel_dp->common_rates[clock]; 1757 link_avail = intel_dp_max_data_rate(link_clock, 1758 lane_count); 1759 1760 if (mode_rate <= link_avail) { 1761 goto found; 1762 } 1763 } 1764 } 1765 } 1766 1767 return false; 1768 1769 found: 1770 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 1771 /* 1772 * See: 1773 * CEA-861-E - 5.1 Default Encoding Parameters 1774 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 1775 */ 1776 pipe_config->limited_color_range = 1777 bpp != 18 && 1778 drm_default_rgb_quant_range(adjusted_mode) == 1779 HDMI_QUANTIZATION_RANGE_LIMITED; 1780 } else { 1781 pipe_config->limited_color_range = 1782 intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED; 1783 } 1784 1785 pipe_config->lane_count = lane_count; 1786 1787 pipe_config->pipe_bpp = bpp; 1788 pipe_config->port_clock = intel_dp->common_rates[clock]; 1789 1790 intel_dp_compute_rate(intel_dp, pipe_config->port_clock, 1791 &link_bw, &rate_select); 1792 1793 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n", 1794 link_bw, rate_select, pipe_config->lane_count, 1795 pipe_config->port_clock, bpp); 1796 DRM_DEBUG_KMS("DP link bw required %i available %i\n", 1797 mode_rate, link_avail); 1798 1799 intel_link_compute_m_n(bpp, lane_count, 1800 adjusted_mode->crtc_clock, 1801 pipe_config->port_clock, 1802 &pipe_config->dp_m_n, 1803 reduce_m_n); 1804 1805 if (intel_connector->panel.downclock_mode != NULL && 1806 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { 1807 pipe_config->has_drrs = true; 1808 intel_link_compute_m_n(bpp, lane_count, 1809 intel_connector->panel.downclock_mode->clock, 1810 pipe_config->port_clock, 1811 &pipe_config->dp_m2_n2, 1812 reduce_m_n); 1813 } 1814 1815 /* 1816 * DPLL0 VCO may need to be adjusted to get the correct 1817 * clock for eDP. This will affect cdclk as well. 1818 */ 1819 if (intel_dp_is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) { 1820 int vco; 1821 1822 switch (pipe_config->port_clock / 2) { 1823 case 108000: 1824 case 216000: 1825 vco = 8640000; 1826 break; 1827 default: 1828 vco = 8100000; 1829 break; 1830 } 1831 1832 to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco; 1833 } 1834 1835 if (!HAS_DDI(dev_priv)) 1836 intel_dp_set_clock(encoder, pipe_config); 1837 1838 intel_psr_compute_config(intel_dp, pipe_config); 1839 1840 return true; 1841 } 1842 1843 void intel_dp_set_link_params(struct intel_dp *intel_dp, 1844 int link_rate, uint8_t lane_count, 1845 bool link_mst) 1846 { 1847 intel_dp->link_rate = link_rate; 1848 intel_dp->lane_count = lane_count; 1849 intel_dp->link_mst = link_mst; 1850 } 1851 1852 static void intel_dp_prepare(struct intel_encoder *encoder, 1853 const struct intel_crtc_state *pipe_config) 1854 { 1855 struct drm_device *dev = encoder->base.dev; 1856 struct drm_i915_private *dev_priv = to_i915(dev); 1857 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1858 enum port port = dp_to_dig_port(intel_dp)->port; 1859 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1860 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 1861 1862 intel_dp_set_link_params(intel_dp, pipe_config->port_clock, 1863 pipe_config->lane_count, 1864 intel_crtc_has_type(pipe_config, 1865 INTEL_OUTPUT_DP_MST)); 1866 1867 /* 1868 * There are four kinds of DP registers: 1869 * 1870 * IBX PCH 1871 * SNB CPU 1872 * IVB CPU 1873 * CPT PCH 1874 * 1875 * IBX PCH and CPU are the same for almost everything, 1876 * except that the CPU DP PLL is configured in this 1877 * register 1878 * 1879 * CPT PCH is quite different, having many bits moved 1880 * to the TRANS_DP_CTL register instead. That 1881 * configuration happens (oddly) in ironlake_pch_enable 1882 */ 1883 1884 /* Preserve the BIOS-computed detected bit. This is 1885 * supposed to be read-only. 1886 */ 1887 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; 1888 1889 /* Handle DP bits in common between all three register formats */ 1890 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 1891 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count); 1892 1893 /* Split out the IBX/CPU vs CPT settings */ 1894 1895 if (IS_GEN7(dev_priv) && port == PORT_A) { 1896 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 1897 intel_dp->DP |= DP_SYNC_HS_HIGH; 1898 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 1899 intel_dp->DP |= DP_SYNC_VS_HIGH; 1900 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 1901 1902 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 1903 intel_dp->DP |= DP_ENHANCED_FRAMING; 1904 1905 intel_dp->DP |= crtc->pipe << 29; 1906 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 1907 u32 trans_dp; 1908 1909 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 1910 1911 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe)); 1912 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 1913 trans_dp |= TRANS_DP_ENH_FRAMING; 1914 else 1915 trans_dp &= ~TRANS_DP_ENH_FRAMING; 1916 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp); 1917 } else { 1918 if (IS_G4X(dev_priv) && pipe_config->limited_color_range) 1919 intel_dp->DP |= DP_COLOR_RANGE_16_235; 1920 1921 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 1922 intel_dp->DP |= DP_SYNC_HS_HIGH; 1923 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 1924 intel_dp->DP |= DP_SYNC_VS_HIGH; 1925 intel_dp->DP |= DP_LINK_TRAIN_OFF; 1926 1927 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 1928 intel_dp->DP |= DP_ENHANCED_FRAMING; 1929 1930 if (IS_CHERRYVIEW(dev_priv)) 1931 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe); 1932 else if (crtc->pipe == PIPE_B) 1933 intel_dp->DP |= DP_PIPEB_SELECT; 1934 } 1935 } 1936 1937 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 1938 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 1939 1940 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0) 1941 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0) 1942 1943 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 1944 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 1945 1946 static void intel_pps_verify_state(struct drm_i915_private *dev_priv, 1947 struct intel_dp *intel_dp); 1948 1949 static void wait_panel_status(struct intel_dp *intel_dp, 1950 u32 mask, 1951 u32 value) 1952 { 1953 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1954 struct drm_i915_private *dev_priv = to_i915(dev); 1955 i915_reg_t pp_stat_reg, pp_ctrl_reg; 1956 1957 lockdep_assert_held(&dev_priv->pps_mutex); 1958 1959 intel_pps_verify_state(dev_priv, intel_dp); 1960 1961 pp_stat_reg = _pp_stat_reg(intel_dp); 1962 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1963 1964 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", 1965 mask, value, 1966 I915_READ(pp_stat_reg), 1967 I915_READ(pp_ctrl_reg)); 1968 1969 if (intel_wait_for_register(dev_priv, 1970 pp_stat_reg, mask, value, 1971 5000)) 1972 DRM_ERROR("Panel status timeout: status %08x control %08x\n", 1973 I915_READ(pp_stat_reg), 1974 I915_READ(pp_ctrl_reg)); 1975 1976 DRM_DEBUG_KMS("Wait complete\n"); 1977 } 1978 1979 static void wait_panel_on(struct intel_dp *intel_dp) 1980 { 1981 DRM_DEBUG_KMS("Wait for panel power on\n"); 1982 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 1983 } 1984 1985 static void wait_panel_off(struct intel_dp *intel_dp) 1986 { 1987 DRM_DEBUG_KMS("Wait for panel power off time\n"); 1988 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 1989 } 1990 1991 static void wait_panel_power_cycle(struct intel_dp *intel_dp) 1992 { 1993 ktime_t panel_power_on_time; 1994 s64 panel_power_off_duration; 1995 1996 DRM_DEBUG_KMS("Wait for panel power cycle\n"); 1997 1998 /* take the difference of currrent time and panel power off time 1999 * and then make panel wait for t11_t12 if needed. */ 2000 panel_power_on_time = ktime_get_boottime(); 2001 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time); 2002 2003 /* When we disable the VDD override bit last we have to do the manual 2004 * wait. */ 2005 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay) 2006 wait_remaining_ms_from_jiffies(jiffies, 2007 intel_dp->panel_power_cycle_delay - panel_power_off_duration); 2008 2009 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 2010 } 2011 2012 static void wait_backlight_on(struct intel_dp *intel_dp) 2013 { 2014 wait_remaining_ms_from_jiffies(intel_dp->last_power_on, 2015 intel_dp->backlight_on_delay); 2016 } 2017 2018 static void edp_wait_backlight_off(struct intel_dp *intel_dp) 2019 { 2020 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off, 2021 intel_dp->backlight_off_delay); 2022 } 2023 2024 /* Read the current pp_control value, unlocking the register if it 2025 * is locked 2026 */ 2027 2028 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) 2029 { 2030 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2031 struct drm_i915_private *dev_priv = to_i915(dev); 2032 u32 control; 2033 2034 lockdep_assert_held(&dev_priv->pps_mutex); 2035 2036 control = I915_READ(_pp_ctrl_reg(intel_dp)); 2037 if (WARN_ON(!HAS_DDI(dev_priv) && 2038 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) { 2039 control &= ~PANEL_UNLOCK_MASK; 2040 control |= PANEL_UNLOCK_REGS; 2041 } 2042 return control; 2043 } 2044 2045 /* 2046 * Must be paired with edp_panel_vdd_off(). 2047 * Must hold pps_mutex around the whole on/off sequence. 2048 * Can be nested with intel_edp_panel_vdd_{on,off}() calls. 2049 */ 2050 static bool edp_panel_vdd_on(struct intel_dp *intel_dp) 2051 { 2052 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2053 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2054 struct drm_i915_private *dev_priv = to_i915(dev); 2055 u32 pp; 2056 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2057 bool need_to_disable = !intel_dp->want_panel_vdd; 2058 2059 lockdep_assert_held(&dev_priv->pps_mutex); 2060 2061 if (!intel_dp_is_edp(intel_dp)) 2062 return false; 2063 2064 cancel_delayed_work(&intel_dp->panel_vdd_work); 2065 intel_dp->want_panel_vdd = true; 2066 2067 if (edp_have_panel_vdd(intel_dp)) 2068 return need_to_disable; 2069 2070 intel_display_power_get(dev_priv, intel_dp->aux_power_domain); 2071 2072 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n", 2073 port_name(intel_dig_port->port)); 2074 2075 if (!edp_have_panel_power(intel_dp)) 2076 wait_panel_power_cycle(intel_dp); 2077 2078 pp = ironlake_get_pp_control(intel_dp); 2079 pp |= EDP_FORCE_VDD; 2080 2081 pp_stat_reg = _pp_stat_reg(intel_dp); 2082 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2083 2084 I915_WRITE(pp_ctrl_reg, pp); 2085 POSTING_READ(pp_ctrl_reg); 2086 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2087 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); 2088 /* 2089 * If the panel wasn't on, delay before accessing aux channel 2090 */ 2091 if (!edp_have_panel_power(intel_dp)) { 2092 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n", 2093 port_name(intel_dig_port->port)); 2094 msleep(intel_dp->panel_power_up_delay); 2095 } 2096 2097 return need_to_disable; 2098 } 2099 2100 /* 2101 * Must be paired with intel_edp_panel_vdd_off() or 2102 * intel_edp_panel_off(). 2103 * Nested calls to these functions are not allowed since 2104 * we drop the lock. Caller must use some higher level 2105 * locking to prevent nested calls from other threads. 2106 */ 2107 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) 2108 { 2109 bool vdd; 2110 2111 if (!intel_dp_is_edp(intel_dp)) 2112 return; 2113 2114 pps_lock(intel_dp); 2115 vdd = edp_panel_vdd_on(intel_dp); 2116 pps_unlock(intel_dp); 2117 2118 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n", 2119 port_name(dp_to_dig_port(intel_dp)->port)); 2120 } 2121 2122 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) 2123 { 2124 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2125 struct drm_i915_private *dev_priv = to_i915(dev); 2126 struct intel_digital_port *intel_dig_port = 2127 dp_to_dig_port(intel_dp); 2128 u32 pp; 2129 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2130 2131 lockdep_assert_held(&dev_priv->pps_mutex); 2132 2133 WARN_ON(intel_dp->want_panel_vdd); 2134 2135 if (!edp_have_panel_vdd(intel_dp)) 2136 return; 2137 2138 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n", 2139 port_name(intel_dig_port->port)); 2140 2141 pp = ironlake_get_pp_control(intel_dp); 2142 pp &= ~EDP_FORCE_VDD; 2143 2144 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2145 pp_stat_reg = _pp_stat_reg(intel_dp); 2146 2147 I915_WRITE(pp_ctrl_reg, pp); 2148 POSTING_READ(pp_ctrl_reg); 2149 2150 /* Make sure sequencer is idle before allowing subsequent activity */ 2151 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2152 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); 2153 2154 if ((pp & PANEL_POWER_ON) == 0) 2155 intel_dp->panel_power_off_time = ktime_get_boottime(); 2156 2157 intel_display_power_put(dev_priv, intel_dp->aux_power_domain); 2158 } 2159 2160 static void edp_panel_vdd_work(struct work_struct *__work) 2161 { 2162 struct intel_dp *intel_dp = container_of(to_delayed_work(__work), 2163 struct intel_dp, panel_vdd_work); 2164 2165 pps_lock(intel_dp); 2166 if (!intel_dp->want_panel_vdd) 2167 edp_panel_vdd_off_sync(intel_dp); 2168 pps_unlock(intel_dp); 2169 } 2170 2171 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) 2172 { 2173 unsigned long delay; 2174 2175 /* 2176 * Queue the timer to fire a long time from now (relative to the power 2177 * down delay) to keep the panel power up across a sequence of 2178 * operations. 2179 */ 2180 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5); 2181 schedule_delayed_work(&intel_dp->panel_vdd_work, delay); 2182 } 2183 2184 /* 2185 * Must be paired with edp_panel_vdd_on(). 2186 * Must hold pps_mutex around the whole on/off sequence. 2187 * Can be nested with intel_edp_panel_vdd_{on,off}() calls. 2188 */ 2189 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 2190 { 2191 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 2192 2193 lockdep_assert_held(&dev_priv->pps_mutex); 2194 2195 if (!intel_dp_is_edp(intel_dp)) 2196 return; 2197 2198 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on", 2199 port_name(dp_to_dig_port(intel_dp)->port)); 2200 2201 intel_dp->want_panel_vdd = false; 2202 2203 if (sync) 2204 edp_panel_vdd_off_sync(intel_dp); 2205 else 2206 edp_panel_vdd_schedule_off(intel_dp); 2207 } 2208 2209 static void edp_panel_on(struct intel_dp *intel_dp) 2210 { 2211 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2212 struct drm_i915_private *dev_priv = to_i915(dev); 2213 u32 pp; 2214 i915_reg_t pp_ctrl_reg; 2215 2216 lockdep_assert_held(&dev_priv->pps_mutex); 2217 2218 if (!intel_dp_is_edp(intel_dp)) 2219 return; 2220 2221 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n", 2222 port_name(dp_to_dig_port(intel_dp)->port)); 2223 2224 if (WARN(edp_have_panel_power(intel_dp), 2225 "eDP port %c panel power already on\n", 2226 port_name(dp_to_dig_port(intel_dp)->port))) 2227 return; 2228 2229 wait_panel_power_cycle(intel_dp); 2230 2231 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2232 pp = ironlake_get_pp_control(intel_dp); 2233 if (IS_GEN5(dev_priv)) { 2234 /* ILK workaround: disable reset around power sequence */ 2235 pp &= ~PANEL_POWER_RESET; 2236 I915_WRITE(pp_ctrl_reg, pp); 2237 POSTING_READ(pp_ctrl_reg); 2238 } 2239 2240 pp |= PANEL_POWER_ON; 2241 if (!IS_GEN5(dev_priv)) 2242 pp |= PANEL_POWER_RESET; 2243 2244 I915_WRITE(pp_ctrl_reg, pp); 2245 POSTING_READ(pp_ctrl_reg); 2246 2247 wait_panel_on(intel_dp); 2248 intel_dp->last_power_on = jiffies; 2249 2250 if (IS_GEN5(dev_priv)) { 2251 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 2252 I915_WRITE(pp_ctrl_reg, pp); 2253 POSTING_READ(pp_ctrl_reg); 2254 } 2255 } 2256 2257 void intel_edp_panel_on(struct intel_dp *intel_dp) 2258 { 2259 if (!intel_dp_is_edp(intel_dp)) 2260 return; 2261 2262 pps_lock(intel_dp); 2263 edp_panel_on(intel_dp); 2264 pps_unlock(intel_dp); 2265 } 2266 2267 2268 static void edp_panel_off(struct intel_dp *intel_dp) 2269 { 2270 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2271 struct drm_i915_private *dev_priv = to_i915(dev); 2272 u32 pp; 2273 i915_reg_t pp_ctrl_reg; 2274 2275 lockdep_assert_held(&dev_priv->pps_mutex); 2276 2277 if (!intel_dp_is_edp(intel_dp)) 2278 return; 2279 2280 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n", 2281 port_name(dp_to_dig_port(intel_dp)->port)); 2282 2283 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n", 2284 port_name(dp_to_dig_port(intel_dp)->port)); 2285 2286 pp = ironlake_get_pp_control(intel_dp); 2287 /* We need to switch off panel power _and_ force vdd, for otherwise some 2288 * panels get very unhappy and cease to work. */ 2289 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | 2290 EDP_BLC_ENABLE); 2291 2292 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2293 2294 intel_dp->want_panel_vdd = false; 2295 2296 I915_WRITE(pp_ctrl_reg, pp); 2297 POSTING_READ(pp_ctrl_reg); 2298 2299 wait_panel_off(intel_dp); 2300 intel_dp->panel_power_off_time = ktime_get_boottime(); 2301 2302 /* We got a reference when we enabled the VDD. */ 2303 intel_display_power_put(dev_priv, intel_dp->aux_power_domain); 2304 } 2305 2306 void intel_edp_panel_off(struct intel_dp *intel_dp) 2307 { 2308 if (!intel_dp_is_edp(intel_dp)) 2309 return; 2310 2311 pps_lock(intel_dp); 2312 edp_panel_off(intel_dp); 2313 pps_unlock(intel_dp); 2314 } 2315 2316 /* Enable backlight in the panel power control. */ 2317 static void _intel_edp_backlight_on(struct intel_dp *intel_dp) 2318 { 2319 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2320 struct drm_device *dev = intel_dig_port->base.base.dev; 2321 struct drm_i915_private *dev_priv = to_i915(dev); 2322 u32 pp; 2323 i915_reg_t pp_ctrl_reg; 2324 2325 /* 2326 * If we enable the backlight right away following a panel power 2327 * on, we may see slight flicker as the panel syncs with the eDP 2328 * link. So delay a bit to make sure the image is solid before 2329 * allowing it to appear. 2330 */ 2331 wait_backlight_on(intel_dp); 2332 2333 pps_lock(intel_dp); 2334 2335 pp = ironlake_get_pp_control(intel_dp); 2336 pp |= EDP_BLC_ENABLE; 2337 2338 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2339 2340 I915_WRITE(pp_ctrl_reg, pp); 2341 POSTING_READ(pp_ctrl_reg); 2342 2343 pps_unlock(intel_dp); 2344 } 2345 2346 /* Enable backlight PWM and backlight PP control. */ 2347 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, 2348 const struct drm_connector_state *conn_state) 2349 { 2350 struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder); 2351 2352 if (!intel_dp_is_edp(intel_dp)) 2353 return; 2354 2355 DRM_DEBUG_KMS("\n"); 2356 2357 intel_panel_enable_backlight(crtc_state, conn_state); 2358 _intel_edp_backlight_on(intel_dp); 2359 } 2360 2361 /* Disable backlight in the panel power control. */ 2362 static void _intel_edp_backlight_off(struct intel_dp *intel_dp) 2363 { 2364 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2365 struct drm_i915_private *dev_priv = to_i915(dev); 2366 u32 pp; 2367 i915_reg_t pp_ctrl_reg; 2368 2369 if (!intel_dp_is_edp(intel_dp)) 2370 return; 2371 2372 pps_lock(intel_dp); 2373 2374 pp = ironlake_get_pp_control(intel_dp); 2375 pp &= ~EDP_BLC_ENABLE; 2376 2377 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2378 2379 I915_WRITE(pp_ctrl_reg, pp); 2380 POSTING_READ(pp_ctrl_reg); 2381 2382 pps_unlock(intel_dp); 2383 2384 intel_dp->last_backlight_off = jiffies; 2385 edp_wait_backlight_off(intel_dp); 2386 } 2387 2388 /* Disable backlight PP control and backlight PWM. */ 2389 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) 2390 { 2391 struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder); 2392 2393 if (!intel_dp_is_edp(intel_dp)) 2394 return; 2395 2396 DRM_DEBUG_KMS("\n"); 2397 2398 _intel_edp_backlight_off(intel_dp); 2399 intel_panel_disable_backlight(old_conn_state); 2400 } 2401 2402 /* 2403 * Hook for controlling the panel power control backlight through the bl_power 2404 * sysfs attribute. Take care to handle multiple calls. 2405 */ 2406 static void intel_edp_backlight_power(struct intel_connector *connector, 2407 bool enable) 2408 { 2409 struct intel_dp *intel_dp = intel_attached_dp(&connector->base); 2410 bool is_enabled; 2411 2412 pps_lock(intel_dp); 2413 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE; 2414 pps_unlock(intel_dp); 2415 2416 if (is_enabled == enable) 2417 return; 2418 2419 DRM_DEBUG_KMS("panel power control backlight %s\n", 2420 enable ? "enable" : "disable"); 2421 2422 if (enable) 2423 _intel_edp_backlight_on(intel_dp); 2424 else 2425 _intel_edp_backlight_off(intel_dp); 2426 } 2427 2428 static void assert_dp_port(struct intel_dp *intel_dp, bool state) 2429 { 2430 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2431 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 2432 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN; 2433 2434 I915_STATE_WARN(cur_state != state, 2435 "DP port %c state assertion failure (expected %s, current %s)\n", 2436 port_name(dig_port->port), 2437 onoff(state), onoff(cur_state)); 2438 } 2439 #define assert_dp_port_disabled(d) assert_dp_port((d), false) 2440 2441 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) 2442 { 2443 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE; 2444 2445 I915_STATE_WARN(cur_state != state, 2446 "eDP PLL state assertion failure (expected %s, current %s)\n", 2447 onoff(state), onoff(cur_state)); 2448 } 2449 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) 2450 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) 2451 2452 static void ironlake_edp_pll_on(struct intel_dp *intel_dp, 2453 const struct intel_crtc_state *pipe_config) 2454 { 2455 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); 2456 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2457 2458 assert_pipe_disabled(dev_priv, crtc->pipe); 2459 assert_dp_port_disabled(intel_dp); 2460 assert_edp_pll_disabled(dev_priv); 2461 2462 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n", 2463 pipe_config->port_clock); 2464 2465 intel_dp->DP &= ~DP_PLL_FREQ_MASK; 2466 2467 if (pipe_config->port_clock == 162000) 2468 intel_dp->DP |= DP_PLL_FREQ_162MHZ; 2469 else 2470 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 2471 2472 I915_WRITE(DP_A, intel_dp->DP); 2473 POSTING_READ(DP_A); 2474 udelay(500); 2475 2476 /* 2477 * [DevILK] Work around required when enabling DP PLL 2478 * while a pipe is enabled going to FDI: 2479 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI 2480 * 2. Program DP PLL enable 2481 */ 2482 if (IS_GEN5(dev_priv)) 2483 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe); 2484 2485 intel_dp->DP |= DP_PLL_ENABLE; 2486 2487 I915_WRITE(DP_A, intel_dp->DP); 2488 POSTING_READ(DP_A); 2489 udelay(200); 2490 } 2491 2492 static void ironlake_edp_pll_off(struct intel_dp *intel_dp) 2493 { 2494 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2495 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); 2496 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2497 2498 assert_pipe_disabled(dev_priv, crtc->pipe); 2499 assert_dp_port_disabled(intel_dp); 2500 assert_edp_pll_enabled(dev_priv); 2501 2502 DRM_DEBUG_KMS("disabling eDP PLL\n"); 2503 2504 intel_dp->DP &= ~DP_PLL_ENABLE; 2505 2506 I915_WRITE(DP_A, intel_dp->DP); 2507 POSTING_READ(DP_A); 2508 udelay(200); 2509 } 2510 2511 /* If the sink supports it, try to set the power state appropriately */ 2512 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) 2513 { 2514 int ret, i; 2515 2516 /* Should have a valid DPCD by this point */ 2517 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 2518 return; 2519 2520 if (mode != DRM_MODE_DPMS_ON) { 2521 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 2522 DP_SET_POWER_D3); 2523 } else { 2524 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 2525 2526 /* 2527 * When turning on, we need to retry for 1ms to give the sink 2528 * time to wake up. 2529 */ 2530 for (i = 0; i < 3; i++) { 2531 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 2532 DP_SET_POWER_D0); 2533 if (ret == 1) 2534 break; 2535 msleep(1); 2536 } 2537 2538 if (ret == 1 && lspcon->active) 2539 lspcon_wait_pcon_mode(lspcon); 2540 } 2541 2542 if (ret != 1) 2543 DRM_DEBUG_KMS("failed to %s sink power state\n", 2544 mode == DRM_MODE_DPMS_ON ? "enable" : "disable"); 2545 } 2546 2547 static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 2548 enum i915_pipe *pipe) 2549 { 2550 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2551 enum port port = dp_to_dig_port(intel_dp)->port; 2552 struct drm_device *dev = encoder->base.dev; 2553 struct drm_i915_private *dev_priv = to_i915(dev); 2554 u32 tmp; 2555 bool ret; 2556 2557 if (!intel_display_power_get_if_enabled(dev_priv, 2558 encoder->power_domain)) 2559 return false; 2560 2561 ret = false; 2562 2563 tmp = I915_READ(intel_dp->output_reg); 2564 2565 if (!(tmp & DP_PORT_EN)) 2566 goto out; 2567 2568 if (IS_GEN7(dev_priv) && port == PORT_A) { 2569 *pipe = PORT_TO_PIPE_CPT(tmp); 2570 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 2571 enum i915_pipe p; 2572 2573 for_each_pipe(dev_priv, p) { 2574 u32 trans_dp = I915_READ(TRANS_DP_CTL(p)); 2575 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) { 2576 *pipe = p; 2577 ret = true; 2578 2579 goto out; 2580 } 2581 } 2582 2583 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", 2584 i915_mmio_reg_offset(intel_dp->output_reg)); 2585 } else if (IS_CHERRYVIEW(dev_priv)) { 2586 *pipe = DP_PORT_TO_PIPE_CHV(tmp); 2587 } else { 2588 *pipe = PORT_TO_PIPE(tmp); 2589 } 2590 2591 ret = true; 2592 2593 out: 2594 intel_display_power_put(dev_priv, encoder->power_domain); 2595 2596 return ret; 2597 } 2598 2599 static void intel_dp_get_config(struct intel_encoder *encoder, 2600 struct intel_crtc_state *pipe_config) 2601 { 2602 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2603 u32 tmp, flags = 0; 2604 struct drm_device *dev = encoder->base.dev; 2605 struct drm_i915_private *dev_priv = to_i915(dev); 2606 enum port port = dp_to_dig_port(intel_dp)->port; 2607 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 2608 2609 tmp = I915_READ(intel_dp->output_reg); 2610 2611 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; 2612 2613 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 2614 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe)); 2615 2616 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH) 2617 flags |= DRM_MODE_FLAG_PHSYNC; 2618 else 2619 flags |= DRM_MODE_FLAG_NHSYNC; 2620 2621 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH) 2622 flags |= DRM_MODE_FLAG_PVSYNC; 2623 else 2624 flags |= DRM_MODE_FLAG_NVSYNC; 2625 } else { 2626 if (tmp & DP_SYNC_HS_HIGH) 2627 flags |= DRM_MODE_FLAG_PHSYNC; 2628 else 2629 flags |= DRM_MODE_FLAG_NHSYNC; 2630 2631 if (tmp & DP_SYNC_VS_HIGH) 2632 flags |= DRM_MODE_FLAG_PVSYNC; 2633 else 2634 flags |= DRM_MODE_FLAG_NVSYNC; 2635 } 2636 2637 pipe_config->base.adjusted_mode.flags |= flags; 2638 2639 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235) 2640 pipe_config->limited_color_range = true; 2641 2642 pipe_config->lane_count = 2643 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; 2644 2645 intel_dp_get_m_n(crtc, pipe_config); 2646 2647 if (port == PORT_A) { 2648 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) 2649 pipe_config->port_clock = 162000; 2650 else 2651 pipe_config->port_clock = 270000; 2652 } 2653 2654 pipe_config->base.adjusted_mode.crtc_clock = 2655 intel_dotclock_calculate(pipe_config->port_clock, 2656 &pipe_config->dp_m_n); 2657 2658 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp && 2659 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { 2660 /* 2661 * This is a big fat ugly hack. 2662 * 2663 * Some machines in UEFI boot mode provide us a VBT that has 18 2664 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons 2665 * unknown we fail to light up. Yet the same BIOS boots up with 2666 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as 2667 * max, not what it tells us to use. 2668 * 2669 * Note: This will still be broken if the eDP panel is not lit 2670 * up by the BIOS, and thus we can't get the mode at module 2671 * load. 2672 */ 2673 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", 2674 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); 2675 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; 2676 } 2677 } 2678 2679 static void intel_disable_dp(struct intel_encoder *encoder, 2680 const struct intel_crtc_state *old_crtc_state, 2681 const struct drm_connector_state *old_conn_state) 2682 { 2683 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2684 2685 if (old_crtc_state->has_audio) 2686 intel_audio_codec_disable(encoder); 2687 2688 /* Make sure the panel is off before trying to change the mode. But also 2689 * ensure that we have vdd while we switch off the panel. */ 2690 intel_edp_panel_vdd_on(intel_dp); 2691 intel_edp_backlight_off(old_conn_state); 2692 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 2693 intel_edp_panel_off(intel_dp); 2694 } 2695 2696 static void g4x_disable_dp(struct intel_encoder *encoder, 2697 const struct intel_crtc_state *old_crtc_state, 2698 const struct drm_connector_state *old_conn_state) 2699 { 2700 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2701 2702 intel_disable_dp(encoder, old_crtc_state, old_conn_state); 2703 2704 /* disable the port before the pipe on g4x */ 2705 intel_dp_link_down(intel_dp); 2706 } 2707 2708 static void ilk_disable_dp(struct intel_encoder *encoder, 2709 const struct intel_crtc_state *old_crtc_state, 2710 const struct drm_connector_state *old_conn_state) 2711 { 2712 intel_disable_dp(encoder, old_crtc_state, old_conn_state); 2713 } 2714 2715 static void vlv_disable_dp(struct intel_encoder *encoder, 2716 const struct intel_crtc_state *old_crtc_state, 2717 const struct drm_connector_state *old_conn_state) 2718 { 2719 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2720 2721 intel_psr_disable(intel_dp, old_crtc_state); 2722 2723 intel_disable_dp(encoder, old_crtc_state, old_conn_state); 2724 } 2725 2726 static void ilk_post_disable_dp(struct intel_encoder *encoder, 2727 const struct intel_crtc_state *old_crtc_state, 2728 const struct drm_connector_state *old_conn_state) 2729 { 2730 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2731 enum port port = dp_to_dig_port(intel_dp)->port; 2732 2733 intel_dp_link_down(intel_dp); 2734 2735 /* Only ilk+ has port A */ 2736 if (port == PORT_A) 2737 ironlake_edp_pll_off(intel_dp); 2738 } 2739 2740 static void vlv_post_disable_dp(struct intel_encoder *encoder, 2741 const struct intel_crtc_state *old_crtc_state, 2742 const struct drm_connector_state *old_conn_state) 2743 { 2744 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2745 2746 intel_dp_link_down(intel_dp); 2747 } 2748 2749 static void chv_post_disable_dp(struct intel_encoder *encoder, 2750 const struct intel_crtc_state *old_crtc_state, 2751 const struct drm_connector_state *old_conn_state) 2752 { 2753 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2754 struct drm_device *dev = encoder->base.dev; 2755 struct drm_i915_private *dev_priv = to_i915(dev); 2756 2757 intel_dp_link_down(intel_dp); 2758 2759 mutex_lock(&dev_priv->sb_lock); 2760 2761 /* Assert data lane reset */ 2762 chv_data_lane_soft_reset(encoder, true); 2763 2764 mutex_unlock(&dev_priv->sb_lock); 2765 } 2766 2767 static void 2768 _intel_dp_set_link_train(struct intel_dp *intel_dp, 2769 uint32_t *DP, 2770 uint8_t dp_train_pat) 2771 { 2772 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2773 struct drm_device *dev = intel_dig_port->base.base.dev; 2774 struct drm_i915_private *dev_priv = to_i915(dev); 2775 enum port port = intel_dig_port->port; 2776 2777 if (dp_train_pat & DP_TRAINING_PATTERN_MASK) 2778 DRM_DEBUG_KMS("Using DP training pattern TPS%d\n", 2779 dp_train_pat & DP_TRAINING_PATTERN_MASK); 2780 2781 if (HAS_DDI(dev_priv)) { 2782 uint32_t temp = I915_READ(DP_TP_CTL(port)); 2783 2784 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) 2785 temp |= DP_TP_CTL_SCRAMBLE_DISABLE; 2786 else 2787 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; 2788 2789 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 2790 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 2791 case DP_TRAINING_PATTERN_DISABLE: 2792 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; 2793 2794 break; 2795 case DP_TRAINING_PATTERN_1: 2796 temp |= DP_TP_CTL_LINK_TRAIN_PAT1; 2797 break; 2798 case DP_TRAINING_PATTERN_2: 2799 temp |= DP_TP_CTL_LINK_TRAIN_PAT2; 2800 break; 2801 case DP_TRAINING_PATTERN_3: 2802 temp |= DP_TP_CTL_LINK_TRAIN_PAT3; 2803 break; 2804 } 2805 I915_WRITE(DP_TP_CTL(port), temp); 2806 2807 } else if ((IS_GEN7(dev_priv) && port == PORT_A) || 2808 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { 2809 *DP &= ~DP_LINK_TRAIN_MASK_CPT; 2810 2811 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 2812 case DP_TRAINING_PATTERN_DISABLE: 2813 *DP |= DP_LINK_TRAIN_OFF_CPT; 2814 break; 2815 case DP_TRAINING_PATTERN_1: 2816 *DP |= DP_LINK_TRAIN_PAT_1_CPT; 2817 break; 2818 case DP_TRAINING_PATTERN_2: 2819 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 2820 break; 2821 case DP_TRAINING_PATTERN_3: 2822 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n"); 2823 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 2824 break; 2825 } 2826 2827 } else { 2828 if (IS_CHERRYVIEW(dev_priv)) 2829 *DP &= ~DP_LINK_TRAIN_MASK_CHV; 2830 else 2831 *DP &= ~DP_LINK_TRAIN_MASK; 2832 2833 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 2834 case DP_TRAINING_PATTERN_DISABLE: 2835 *DP |= DP_LINK_TRAIN_OFF; 2836 break; 2837 case DP_TRAINING_PATTERN_1: 2838 *DP |= DP_LINK_TRAIN_PAT_1; 2839 break; 2840 case DP_TRAINING_PATTERN_2: 2841 *DP |= DP_LINK_TRAIN_PAT_2; 2842 break; 2843 case DP_TRAINING_PATTERN_3: 2844 if (IS_CHERRYVIEW(dev_priv)) { 2845 *DP |= DP_LINK_TRAIN_PAT_3_CHV; 2846 } else { 2847 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n"); 2848 *DP |= DP_LINK_TRAIN_PAT_2; 2849 } 2850 break; 2851 } 2852 } 2853 } 2854 2855 static void intel_dp_enable_port(struct intel_dp *intel_dp, 2856 const struct intel_crtc_state *old_crtc_state) 2857 { 2858 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2859 struct drm_i915_private *dev_priv = to_i915(dev); 2860 2861 /* enable with pattern 1 (as per spec) */ 2862 2863 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1); 2864 2865 /* 2866 * Magic for VLV/CHV. We _must_ first set up the register 2867 * without actually enabling the port, and then do another 2868 * write to enable the port. Otherwise link training will 2869 * fail when the power sequencer is freshly used for this port. 2870 */ 2871 intel_dp->DP |= DP_PORT_EN; 2872 if (old_crtc_state->has_audio) 2873 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 2874 2875 I915_WRITE(intel_dp->output_reg, intel_dp->DP); 2876 POSTING_READ(intel_dp->output_reg); 2877 } 2878 2879 static void intel_enable_dp(struct intel_encoder *encoder, 2880 const struct intel_crtc_state *pipe_config, 2881 const struct drm_connector_state *conn_state) 2882 { 2883 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2884 struct drm_device *dev = encoder->base.dev; 2885 struct drm_i915_private *dev_priv = to_i915(dev); 2886 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 2887 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 2888 enum i915_pipe pipe = crtc->pipe; 2889 2890 if (WARN_ON(dp_reg & DP_PORT_EN)) 2891 return; 2892 2893 pps_lock(intel_dp); 2894 2895 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2896 vlv_init_panel_power_sequencer(intel_dp); 2897 2898 intel_dp_enable_port(intel_dp, pipe_config); 2899 2900 edp_panel_vdd_on(intel_dp); 2901 edp_panel_on(intel_dp); 2902 edp_panel_vdd_off(intel_dp, true); 2903 2904 pps_unlock(intel_dp); 2905 2906 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 2907 unsigned int lane_mask = 0x0; 2908 2909 if (IS_CHERRYVIEW(dev_priv)) 2910 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count); 2911 2912 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), 2913 lane_mask); 2914 } 2915 2916 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 2917 intel_dp_start_link_train(intel_dp); 2918 intel_dp_stop_link_train(intel_dp); 2919 2920 if (pipe_config->has_audio) { 2921 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", 2922 pipe_name(pipe)); 2923 intel_audio_codec_enable(encoder, pipe_config, conn_state); 2924 } 2925 } 2926 2927 static void g4x_enable_dp(struct intel_encoder *encoder, 2928 const struct intel_crtc_state *pipe_config, 2929 const struct drm_connector_state *conn_state) 2930 { 2931 intel_enable_dp(encoder, pipe_config, conn_state); 2932 intel_edp_backlight_on(pipe_config, conn_state); 2933 } 2934 2935 static void vlv_enable_dp(struct intel_encoder *encoder, 2936 const struct intel_crtc_state *pipe_config, 2937 const struct drm_connector_state *conn_state) 2938 { 2939 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2940 2941 intel_edp_backlight_on(pipe_config, conn_state); 2942 intel_psr_enable(intel_dp, pipe_config); 2943 } 2944 2945 static void g4x_pre_enable_dp(struct intel_encoder *encoder, 2946 const struct intel_crtc_state *pipe_config, 2947 const struct drm_connector_state *conn_state) 2948 { 2949 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2950 enum port port = dp_to_dig_port(intel_dp)->port; 2951 2952 intel_dp_prepare(encoder, pipe_config); 2953 2954 /* Only ilk+ has port A */ 2955 if (port == PORT_A) 2956 ironlake_edp_pll_on(intel_dp, pipe_config); 2957 } 2958 2959 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) 2960 { 2961 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2962 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); 2963 enum i915_pipe pipe = intel_dp->pps_pipe; 2964 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); 2965 2966 WARN_ON(intel_dp->active_pipe != INVALID_PIPE); 2967 2968 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) 2969 return; 2970 2971 edp_panel_vdd_off_sync(intel_dp); 2972 2973 /* 2974 * VLV seems to get confused when multiple power seqeuencers 2975 * have the same port selected (even if only one has power/vdd 2976 * enabled). The failure manifests as vlv_wait_port_ready() failing 2977 * CHV on the other hand doesn't seem to mind having the same port 2978 * selected in multiple power seqeuencers, but let's clear the 2979 * port select always when logically disconnecting a power sequencer 2980 * from a port. 2981 */ 2982 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n", 2983 pipe_name(pipe), port_name(intel_dig_port->port)); 2984 I915_WRITE(pp_on_reg, 0); 2985 POSTING_READ(pp_on_reg); 2986 2987 intel_dp->pps_pipe = INVALID_PIPE; 2988 } 2989 2990 static void vlv_steal_power_sequencer(struct drm_device *dev, 2991 enum i915_pipe pipe) 2992 { 2993 struct drm_i915_private *dev_priv = to_i915(dev); 2994 struct intel_encoder *encoder; 2995 2996 lockdep_assert_held(&dev_priv->pps_mutex); 2997 2998 for_each_intel_encoder(dev, encoder) { 2999 struct intel_dp *intel_dp; 3000 enum port port; 3001 3002 if (encoder->type != INTEL_OUTPUT_DP && 3003 encoder->type != INTEL_OUTPUT_EDP) 3004 continue; 3005 3006 intel_dp = enc_to_intel_dp(&encoder->base); 3007 port = dp_to_dig_port(intel_dp)->port; 3008 3009 WARN(intel_dp->active_pipe == pipe, 3010 "stealing pipe %c power sequencer from active (e)DP port %c\n", 3011 pipe_name(pipe), port_name(port)); 3012 3013 if (intel_dp->pps_pipe != pipe) 3014 continue; 3015 3016 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n", 3017 pipe_name(pipe), port_name(port)); 3018 3019 /* make sure vdd is off before we steal it */ 3020 vlv_detach_power_sequencer(intel_dp); 3021 } 3022 } 3023 3024 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp) 3025 { 3026 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3027 struct intel_encoder *encoder = &intel_dig_port->base; 3028 struct drm_device *dev = encoder->base.dev; 3029 struct drm_i915_private *dev_priv = to_i915(dev); 3030 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 3031 3032 lockdep_assert_held(&dev_priv->pps_mutex); 3033 3034 WARN_ON(intel_dp->active_pipe != INVALID_PIPE); 3035 3036 if (intel_dp->pps_pipe != INVALID_PIPE && 3037 intel_dp->pps_pipe != crtc->pipe) { 3038 /* 3039 * If another power sequencer was being used on this 3040 * port previously make sure to turn off vdd there while 3041 * we still have control of it. 3042 */ 3043 vlv_detach_power_sequencer(intel_dp); 3044 } 3045 3046 /* 3047 * We may be stealing the power 3048 * sequencer from another port. 3049 */ 3050 vlv_steal_power_sequencer(dev, crtc->pipe); 3051 3052 intel_dp->active_pipe = crtc->pipe; 3053 3054 if (!intel_dp_is_edp(intel_dp)) 3055 return; 3056 3057 /* now it's all ours */ 3058 intel_dp->pps_pipe = crtc->pipe; 3059 3060 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n", 3061 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port)); 3062 3063 /* init power sequencer on this pipe and port */ 3064 intel_dp_init_panel_power_sequencer(dev, intel_dp); 3065 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true); 3066 } 3067 3068 static void vlv_pre_enable_dp(struct intel_encoder *encoder, 3069 const struct intel_crtc_state *pipe_config, 3070 const struct drm_connector_state *conn_state) 3071 { 3072 vlv_phy_pre_encoder_enable(encoder); 3073 3074 intel_enable_dp(encoder, pipe_config, conn_state); 3075 } 3076 3077 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder, 3078 const struct intel_crtc_state *pipe_config, 3079 const struct drm_connector_state *conn_state) 3080 { 3081 intel_dp_prepare(encoder, pipe_config); 3082 3083 vlv_phy_pre_pll_enable(encoder); 3084 } 3085 3086 static void chv_pre_enable_dp(struct intel_encoder *encoder, 3087 const struct intel_crtc_state *pipe_config, 3088 const struct drm_connector_state *conn_state) 3089 { 3090 chv_phy_pre_encoder_enable(encoder); 3091 3092 intel_enable_dp(encoder, pipe_config, conn_state); 3093 3094 /* Second common lane will stay alive on its own now */ 3095 chv_phy_release_cl2_override(encoder); 3096 } 3097 3098 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder, 3099 const struct intel_crtc_state *pipe_config, 3100 const struct drm_connector_state *conn_state) 3101 { 3102 intel_dp_prepare(encoder, pipe_config); 3103 3104 chv_phy_pre_pll_enable(encoder); 3105 } 3106 3107 static void chv_dp_post_pll_disable(struct intel_encoder *encoder, 3108 const struct intel_crtc_state *pipe_config, 3109 const struct drm_connector_state *conn_state) 3110 { 3111 chv_phy_post_pll_disable(encoder); 3112 } 3113 3114 /* 3115 * Fetch AUX CH registers 0x202 - 0x207 which contain 3116 * link status information 3117 */ 3118 bool 3119 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 3120 { 3121 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status, 3122 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; 3123 } 3124 3125 static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp) 3126 { 3127 uint8_t psr_caps = 0; 3128 3129 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1) 3130 return false; 3131 return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED; 3132 } 3133 3134 static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 3135 { 3136 uint8_t dprx = 0; 3137 3138 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 3139 &dprx) != 1) 3140 return false; 3141 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 3142 } 3143 3144 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp) 3145 { 3146 uint8_t alpm_caps = 0; 3147 3148 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, 3149 &alpm_caps) != 1) 3150 return false; 3151 return alpm_caps & DP_ALPM_CAP; 3152 } 3153 3154 /* These are source-specific values. */ 3155 uint8_t 3156 intel_dp_voltage_max(struct intel_dp *intel_dp) 3157 { 3158 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 3159 enum port port = dp_to_dig_port(intel_dp)->port; 3160 3161 if (INTEL_GEN(dev_priv) >= 9) { 3162 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3163 return intel_ddi_dp_voltage_max(encoder); 3164 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 3165 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 3166 else if (IS_GEN7(dev_priv) && port == PORT_A) 3167 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 3168 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) 3169 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 3170 else 3171 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 3172 } 3173 3174 uint8_t 3175 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) 3176 { 3177 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 3178 enum port port = dp_to_dig_port(intel_dp)->port; 3179 3180 if (INTEL_GEN(dev_priv) >= 9) { 3181 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 3182 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3183 return DP_TRAIN_PRE_EMPH_LEVEL_3; 3184 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3185 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3186 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3187 return DP_TRAIN_PRE_EMPH_LEVEL_1; 3188 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3189 return DP_TRAIN_PRE_EMPH_LEVEL_0; 3190 default: 3191 return DP_TRAIN_PRE_EMPH_LEVEL_0; 3192 } 3193 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 3194 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 3195 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3196 return DP_TRAIN_PRE_EMPH_LEVEL_3; 3197 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3198 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3199 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3200 return DP_TRAIN_PRE_EMPH_LEVEL_1; 3201 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3202 default: 3203 return DP_TRAIN_PRE_EMPH_LEVEL_0; 3204 } 3205 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 3206 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 3207 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3208 return DP_TRAIN_PRE_EMPH_LEVEL_3; 3209 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3210 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3211 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3212 return DP_TRAIN_PRE_EMPH_LEVEL_1; 3213 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3214 default: 3215 return DP_TRAIN_PRE_EMPH_LEVEL_0; 3216 } 3217 } else if (IS_GEN7(dev_priv) && port == PORT_A) { 3218 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 3219 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3220 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3221 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3222 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3223 return DP_TRAIN_PRE_EMPH_LEVEL_1; 3224 default: 3225 return DP_TRAIN_PRE_EMPH_LEVEL_0; 3226 } 3227 } else { 3228 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 3229 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3230 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3231 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3232 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3233 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3234 return DP_TRAIN_PRE_EMPH_LEVEL_1; 3235 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3236 default: 3237 return DP_TRAIN_PRE_EMPH_LEVEL_0; 3238 } 3239 } 3240 } 3241 3242 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp) 3243 { 3244 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3245 unsigned long demph_reg_value, preemph_reg_value, 3246 uniqtranscale_reg_value; 3247 uint8_t train_set = intel_dp->train_set[0]; 3248 3249 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 3250 case DP_TRAIN_PRE_EMPH_LEVEL_0: 3251 preemph_reg_value = 0x0004000; 3252 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3253 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3254 demph_reg_value = 0x2B405555; 3255 uniqtranscale_reg_value = 0x552AB83A; 3256 break; 3257 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3258 demph_reg_value = 0x2B404040; 3259 uniqtranscale_reg_value = 0x5548B83A; 3260 break; 3261 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3262 demph_reg_value = 0x2B245555; 3263 uniqtranscale_reg_value = 0x5560B83A; 3264 break; 3265 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3266 demph_reg_value = 0x2B405555; 3267 uniqtranscale_reg_value = 0x5598DA3A; 3268 break; 3269 default: 3270 return 0; 3271 } 3272 break; 3273 case DP_TRAIN_PRE_EMPH_LEVEL_1: 3274 preemph_reg_value = 0x0002000; 3275 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3276 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3277 demph_reg_value = 0x2B404040; 3278 uniqtranscale_reg_value = 0x5552B83A; 3279 break; 3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3281 demph_reg_value = 0x2B404848; 3282 uniqtranscale_reg_value = 0x5580B83A; 3283 break; 3284 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3285 demph_reg_value = 0x2B404040; 3286 uniqtranscale_reg_value = 0x55ADDA3A; 3287 break; 3288 default: 3289 return 0; 3290 } 3291 break; 3292 case DP_TRAIN_PRE_EMPH_LEVEL_2: 3293 preemph_reg_value = 0x0000000; 3294 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3295 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3296 demph_reg_value = 0x2B305555; 3297 uniqtranscale_reg_value = 0x5570B83A; 3298 break; 3299 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3300 demph_reg_value = 0x2B2B4040; 3301 uniqtranscale_reg_value = 0x55ADDA3A; 3302 break; 3303 default: 3304 return 0; 3305 } 3306 break; 3307 case DP_TRAIN_PRE_EMPH_LEVEL_3: 3308 preemph_reg_value = 0x0006000; 3309 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3310 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3311 demph_reg_value = 0x1B405555; 3312 uniqtranscale_reg_value = 0x55ADDA3A; 3313 break; 3314 default: 3315 return 0; 3316 } 3317 break; 3318 default: 3319 return 0; 3320 } 3321 3322 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value, 3323 uniqtranscale_reg_value, 0); 3324 3325 return 0; 3326 } 3327 3328 static uint32_t chv_signal_levels(struct intel_dp *intel_dp) 3329 { 3330 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3331 u32 deemph_reg_value, margin_reg_value; 3332 bool uniq_trans_scale = false; 3333 uint8_t train_set = intel_dp->train_set[0]; 3334 3335 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 3336 case DP_TRAIN_PRE_EMPH_LEVEL_0: 3337 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3338 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3339 deemph_reg_value = 128; 3340 margin_reg_value = 52; 3341 break; 3342 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3343 deemph_reg_value = 128; 3344 margin_reg_value = 77; 3345 break; 3346 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3347 deemph_reg_value = 128; 3348 margin_reg_value = 102; 3349 break; 3350 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3351 deemph_reg_value = 128; 3352 margin_reg_value = 154; 3353 uniq_trans_scale = true; 3354 break; 3355 default: 3356 return 0; 3357 } 3358 break; 3359 case DP_TRAIN_PRE_EMPH_LEVEL_1: 3360 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3361 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3362 deemph_reg_value = 85; 3363 margin_reg_value = 78; 3364 break; 3365 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3366 deemph_reg_value = 85; 3367 margin_reg_value = 116; 3368 break; 3369 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3370 deemph_reg_value = 85; 3371 margin_reg_value = 154; 3372 break; 3373 default: 3374 return 0; 3375 } 3376 break; 3377 case DP_TRAIN_PRE_EMPH_LEVEL_2: 3378 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3379 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3380 deemph_reg_value = 64; 3381 margin_reg_value = 104; 3382 break; 3383 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3384 deemph_reg_value = 64; 3385 margin_reg_value = 154; 3386 break; 3387 default: 3388 return 0; 3389 } 3390 break; 3391 case DP_TRAIN_PRE_EMPH_LEVEL_3: 3392 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3393 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3394 deemph_reg_value = 43; 3395 margin_reg_value = 154; 3396 break; 3397 default: 3398 return 0; 3399 } 3400 break; 3401 default: 3402 return 0; 3403 } 3404 3405 chv_set_phy_signal_level(encoder, deemph_reg_value, 3406 margin_reg_value, uniq_trans_scale); 3407 3408 return 0; 3409 } 3410 3411 static uint32_t 3412 gen4_signal_levels(uint8_t train_set) 3413 { 3414 uint32_t signal_levels = 0; 3415 3416 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3417 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3418 default: 3419 signal_levels |= DP_VOLTAGE_0_4; 3420 break; 3421 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3422 signal_levels |= DP_VOLTAGE_0_6; 3423 break; 3424 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3425 signal_levels |= DP_VOLTAGE_0_8; 3426 break; 3427 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3428 signal_levels |= DP_VOLTAGE_1_2; 3429 break; 3430 } 3431 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 3432 case DP_TRAIN_PRE_EMPH_LEVEL_0: 3433 default: 3434 signal_levels |= DP_PRE_EMPHASIS_0; 3435 break; 3436 case DP_TRAIN_PRE_EMPH_LEVEL_1: 3437 signal_levels |= DP_PRE_EMPHASIS_3_5; 3438 break; 3439 case DP_TRAIN_PRE_EMPH_LEVEL_2: 3440 signal_levels |= DP_PRE_EMPHASIS_6; 3441 break; 3442 case DP_TRAIN_PRE_EMPH_LEVEL_3: 3443 signal_levels |= DP_PRE_EMPHASIS_9_5; 3444 break; 3445 } 3446 return signal_levels; 3447 } 3448 3449 /* Gen6's DP voltage swing and pre-emphasis control */ 3450 static uint32_t 3451 gen6_edp_signal_levels(uint8_t train_set) 3452 { 3453 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 3454 DP_TRAIN_PRE_EMPHASIS_MASK); 3455 switch (signal_levels) { 3456 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 3457 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 3458 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 3459 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 3460 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 3461 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 3462 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: 3463 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 3464 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 3465 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 3466 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 3467 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 3468 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0: 3469 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 3470 default: 3471 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 3472 "0x%x\n", signal_levels); 3473 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 3474 } 3475 } 3476 3477 /* Gen7's DP voltage swing and pre-emphasis control */ 3478 static uint32_t 3479 gen7_edp_signal_levels(uint8_t train_set) 3480 { 3481 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 3482 DP_TRAIN_PRE_EMPHASIS_MASK); 3483 switch (signal_levels) { 3484 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 3485 return EDP_LINK_TRAIN_400MV_0DB_IVB; 3486 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 3487 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 3488 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 3489 return EDP_LINK_TRAIN_400MV_6DB_IVB; 3490 3491 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 3492 return EDP_LINK_TRAIN_600MV_0DB_IVB; 3493 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 3494 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 3495 3496 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 3497 return EDP_LINK_TRAIN_800MV_0DB_IVB; 3498 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 3499 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 3500 3501 default: 3502 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 3503 "0x%x\n", signal_levels); 3504 return EDP_LINK_TRAIN_500MV_0DB_IVB; 3505 } 3506 } 3507 3508 void 3509 intel_dp_set_signal_levels(struct intel_dp *intel_dp) 3510 { 3511 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3512 enum port port = intel_dig_port->port; 3513 struct drm_device *dev = intel_dig_port->base.base.dev; 3514 struct drm_i915_private *dev_priv = to_i915(dev); 3515 uint32_t signal_levels, mask = 0; 3516 uint8_t train_set = intel_dp->train_set[0]; 3517 3518 if (IS_GEN9_LP(dev_priv) || IS_CANNONLAKE(dev_priv)) { 3519 signal_levels = bxt_signal_levels(intel_dp); 3520 } else if (HAS_DDI(dev_priv)) { 3521 signal_levels = ddi_signal_levels(intel_dp); 3522 mask = DDI_BUF_EMP_MASK; 3523 } else if (IS_CHERRYVIEW(dev_priv)) { 3524 signal_levels = chv_signal_levels(intel_dp); 3525 } else if (IS_VALLEYVIEW(dev_priv)) { 3526 signal_levels = vlv_signal_levels(intel_dp); 3527 } else if (IS_GEN7(dev_priv) && port == PORT_A) { 3528 signal_levels = gen7_edp_signal_levels(train_set); 3529 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; 3530 } else if (IS_GEN6(dev_priv) && port == PORT_A) { 3531 signal_levels = gen6_edp_signal_levels(train_set); 3532 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; 3533 } else { 3534 signal_levels = gen4_signal_levels(train_set); 3535 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK; 3536 } 3537 3538 if (mask) 3539 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels); 3540 3541 DRM_DEBUG_KMS("Using vswing level %d\n", 3542 train_set & DP_TRAIN_VOLTAGE_SWING_MASK); 3543 DRM_DEBUG_KMS("Using pre-emphasis level %d\n", 3544 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> 3545 DP_TRAIN_PRE_EMPHASIS_SHIFT); 3546 3547 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels; 3548 3549 I915_WRITE(intel_dp->output_reg, intel_dp->DP); 3550 POSTING_READ(intel_dp->output_reg); 3551 } 3552 3553 void 3554 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, 3555 uint8_t dp_train_pat) 3556 { 3557 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3558 struct drm_i915_private *dev_priv = 3559 to_i915(intel_dig_port->base.base.dev); 3560 3561 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat); 3562 3563 I915_WRITE(intel_dp->output_reg, intel_dp->DP); 3564 POSTING_READ(intel_dp->output_reg); 3565 } 3566 3567 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) 3568 { 3569 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3570 struct drm_device *dev = intel_dig_port->base.base.dev; 3571 struct drm_i915_private *dev_priv = to_i915(dev); 3572 enum port port = intel_dig_port->port; 3573 uint32_t val; 3574 3575 if (!HAS_DDI(dev_priv)) 3576 return; 3577 3578 val = I915_READ(DP_TP_CTL(port)); 3579 val &= ~DP_TP_CTL_LINK_TRAIN_MASK; 3580 val |= DP_TP_CTL_LINK_TRAIN_IDLE; 3581 I915_WRITE(DP_TP_CTL(port), val); 3582 3583 /* 3584 * On PORT_A we can have only eDP in SST mode. There the only reason 3585 * we need to set idle transmission mode is to work around a HW issue 3586 * where we enable the pipe while not in idle link-training mode. 3587 * In this case there is requirement to wait for a minimum number of 3588 * idle patterns to be sent. 3589 */ 3590 if (port == PORT_A) 3591 return; 3592 3593 if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port), 3594 DP_TP_STATUS_IDLE_DONE, 3595 DP_TP_STATUS_IDLE_DONE, 3596 1)) 3597 DRM_ERROR("Timed out waiting for DP idle patterns\n"); 3598 } 3599 3600 static void 3601 intel_dp_link_down(struct intel_dp *intel_dp) 3602 { 3603 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3604 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); 3605 enum port port = intel_dig_port->port; 3606 struct drm_device *dev = intel_dig_port->base.base.dev; 3607 struct drm_i915_private *dev_priv = to_i915(dev); 3608 uint32_t DP = intel_dp->DP; 3609 3610 if (WARN_ON(HAS_DDI(dev_priv))) 3611 return; 3612 3613 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) 3614 return; 3615 3616 DRM_DEBUG_KMS("\n"); 3617 3618 if ((IS_GEN7(dev_priv) && port == PORT_A) || 3619 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { 3620 DP &= ~DP_LINK_TRAIN_MASK_CPT; 3621 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; 3622 } else { 3623 if (IS_CHERRYVIEW(dev_priv)) 3624 DP &= ~DP_LINK_TRAIN_MASK_CHV; 3625 else 3626 DP &= ~DP_LINK_TRAIN_MASK; 3627 DP |= DP_LINK_TRAIN_PAT_IDLE; 3628 } 3629 I915_WRITE(intel_dp->output_reg, DP); 3630 POSTING_READ(intel_dp->output_reg); 3631 3632 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); 3633 I915_WRITE(intel_dp->output_reg, DP); 3634 POSTING_READ(intel_dp->output_reg); 3635 3636 /* 3637 * HW workaround for IBX, we need to move the port 3638 * to transcoder A after disabling it to allow the 3639 * matching HDMI port to be enabled on transcoder A. 3640 */ 3641 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) { 3642 /* 3643 * We get CPU/PCH FIFO underruns on the other pipe when 3644 * doing the workaround. Sweep them under the rug. 3645 */ 3646 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); 3647 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); 3648 3649 /* always enable with pattern 1 (as per spec) */ 3650 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK); 3651 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1; 3652 I915_WRITE(intel_dp->output_reg, DP); 3653 POSTING_READ(intel_dp->output_reg); 3654 3655 DP &= ~DP_PORT_EN; 3656 I915_WRITE(intel_dp->output_reg, DP); 3657 POSTING_READ(intel_dp->output_reg); 3658 3659 intel_wait_for_vblank_if_active(dev_priv, PIPE_A); 3660 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); 3661 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 3662 } 3663 3664 msleep(intel_dp->panel_power_down_delay); 3665 3666 intel_dp->DP = DP; 3667 3668 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 3669 pps_lock(intel_dp); 3670 intel_dp->active_pipe = INVALID_PIPE; 3671 pps_unlock(intel_dp); 3672 } 3673 } 3674 3675 bool 3676 intel_dp_read_dpcd(struct intel_dp *intel_dp) 3677 { 3678 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, 3679 sizeof(intel_dp->dpcd)) < 0) 3680 return false; /* aux transfer failed */ 3681 3682 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd); 3683 3684 return intel_dp->dpcd[DP_DPCD_REV] != 0; 3685 } 3686 3687 static bool 3688 intel_edp_init_dpcd(struct intel_dp *intel_dp) 3689 { 3690 struct drm_i915_private *dev_priv = 3691 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 3692 3693 /* this function is meant to be called only once */ 3694 WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0); 3695 3696 if (!intel_dp_read_dpcd(intel_dp)) 3697 return false; 3698 3699 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 3700 drm_dp_is_branch(intel_dp->dpcd)); 3701 3702 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 3703 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] & 3704 DP_NO_AUX_HANDSHAKE_LINK_TRAINING; 3705 3706 /* Check if the panel supports PSR */ 3707 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, 3708 intel_dp->psr_dpcd, 3709 sizeof(intel_dp->psr_dpcd)); 3710 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) { 3711 dev_priv->psr.sink_support = true; 3712 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); 3713 } 3714 3715 if (INTEL_GEN(dev_priv) >= 9 && 3716 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) { 3717 uint8_t frame_sync_cap; 3718 3719 dev_priv->psr.sink_support = true; 3720 if (drm_dp_dpcd_readb(&intel_dp->aux, 3721 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP, 3722 &frame_sync_cap) != 1) 3723 frame_sync_cap = 0; 3724 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false; 3725 /* PSR2 needs frame sync as well */ 3726 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync; 3727 DRM_DEBUG_KMS("PSR2 %s on sink", 3728 dev_priv->psr.psr2_support ? "supported" : "not supported"); 3729 3730 if (dev_priv->psr.psr2_support) { 3731 dev_priv->psr.y_cord_support = 3732 intel_dp_get_y_cord_status(intel_dp); 3733 dev_priv->psr.colorimetry_support = 3734 intel_dp_get_colorimetry_status(intel_dp); 3735 dev_priv->psr.alpm = 3736 intel_dp_get_alpm_status(intel_dp); 3737 } 3738 3739 } 3740 3741 /* Read the eDP Display control capabilities registers */ 3742 if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && 3743 drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 3744 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == 3745 sizeof(intel_dp->edp_dpcd)) 3746 DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd), 3747 intel_dp->edp_dpcd); 3748 3749 /* Intermediate frequency support */ 3750 if (intel_dp->edp_dpcd[0] >= 0x03) { /* eDp v1.4 or higher */ 3751 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 3752 int i; 3753 3754 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, 3755 sink_rates, sizeof(sink_rates)); 3756 3757 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { 3758 int val = le16_to_cpu(sink_rates[i]); 3759 3760 if (val == 0) 3761 break; 3762 3763 /* Value read multiplied by 200kHz gives the per-lane 3764 * link rate in kHz. The source rates are, however, 3765 * stored in terms of LS_Clk kHz. The full conversion 3766 * back to symbols is 3767 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) 3768 */ 3769 intel_dp->sink_rates[i] = (val * 200) / 10; 3770 } 3771 intel_dp->num_sink_rates = i; 3772 } 3773 3774 if (intel_dp->num_sink_rates) 3775 intel_dp->use_rate_select = true; 3776 else 3777 intel_dp_set_sink_rates(intel_dp); 3778 3779 intel_dp_set_common_rates(intel_dp); 3780 3781 return true; 3782 } 3783 3784 3785 static bool 3786 intel_dp_get_dpcd(struct intel_dp *intel_dp) 3787 { 3788 u8 sink_count; 3789 3790 if (!intel_dp_read_dpcd(intel_dp)) 3791 return false; 3792 3793 /* Don't clobber cached eDP rates. */ 3794 if (!intel_dp_is_edp(intel_dp)) { 3795 intel_dp_set_sink_rates(intel_dp); 3796 intel_dp_set_common_rates(intel_dp); 3797 } 3798 3799 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &sink_count) <= 0) 3800 return false; 3801 3802 /* 3803 * Sink count can change between short pulse hpd hence 3804 * a member variable in intel_dp will track any changes 3805 * between short pulse interrupts. 3806 */ 3807 intel_dp->sink_count = DP_GET_SINK_COUNT(sink_count); 3808 3809 /* 3810 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that 3811 * a dongle is present but no display. Unless we require to know 3812 * if a dongle is present or not, we don't need to update 3813 * downstream port information. So, an early return here saves 3814 * time from performing other operations which are not required. 3815 */ 3816 if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count) 3817 return false; 3818 3819 if (!drm_dp_is_branch(intel_dp->dpcd)) 3820 return true; /* native DP sink */ 3821 3822 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) 3823 return true; /* no per-port downstream info */ 3824 3825 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0, 3826 intel_dp->downstream_ports, 3827 DP_MAX_DOWNSTREAM_PORTS) < 0) 3828 return false; /* downstream port status fetch failed */ 3829 3830 return true; 3831 } 3832 3833 static bool 3834 intel_dp_can_mst(struct intel_dp *intel_dp) 3835 { 3836 u8 mstm_cap; 3837 3838 if (!i915_modparams.enable_dp_mst) 3839 return false; 3840 3841 if (!intel_dp->can_mst) 3842 return false; 3843 3844 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) 3845 return false; 3846 3847 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1) 3848 return false; 3849 3850 return mstm_cap & DP_MST_CAP; 3851 } 3852 3853 static void 3854 intel_dp_configure_mst(struct intel_dp *intel_dp) 3855 { 3856 if (!i915_modparams.enable_dp_mst) 3857 return; 3858 3859 if (!intel_dp->can_mst) 3860 return; 3861 3862 intel_dp->is_mst = intel_dp_can_mst(intel_dp); 3863 3864 if (intel_dp->is_mst) 3865 DRM_DEBUG_KMS("Sink is MST capable\n"); 3866 else 3867 DRM_DEBUG_KMS("Sink is not MST capable\n"); 3868 3869 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 3870 intel_dp->is_mst); 3871 } 3872 3873 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp) 3874 { 3875 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3876 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 3877 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc); 3878 u8 buf; 3879 int ret = 0; 3880 int count = 0; 3881 int attempts = 10; 3882 3883 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) { 3884 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n"); 3885 ret = -EIO; 3886 goto out; 3887 } 3888 3889 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 3890 buf & ~DP_TEST_SINK_START) < 0) { 3891 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n"); 3892 ret = -EIO; 3893 goto out; 3894 } 3895 3896 do { 3897 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 3898 3899 if (drm_dp_dpcd_readb(&intel_dp->aux, 3900 DP_TEST_SINK_MISC, &buf) < 0) { 3901 ret = -EIO; 3902 goto out; 3903 } 3904 count = buf & DP_TEST_COUNT_MASK; 3905 } while (--attempts && count); 3906 3907 if (attempts == 0) { 3908 DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n"); 3909 ret = -ETIMEDOUT; 3910 } 3911 3912 out: 3913 hsw_enable_ips(intel_crtc); 3914 return ret; 3915 } 3916 3917 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp) 3918 { 3919 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3920 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 3921 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc); 3922 u8 buf; 3923 int ret; 3924 3925 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) 3926 return -EIO; 3927 3928 if (!(buf & DP_TEST_CRC_SUPPORTED)) 3929 return -ENOTTY; 3930 3931 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) 3932 return -EIO; 3933 3934 if (buf & DP_TEST_SINK_START) { 3935 ret = intel_dp_sink_crc_stop(intel_dp); 3936 if (ret) 3937 return ret; 3938 } 3939 3940 hsw_disable_ips(intel_crtc); 3941 3942 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 3943 buf | DP_TEST_SINK_START) < 0) { 3944 hsw_enable_ips(intel_crtc); 3945 return -EIO; 3946 } 3947 3948 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 3949 return 0; 3950 } 3951 3952 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc) 3953 { 3954 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3955 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 3956 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc); 3957 u8 buf; 3958 int count, ret; 3959 int attempts = 6; 3960 3961 ret = intel_dp_sink_crc_start(intel_dp); 3962 if (ret) 3963 return ret; 3964 3965 do { 3966 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 3967 3968 if (drm_dp_dpcd_readb(&intel_dp->aux, 3969 DP_TEST_SINK_MISC, &buf) < 0) { 3970 ret = -EIO; 3971 goto stop; 3972 } 3973 count = buf & DP_TEST_COUNT_MASK; 3974 3975 } while (--attempts && count == 0); 3976 3977 if (attempts == 0) { 3978 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n"); 3979 ret = -ETIMEDOUT; 3980 goto stop; 3981 } 3982 3983 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) { 3984 ret = -EIO; 3985 goto stop; 3986 } 3987 3988 stop: 3989 intel_dp_sink_crc_stop(intel_dp); 3990 return ret; 3991 } 3992 3993 static bool 3994 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) 3995 { 3996 return drm_dp_dpcd_readb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, 3997 sink_irq_vector) == 1; 3998 } 3999 4000 static bool 4001 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) 4002 { 4003 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, 4004 sink_irq_vector, DP_DPRX_ESI_LEN) == 4005 DP_DPRX_ESI_LEN; 4006 } 4007 4008 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp) 4009 { 4010 int status = 0; 4011 int test_link_rate; 4012 uint8_t test_lane_count, test_link_bw; 4013 /* (DP CTS 1.2) 4014 * 4.3.1.11 4015 */ 4016 /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */ 4017 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, 4018 &test_lane_count); 4019 4020 if (status <= 0) { 4021 DRM_DEBUG_KMS("Lane count read failed\n"); 4022 return DP_TEST_NAK; 4023 } 4024 test_lane_count &= DP_MAX_LANE_COUNT_MASK; 4025 4026 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, 4027 &test_link_bw); 4028 if (status <= 0) { 4029 DRM_DEBUG_KMS("Link Rate read failed\n"); 4030 return DP_TEST_NAK; 4031 } 4032 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); 4033 4034 /* Validate the requested link rate and lane count */ 4035 if (!intel_dp_link_params_valid(intel_dp, test_link_rate, 4036 test_lane_count)) 4037 return DP_TEST_NAK; 4038 4039 intel_dp->compliance.test_lane_count = test_lane_count; 4040 intel_dp->compliance.test_link_rate = test_link_rate; 4041 4042 return DP_TEST_ACK; 4043 } 4044 4045 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) 4046 { 4047 uint8_t test_pattern; 4048 uint8_t test_misc; 4049 __be16 h_width, v_height; 4050 int status = 0; 4051 4052 /* Read the TEST_PATTERN (DP CTS 3.1.5) */ 4053 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, 4054 &test_pattern); 4055 if (status <= 0) { 4056 DRM_DEBUG_KMS("Test pattern read failed\n"); 4057 return DP_TEST_NAK; 4058 } 4059 if (test_pattern != DP_COLOR_RAMP) 4060 return DP_TEST_NAK; 4061 4062 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, 4063 &h_width, 2); 4064 if (status <= 0) { 4065 DRM_DEBUG_KMS("H Width read failed\n"); 4066 return DP_TEST_NAK; 4067 } 4068 4069 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, 4070 &v_height, 2); 4071 if (status <= 0) { 4072 DRM_DEBUG_KMS("V Height read failed\n"); 4073 return DP_TEST_NAK; 4074 } 4075 4076 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, 4077 &test_misc); 4078 if (status <= 0) { 4079 DRM_DEBUG_KMS("TEST MISC read failed\n"); 4080 return DP_TEST_NAK; 4081 } 4082 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) 4083 return DP_TEST_NAK; 4084 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) 4085 return DP_TEST_NAK; 4086 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { 4087 case DP_TEST_BIT_DEPTH_6: 4088 intel_dp->compliance.test_data.bpc = 6; 4089 break; 4090 case DP_TEST_BIT_DEPTH_8: 4091 intel_dp->compliance.test_data.bpc = 8; 4092 break; 4093 default: 4094 return DP_TEST_NAK; 4095 } 4096 4097 intel_dp->compliance.test_data.video_pattern = test_pattern; 4098 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 4099 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 4100 /* Set test active flag here so userspace doesn't interrupt things */ 4101 intel_dp->compliance.test_active = 1; 4102 4103 return DP_TEST_ACK; 4104 } 4105 4106 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp) 4107 { 4108 uint8_t test_result = DP_TEST_ACK; 4109 struct intel_connector *intel_connector = intel_dp->attached_connector; 4110 struct drm_connector *connector = &intel_connector->base; 4111 4112 if (intel_connector->detect_edid == NULL || 4113 connector->edid_corrupt || 4114 intel_dp->aux.i2c_defer_count > 6) { 4115 /* Check EDID read for NACKs, DEFERs and corruption 4116 * (DP CTS 1.2 Core r1.1) 4117 * 4.2.2.4 : Failed EDID read, I2C_NAK 4118 * 4.2.2.5 : Failed EDID read, I2C_DEFER 4119 * 4.2.2.6 : EDID corruption detected 4120 * Use failsafe mode for all cases 4121 */ 4122 if (intel_dp->aux.i2c_nack_count > 0 || 4123 intel_dp->aux.i2c_defer_count > 0) 4124 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n", 4125 intel_dp->aux.i2c_nack_count, 4126 intel_dp->aux.i2c_defer_count); 4127 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 4128 } else { 4129 struct edid *block = intel_connector->detect_edid; 4130 4131 /* We have to write the checksum 4132 * of the last block read 4133 */ 4134 block += intel_connector->detect_edid->extensions; 4135 4136 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 4137 block->checksum) <= 0) 4138 DRM_DEBUG_KMS("Failed to write EDID checksum\n"); 4139 4140 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 4141 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 4142 } 4143 4144 /* Set test active flag here so userspace doesn't interrupt things */ 4145 intel_dp->compliance.test_active = 1; 4146 4147 return test_result; 4148 } 4149 4150 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 4151 { 4152 uint8_t test_result = DP_TEST_NAK; 4153 return test_result; 4154 } 4155 4156 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 4157 { 4158 uint8_t response = DP_TEST_NAK; 4159 uint8_t request = 0; 4160 int status; 4161 4162 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 4163 if (status <= 0) { 4164 DRM_DEBUG_KMS("Could not read test request from sink\n"); 4165 goto update_status; 4166 } 4167 4168 switch (request) { 4169 case DP_TEST_LINK_TRAINING: 4170 DRM_DEBUG_KMS("LINK_TRAINING test requested\n"); 4171 response = intel_dp_autotest_link_training(intel_dp); 4172 break; 4173 case DP_TEST_LINK_VIDEO_PATTERN: 4174 DRM_DEBUG_KMS("TEST_PATTERN test requested\n"); 4175 response = intel_dp_autotest_video_pattern(intel_dp); 4176 break; 4177 case DP_TEST_LINK_EDID_READ: 4178 DRM_DEBUG_KMS("EDID test requested\n"); 4179 response = intel_dp_autotest_edid(intel_dp); 4180 break; 4181 case DP_TEST_LINK_PHY_TEST_PATTERN: 4182 DRM_DEBUG_KMS("PHY_PATTERN test requested\n"); 4183 response = intel_dp_autotest_phy_pattern(intel_dp); 4184 break; 4185 default: 4186 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request); 4187 break; 4188 } 4189 4190 if (response & DP_TEST_ACK) 4191 intel_dp->compliance.test_type = request; 4192 4193 update_status: 4194 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 4195 if (status <= 0) 4196 DRM_DEBUG_KMS("Could not write test response to sink\n"); 4197 } 4198 4199 static int 4200 intel_dp_check_mst_status(struct intel_dp *intel_dp) 4201 { 4202 bool bret; 4203 4204 if (intel_dp->is_mst) { 4205 u8 esi[DP_DPRX_ESI_LEN] = { 0 }; 4206 int ret = 0; 4207 int retry; 4208 bool handled; 4209 bret = intel_dp_get_sink_irq_esi(intel_dp, esi); 4210 go_again: 4211 if (bret == true) { 4212 4213 /* check link status - esi[10] = 0x200c */ 4214 if (intel_dp->active_mst_links && 4215 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { 4216 DRM_DEBUG_KMS("channel EQ not ok, retraining\n"); 4217 intel_dp_start_link_train(intel_dp); 4218 intel_dp_stop_link_train(intel_dp); 4219 } 4220 4221 DRM_DEBUG_KMS("got esi %3ph\n", esi); 4222 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); 4223 4224 if (handled) { 4225 for (retry = 0; retry < 3; retry++) { 4226 int wret; 4227 wret = drm_dp_dpcd_write(&intel_dp->aux, 4228 DP_SINK_COUNT_ESI+1, 4229 &esi[1], 3); 4230 if (wret == 3) { 4231 break; 4232 } 4233 } 4234 4235 bret = intel_dp_get_sink_irq_esi(intel_dp, esi); 4236 if (bret == true) { 4237 DRM_DEBUG_KMS("got esi2 %3ph\n", esi); 4238 goto go_again; 4239 } 4240 } else 4241 ret = 0; 4242 4243 return ret; 4244 } else { 4245 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4246 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n"); 4247 intel_dp->is_mst = false; 4248 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); 4249 /* send a hotplug event */ 4250 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev); 4251 } 4252 } 4253 return -EINVAL; 4254 } 4255 4256 static void 4257 intel_dp_retrain_link(struct intel_dp *intel_dp) 4258 { 4259 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4260 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4261 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 4262 4263 /* Suppress underruns caused by re-training */ 4264 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 4265 if (crtc->config->has_pch_encoder) 4266 intel_set_pch_fifo_underrun_reporting(dev_priv, 4267 intel_crtc_pch_transcoder(crtc), false); 4268 4269 intel_dp_start_link_train(intel_dp); 4270 intel_dp_stop_link_train(intel_dp); 4271 4272 /* Keep underrun reporting disabled until things are stable */ 4273 intel_wait_for_vblank(dev_priv, crtc->pipe); 4274 4275 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 4276 if (crtc->config->has_pch_encoder) 4277 intel_set_pch_fifo_underrun_reporting(dev_priv, 4278 intel_crtc_pch_transcoder(crtc), true); 4279 } 4280 4281 static void 4282 intel_dp_check_link_status(struct intel_dp *intel_dp) 4283 { 4284 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; 4285 struct drm_device *dev = intel_dp_to_dev(intel_dp); 4286 u8 link_status[DP_LINK_STATUS_SIZE]; 4287 4288 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 4289 4290 if (!intel_dp_get_link_status(intel_dp, link_status)) { 4291 DRM_ERROR("Failed to get link status\n"); 4292 return; 4293 } 4294 4295 if (!intel_encoder->base.crtc) 4296 return; 4297 4298 if (!to_intel_crtc(intel_encoder->base.crtc)->active) 4299 return; 4300 4301 /* 4302 * Validate the cached values of intel_dp->link_rate and 4303 * intel_dp->lane_count before attempting to retrain. 4304 */ 4305 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 4306 intel_dp->lane_count)) 4307 return; 4308 4309 /* Retrain if Channel EQ or CR not ok */ 4310 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { 4311 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", 4312 intel_encoder->base.name); 4313 4314 intel_dp_retrain_link(intel_dp); 4315 } 4316 } 4317 4318 /* 4319 * According to DP spec 4320 * 5.1.2: 4321 * 1. Read DPCD 4322 * 2. Configure link according to Receiver Capabilities 4323 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 4324 * 4. Check link status on receipt of hot-plug interrupt 4325 * 4326 * intel_dp_short_pulse - handles short pulse interrupts 4327 * when full detection is not required. 4328 * Returns %true if short pulse is handled and full detection 4329 * is NOT required and %false otherwise. 4330 */ 4331 static bool 4332 intel_dp_short_pulse(struct intel_dp *intel_dp) 4333 { 4334 struct drm_device *dev = intel_dp_to_dev(intel_dp); 4335 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; 4336 u8 sink_irq_vector = 0; 4337 u8 old_sink_count = intel_dp->sink_count; 4338 bool ret; 4339 4340 /* 4341 * Clearing compliance test variables to allow capturing 4342 * of values for next automated test request. 4343 */ 4344 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 4345 4346 /* 4347 * Now read the DPCD to see if it's actually running 4348 * If the current value of sink count doesn't match with 4349 * the value that was stored earlier or dpcd read failed 4350 * we need to do full detection 4351 */ 4352 ret = intel_dp_get_dpcd(intel_dp); 4353 4354 if ((old_sink_count != intel_dp->sink_count) || !ret) { 4355 /* No need to proceed if we are going to do full detect */ 4356 return false; 4357 } 4358 4359 /* Try to read the source of the interrupt */ 4360 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 4361 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) && 4362 sink_irq_vector != 0) { 4363 /* Clear interrupt source */ 4364 drm_dp_dpcd_writeb(&intel_dp->aux, 4365 DP_DEVICE_SERVICE_IRQ_VECTOR, 4366 sink_irq_vector); 4367 4368 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) 4369 intel_dp_handle_test_request(intel_dp); 4370 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) 4371 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); 4372 } 4373 4374 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 4375 intel_dp_check_link_status(intel_dp); 4376 drm_modeset_unlock(&dev->mode_config.connection_mutex); 4377 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 4378 DRM_DEBUG_KMS("Link Training Compliance Test requested\n"); 4379 /* Send a Hotplug Uevent to userspace to start modeset */ 4380 drm_kms_helper_hotplug_event(intel_encoder->base.dev); 4381 } 4382 4383 return true; 4384 } 4385 4386 /* XXX this is probably wrong for multiple downstream ports */ 4387 static enum drm_connector_status 4388 intel_dp_detect_dpcd(struct intel_dp *intel_dp) 4389 { 4390 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 4391 uint8_t *dpcd = intel_dp->dpcd; 4392 uint8_t type; 4393 4394 if (lspcon->active) 4395 lspcon_resume(lspcon); 4396 4397 if (!intel_dp_get_dpcd(intel_dp)) 4398 return connector_status_disconnected; 4399 4400 if (intel_dp_is_edp(intel_dp)) 4401 return connector_status_connected; 4402 4403 /* if there's no downstream port, we're done */ 4404 if (!drm_dp_is_branch(dpcd)) 4405 return connector_status_connected; 4406 4407 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 4408 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 4409 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { 4410 4411 return intel_dp->sink_count ? 4412 connector_status_connected : connector_status_disconnected; 4413 } 4414 4415 if (intel_dp_can_mst(intel_dp)) 4416 return connector_status_connected; 4417 4418 /* If no HPD, poke DDC gently */ 4419 if (drm_probe_ddc(&intel_dp->aux.ddc)) 4420 return connector_status_connected; 4421 4422 /* Well we tried, say unknown for unreliable port types */ 4423 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 4424 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 4425 if (type == DP_DS_PORT_TYPE_VGA || 4426 type == DP_DS_PORT_TYPE_NON_EDID) 4427 return connector_status_unknown; 4428 } else { 4429 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 4430 DP_DWN_STRM_PORT_TYPE_MASK; 4431 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || 4432 type == DP_DWN_STRM_PORT_TYPE_OTHER) 4433 return connector_status_unknown; 4434 } 4435 4436 /* Anything else is out of spec, warn and ignore */ 4437 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); 4438 return connector_status_disconnected; 4439 } 4440 4441 static enum drm_connector_status 4442 edp_detect(struct intel_dp *intel_dp) 4443 { 4444 struct drm_device *dev = intel_dp_to_dev(intel_dp); 4445 struct drm_i915_private *dev_priv = to_i915(dev); 4446 enum drm_connector_status status; 4447 4448 status = intel_panel_detect(dev_priv); 4449 if (status == connector_status_unknown) 4450 status = connector_status_connected; 4451 4452 return status; 4453 } 4454 4455 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, 4456 struct intel_digital_port *port) 4457 { 4458 u32 bit; 4459 4460 switch (port->port) { 4461 case PORT_B: 4462 bit = SDE_PORTB_HOTPLUG; 4463 break; 4464 case PORT_C: 4465 bit = SDE_PORTC_HOTPLUG; 4466 break; 4467 case PORT_D: 4468 bit = SDE_PORTD_HOTPLUG; 4469 break; 4470 default: 4471 MISSING_CASE(port->port); 4472 return false; 4473 } 4474 4475 return I915_READ(SDEISR) & bit; 4476 } 4477 4478 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv, 4479 struct intel_digital_port *port) 4480 { 4481 u32 bit; 4482 4483 switch (port->port) { 4484 case PORT_B: 4485 bit = SDE_PORTB_HOTPLUG_CPT; 4486 break; 4487 case PORT_C: 4488 bit = SDE_PORTC_HOTPLUG_CPT; 4489 break; 4490 case PORT_D: 4491 bit = SDE_PORTD_HOTPLUG_CPT; 4492 break; 4493 default: 4494 MISSING_CASE(port->port); 4495 return false; 4496 } 4497 4498 return I915_READ(SDEISR) & bit; 4499 } 4500 4501 static bool spt_digital_port_connected(struct drm_i915_private *dev_priv, 4502 struct intel_digital_port *port) 4503 { 4504 u32 bit; 4505 4506 switch (port->port) { 4507 case PORT_A: 4508 bit = SDE_PORTA_HOTPLUG_SPT; 4509 break; 4510 case PORT_E: 4511 bit = SDE_PORTE_HOTPLUG_SPT; 4512 break; 4513 default: 4514 return cpt_digital_port_connected(dev_priv, port); 4515 } 4516 4517 return I915_READ(SDEISR) & bit; 4518 } 4519 4520 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv, 4521 struct intel_digital_port *port) 4522 { 4523 u32 bit; 4524 4525 switch (port->port) { 4526 case PORT_B: 4527 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; 4528 break; 4529 case PORT_C: 4530 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; 4531 break; 4532 case PORT_D: 4533 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; 4534 break; 4535 default: 4536 MISSING_CASE(port->port); 4537 return false; 4538 } 4539 4540 return I915_READ(PORT_HOTPLUG_STAT) & bit; 4541 } 4542 4543 static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv, 4544 struct intel_digital_port *port) 4545 { 4546 u32 bit; 4547 4548 switch (port->port) { 4549 case PORT_B: 4550 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45; 4551 break; 4552 case PORT_C: 4553 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45; 4554 break; 4555 case PORT_D: 4556 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45; 4557 break; 4558 default: 4559 MISSING_CASE(port->port); 4560 return false; 4561 } 4562 4563 return I915_READ(PORT_HOTPLUG_STAT) & bit; 4564 } 4565 4566 static bool ilk_digital_port_connected(struct drm_i915_private *dev_priv, 4567 struct intel_digital_port *port) 4568 { 4569 if (port->port == PORT_A) 4570 return I915_READ(DEISR) & DE_DP_A_HOTPLUG; 4571 else 4572 return ibx_digital_port_connected(dev_priv, port); 4573 } 4574 4575 static bool snb_digital_port_connected(struct drm_i915_private *dev_priv, 4576 struct intel_digital_port *port) 4577 { 4578 if (port->port == PORT_A) 4579 return I915_READ(DEISR) & DE_DP_A_HOTPLUG; 4580 else 4581 return cpt_digital_port_connected(dev_priv, port); 4582 } 4583 4584 static bool ivb_digital_port_connected(struct drm_i915_private *dev_priv, 4585 struct intel_digital_port *port) 4586 { 4587 if (port->port == PORT_A) 4588 return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB; 4589 else 4590 return cpt_digital_port_connected(dev_priv, port); 4591 } 4592 4593 static bool bdw_digital_port_connected(struct drm_i915_private *dev_priv, 4594 struct intel_digital_port *port) 4595 { 4596 if (port->port == PORT_A) 4597 return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG; 4598 else 4599 return cpt_digital_port_connected(dev_priv, port); 4600 } 4601 4602 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv, 4603 struct intel_digital_port *intel_dig_port) 4604 { 4605 struct intel_encoder *intel_encoder = &intel_dig_port->base; 4606 enum port port; 4607 u32 bit; 4608 4609 port = intel_hpd_pin_to_port(intel_encoder->hpd_pin); 4610 switch (port) { 4611 case PORT_A: 4612 bit = BXT_DE_PORT_HP_DDIA; 4613 break; 4614 case PORT_B: 4615 bit = BXT_DE_PORT_HP_DDIB; 4616 break; 4617 case PORT_C: 4618 bit = BXT_DE_PORT_HP_DDIC; 4619 break; 4620 default: 4621 MISSING_CASE(port); 4622 return false; 4623 } 4624 4625 return I915_READ(GEN8_DE_PORT_ISR) & bit; 4626 } 4627 4628 /* 4629 * intel_digital_port_connected - is the specified port connected? 4630 * @dev_priv: i915 private structure 4631 * @port: the port to test 4632 * 4633 * Return %true if @port is connected, %false otherwise. 4634 */ 4635 bool intel_digital_port_connected(struct drm_i915_private *dev_priv, 4636 struct intel_digital_port *port) 4637 { 4638 if (HAS_GMCH_DISPLAY(dev_priv)) { 4639 if (IS_GM45(dev_priv)) 4640 return gm45_digital_port_connected(dev_priv, port); 4641 else 4642 return g4x_digital_port_connected(dev_priv, port); 4643 } 4644 4645 if (IS_GEN5(dev_priv)) 4646 return ilk_digital_port_connected(dev_priv, port); 4647 else if (IS_GEN6(dev_priv)) 4648 return snb_digital_port_connected(dev_priv, port); 4649 else if (IS_GEN7(dev_priv)) 4650 return ivb_digital_port_connected(dev_priv, port); 4651 else if (IS_GEN8(dev_priv)) 4652 return bdw_digital_port_connected(dev_priv, port); 4653 else if (IS_GEN9_LP(dev_priv)) 4654 return bxt_digital_port_connected(dev_priv, port); 4655 else 4656 return spt_digital_port_connected(dev_priv, port); 4657 } 4658 4659 static struct edid * 4660 intel_dp_get_edid(struct intel_dp *intel_dp) 4661 { 4662 struct intel_connector *intel_connector = intel_dp->attached_connector; 4663 4664 /* use cached edid if we have one */ 4665 if (intel_connector->edid) { 4666 /* invalid edid */ 4667 if (IS_ERR(intel_connector->edid)) 4668 return NULL; 4669 4670 return drm_edid_duplicate(intel_connector->edid); 4671 } else 4672 return drm_get_edid(&intel_connector->base, 4673 &intel_dp->aux.ddc); 4674 } 4675 4676 static void 4677 intel_dp_set_edid(struct intel_dp *intel_dp) 4678 { 4679 struct intel_connector *intel_connector = intel_dp->attached_connector; 4680 struct edid *edid; 4681 4682 intel_dp_unset_edid(intel_dp); 4683 edid = intel_dp_get_edid(intel_dp); 4684 intel_connector->detect_edid = edid; 4685 4686 intel_dp->has_audio = drm_detect_monitor_audio(edid); 4687 } 4688 4689 static void 4690 intel_dp_unset_edid(struct intel_dp *intel_dp) 4691 { 4692 struct intel_connector *intel_connector = intel_dp->attached_connector; 4693 4694 kfree(intel_connector->detect_edid); 4695 intel_connector->detect_edid = NULL; 4696 4697 intel_dp->has_audio = false; 4698 } 4699 4700 static int 4701 intel_dp_long_pulse(struct intel_connector *intel_connector) 4702 { 4703 struct drm_connector *connector = &intel_connector->base; 4704 struct intel_dp *intel_dp = intel_attached_dp(connector); 4705 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4706 struct intel_encoder *intel_encoder = &intel_dig_port->base; 4707 struct drm_device *dev = connector->dev; 4708 enum drm_connector_status status; 4709 u8 sink_irq_vector = 0; 4710 4711 WARN_ON(!drm_modeset_is_locked(&connector->dev->mode_config.connection_mutex)); 4712 4713 intel_display_power_get(to_i915(dev), intel_dp->aux_power_domain); 4714 4715 /* Can't disconnect eDP, but you can close the lid... */ 4716 if (intel_dp_is_edp(intel_dp)) 4717 status = edp_detect(intel_dp); 4718 else if (intel_digital_port_connected(to_i915(dev), 4719 dp_to_dig_port(intel_dp))) 4720 status = intel_dp_detect_dpcd(intel_dp); 4721 else 4722 status = connector_status_disconnected; 4723 4724 if (status == connector_status_disconnected) { 4725 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 4726 4727 if (intel_dp->is_mst) { 4728 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", 4729 intel_dp->is_mst, 4730 intel_dp->mst_mgr.mst_state); 4731 intel_dp->is_mst = false; 4732 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 4733 intel_dp->is_mst); 4734 } 4735 4736 goto out; 4737 } 4738 4739 if (intel_encoder->type != INTEL_OUTPUT_EDP) 4740 intel_encoder->type = INTEL_OUTPUT_DP; 4741 4742 if (intel_dp->reset_link_params) { 4743 /* Initial max link lane count */ 4744 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 4745 4746 /* Initial max link rate */ 4747 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 4748 4749 intel_dp->reset_link_params = false; 4750 } 4751 4752 intel_dp_print_rates(intel_dp); 4753 4754 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4755 drm_dp_is_branch(intel_dp->dpcd)); 4756 4757 intel_dp_configure_mst(intel_dp); 4758 4759 if (intel_dp->is_mst) { 4760 /* 4761 * If we are in MST mode then this connector 4762 * won't appear connected or have anything 4763 * with EDID on it 4764 */ 4765 status = connector_status_disconnected; 4766 goto out; 4767 } else { 4768 /* 4769 * If display is now connected check links status, 4770 * there has been known issues of link loss triggerring 4771 * long pulse. 4772 * 4773 * Some sinks (eg. ASUS PB287Q) seem to perform some 4774 * weird HPD ping pong during modesets. So we can apparently 4775 * end up with HPD going low during a modeset, and then 4776 * going back up soon after. And once that happens we must 4777 * retrain the link to get a picture. That's in case no 4778 * userspace component reacted to intermittent HPD dip. 4779 */ 4780 intel_dp_check_link_status(intel_dp); 4781 } 4782 4783 /* 4784 * Clearing NACK and defer counts to get their exact values 4785 * while reading EDID which are required by Compliance tests 4786 * 4.2.2.4 and 4.2.2.5 4787 */ 4788 intel_dp->aux.i2c_nack_count = 0; 4789 intel_dp->aux.i2c_defer_count = 0; 4790 4791 intel_dp_set_edid(intel_dp); 4792 if (intel_dp_is_edp(intel_dp) || intel_connector->detect_edid) 4793 status = connector_status_connected; 4794 intel_dp->detect_done = true; 4795 4796 /* Try to read the source of the interrupt */ 4797 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 4798 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) && 4799 sink_irq_vector != 0) { 4800 /* Clear interrupt source */ 4801 drm_dp_dpcd_writeb(&intel_dp->aux, 4802 DP_DEVICE_SERVICE_IRQ_VECTOR, 4803 sink_irq_vector); 4804 4805 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) 4806 intel_dp_handle_test_request(intel_dp); 4807 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) 4808 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); 4809 } 4810 4811 out: 4812 if (status != connector_status_connected && !intel_dp->is_mst) 4813 intel_dp_unset_edid(intel_dp); 4814 4815 intel_display_power_put(to_i915(dev), intel_dp->aux_power_domain); 4816 return status; 4817 } 4818 4819 static int 4820 intel_dp_detect(struct drm_connector *connector, 4821 struct drm_modeset_acquire_ctx *ctx, 4822 bool force) 4823 { 4824 struct intel_dp *intel_dp = intel_attached_dp(connector); 4825 int status = connector->status; 4826 4827 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 4828 connector->base.id, connector->name); 4829 4830 /* If full detect is not performed yet, do a full detect */ 4831 if (!intel_dp->detect_done) 4832 status = intel_dp_long_pulse(intel_dp->attached_connector); 4833 4834 intel_dp->detect_done = false; 4835 4836 return status; 4837 } 4838 4839 static void 4840 intel_dp_force(struct drm_connector *connector) 4841 { 4842 struct intel_dp *intel_dp = intel_attached_dp(connector); 4843 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; 4844 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 4845 4846 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 4847 connector->base.id, connector->name); 4848 intel_dp_unset_edid(intel_dp); 4849 4850 if (connector->status != connector_status_connected) 4851 return; 4852 4853 intel_display_power_get(dev_priv, intel_dp->aux_power_domain); 4854 4855 intel_dp_set_edid(intel_dp); 4856 4857 intel_display_power_put(dev_priv, intel_dp->aux_power_domain); 4858 4859 if (intel_encoder->type != INTEL_OUTPUT_EDP) 4860 intel_encoder->type = INTEL_OUTPUT_DP; 4861 } 4862 4863 static int intel_dp_get_modes(struct drm_connector *connector) 4864 { 4865 struct intel_connector *intel_connector = to_intel_connector(connector); 4866 struct edid *edid; 4867 4868 edid = intel_connector->detect_edid; 4869 if (edid) { 4870 int ret = intel_connector_update_modes(connector, edid); 4871 if (ret) 4872 return ret; 4873 } 4874 4875 /* if eDP has no EDID, fall back to fixed mode */ 4876 if (intel_dp_is_edp(intel_attached_dp(connector)) && 4877 intel_connector->panel.fixed_mode) { 4878 struct drm_display_mode *mode; 4879 4880 mode = drm_mode_duplicate(connector->dev, 4881 intel_connector->panel.fixed_mode); 4882 if (mode) { 4883 drm_mode_probed_add(connector, mode); 4884 return 1; 4885 } 4886 } 4887 4888 return 0; 4889 } 4890 4891 static int 4892 intel_dp_connector_register(struct drm_connector *connector) 4893 { 4894 struct intel_dp *intel_dp = intel_attached_dp(connector); 4895 int ret; 4896 4897 ret = intel_connector_register(connector); 4898 if (ret) 4899 return ret; 4900 4901 i915_debugfs_connector_add(connector); 4902 4903 DRM_DEBUG_KMS("registering %s bus for %s\n", 4904 intel_dp->aux.name, connector->kdev->kobj.name); 4905 4906 intel_dp->aux.dev = connector->kdev; 4907 return drm_dp_aux_register(&intel_dp->aux); 4908 } 4909 4910 static void 4911 intel_dp_connector_unregister(struct drm_connector *connector) 4912 { 4913 drm_dp_aux_unregister(&intel_attached_dp(connector)->aux); 4914 intel_connector_unregister(connector); 4915 } 4916 4917 static void 4918 intel_dp_connector_destroy(struct drm_connector *connector) 4919 { 4920 struct intel_connector *intel_connector = to_intel_connector(connector); 4921 4922 kfree(intel_connector->detect_edid); 4923 4924 if (!IS_ERR_OR_NULL(intel_connector->edid)) 4925 kfree(intel_connector->edid); 4926 4927 /* 4928 * Can't call intel_dp_is_edp() since the encoder may have been 4929 * destroyed already. 4930 */ 4931 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 4932 intel_panel_fini(&intel_connector->panel); 4933 4934 drm_connector_cleanup(connector); 4935 kfree(connector); 4936 } 4937 4938 void intel_dp_encoder_destroy(struct drm_encoder *encoder) 4939 { 4940 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 4941 struct intel_dp *intel_dp = &intel_dig_port->dp; 4942 4943 intel_dp_mst_encoder_cleanup(intel_dig_port); 4944 if (intel_dp_is_edp(intel_dp)) { 4945 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 4946 /* 4947 * vdd might still be enabled do to the delayed vdd off. 4948 * Make sure vdd is actually turned off here. 4949 */ 4950 pps_lock(intel_dp); 4951 edp_panel_vdd_off_sync(intel_dp); 4952 pps_unlock(intel_dp); 4953 4954 if (intel_dp->edp_notifier.notifier_call) { 4955 unregister_reboot_notifier(&intel_dp->edp_notifier); 4956 intel_dp->edp_notifier.notifier_call = NULL; 4957 } 4958 } 4959 4960 intel_dp_aux_fini(intel_dp); 4961 4962 drm_encoder_cleanup(encoder); 4963 kfree(intel_dig_port); 4964 } 4965 4966 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) 4967 { 4968 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 4969 4970 if (!intel_dp_is_edp(intel_dp)) 4971 return; 4972 4973 /* 4974 * vdd might still be enabled do to the delayed vdd off. 4975 * Make sure vdd is actually turned off here. 4976 */ 4977 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 4978 pps_lock(intel_dp); 4979 edp_panel_vdd_off_sync(intel_dp); 4980 pps_unlock(intel_dp); 4981 } 4982 4983 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) 4984 { 4985 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4986 struct drm_device *dev = intel_dig_port->base.base.dev; 4987 struct drm_i915_private *dev_priv = to_i915(dev); 4988 4989 lockdep_assert_held(&dev_priv->pps_mutex); 4990 4991 if (!edp_have_panel_vdd(intel_dp)) 4992 return; 4993 4994 /* 4995 * The VDD bit needs a power domain reference, so if the bit is 4996 * already enabled when we boot or resume, grab this reference and 4997 * schedule a vdd off, so we don't hold on to the reference 4998 * indefinitely. 4999 */ 5000 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n"); 5001 intel_display_power_get(dev_priv, intel_dp->aux_power_domain); 5002 5003 edp_panel_vdd_schedule_off(intel_dp); 5004 } 5005 5006 static enum i915_pipe vlv_active_pipe(struct intel_dp *intel_dp) 5007 { 5008 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 5009 5010 if ((intel_dp->DP & DP_PORT_EN) == 0) 5011 return INVALID_PIPE; 5012 5013 if (IS_CHERRYVIEW(dev_priv)) 5014 return DP_PORT_TO_PIPE_CHV(intel_dp->DP); 5015 else 5016 return PORT_TO_PIPE(intel_dp->DP); 5017 } 5018 5019 void intel_dp_encoder_reset(struct drm_encoder *encoder) 5020 { 5021 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 5022 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5023 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 5024 5025 if (!HAS_DDI(dev_priv)) 5026 intel_dp->DP = I915_READ(intel_dp->output_reg); 5027 5028 if (lspcon->active) 5029 lspcon_resume(lspcon); 5030 5031 intel_dp->reset_link_params = true; 5032 5033 pps_lock(intel_dp); 5034 5035 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 5036 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 5037 5038 if (intel_dp_is_edp(intel_dp)) { 5039 /* Reinit the power sequencer, in case BIOS did something with it. */ 5040 intel_dp_pps_init(encoder->dev, intel_dp); 5041 intel_edp_panel_vdd_sanitize(intel_dp); 5042 } 5043 5044 pps_unlock(intel_dp); 5045 } 5046 5047 static const struct drm_connector_funcs intel_dp_connector_funcs = { 5048 .force = intel_dp_force, 5049 .fill_modes = drm_helper_probe_single_connector_modes, 5050 .atomic_get_property = intel_digital_connector_atomic_get_property, 5051 .atomic_set_property = intel_digital_connector_atomic_set_property, 5052 .late_register = intel_dp_connector_register, 5053 .early_unregister = intel_dp_connector_unregister, 5054 .destroy = intel_dp_connector_destroy, 5055 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 5056 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 5057 }; 5058 5059 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 5060 .detect_ctx = intel_dp_detect, 5061 .get_modes = intel_dp_get_modes, 5062 .mode_valid = intel_dp_mode_valid, 5063 .atomic_check = intel_digital_connector_atomic_check, 5064 }; 5065 5066 static const struct drm_encoder_funcs intel_dp_enc_funcs = { 5067 .reset = intel_dp_encoder_reset, 5068 .destroy = intel_dp_encoder_destroy, 5069 }; 5070 5071 enum irqreturn 5072 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) 5073 { 5074 struct intel_dp *intel_dp = &intel_dig_port->dp; 5075 struct drm_device *dev = intel_dig_port->base.base.dev; 5076 struct drm_i915_private *dev_priv = to_i915(dev); 5077 enum irqreturn ret = IRQ_NONE; 5078 5079 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP && 5080 intel_dig_port->base.type != INTEL_OUTPUT_HDMI) 5081 intel_dig_port->base.type = INTEL_OUTPUT_DP; 5082 5083 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { 5084 /* 5085 * vdd off can generate a long pulse on eDP which 5086 * would require vdd on to handle it, and thus we 5087 * would end up in an endless cycle of 5088 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..." 5089 */ 5090 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n", 5091 port_name(intel_dig_port->port)); 5092 return IRQ_HANDLED; 5093 } 5094 5095 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n", 5096 port_name(intel_dig_port->port), 5097 long_hpd ? "long" : "short"); 5098 5099 if (long_hpd) { 5100 intel_dp->reset_link_params = true; 5101 intel_dp->detect_done = false; 5102 return IRQ_NONE; 5103 } 5104 5105 intel_display_power_get(dev_priv, intel_dp->aux_power_domain); 5106 5107 if (intel_dp->is_mst) { 5108 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) { 5109 /* 5110 * If we were in MST mode, and device is not 5111 * there, get out of MST mode 5112 */ 5113 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", 5114 intel_dp->is_mst, intel_dp->mst_mgr.mst_state); 5115 intel_dp->is_mst = false; 5116 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 5117 intel_dp->is_mst); 5118 intel_dp->detect_done = false; 5119 goto put_power; 5120 } 5121 } 5122 5123 if (!intel_dp->is_mst) { 5124 if (!intel_dp_short_pulse(intel_dp)) { 5125 intel_dp->detect_done = false; 5126 goto put_power; 5127 } 5128 } 5129 5130 ret = IRQ_HANDLED; 5131 5132 put_power: 5133 intel_display_power_put(dev_priv, intel_dp->aux_power_domain); 5134 5135 return ret; 5136 } 5137 5138 /* check the VBT to see whether the eDP is on another port */ 5139 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) 5140 { 5141 /* 5142 * eDP not supported on g4x. so bail out early just 5143 * for a bit extra safety in case the VBT is bonkers. 5144 */ 5145 if (INTEL_GEN(dev_priv) < 5) 5146 return false; 5147 5148 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A) 5149 return true; 5150 5151 return intel_bios_is_port_edp(dev_priv, port); 5152 } 5153 5154 static void 5155 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 5156 { 5157 struct drm_i915_private *dev_priv = to_i915(connector->dev); 5158 5159 intel_attach_force_audio_property(connector); 5160 intel_attach_broadcast_rgb_property(connector); 5161 5162 if (intel_dp_is_edp(intel_dp)) { 5163 u32 allowed_scalers; 5164 5165 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); 5166 if (!HAS_GMCH_DISPLAY(dev_priv)) 5167 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); 5168 5169 drm_connector_attach_scaling_mode_property(connector, allowed_scalers); 5170 5171 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT; 5172 5173 } 5174 } 5175 5176 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) 5177 { 5178 intel_dp->panel_power_off_time = ktime_get_boottime(); 5179 intel_dp->last_power_on = jiffies; 5180 intel_dp->last_backlight_off = jiffies; 5181 } 5182 5183 static void 5184 intel_pps_readout_hw_state(struct drm_i915_private *dev_priv, 5185 struct intel_dp *intel_dp, struct edp_power_seq *seq) 5186 { 5187 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0; 5188 struct pps_registers regs; 5189 5190 intel_pps_get_registers(dev_priv, intel_dp, ®s); 5191 5192 /* Workaround: Need to write PP_CONTROL with the unlock key as 5193 * the very first thing. */ 5194 pp_ctl = ironlake_get_pp_control(intel_dp); 5195 5196 pp_on = I915_READ(regs.pp_on); 5197 pp_off = I915_READ(regs.pp_off); 5198 if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv)) { 5199 I915_WRITE(regs.pp_ctrl, pp_ctl); 5200 pp_div = I915_READ(regs.pp_div); 5201 } 5202 5203 /* Pull timing values out of registers */ 5204 seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> 5205 PANEL_POWER_UP_DELAY_SHIFT; 5206 5207 seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> 5208 PANEL_LIGHT_ON_DELAY_SHIFT; 5209 5210 seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> 5211 PANEL_LIGHT_OFF_DELAY_SHIFT; 5212 5213 seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> 5214 PANEL_POWER_DOWN_DELAY_SHIFT; 5215 5216 if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) { 5217 seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >> 5218 BXT_POWER_CYCLE_DELAY_SHIFT) * 1000; 5219 } else { 5220 seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> 5221 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; 5222 } 5223 } 5224 5225 static void 5226 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq) 5227 { 5228 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 5229 state_name, 5230 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); 5231 } 5232 5233 static void 5234 intel_pps_verify_state(struct drm_i915_private *dev_priv, 5235 struct intel_dp *intel_dp) 5236 { 5237 struct edp_power_seq hw; 5238 struct edp_power_seq *sw = &intel_dp->pps_delays; 5239 5240 intel_pps_readout_hw_state(dev_priv, intel_dp, &hw); 5241 5242 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 || 5243 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) { 5244 DRM_ERROR("PPS state mismatch\n"); 5245 intel_pps_dump_state("sw", sw); 5246 intel_pps_dump_state("hw", &hw); 5247 } 5248 } 5249 5250 static void 5251 intel_dp_init_panel_power_sequencer(struct drm_device *dev, 5252 struct intel_dp *intel_dp) 5253 { 5254 struct drm_i915_private *dev_priv = to_i915(dev); 5255 struct edp_power_seq cur, vbt, spec, 5256 *final = &intel_dp->pps_delays; 5257 5258 lockdep_assert_held(&dev_priv->pps_mutex); 5259 5260 /* already initialized? */ 5261 if (final->t11_t12 != 0) 5262 return; 5263 5264 intel_pps_readout_hw_state(dev_priv, intel_dp, &cur); 5265 5266 intel_pps_dump_state("cur", &cur); 5267 5268 vbt = dev_priv->vbt.edp.pps; 5269 /* On Toshiba Satellite P50-C-18C system the VBT T12 delay 5270 * of 500ms appears to be too short. Ocassionally the panel 5271 * just fails to power back on. Increasing the delay to 800ms 5272 * seems sufficient to avoid this problem. 5273 */ 5274 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { 5275 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10); 5276 DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", 5277 vbt.t11_t12); 5278 } 5279 /* T11_T12 delay is special and actually in units of 100ms, but zero 5280 * based in the hw (so we need to add 100 ms). But the sw vbt 5281 * table multiplies it with 1000 to make it in units of 100usec, 5282 * too. */ 5283 vbt.t11_t12 += 100 * 10; 5284 5285 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of 5286 * our hw here, which are all in 100usec. */ 5287 spec.t1_t3 = 210 * 10; 5288 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ 5289 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ 5290 spec.t10 = 500 * 10; 5291 /* This one is special and actually in units of 100ms, but zero 5292 * based in the hw (so we need to add 100 ms). But the sw vbt 5293 * table multiplies it with 1000 to make it in units of 100usec, 5294 * too. */ 5295 spec.t11_t12 = (510 + 100) * 10; 5296 5297 intel_pps_dump_state("vbt", &vbt); 5298 5299 /* Use the max of the register settings and vbt. If both are 5300 * unset, fall back to the spec limits. */ 5301 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \ 5302 spec.field : \ 5303 max(cur.field, vbt.field)) 5304 assign_final(t1_t3); 5305 assign_final(t8); 5306 assign_final(t9); 5307 assign_final(t10); 5308 assign_final(t11_t12); 5309 #undef assign_final 5310 5311 #define get_delay(field) (DIV_ROUND_UP(final->field, 10)) 5312 intel_dp->panel_power_up_delay = get_delay(t1_t3); 5313 intel_dp->backlight_on_delay = get_delay(t8); 5314 intel_dp->backlight_off_delay = get_delay(t9); 5315 intel_dp->panel_power_down_delay = get_delay(t10); 5316 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 5317 #undef get_delay 5318 5319 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", 5320 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, 5321 intel_dp->panel_power_cycle_delay); 5322 5323 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 5324 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 5325 5326 /* 5327 * We override the HW backlight delays to 1 because we do manual waits 5328 * on them. For T8, even BSpec recommends doing it. For T9, if we 5329 * don't do this, we'll end up waiting for the backlight off delay 5330 * twice: once when we do the manual sleep, and once when we disable 5331 * the panel and wait for the PP_STATUS bit to become zero. 5332 */ 5333 final->t8 = 1; 5334 final->t9 = 1; 5335 5336 /* 5337 * HW has only a 100msec granularity for t11_t12 so round it up 5338 * accordingly. 5339 */ 5340 final->t11_t12 = roundup(final->t11_t12, 100 * 10); 5341 } 5342 5343 static void 5344 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, 5345 struct intel_dp *intel_dp, 5346 bool force_disable_vdd) 5347 { 5348 struct drm_i915_private *dev_priv = to_i915(dev); 5349 u32 pp_on, pp_off, pp_div, port_sel = 0; 5350 int div = dev_priv->rawclk_freq / 1000; 5351 struct pps_registers regs; 5352 enum port port = dp_to_dig_port(intel_dp)->port; 5353 const struct edp_power_seq *seq = &intel_dp->pps_delays; 5354 5355 lockdep_assert_held(&dev_priv->pps_mutex); 5356 5357 intel_pps_get_registers(dev_priv, intel_dp, ®s); 5358 5359 /* 5360 * On some VLV machines the BIOS can leave the VDD 5361 * enabled even on power seqeuencers which aren't 5362 * hooked up to any port. This would mess up the 5363 * power domain tracking the first time we pick 5364 * one of these power sequencers for use since 5365 * edp_panel_vdd_on() would notice that the VDD was 5366 * already on and therefore wouldn't grab the power 5367 * domain reference. Disable VDD first to avoid this. 5368 * This also avoids spuriously turning the VDD on as 5369 * soon as the new power seqeuencer gets initialized. 5370 */ 5371 if (force_disable_vdd) { 5372 u32 pp = ironlake_get_pp_control(intel_dp); 5373 5374 WARN(pp & PANEL_POWER_ON, "Panel power already on\n"); 5375 5376 if (pp & EDP_FORCE_VDD) 5377 DRM_DEBUG_KMS("VDD already on, disabling first\n"); 5378 5379 pp &= ~EDP_FORCE_VDD; 5380 5381 I915_WRITE(regs.pp_ctrl, pp); 5382 } 5383 5384 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | 5385 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); 5386 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | 5387 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); 5388 /* Compute the divisor for the pp clock, simply match the Bspec 5389 * formula. */ 5390 if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) { 5391 pp_div = I915_READ(regs.pp_ctrl); 5392 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK; 5393 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) 5394 << BXT_POWER_CYCLE_DELAY_SHIFT); 5395 } else { 5396 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT; 5397 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) 5398 << PANEL_POWER_CYCLE_DELAY_SHIFT); 5399 } 5400 5401 /* Haswell doesn't have any port selection bits for the panel 5402 * power sequencer any more. */ 5403 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 5404 port_sel = PANEL_PORT_SELECT_VLV(port); 5405 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 5406 if (port == PORT_A) 5407 port_sel = PANEL_PORT_SELECT_DPA; 5408 else 5409 port_sel = PANEL_PORT_SELECT_DPD; 5410 } 5411 5412 pp_on |= port_sel; 5413 5414 I915_WRITE(regs.pp_on, pp_on); 5415 I915_WRITE(regs.pp_off, pp_off); 5416 if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) 5417 I915_WRITE(regs.pp_ctrl, pp_div); 5418 else 5419 I915_WRITE(regs.pp_div, pp_div); 5420 5421 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 5422 I915_READ(regs.pp_on), 5423 I915_READ(regs.pp_off), 5424 (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) ? 5425 (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) : 5426 I915_READ(regs.pp_div)); 5427 } 5428 5429 static void intel_dp_pps_init(struct drm_device *dev, 5430 struct intel_dp *intel_dp) 5431 { 5432 struct drm_i915_private *dev_priv = to_i915(dev); 5433 5434 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 5435 vlv_initial_power_sequencer_setup(intel_dp); 5436 } else { 5437 intel_dp_init_panel_power_sequencer(dev, intel_dp); 5438 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false); 5439 } 5440 } 5441 5442 /** 5443 * intel_dp_set_drrs_state - program registers for RR switch to take effect 5444 * @dev_priv: i915 device 5445 * @crtc_state: a pointer to the active intel_crtc_state 5446 * @refresh_rate: RR to be programmed 5447 * 5448 * This function gets called when refresh rate (RR) has to be changed from 5449 * one frequency to another. Switches can be between high and low RR 5450 * supported by the panel or to any other RR based on media playback (in 5451 * this case, RR value needs to be passed from user space). 5452 * 5453 * The caller of this function needs to take a lock on dev_priv->drrs. 5454 */ 5455 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, 5456 const struct intel_crtc_state *crtc_state, 5457 int refresh_rate) 5458 { 5459 struct intel_encoder *encoder; 5460 struct intel_digital_port *dig_port = NULL; 5461 struct intel_dp *intel_dp = dev_priv->drrs.dp; 5462 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 5463 enum drrs_refresh_rate_type index = DRRS_HIGH_RR; 5464 5465 if (refresh_rate <= 0) { 5466 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n"); 5467 return; 5468 } 5469 5470 if (intel_dp == NULL) { 5471 DRM_DEBUG_KMS("DRRS not supported.\n"); 5472 return; 5473 } 5474 5475 dig_port = dp_to_dig_port(intel_dp); 5476 encoder = &dig_port->base; 5477 intel_crtc = to_intel_crtc(encoder->base.crtc); 5478 5479 if (!intel_crtc) { 5480 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n"); 5481 return; 5482 } 5483 5484 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { 5485 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n"); 5486 return; 5487 } 5488 5489 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh == 5490 refresh_rate) 5491 index = DRRS_LOW_RR; 5492 5493 if (index == dev_priv->drrs.refresh_rate_type) { 5494 DRM_DEBUG_KMS( 5495 "DRRS requested for previously set RR...ignoring\n"); 5496 return; 5497 } 5498 5499 if (!crtc_state->base.active) { 5500 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n"); 5501 return; 5502 } 5503 5504 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { 5505 switch (index) { 5506 case DRRS_HIGH_RR: 5507 intel_dp_set_m_n(intel_crtc, M1_N1); 5508 break; 5509 case DRRS_LOW_RR: 5510 intel_dp_set_m_n(intel_crtc, M2_N2); 5511 break; 5512 case DRRS_MAX_RR: 5513 default: 5514 DRM_ERROR("Unsupported refreshrate type\n"); 5515 } 5516 } else if (INTEL_GEN(dev_priv) > 6) { 5517 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); 5518 u32 val; 5519 5520 val = I915_READ(reg); 5521 if (index > DRRS_HIGH_RR) { 5522 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 5523 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; 5524 else 5525 val |= PIPECONF_EDP_RR_MODE_SWITCH; 5526 } else { 5527 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 5528 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; 5529 else 5530 val &= ~PIPECONF_EDP_RR_MODE_SWITCH; 5531 } 5532 I915_WRITE(reg, val); 5533 } 5534 5535 dev_priv->drrs.refresh_rate_type = index; 5536 5537 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate); 5538 } 5539 5540 /** 5541 * intel_edp_drrs_enable - init drrs struct if supported 5542 * @intel_dp: DP struct 5543 * @crtc_state: A pointer to the active crtc state. 5544 * 5545 * Initializes frontbuffer_bits and drrs.dp 5546 */ 5547 void intel_edp_drrs_enable(struct intel_dp *intel_dp, 5548 const struct intel_crtc_state *crtc_state) 5549 { 5550 struct drm_device *dev = intel_dp_to_dev(intel_dp); 5551 struct drm_i915_private *dev_priv = to_i915(dev); 5552 5553 if (!crtc_state->has_drrs) { 5554 DRM_DEBUG_KMS("Panel doesn't support DRRS\n"); 5555 return; 5556 } 5557 5558 if (dev_priv->psr.enabled) { 5559 DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n"); 5560 return; 5561 } 5562 5563 mutex_lock(&dev_priv->drrs.mutex); 5564 if (WARN_ON(dev_priv->drrs.dp)) { 5565 DRM_ERROR("DRRS already enabled\n"); 5566 goto unlock; 5567 } 5568 5569 dev_priv->drrs.busy_frontbuffer_bits = 0; 5570 5571 dev_priv->drrs.dp = intel_dp; 5572 5573 unlock: 5574 mutex_unlock(&dev_priv->drrs.mutex); 5575 } 5576 5577 /** 5578 * intel_edp_drrs_disable - Disable DRRS 5579 * @intel_dp: DP struct 5580 * @old_crtc_state: Pointer to old crtc_state. 5581 * 5582 */ 5583 void intel_edp_drrs_disable(struct intel_dp *intel_dp, 5584 const struct intel_crtc_state *old_crtc_state) 5585 { 5586 struct drm_device *dev = intel_dp_to_dev(intel_dp); 5587 struct drm_i915_private *dev_priv = to_i915(dev); 5588 5589 if (!old_crtc_state->has_drrs) 5590 return; 5591 5592 mutex_lock(&dev_priv->drrs.mutex); 5593 if (!dev_priv->drrs.dp) { 5594 mutex_unlock(&dev_priv->drrs.mutex); 5595 return; 5596 } 5597 5598 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 5599 intel_dp_set_drrs_state(dev_priv, old_crtc_state, 5600 intel_dp->attached_connector->panel.fixed_mode->vrefresh); 5601 5602 dev_priv->drrs.dp = NULL; 5603 mutex_unlock(&dev_priv->drrs.mutex); 5604 5605 cancel_delayed_work_sync(&dev_priv->drrs.work); 5606 } 5607 5608 static void intel_edp_drrs_downclock_work(struct work_struct *work) 5609 { 5610 struct drm_i915_private *dev_priv = 5611 container_of(work, typeof(*dev_priv), drrs.work.work); 5612 struct intel_dp *intel_dp; 5613 5614 mutex_lock(&dev_priv->drrs.mutex); 5615 5616 intel_dp = dev_priv->drrs.dp; 5617 5618 if (!intel_dp) 5619 goto unlock; 5620 5621 /* 5622 * The delayed work can race with an invalidate hence we need to 5623 * recheck. 5624 */ 5625 5626 if (dev_priv->drrs.busy_frontbuffer_bits) 5627 goto unlock; 5628 5629 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) { 5630 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 5631 5632 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 5633 intel_dp->attached_connector->panel.downclock_mode->vrefresh); 5634 } 5635 5636 unlock: 5637 mutex_unlock(&dev_priv->drrs.mutex); 5638 } 5639 5640 /** 5641 * intel_edp_drrs_invalidate - Disable Idleness DRRS 5642 * @dev_priv: i915 device 5643 * @frontbuffer_bits: frontbuffer plane tracking bits 5644 * 5645 * This function gets called everytime rendering on the given planes start. 5646 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR). 5647 * 5648 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 5649 */ 5650 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, 5651 unsigned int frontbuffer_bits) 5652 { 5653 struct drm_crtc *crtc; 5654 enum i915_pipe pipe; 5655 5656 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 5657 return; 5658 5659 cancel_delayed_work(&dev_priv->drrs.work); 5660 5661 mutex_lock(&dev_priv->drrs.mutex); 5662 if (!dev_priv->drrs.dp) { 5663 mutex_unlock(&dev_priv->drrs.mutex); 5664 return; 5665 } 5666 5667 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; 5668 pipe = to_intel_crtc(crtc)->pipe; 5669 5670 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 5671 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; 5672 5673 /* invalidate means busy screen hence upclock */ 5674 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 5675 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 5676 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); 5677 5678 mutex_unlock(&dev_priv->drrs.mutex); 5679 } 5680 5681 /** 5682 * intel_edp_drrs_flush - Restart Idleness DRRS 5683 * @dev_priv: i915 device 5684 * @frontbuffer_bits: frontbuffer plane tracking bits 5685 * 5686 * This function gets called every time rendering on the given planes has 5687 * completed or flip on a crtc is completed. So DRRS should be upclocked 5688 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again, 5689 * if no other planes are dirty. 5690 * 5691 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 5692 */ 5693 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, 5694 unsigned int frontbuffer_bits) 5695 { 5696 struct drm_crtc *crtc; 5697 enum i915_pipe pipe; 5698 5699 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 5700 return; 5701 5702 cancel_delayed_work(&dev_priv->drrs.work); 5703 5704 mutex_lock(&dev_priv->drrs.mutex); 5705 if (!dev_priv->drrs.dp) { 5706 mutex_unlock(&dev_priv->drrs.mutex); 5707 return; 5708 } 5709 5710 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; 5711 pipe = to_intel_crtc(crtc)->pipe; 5712 5713 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 5714 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; 5715 5716 /* flush means busy screen hence upclock */ 5717 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 5718 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 5719 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); 5720 5721 /* 5722 * flush also means no more activity hence schedule downclock, if all 5723 * other fbs are quiescent too 5724 */ 5725 if (!dev_priv->drrs.busy_frontbuffer_bits) 5726 schedule_delayed_work(&dev_priv->drrs.work, 5727 msecs_to_jiffies(1000)); 5728 mutex_unlock(&dev_priv->drrs.mutex); 5729 } 5730 5731 /** 5732 * DOC: Display Refresh Rate Switching (DRRS) 5733 * 5734 * Display Refresh Rate Switching (DRRS) is a power conservation feature 5735 * which enables swtching between low and high refresh rates, 5736 * dynamically, based on the usage scenario. This feature is applicable 5737 * for internal panels. 5738 * 5739 * Indication that the panel supports DRRS is given by the panel EDID, which 5740 * would list multiple refresh rates for one resolution. 5741 * 5742 * DRRS is of 2 types - static and seamless. 5743 * Static DRRS involves changing refresh rate (RR) by doing a full modeset 5744 * (may appear as a blink on screen) and is used in dock-undock scenario. 5745 * Seamless DRRS involves changing RR without any visual effect to the user 5746 * and can be used during normal system usage. This is done by programming 5747 * certain registers. 5748 * 5749 * Support for static/seamless DRRS may be indicated in the VBT based on 5750 * inputs from the panel spec. 5751 * 5752 * DRRS saves power by switching to low RR based on usage scenarios. 5753 * 5754 * The implementation is based on frontbuffer tracking implementation. When 5755 * there is a disturbance on the screen triggered by user activity or a periodic 5756 * system activity, DRRS is disabled (RR is changed to high RR). When there is 5757 * no movement on screen, after a timeout of 1 second, a switch to low RR is 5758 * made. 5759 * 5760 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() 5761 * and intel_edp_drrs_flush() are called. 5762 * 5763 * DRRS can be further extended to support other internal panels and also 5764 * the scenario of video playback wherein RR is set based on the rate 5765 * requested by userspace. 5766 */ 5767 5768 /** 5769 * intel_dp_drrs_init - Init basic DRRS work and mutex. 5770 * @intel_connector: eDP connector 5771 * @fixed_mode: preferred mode of panel 5772 * 5773 * This function is called only once at driver load to initialize basic 5774 * DRRS stuff. 5775 * 5776 * Returns: 5777 * Downclock mode if panel supports it, else return NULL. 5778 * DRRS support is determined by the presence of downclock mode (apart 5779 * from VBT setting). 5780 */ 5781 static struct drm_display_mode * 5782 intel_dp_drrs_init(struct intel_connector *intel_connector, 5783 struct drm_display_mode *fixed_mode) 5784 { 5785 struct drm_connector *connector = &intel_connector->base; 5786 struct drm_device *dev = connector->dev; 5787 struct drm_i915_private *dev_priv = to_i915(dev); 5788 struct drm_display_mode *downclock_mode = NULL; 5789 5790 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); 5791 lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE); 5792 5793 if (INTEL_GEN(dev_priv) <= 6) { 5794 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n"); 5795 return NULL; 5796 } 5797 5798 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { 5799 DRM_DEBUG_KMS("VBT doesn't support DRRS\n"); 5800 return NULL; 5801 } 5802 5803 downclock_mode = intel_find_panel_downclock 5804 (dev_priv, fixed_mode, connector); 5805 5806 if (!downclock_mode) { 5807 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n"); 5808 return NULL; 5809 } 5810 5811 dev_priv->drrs.type = dev_priv->vbt.drrs_type; 5812 5813 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR; 5814 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n"); 5815 return downclock_mode; 5816 } 5817 5818 static bool intel_edp_init_connector(struct intel_dp *intel_dp, 5819 struct intel_connector *intel_connector) 5820 { 5821 struct drm_connector *connector = &intel_connector->base; 5822 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 5823 struct intel_encoder *intel_encoder = &intel_dig_port->base; 5824 struct drm_device *dev = intel_encoder->base.dev; 5825 struct drm_i915_private *dev_priv = to_i915(dev); 5826 struct drm_display_mode *fixed_mode = NULL; 5827 struct drm_display_mode *alt_fixed_mode = NULL; 5828 struct drm_display_mode *downclock_mode = NULL; 5829 bool has_dpcd; 5830 struct drm_display_mode *scan; 5831 struct edid *edid; 5832 enum i915_pipe pipe = INVALID_PIPE; 5833 5834 if (!intel_dp_is_edp(intel_dp)) 5835 return true; 5836 5837 /* 5838 * On IBX/CPT we may get here with LVDS already registered. Since the 5839 * driver uses the only internal power sequencer available for both 5840 * eDP and LVDS bail out early in this case to prevent interfering 5841 * with an already powered-on LVDS power sequencer. 5842 */ 5843 if (intel_get_lvds_encoder(dev)) { 5844 WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); 5845 DRM_INFO("LVDS was detected, not registering eDP\n"); 5846 5847 return false; 5848 } 5849 5850 pps_lock(intel_dp); 5851 5852 intel_dp_init_panel_power_timestamps(intel_dp); 5853 intel_dp_pps_init(dev, intel_dp); 5854 intel_edp_panel_vdd_sanitize(intel_dp); 5855 5856 pps_unlock(intel_dp); 5857 5858 /* Cache DPCD and EDID for edp. */ 5859 has_dpcd = intel_edp_init_dpcd(intel_dp); 5860 5861 if (!has_dpcd) { 5862 /* if this fails, presume the device is a ghost */ 5863 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 5864 goto out_vdd_off; 5865 } 5866 5867 mutex_lock(&dev->mode_config.mutex); 5868 edid = drm_get_edid(connector, &intel_dp->aux.ddc); 5869 if (edid) { 5870 if (drm_add_edid_modes(connector, edid)) { 5871 drm_mode_connector_update_edid_property(connector, 5872 edid); 5873 drm_edid_to_eld(connector, edid); 5874 } else { 5875 kfree(edid); 5876 edid = ERR_PTR(-EINVAL); 5877 } 5878 } else { 5879 edid = ERR_PTR(-ENOENT); 5880 } 5881 intel_connector->edid = edid; 5882 5883 /* prefer fixed mode from EDID if available, save an alt mode also */ 5884 list_for_each_entry(scan, &connector->probed_modes, head) { 5885 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { 5886 fixed_mode = drm_mode_duplicate(dev, scan); 5887 downclock_mode = intel_dp_drrs_init( 5888 intel_connector, fixed_mode); 5889 } else if (!alt_fixed_mode) { 5890 alt_fixed_mode = drm_mode_duplicate(dev, scan); 5891 } 5892 } 5893 5894 /* fallback to VBT if available for eDP */ 5895 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { 5896 fixed_mode = drm_mode_duplicate(dev, 5897 dev_priv->vbt.lfp_lvds_vbt_mode); 5898 if (fixed_mode) { 5899 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 5900 connector->display_info.width_mm = fixed_mode->width_mm; 5901 connector->display_info.height_mm = fixed_mode->height_mm; 5902 } 5903 } 5904 mutex_unlock(&dev->mode_config.mutex); 5905 5906 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 5907 intel_dp->edp_notifier.notifier_call = edp_notify_handler; 5908 register_reboot_notifier(&intel_dp->edp_notifier); 5909 5910 /* 5911 * Figure out the current pipe for the initial backlight setup. 5912 * If the current pipe isn't valid, try the PPS pipe, and if that 5913 * fails just assume pipe A. 5914 */ 5915 pipe = vlv_active_pipe(intel_dp); 5916 5917 if (pipe != PIPE_A && pipe != PIPE_B) 5918 pipe = intel_dp->pps_pipe; 5919 5920 if (pipe != PIPE_A && pipe != PIPE_B) 5921 pipe = PIPE_A; 5922 5923 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n", 5924 pipe_name(pipe)); 5925 } 5926 5927 intel_panel_init(&intel_connector->panel, fixed_mode, alt_fixed_mode, 5928 downclock_mode); 5929 intel_connector->panel.backlight.power = intel_edp_backlight_power; 5930 intel_panel_setup_backlight(connector, pipe); 5931 5932 return true; 5933 5934 out_vdd_off: 5935 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 5936 /* 5937 * vdd might still be enabled do to the delayed vdd off. 5938 * Make sure vdd is actually turned off here. 5939 */ 5940 pps_lock(intel_dp); 5941 edp_panel_vdd_off_sync(intel_dp); 5942 pps_unlock(intel_dp); 5943 5944 return false; 5945 } 5946 5947 /* Set up the hotplug pin and aux power domain. */ 5948 static void 5949 intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port) 5950 { 5951 struct intel_encoder *encoder = &intel_dig_port->base; 5952 struct intel_dp *intel_dp = &intel_dig_port->dp; 5953 5954 encoder->hpd_pin = intel_hpd_pin(intel_dig_port->port); 5955 5956 switch (intel_dig_port->port) { 5957 case PORT_A: 5958 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_A; 5959 break; 5960 case PORT_B: 5961 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_B; 5962 break; 5963 case PORT_C: 5964 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_C; 5965 break; 5966 case PORT_D: 5967 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D; 5968 break; 5969 case PORT_E: 5970 /* FIXME: Check VBT for actual wiring of PORT E */ 5971 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D; 5972 break; 5973 default: 5974 MISSING_CASE(intel_dig_port->port); 5975 } 5976 } 5977 5978 static void intel_dp_modeset_retry_work_fn(struct work_struct *work) 5979 { 5980 struct intel_connector *intel_connector; 5981 struct drm_connector *connector; 5982 5983 intel_connector = container_of(work, typeof(*intel_connector), 5984 modeset_retry_work); 5985 connector = &intel_connector->base; 5986 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 5987 connector->name); 5988 5989 /* Grab the locks before changing connector property*/ 5990 mutex_lock(&connector->dev->mode_config.mutex); 5991 /* Set connector link status to BAD and send a Uevent to notify 5992 * userspace to do a modeset. 5993 */ 5994 drm_mode_connector_set_link_status_property(connector, 5995 DRM_MODE_LINK_STATUS_BAD); 5996 mutex_unlock(&connector->dev->mode_config.mutex); 5997 /* Send Hotplug uevent so userspace can reprobe */ 5998 drm_kms_helper_hotplug_event(connector->dev); 5999 } 6000 6001 bool 6002 intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 6003 struct intel_connector *intel_connector) 6004 { 6005 struct drm_connector *connector = &intel_connector->base; 6006 struct intel_dp *intel_dp = &intel_dig_port->dp; 6007 struct intel_encoder *intel_encoder = &intel_dig_port->base; 6008 struct drm_device *dev = intel_encoder->base.dev; 6009 struct drm_i915_private *dev_priv = to_i915(dev); 6010 enum port port = intel_dig_port->port; 6011 int type; 6012 6013 /* Initialize the work for modeset in case of link train failure */ 6014 INIT_WORK(&intel_connector->modeset_retry_work, 6015 intel_dp_modeset_retry_work_fn); 6016 6017 if (WARN(intel_dig_port->max_lanes < 1, 6018 "Not enough lanes (%d) for DP on port %c\n", 6019 intel_dig_port->max_lanes, port_name(port))) 6020 return false; 6021 6022 intel_dp_set_source_rates(intel_dp); 6023 6024 intel_dp->reset_link_params = true; 6025 intel_dp->pps_pipe = INVALID_PIPE; 6026 intel_dp->active_pipe = INVALID_PIPE; 6027 6028 /* intel_dp vfuncs */ 6029 if (INTEL_GEN(dev_priv) >= 9) 6030 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider; 6031 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 6032 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; 6033 else if (HAS_PCH_SPLIT(dev_priv)) 6034 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider; 6035 else 6036 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider; 6037 6038 if (INTEL_GEN(dev_priv) >= 9) 6039 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl; 6040 else 6041 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl; 6042 6043 if (HAS_DDI(dev_priv)) 6044 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain; 6045 6046 /* Preserve the current hw state. */ 6047 intel_dp->DP = I915_READ(intel_dp->output_reg); 6048 intel_dp->attached_connector = intel_connector; 6049 6050 if (intel_dp_is_port_edp(dev_priv, port)) 6051 type = DRM_MODE_CONNECTOR_eDP; 6052 else 6053 type = DRM_MODE_CONNECTOR_DisplayPort; 6054 6055 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 6056 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 6057 6058 /* 6059 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but 6060 * for DP the encoder type can be set by the caller to 6061 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it. 6062 */ 6063 if (type == DRM_MODE_CONNECTOR_eDP) 6064 intel_encoder->type = INTEL_OUTPUT_EDP; 6065 6066 /* eDP only on port B and/or C on vlv/chv */ 6067 if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 6068 intel_dp_is_edp(intel_dp) && 6069 port != PORT_B && port != PORT_C)) 6070 return false; 6071 6072 DRM_DEBUG_KMS("Adding %s connector on port %c\n", 6073 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", 6074 port_name(port)); 6075 6076 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 6077 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 6078 6079 connector->interlace_allowed = true; 6080 connector->doublescan_allowed = 0; 6081 6082 intel_dp_init_connector_port_info(intel_dig_port); 6083 6084 intel_dp_aux_init(intel_dp); 6085 6086 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, 6087 edp_panel_vdd_work); 6088 6089 intel_connector_attach_encoder(intel_connector, intel_encoder); 6090 6091 if (HAS_DDI(dev_priv)) 6092 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 6093 else 6094 intel_connector->get_hw_state = intel_connector_get_hw_state; 6095 6096 /* init MST on ports that can support it */ 6097 if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) && 6098 (port == PORT_B || port == PORT_C || port == PORT_D)) 6099 intel_dp_mst_encoder_init(intel_dig_port, 6100 intel_connector->base.base.id); 6101 6102 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 6103 intel_dp_aux_fini(intel_dp); 6104 intel_dp_mst_encoder_cleanup(intel_dig_port); 6105 goto fail; 6106 } 6107 6108 intel_dp_add_properties(intel_dp, connector); 6109 6110 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 6111 * 0xd. Failure to do so will result in spurious interrupts being 6112 * generated on the port when a cable is not attached. 6113 */ 6114 if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) { 6115 u32 temp = I915_READ(PEG_BAND_GAP_DATA); 6116 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 6117 } 6118 6119 return true; 6120 6121 fail: 6122 drm_connector_cleanup(connector); 6123 6124 return false; 6125 } 6126 6127 bool intel_dp_init(struct drm_i915_private *dev_priv, 6128 i915_reg_t output_reg, 6129 enum port port) 6130 { 6131 struct intel_digital_port *intel_dig_port; 6132 struct intel_encoder *intel_encoder; 6133 struct drm_encoder *encoder; 6134 struct intel_connector *intel_connector; 6135 6136 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); 6137 if (!intel_dig_port) 6138 return false; 6139 6140 intel_connector = intel_connector_alloc(); 6141 if (!intel_connector) 6142 goto err_connector_alloc; 6143 6144 intel_encoder = &intel_dig_port->base; 6145 encoder = &intel_encoder->base; 6146 6147 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, 6148 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, 6149 "DP %c", port_name(port))) 6150 goto err_encoder_init; 6151 6152 intel_encoder->compute_config = intel_dp_compute_config; 6153 intel_encoder->get_hw_state = intel_dp_get_hw_state; 6154 intel_encoder->get_config = intel_dp_get_config; 6155 intel_encoder->suspend = intel_dp_encoder_suspend; 6156 if (IS_CHERRYVIEW(dev_priv)) { 6157 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; 6158 intel_encoder->pre_enable = chv_pre_enable_dp; 6159 intel_encoder->enable = vlv_enable_dp; 6160 intel_encoder->disable = vlv_disable_dp; 6161 intel_encoder->post_disable = chv_post_disable_dp; 6162 intel_encoder->post_pll_disable = chv_dp_post_pll_disable; 6163 } else if (IS_VALLEYVIEW(dev_priv)) { 6164 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; 6165 intel_encoder->pre_enable = vlv_pre_enable_dp; 6166 intel_encoder->enable = vlv_enable_dp; 6167 intel_encoder->disable = vlv_disable_dp; 6168 intel_encoder->post_disable = vlv_post_disable_dp; 6169 } else if (INTEL_GEN(dev_priv) >= 5) { 6170 intel_encoder->pre_enable = g4x_pre_enable_dp; 6171 intel_encoder->enable = g4x_enable_dp; 6172 intel_encoder->disable = ilk_disable_dp; 6173 intel_encoder->post_disable = ilk_post_disable_dp; 6174 } else { 6175 intel_encoder->pre_enable = g4x_pre_enable_dp; 6176 intel_encoder->enable = g4x_enable_dp; 6177 intel_encoder->disable = g4x_disable_dp; 6178 } 6179 6180 intel_dig_port->port = port; 6181 intel_dig_port->dp.output_reg = output_reg; 6182 intel_dig_port->max_lanes = 4; 6183 6184 intel_encoder->type = INTEL_OUTPUT_DP; 6185 intel_encoder->power_domain = intel_port_to_power_domain(port); 6186 if (IS_CHERRYVIEW(dev_priv)) { 6187 if (port == PORT_D) 6188 intel_encoder->crtc_mask = 1 << 2; 6189 else 6190 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 6191 } else { 6192 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 6193 } 6194 intel_encoder->cloneable = 0; 6195 intel_encoder->port = port; 6196 6197 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; 6198 dev_priv->hotplug.irq_port[port] = intel_dig_port; 6199 6200 if (port != PORT_A) 6201 intel_infoframe_init(intel_dig_port); 6202 6203 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) 6204 goto err_init_connector; 6205 6206 return true; 6207 6208 err_init_connector: 6209 drm_encoder_cleanup(encoder); 6210 err_encoder_init: 6211 kfree(intel_connector); 6212 err_connector_alloc: 6213 kfree(intel_dig_port); 6214 return false; 6215 } 6216 6217 void intel_dp_mst_suspend(struct drm_device *dev) 6218 { 6219 struct drm_i915_private *dev_priv = to_i915(dev); 6220 int i; 6221 6222 /* disable MST */ 6223 for (i = 0; i < I915_MAX_PORTS; i++) { 6224 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i]; 6225 6226 if (!intel_dig_port || !intel_dig_port->dp.can_mst) 6227 continue; 6228 6229 if (intel_dig_port->dp.is_mst) 6230 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr); 6231 } 6232 } 6233 6234 void intel_dp_mst_resume(struct drm_device *dev) 6235 { 6236 struct drm_i915_private *dev_priv = to_i915(dev); 6237 int i; 6238 6239 for (i = 0; i < I915_MAX_PORTS; i++) { 6240 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i]; 6241 int ret; 6242 6243 if (!intel_dig_port || !intel_dig_port->dp.can_mst) 6244 continue; 6245 6246 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr); 6247 if (ret) 6248 intel_dp_check_mst_status(&intel_dig_port->dp); 6249 } 6250 } 6251