1 /* 2 * Copyright © 2012-2014 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eugeni Dodonov <eugeni.dodonov@intel.com> 25 * Daniel Vetter <daniel.vetter@ffwll.ch> 26 * 27 */ 28 29 #include <linux/pm_runtime.h> 30 #include <linux/vgaarb.h> 31 32 #include "i915_drv.h" 33 #include "intel_drv.h" 34 35 /** 36 * DOC: runtime pm 37 * 38 * The i915 driver supports dynamic enabling and disabling of entire hardware 39 * blocks at runtime. This is especially important on the display side where 40 * software is supposed to control many power gates manually on recent hardware, 41 * since on the GT side a lot of the power management is done by the hardware. 42 * But even there some manual control at the device level is required. 
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */

/* Iterate the power wells backing any domain in @domain_mask, first to last. */
#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		for_each_if ((power_well)->domains & (domain_mask))

/* Same as for_each_power_well(), but walking the wells last to first. */
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							 \
		for_each_if ((power_well)->domains & (domain_mask))

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
				    int power_well_id);

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);

/*
 * intel_display_power_domain_str - human-readable name for a power domain
 * @domain: power domain to describe
 *
 * Returns a static string naming @domain, used for debug output, or "?"
 * (after a MISSING_CASE() warning) for unknown values.
 */
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

/* Enable the well in hardware, then mark it enabled in the SW tracking. */
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

/* Mark the well disabled in the SW tracking, then disable it in hardware. */
static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

/* Grab a reference on the well, enabling it on the 0->1 transition. */
static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

/* Drop a reference on the well, disabling it on the 1->0 transition. */
static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	WARN(!power_well->count, "Use count on power well %s is already zero",
	     power_well->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	/* A domain is enabled only if every (non-always-on) well backing it
	 * is enabled; walk in reverse so the topmost wells are checked first.
	 */
	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This functions controls the state
 * of this little hack. While the initial power domain state is enabled runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROWELL(dev_priv))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->data == SKL_DISP_PW_2) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}
}

static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	if (power_well->data == SKL_DISP_PW_2)
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

/* Request the HSW/BDW power well on or off and wait for the state change. */
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (intel_wait_for_register(dev_priv,
						    HSW_PWR_WELL_DRIVER,
						    HSW_PWR_WELL_STATE_ENABLED,
						    HSW_PWR_WELL_STATE_ENABLED,
						    20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			hsw_power_well_pre_disable(dev_priv);
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_INIT))

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the times we need one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}

/* Mask of the DC-state bits valid for this platform (DC9 is BXT-only). */
static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;
	if (IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

/* Resync the SW DC-state tracking with whatever the HW currently reports. */
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}

static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}

void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

}

void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("Disabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->data;
	u32 val;
	u32 mask;

	mask = SKL_POWER_WELL_REQ(power_well_id);

	val = I915_READ(HSW_PWR_WELL_KVMR);
	if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
		      power_well->name))
		I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);

	val = I915_READ(HSW_PWR_WELL_BIOS);
	val |= I915_READ(HSW_PWR_WELL_DEBUG);

	if (!(val & mask))
		return;

	/*
	 * DMC is known to force on the request bits for power well 1 on SKL
	 * and BXT and the misc IO power well on SKL but we don't expect any
	 * other request bits to be set, so WARN for those.
	 */
	if (power_well_id == SKL_DISP_PW_1 ||
	    ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
	     power_well_id == SKL_DISP_PW_MISC_IO))
		DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
				 "by DMC\n", power_well->name);
	else
		WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
			  power_well->name);

	I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
	I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
}

/* Request a SKL power well on or off, honouring the PG fuse dependencies. */
static void skl_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->data) {
	case SKL_DISP_PW_1:
		if (intel_wait_for_register(dev_priv,
					    SKL_FUSE_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->data);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->data);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->data);
	is_enabled = tmp & state_mask;

	if (!enable && enable_requested)
		skl_power_well_pre_disable(dev_priv, power_well);

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
				!I915_READ(HSW_PWR_WELL_BIOS),
				"Invalid for power well status to be enabled, unless done by the BIOS, \
				when request is to disable!\n");
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
		}

		if (IS_GEN9(dev_priv))
			gen9_sanitize_power_well_requests(dev_priv, power_well);
	}

	if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
		     1))
		DRM_ERROR("%s %s timeout\n",
			  power_well->name, enable ? "enable" : "disable");

	if (check_fuse_status) {
		if (power_well->data == SKL_DISP_PW_1) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->data == SKL_DISP_PW_2) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

/* A SKL well is enabled only when both the request and state bits are set. */
static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
		SKL_POWER_WELL_STATE(power_well->data);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

/* Map a BXT DPIO common-lane power well to the DPIO PHY it controls. */
static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->data;

	return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0;
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->data;
	struct i915_power_well *cmn_a_well = NULL;

	if (power_well_id == BXT_DPIO_CMN_BC) {
		/*
		 * We need to copy the GRC calibration value from the eDP PHY,
		 * so make sure it's powered up.
		 */
		cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
		intel_power_well_get(dev_priv, cmn_a_well);
	}

	bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well));

	if (cmn_a_well)
		intel_power_well_put(dev_priv, cmn_a_well);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well));
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv,
				      bxt_power_well_to_phy(power_well));
}

static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		bxt_dpio_cmn_power_well_enable(dev_priv, power_well);
	else
		bxt_dpio_cmn_power_well_disable(dev_priv, power_well);
}


static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv,
					 bxt_power_well_to_phy(power_well));

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv,
					 bxt_power_well_to_phy(power_well));
}

/* "DC off" is enabled when no DC5/DC6 state is currently committed. */
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u32 tmp = I915_READ(DBUF_CTL);

	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
}

/* Disabling DC states means powering the "DC off" well back on. */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	WARN_ON(dev_priv->cdclk_freq !=
		dev_priv->display.get_display_clock_speed(&dev_priv->drm));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_BROXTON(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);
}

/* Re-allow the deepest DC state permitted (DC6 preferred over DC5). */
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* Without DMC firmware loaded no DC state can be entered at all. */
	if (!dev_priv->csr.dmc_payload)
		return;

	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}

static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		gen9_dc_off_power_well_enable(dev_priv, power_well);
	else
		gen9_dc_off_power_well_disable(dev_priv, power_well);
}

/* Always-on wells need no HW programming at all. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}

/* Request a VLV/CHV well on or off through the Punit and wait for it. */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum i915_pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(&dev_priv->drm, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(&dev_priv->drm);

	intel_pps_unlock_regs_wa(dev_priv);
}

/*
 * Tear down the display side before the display power well goes away:
 * quiesce display IRQs, reset the panel power sequencer state (the
 * registers are lost with the well) and switch HPD to polling.
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	intel_hpd_poll_init(dev_priv);
}

/* Power on the VLV DISP2D well, then restore display state on top of it. */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

/* Quiesce display state first, then gate the VLV DISP2D well. */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

/*
 * Power up the VLV BC DPIO common lane well and de-assert the common
 * lane reset afterwards.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *	b.	The other bits such as sfr settings / modesel may all
	 *		be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

/*
 * Assert the DPIO common lane reset and gate the well. All pipe PLLs
 * must already be disabled (asserted below) or DPIO/PLL sync is lost.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum i915_pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

/* Mask covering every defined power domain bit. */
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

/*
 * Find the power well whose ->data field matches @power_well_id, or
 * NULL if the current platform's table has no such well.
 */
static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;

		power_well = &power_domains->power_wells[i];
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

/* True iff all of @bits are set in @val. */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

/*
 * Verify that DISPLAY_PHY_STATUS agrees with the state we believe we
 * programmed via dev_priv->chv_phy_control and the cmn well states.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		/* Per-spline LDO status: 0x3 = lanes 0/1, 0xc = lanes 2/3. */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			  phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

/*
 * Power up a CHV DPIO common lane well (BC -> PHY0 via pipe A sideband,
 * D -> PHY1 via pipe C sideband), wait for powergood, then configure
 * dynamic power down and de-assert the common lane reset.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum i915_pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/* De-assert the common lane reset for this PHY and latch it in HW. */
	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

/*
 * Assert the common lane reset for a CHV DPIO PHY and gate its well.
 * All PLLs that can drive the PHY must already be disabled.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

/*
 * Check that the per-lane power-down status read over sideband matches
 * what the requested override/mask combination implies.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum i915_pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

/*
 * Set or clear the whole-channel power-down override for a PHY channel.
 * Returns the previous override state so callers can restore it.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

/*
 * Program the per-lane power-down override mask for the channel driving
 * @encoder, optionally enabling the override.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

/*
 * Read back the CHV per-pipe (disp2d) well state from the Punit,
 * warning on transient or unexpected states.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum i915_pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	/* the SSC (request) field sits 16 bits below the SSS (status) field */
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

/*
 * Request the CHV per-pipe well on or off via the Punit DSPFREQ
 * register and wait for the status field to follow.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum i915_pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

/* Sync the CHV pipe-A well's hardware state with its software refcount. */
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

/*
 * Take a reference on every power well covering @domain and bump the
 * domain use count. Caller holds power_domains->lock.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, BIT(domain), power_domains)
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up.
 * Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	/* A display power reference always pins the device awake too. */
	intel_runtime_pm_get(dev_priv);

	mutex_lock(&power_domains->lock);

	__intel_display_power_get_domain(dev_priv, domain);

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool is_enabled;

	/* Only proceed if the device is already runtime-active. */
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	/* Drop the runtime PM reference again if we didn't take the domain. */
	if (!is_enabled)
		intel_runtime_pm_put(dev_priv);

	return is_enabled;
}

/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     intel_display_power_domain_str(domain));
	power_domains->domain_use_count[domain]--;

	/* Release wells innermost-first, i.e. in reverse table order. */
	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
		intel_power_well_put(dev_priv, power_well);

	mutex_unlock(&power_domains->lock);

	/* Balance the runtime PM reference taken in intel_display_power_get(). */
	intel_runtime_pm_put(dev_priv);
}

/* Domains covered by the single HSW "display" power well. */
#define HSW_DISPLAY_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_INIT))

/* Like HSW, but pipe A's panel fitter is in the always-on well on BDW. */
#define BDW_DISPLAY_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_INIT))
/* Domains covered by the VLV DISP2D well. */
#define VLV_DISPLAY_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DSI) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_VGA) |			\
	BIT(POWER_DOMAIN_AUDIO) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_GMBUS) |		\
	BIT(POWER_DOMAIN_INIT))

/* Domains covered by the VLV BC DPIO common lane well. */
#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

/* Per-TX-lane-pair wells; note each covers its whole port's domains. */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

/* Domains covered by the CHV pipe-A (disp2d) well. */
#define CHV_DISPLAY_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DSI) |		\
	BIT(POWER_DOMAIN_VGA) |			\
	BIT(POWER_DOMAIN_AUDIO) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_GMBUS) |		\
	BIT(POWER_DOMAIN_INIT))

/* Domains covered by the CHV BC DPIO common lane well. */
#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

/* Domains covered by the CHV D DPIO common lane well. */
#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

/* Ops for wells that are always on: all callbacks are no-ops. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Pre-HSW platforms expose only a single always-on well. */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable =
	hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = gen9_dc_off_power_well_sync_hw,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = bxt_dpio_cmn_power_well_sync_hw,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};

/* HSW: one always-on well plus the single "display" well. */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

/* BDW: same layout as HSW, different display-domain mask. */
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/*
 * VLV well table. Note every dpio-tx well lists all four TX lane-pair
 * domain sets, so any lane domain powers up all TX wells together.
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.data = PUNIT_POWER_WELL_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
2022 }, 2023 { 2024 .name = "dpio-common", 2025 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, 2026 .data = PUNIT_POWER_WELL_DPIO_CMN_BC, 2027 .ops = &vlv_dpio_cmn_power_well_ops, 2028 }, 2029 }; 2030 2031 static struct i915_power_well chv_power_wells[] = { 2032 { 2033 .name = "always-on", 2034 .always_on = 1, 2035 .domains = POWER_DOMAIN_MASK, 2036 .ops = &i9xx_always_on_power_well_ops, 2037 }, 2038 { 2039 .name = "display", 2040 /* 2041 * Pipe A power well is the new disp2d well. Pipe B and C 2042 * power wells don't actually exist. Pipe A power well is 2043 * required for any pipe to work. 2044 */ 2045 .domains = CHV_DISPLAY_POWER_DOMAINS, 2046 .data = PIPE_A, 2047 .ops = &chv_pipe_power_well_ops, 2048 }, 2049 { 2050 .name = "dpio-common-bc", 2051 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS, 2052 .data = PUNIT_POWER_WELL_DPIO_CMN_BC, 2053 .ops = &chv_dpio_cmn_power_well_ops, 2054 }, 2055 { 2056 .name = "dpio-common-d", 2057 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS, 2058 .data = PUNIT_POWER_WELL_DPIO_CMN_D, 2059 .ops = &chv_dpio_cmn_power_well_ops, 2060 }, 2061 }; 2062 2063 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 2064 int power_well_id) 2065 { 2066 struct i915_power_well *power_well; 2067 bool ret; 2068 2069 power_well = lookup_power_well(dev_priv, power_well_id); 2070 ret = power_well->ops->is_enabled(dev_priv, power_well); 2071 2072 return ret; 2073 } 2074 2075 static struct i915_power_well skl_power_wells[] = { 2076 { 2077 .name = "always-on", 2078 .always_on = 1, 2079 .domains = POWER_DOMAIN_MASK, 2080 .ops = &i9xx_always_on_power_well_ops, 2081 .data = SKL_DISP_PW_ALWAYS_ON, 2082 }, 2083 { 2084 .name = "power well 1", 2085 /* Handled by the DMC firmware */ 2086 .domains = 0, 2087 .ops = &skl_power_well_ops, 2088 .data = SKL_DISP_PW_1, 2089 }, 2090 { 2091 .name = "MISC IO power well", 2092 /* Handled by the DMC firmware */ 2093 .domains = 0, 2094 .ops = &skl_power_well_ops, 2095 .data = SKL_DISP_PW_MISC_IO, 2096 }, 2097 { 2098 .name 
= "DC off", 2099 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, 2100 .ops = &gen9_dc_off_power_well_ops, 2101 .data = SKL_DISP_PW_DC_OFF, 2102 }, 2103 { 2104 .name = "power well 2", 2105 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, 2106 .ops = &skl_power_well_ops, 2107 .data = SKL_DISP_PW_2, 2108 }, 2109 { 2110 .name = "DDI A/E power well", 2111 .domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS, 2112 .ops = &skl_power_well_ops, 2113 .data = SKL_DISP_PW_DDI_A_E, 2114 }, 2115 { 2116 .name = "DDI B power well", 2117 .domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS, 2118 .ops = &skl_power_well_ops, 2119 .data = SKL_DISP_PW_DDI_B, 2120 }, 2121 { 2122 .name = "DDI C power well", 2123 .domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS, 2124 .ops = &skl_power_well_ops, 2125 .data = SKL_DISP_PW_DDI_C, 2126 }, 2127 { 2128 .name = "DDI D power well", 2129 .domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS, 2130 .ops = &skl_power_well_ops, 2131 .data = SKL_DISP_PW_DDI_D, 2132 }, 2133 }; 2134 2135 static struct i915_power_well bxt_power_wells[] = { 2136 { 2137 .name = "always-on", 2138 .always_on = 1, 2139 .domains = POWER_DOMAIN_MASK, 2140 .ops = &i9xx_always_on_power_well_ops, 2141 }, 2142 { 2143 .name = "power well 1", 2144 .domains = 0, 2145 .ops = &skl_power_well_ops, 2146 .data = SKL_DISP_PW_1, 2147 }, 2148 { 2149 .name = "DC off", 2150 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, 2151 .ops = &gen9_dc_off_power_well_ops, 2152 .data = SKL_DISP_PW_DC_OFF, 2153 }, 2154 { 2155 .name = "power well 2", 2156 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, 2157 .ops = &skl_power_well_ops, 2158 .data = SKL_DISP_PW_2, 2159 }, 2160 { 2161 .name = "dpio-common-a", 2162 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, 2163 .ops = &bxt_dpio_cmn_power_well_ops, 2164 .data = BXT_DPIO_CMN_A, 2165 }, 2166 { 2167 .name = "dpio-common-bc", 2168 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, 2169 .ops = &bxt_dpio_cmn_power_well_ops, 2170 .data = BXT_DPIO_CMN_BC, 2171 }, 2172 }; 2173 2174 static int 2175 
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, 2176 int disable_power_well) 2177 { 2178 if (disable_power_well >= 0) 2179 return !!disable_power_well; 2180 2181 return 1; 2182 } 2183 2184 static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv, 2185 int enable_dc) 2186 { 2187 uint32_t mask; 2188 int requested_dc; 2189 int max_dc; 2190 2191 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 2192 max_dc = 2; 2193 mask = 0; 2194 } else if (IS_BROXTON(dev_priv)) { 2195 max_dc = 1; 2196 /* 2197 * DC9 has a separate HW flow from the rest of the DC states, 2198 * not depending on the DMC firmware. It's needed by system 2199 * suspend/resume, so allow it unconditionally. 2200 */ 2201 mask = DC_STATE_EN_DC9; 2202 } else { 2203 max_dc = 0; 2204 mask = 0; 2205 } 2206 2207 if (!i915.disable_power_well) 2208 max_dc = 0; 2209 2210 if (enable_dc >= 0 && enable_dc <= max_dc) { 2211 requested_dc = enable_dc; 2212 } else if (enable_dc == -1) { 2213 requested_dc = max_dc; 2214 } else if (enable_dc > max_dc && enable_dc <= 2) { 2215 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n", 2216 enable_dc, max_dc); 2217 requested_dc = max_dc; 2218 } else { 2219 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc); 2220 requested_dc = max_dc; 2221 } 2222 2223 if (requested_dc > 1) 2224 mask |= DC_STATE_EN_UPTO_DC6; 2225 if (requested_dc > 0) 2226 mask |= DC_STATE_EN_UPTO_DC5; 2227 2228 DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask); 2229 2230 return mask; 2231 } 2232 2233 #define set_power_wells(power_domains, __power_wells) ({ \ 2234 (power_domains)->power_wells = (__power_wells); \ 2235 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ 2236 }) 2237 2238 /** 2239 * intel_power_domains_init - initializes the power domain structures 2240 * @dev_priv: i915 device instance 2241 * 2242 * Initializes the power domain structures for @dev_priv depending upon the 2243 * supported platform. 
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	/* Sanitize the user-supplied module options before first use. */
	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
						     i915.disable_power_well);
	dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
							    i915.enable_dc);

	/* The per-well domain masks must hold all domains. */
	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);

	/* DragonFly: recursive lockmgr lock instead of a Linux mutex. */
	lockinit(&power_domains->lock, "i915pl", 0, LK_CANRECURSE);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		/* Platforms with no controllable wells: always-on only. */
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}

/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
#if 0
	/* Only needed for the pm_runtime_put() below, stubbed on DragonFly. */
	struct device *kdev = &dev_priv->drm.pdev->dev;
#endif

	/*
	 * The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload.
	 * The following also reacquires the RPM reference the core passed
	 * to the driver during loading, which is dropped in
	 * intel_runtime_pm_enable(). We have to hand back the control of the
	 * device to the core with this reference held.
	 */
	intel_display_set_init_power(dev_priv, true);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	/*
	 * Remove the refcount we took in intel_runtime_pm_enable() in case
	 * the platform doesn't support runtime PM.
	 */
#if 0
	if (!HAS_RUNTIME_PM(dev_priv))
		pm_runtime_put(kdev);
#endif
}

/*
 * Synchronize the software state of every power well with the current
 * hardware state, under the power domains lock.
 */
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}

/* Request DBuf power and give the state bit 10us to assert. */
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
}

/* Counterpart of gen9_dbuf_enable(): drop the DBuf power request. */
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout!\n");
}

/*
 * SKL/KBL display core init sequence: disable DC states, enable the PCH
 * reset handshake, power up PG1 and Misc I/O, bring up cdclk and DBuf,
 * and reload the DMC firmware when resuming. The order of these steps is
 * part of the hardware init sequence — do not reorder.
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	skl_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

/* Reverse of skl_display_core_init(), in the opposite order. */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_disable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}

/*
 * Broxton display core init; mirrors skl_display_core_init() except the
 * PCH reset handshake must stay disabled (no PCH present, see below).
 */
void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	bxt_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

/* Reverse of bxt_display_core_init(), in the opposite order. */
void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	bxt_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/* Disable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}

/*
 * Reconstruct the shadow copy of DISPLAY_PHY_CONTROL from the current
 * power well and lane status and write it out, since the register
 * itself must never be read (see workaround comment below).
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}

/*
 * VLV common-lane workaround: toggle the display PHY side reset by power
 * gating and un-gating the common lane well, unless the display is
 * (possibly) already active.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	/* Run the platform-specific display core / PHY init sequence. */
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		skl_display_core_init(dev_priv, resume);
	} else if (IS_BROXTON(dev_priv)) {
		bxt_display_core_init(dev_priv, resume);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	/* Disable power support if the user asked so. */
	if (!i915.disable_power_well)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(dev_priv);
	power_domains->initializing = false;
}

/**
 * intel_power_domains_suspend - suspend power domain state
 * @dev_priv: i915 device instance
 *
 * This function prepares the hardware power domain state before entering
 * system suspend. It must be paired with intel_power_domains_init_hw().
 */
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells while we are system suspended.
	 */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		skl_display_core_uninit(dev_priv);
	else if (IS_BROXTON(dev_priv))
		bxt_display_core_uninit(dev_priv);
}

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	pm_runtime_get_sync(kdev);

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);
}

/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
#ifndef __DragonFly__
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	if (IS_ENABLED(CONFIG_PM)) {
		int ret = pm_runtime_get_if_in_use(kdev);

		/*
		 * In cases runtime PM is disabled by the RPM core and we get
		 * an -EINVAL return value we are not supposed to call this
		 * function, since the power state is undefined. This applies
		 * atm to the late/early system suspend/resume handlers.
		 */
		WARN_ON_ONCE(ret < 0);
		if (ret <= 0)
			return false;
	}

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);
#endif

	/*
	 * NOTE(review): on DragonFly the whole body above is compiled out,
	 * so this reports success without bumping wakeref_count — presumably
	 * because runtime PM is effectively always-on here; confirm.
	 */
	return true;
}

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
#if 0
	/* Only needed for pm_runtime_get_noresume(), stubbed on DragonFly. */
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;
#endif

	assert_rpm_wakelock_held(dev_priv);
#if 0
	pm_runtime_get_noresume(kdev);
#endif

	atomic_inc(&dev_priv->pm.wakeref_count);
}

/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	assert_rpm_wakelock_held(dev_priv);
	/* Bump atomic_seq when the last wakeref goes away. */
	if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
		atomic_inc(&dev_priv->pm.atomic_seq);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
	/* Entire body stubbed out on DragonFly: no Linux-style runtime PM. */
#if 0
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!HAS_RUNTIME_PM(dev_priv)) {
		pm_runtime_dont_use_autosuspend(kdev);
		pm_runtime_get_sync(kdev);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
#endif
}