/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains, there's a sizeable amount of indirection required. This file
 * provides generic functions to the driver for grabbing and releasing
 * references for abstract power domains. It then maps those to the actual
 * power wells present for a given platform.
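 *
 * As a minimal sketch of the intended usage (the domain below is just an
 * example), callers bracket hardware access with a get/put pair on the
 * innermost power domain they need:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... touch pipe A registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);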
 */

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		for_each_if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			   \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]); \
	     i--)							   \
		for_each_if ((power_well)->domains & (domain_mask))

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id);

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	WARN(!power_well->count, "Use count on power well %s is already zero",
	     power_well->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled runtime
 * pm is effectively disabled.
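 *
 * A minimal usage sketch (the call sites here are illustrative only):
 *
 *	intel_display_set_init_power(dev_priv, true);
 *	... load/resume code that assumes everything stays powered ...
 *	intel_display_set_init_power(dev_priv, false);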
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->id == SKL_DISP_PW_2) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}
}

static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	if (power_well->id == SKL_DISP_PW_2)
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (intel_wait_for_register(dev_priv,
						    HSW_PWR_WELL_DRIVER,
						    HSW_PWR_WELL_STATE_ENABLED,
						    HSW_PWR_WELL_STATE_ENABLED,
						    20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			hsw_power_well_pre_disable(dev_priv);
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_INIT))

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 *    disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 *    set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and the DMC keeps returning the old value. Make sure
	 * the write really sticks enough times, and also force a rewrite
	 * until we are confident that the state is exactly what we want.
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the time we need just one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;
	if (IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}

static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
							       SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}

void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

}

void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("Disabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->id;
	u32 val;
	u32 mask;

	mask = SKL_POWER_WELL_REQ(power_well_id);

	val = I915_READ(HSW_PWR_WELL_KVMR);
	if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
		      power_well->name))
		I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);

	val = I915_READ(HSW_PWR_WELL_BIOS);
	val |= I915_READ(HSW_PWR_WELL_DEBUG);

	if (!(val & mask))
		return;

	/*
	 * DMC is known to force on the request bits for power well 1 on SKL
	 * and BXT and the misc IO power well on SKL but we don't expect any
	 * other request bits to be set, so WARN for those.
	 */
	if (power_well_id == SKL_DISP_PW_1 ||
	    ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
	     power_well_id == SKL_DISP_PW_MISC_IO))
		DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
				 "by DMC\n", power_well->name);
	else
		WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
			  power_well->name);

	I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
	I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
}

static void skl_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->id) {
	case SKL_DISP_PW_1:
		if (intel_wait_for_register(dev_priv,
					    SKL_FUSE_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    SKL_FUSE_PG0_DIST_STATUS,
					    1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->id);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->id);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->id);
	is_enabled = tmp & state_mask;

	if (!enable && enable_requested)
		skl_power_well_pre_disable(dev_priv, power_well);

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
			     !I915_READ(HSW_PWR_WELL_BIOS),
			     "Power well status unexpectedly enabled without a driver or BIOS request\n");
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
		}

		if (IS_GEN9(dev_priv))
			gen9_sanitize_power_well_requests(dev_priv, power_well);
	}

	if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
		     1))
		DRM_ERROR("%s %s timeout\n",
			  power_well->name, enable ? "enable" : "disable");

	if (check_fuse_status) {
		if (power_well->id == SKL_DISP_PW_1) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    SKL_FUSE_PG1_DIST_STATUS,
						    1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->id == SKL_DISP_PW_2) {
			if (intel_wait_for_register(dev_priv,
						    SKL_FUSE_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    SKL_FUSE_PG2_DIST_STATUS,
						    1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
			SKL_POWER_WELL_STATE(power_well->id);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->data);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->data);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
}

static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		bxt_dpio_cmn_power_well_enable(dev_priv, power_well);
	else
		bxt_dpio_cmn_power_well_disable(dev_priv, power_well);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->data);

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->data);
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u32 tmp = I915_READ(DBUF_CTL);

	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
	     "Unexpected DBuf power state (0x%08x)\n", tmp);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	WARN_ON(dev_priv->cdclk_freq !=
		dev_priv->display.get_display_clock_speed(dev_priv));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_BROXTON(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}

static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		gen9_dc_off_power_well_enable(dev_priv, power_well);
	else
		gen9_dc_off_power_well_disable(dev_priv, power_well);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->id;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->id;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum i915_pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be explicitly re-initialized
	 * anyway.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
#if 0
	if (!dev_priv->drm.dev->power.is_suspended)
#endif
		intel_hpd_poll_init(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all
	 *	 be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum i915_pipe pipe;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;

		power_well = &power_domains->power_wells[i];
		if (power_well->id == power_well_id)
			return power_well;
	}

	return NULL;
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			  phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum i915_pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum i915_pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum i915_pipe pipe = power_well->id;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum i915_pipe pipe = power_well->id;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, BIT(domain), power_domains)
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&power_domains->lock);

	__intel_display_power_get_domain(dev_priv, domain);

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if @domain is
 * already enabled, and in that case it ensures the domain stays powered up for
 * as long as the reference is held. Therefore users should only grab a
 * reference to the innermost power domain they need.
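 *
 * A minimal sketch of the intended pattern (the domain below is only an
 * example), as used by hardware state readout code:
 *
 *	if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_A)) {
 *		... read out pipe A hardware state ...
 *		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 *	}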
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns:
 * True when the reference was acquired (i.e. the domain was enabled), false
 * otherwise.
 */
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool is_enabled;

	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled)
		intel_runtime_pm_put(dev_priv);

	return is_enabled;
}

/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     intel_display_power_domain_str(domain));
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
		intel_power_well_put(dev_priv, power_well);

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}

#define HSW_DISPLAY_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\

#define HSW_DISPLAY_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
	BIT(POWER_DOMAIN_VGA) |			\
	BIT(POWER_DOMAIN_AUDIO) |		\
	BIT(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
	BIT(POWER_DOMAIN_VGA) |			\
	BIT(POWER_DOMAIN_AUDIO) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DSI) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_VGA) |			\
	BIT(POWER_DOMAIN_AUDIO) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_GMBUS) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DISPLAY_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DSI) |		\
	BIT(POWER_DOMAIN_VGA) |			\
	BIT(POWER_DOMAIN_AUDIO) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_GMBUS) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};
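
/*
 * For illustration, a power well backs a domain exactly when the domain's
 * bit is set in the well's mask; e.g. on HSW the display well covers VGA,
 *
 *	(HSW_DISPLAY_POWER_DOMAINS & BIT(POWER_DOMAIN_VGA)) != 0
 *
 * while pipe A is only covered by the always-on well's POWER_DOMAIN_MASK.
 */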

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = gen9_dc_off_power_well_sync_hw,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = bxt_dpio_cmn_power_well_sync_hw,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = PUNIT_POWER_WELL_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};

static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.id = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->ops->is_enabled(dev_priv, power_well);

	return ret;
}

static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = SKL_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
};
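
/*
 * For illustration, state checks against one specific well go through the
 * well id rather than a domain; a minimal sketch with a hypothetical caller
 * (not code from this driver):
 *
 *	if (intel_display_power_well_is_enabled(dev_priv, SKL_DISP_PW_2))
 *		...power well 2 is up, state depending on it can be read...
 */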

static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_1,
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_2,
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		.data = DPIO_PHY1,
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		.data = DPIO_PHY0,
	},
};

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	/* Honor an explicit user setting; default (auto, -1) to enabled. */
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
				    int enable_dc)
{
	uint32_t mask;
	int requested_dc;
	int max_dc;

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		max_dc = 2;
		mask = 0;
	} else if (IS_BROXTON(dev_priv)) {
		max_dc = 1;
		/*
		 * DC9 has a separate HW flow from the rest of the DC states,
		 * not depending on the DMC firmware. It's needed by system
		 * suspend/resume, so allow it unconditionally.
		 */
		mask = DC_STATE_EN_DC9;
	} else {
		max_dc = 0;
		mask = 0;
	}

	/* No DC states are allowed when power well support is disabled. */
	if (!i915.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 2) {
		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
			      enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	if (requested_dc > 1)
		mask |= DC_STATE_EN_UPTO_DC6;
	if (requested_dc > 0)
		mask |= DC_STATE_EN_UPTO_DC5;

	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);

	return mask;
}
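
/*
 * Worked example of the mask computation above: on SKL with power well
 * support enabled and the default i915.enable_dc=-1, max_dc is 2, so the
 * requested state is DC6 and the returned mask is
 * DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6. On BXT with
 * i915.enable_dc=0 only the unconditional DC_STATE_EN_DC9 bit remains.
 */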

#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
						     i915.disable_power_well);
	dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
							    i915.enable_dc);

	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);

	lockinit(&power_domains->lock, "i915pl", 0, LK_CANRECURSE);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
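
/*
 * For illustration, wiring up an additional platform only takes another
 * well table plus a branch in the chain above; a sketch with a made-up
 * platform check and table (hypothetical, not code from this driver):
 *
 *	} else if (IS_NEWPLATFORM(dev_priv)) {
 *		set_power_wells(power_domains, newplatform_power_wells);
 *	}
 */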

/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
#if 0
	struct device *kdev = &dev_priv->drm.pdev->dev;
#endif

	/*
	 * The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload.
	 * The following also reacquires the RPM reference the core passed
	 * to the driver during loading, which is dropped in
	 * intel_runtime_pm_enable(). We have to hand back the control of the
	 * device to the core with this reference held.
	 */
	intel_display_set_init_power(dev_priv, true);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	/*
	 * Remove the refcount we took in intel_runtime_pm_enable() in case
	 * the platform doesn't support runtime PM.
	 */
#if 0
	if (!HAS_RUNTIME_PM(dev_priv))
		pm_runtime_put(kdev);
#endif
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout!\n");
}
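
/*
 * Both DBuf helpers above follow the same request/ack handshake: toggle
 * DBUF_POWER_REQUEST, post the write, give the hardware ~10us, then verify
 * DBUF_POWER_STATE. A sketch of the shared shape as a single hypothetical
 * helper (not code from this driver):
 *
 *	static void gen9_dbuf_set(struct drm_i915_private *dev_priv,
 *				  bool enable)
 *	{
 *		u32 val = I915_READ(DBUF_CTL);
 *
 *		val = enable ? val | DBUF_POWER_REQUEST :
 *			       val & ~DBUF_POWER_REQUEST;
 *		I915_WRITE(DBUF_CTL, val);
 *		POSTING_READ(DBUF_CTL);
 *		udelay(10);
 *
 *		if (!!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE) != enable)
 *			DRM_ERROR("DBuf power %sable timeout\n",
 *				  enable ? "en" : "dis");
 *	}
 */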

static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	skl_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_disable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}

void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to the initialization sequence;
	 * previously this was left up to the BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	bxt_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	bxt_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/* Disable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}

static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
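
/*
 * For illustration, any later update of the PHY control register has to go
 * through the shadow copy followed by a blind write, mirroring the tail of
 * chv_phy_control_init() above; a sketch with a hypothetical override bit
 * (not code from this driver):
 *
 *	dev_priv->chv_phy_control |= some_override_bits;
 *	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
 */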

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might already be active, skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 * @resume: true when called from a system resume path
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		skl_display_core_init(dev_priv, resume);
	} else if (IS_BROXTON(dev_priv)) {
		bxt_display_core_init(dev_priv, resume);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	/* Take a reference to keep the wells on if the user disabled power well support. */
	if (!i915.disable_power_well)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(dev_priv);
	power_domains->initializing = false;
}

/**
 * intel_power_domains_suspend - suspend power domain state
 * @dev_priv: i915 device instance
 *
 * This function prepares the hardware power domain state before entering
 * system suspend. It must be paired with intel_power_domains_init_hw().
 */
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells while we are system suspended.
	 */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		skl_display_core_uninit(dev_priv);
	else if (IS_BROXTON(dev_priv))
		bxt_display_core_uninit(dev_priv);
}

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	pm_runtime_get_sync(kdev);

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);
}
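
/*
 * For illustration, device-level runtime pm references bracket hardware
 * access the same way the display power domain references do; a minimal
 * sketch with a hypothetical caller (not code from this driver):
 *
 *	intel_runtime_pm_get(dev_priv);
 *	...touch GT/GTT registers while the device is guaranteed awake...
 *	intel_runtime_pm_put(dev_priv);
 */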

/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns true if the reference was obtained, false otherwise.
 */
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
#ifndef __DragonFly__
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	if (IS_ENABLED(CONFIG_PM)) {
		int ret = pm_runtime_get_if_in_use(kdev);

		/*
		 * In case runtime PM is disabled by the RPM core and we get
		 * an -EINVAL return value, we are not supposed to call this
		 * function, since the power state is undefined. This applies
		 * atm to the late/early system suspend/resume handlers.
		 */
		WARN_ON_ONCE(ret < 0);
		if (ret <= 0)
			return false;
	}

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);
#endif

	return true;
}

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
#if 0
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;
#endif

	assert_rpm_wakelock_held(dev_priv);
#if 0
	pm_runtime_get_noresume(kdev);
#endif

	atomic_inc(&dev_priv->pm.wakeref_count);
}

/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	assert_rpm_wakelock_held(dev_priv);
	atomic_dec(&dev_priv->pm.wakeref_count);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}
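
/*
 * For illustration, the conditional variant suits opportunistic paths that
 * must not wake the device; a minimal sketch with a hypothetical caller
 * (not code from this driver):
 *
 *	if (!intel_runtime_pm_get_if_in_use(dev_priv))
 *		return;		(device is suspended, nothing to do)
 *	...access hardware...
 *	intel_runtime_pm_put(dev_priv);
 */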

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does not currently enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
#if 0
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!HAS_RUNTIME_PM(dev_priv)) {
		pm_runtime_dont_use_autosuspend(kdev);
		pm_runtime_get_sync(kdev);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
#endif
}