/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains, there's a sizeable amount of indirection required. This file
 * provides generic functions to the driver for grabbing and releasing
 * references for abstract power domains. It then maps those to the actual
 * power wells present for a given platform.
 */
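/*
 * Illustrative usage sketch (not part of the driver): code that needs a
 * display hardware block powered up brackets its register accesses with
 * a reference on the corresponding power domain. The function name and
 * the choice of POWER_DOMAIN_PIPE_A are hypothetical; the get/put
 * pairing is the documented contract.
 */
#if 0
static void example_touch_pipe_a(struct drm_i915_private *dev_priv)
{
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	/* ... pipe A registers can be safely accessed here ... */

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
#endif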
#define GEN9_ENABLE_DC5(dev) 0
#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]); \
	     i--)							 \
		if ((power_well)->domains & (domain_mask))
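/*
 * Illustrative sketch: the iterators above expand to a for loop followed
 * by an if that filters on the domain mask, so they nest and brace like
 * any other loop. A minimal (hypothetical) use, assuming power_domains
 * is in scope:
 */
#if 0
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, BIT(POWER_DOMAIN_VGA),
			    power_domains)
		DRM_DEBUG_KMS("%s powers the VGA domain\n",
			      power_well->name);
#endif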
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id);

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}
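/*
 * Illustrative sketch: the intended driver-load ordering for the hack
 * above, with hypothetical placeholders for the surrounding steps. The
 * init power reference is taken before hardware state readout and only
 * dropped once the driver has taken over, at which point normal
 * power-domain refcounting governs the wells.
 */
#if 0
	intel_display_set_init_power(dev_priv, true);
	/* ... hw state readout, initial modeset ... */
	intel_display_set_init_power(dev_priv, false);
#endif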
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
#if 0
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif

	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->data == SKL_DISP_PW_2) {
#if 0
		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}

	if (power_well->data == SKL_DISP_PW_1) {
		if (!dev_priv->power_domains.initializing)
			intel_prepare_ddi(dev);
		gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
	}
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |				\
	SKL_DISPLAY_DDI_A_E_POWER_DOMAINS |				\
	SKL_DISPLAY_DDI_B_POWER_DOMAINS |				\
	SKL_DISPLAY_DDI_C_POWER_DOMAINS |				\
	SKL_DISPLAY_DDI_D_POWER_DOMAINS |				\
	SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) |				\
	BIT(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |			\
	BIT(POWER_DOMAIN_INIT))

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be enabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled to enable DC9.\n");
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if the display uninitialize sequence has been initiated.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be disabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if the display uninitialize sequence has been initiated.
	 */
}
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	val = I915_READ(DC_STATE_EN);
	val |= DC_STATE_EN_DC9;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_DC9;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}
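/*
 * Illustrative ordering sketch derived from the asserts above: DC9 may
 * only be entered once interrupts are disabled and the power well is
 * off, and must be left again before either is re-enabled. The helper
 * names below are hypothetical placeholders for those steps.
 */
#if 0
	/* suspend path */
	example_disable_irqs_and_power_well(dev_priv);	/* hypothetical */
	bxt_enable_dc9(dev_priv);

	/* resume path */
	bxt_disable_dc9(dev_priv);
	example_reenable_power_well_and_irqs(dev_priv);	/* hypothetical */
#endif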
static void gen9_set_dc_state_debugmask_memory_up(
			struct drm_i915_private *dev_priv)
{
	uint32_t val;

	/* The below bit doesn't need to be cleared ever afterwards */
	val = I915_READ(DC_STATE_DEBUG);
	if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
		val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
		I915_WRITE(DC_STATE_DEBUG, val);
		POSTING_READ(DC_STATE_DEBUG);
	}
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
							SKL_DISP_PW_2);

	WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	WARN_ONCE(dev_priv->pm.suspended,
		  "DC5 cannot be enabled if the platform is runtime-suspended.\n");

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
							SKL_DISP_PW_2);
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	WARN_ONCE(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
	WARN_ONCE(dev_priv->pm.suspended,
		  "Disabling of DC5 while platform is runtime-suspended should never happen.\n");
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state_debugmask_memory_up(dev_priv);

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
	val |= DC_STATE_EN_UPTO_DC5;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc5(dev_priv);

	DRM_DEBUG_KMS("Disabling DC5\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
{
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	assert_csr_loaded(dev_priv);
	WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be disabled.\n");
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state_debugmask_memory_up(dev_priv);

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
	val |= DC_STATE_EN_UPTO_DC6;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc6(dev_priv);

	DRM_DEBUG_KMS("Disabling DC6\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC6;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}
static void skl_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->data) {
	case SKL_DISP_PW_1:
		if (wait_for((I915_READ(SKL_FUSE_STATUS) &
			SKL_FUSE_PG0_DIST_STATUS), 1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->data);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->data);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->data);
	is_enabled = tmp & state_mask;

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
				!I915_READ(HSW_PWR_WELL_BIOS),
				"Invalid for power well status to be enabled, unless done by the BIOS, "
				"when request is to disable!\n");
			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
				power_well->data == SKL_DISP_PW_2) {
				if (SKL_ENABLE_DC6(dev)) {
					skl_disable_dc6(dev_priv);
					/*
					 * DDI buffer programming is unnecessary
					 * during driver-load/resume as it's
					 * already done during modeset
					 * initialization then. It's also
					 * invalid here as the encoder list is
					 * still uninitialized.
					 */
					if (!dev_priv->power_domains.initializing)
						intel_prepare_ddi(dev);
				} else {
					gen9_disable_dc5(dev_priv);
				}
			}
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				state_mask), 1))
				DRM_ERROR("%s enable timeout\n",
					power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			if (IS_SKYLAKE(dev) &&
				(power_well->data == SKL_DISP_PW_1) &&
				(intel_csr_load_status_get(dev_priv) == FW_LOADED))
				DRM_DEBUG_KMS("Not Disabling PW1, dmc will handle\n");
			else {
				I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
				POSTING_READ(HSW_PWR_WELL_DRIVER);
				DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
			}

			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
				power_well->data == SKL_DISP_PW_2) {
				enum csr_state state;
				/* TODO: wait for a completion event or
				 * similar here instead of busy
				 * waiting using wait_for function.
				 */
				wait_for((state = intel_csr_load_status_get(dev_priv)) !=
						FW_UNINITIALIZED, 1000);
				if (state != FW_LOADED)
					DRM_DEBUG("CSR firmware not ready (%d)\n",
							state);
				else
					if (SKL_ENABLE_DC6(dev))
						skl_enable_dc6(dev_priv);
					else
						gen9_enable_dc5(dev_priv);
			}
		}
	}

	if (check_fuse_status) {
		if (power_well->data == SKL_DISP_PW_1) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG1_DIST_STATUS), 1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->data == SKL_DISP_PW_2) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG2_DIST_STATUS), 1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
		SKL_POWER_WELL_STATE(power_well->data);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	enum i915_pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv->dev, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	i915_redisable_vga_power_on(dev_priv->dev);
}
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_power_sequencer_reset(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all
	 *       be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum i915_pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;
	u32 tmp;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (i.e. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
		WARN(phy_status != tmp,
		     "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
		     tmp, phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum i915_pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum i915_pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (i.e. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
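/*
 * Illustrative sketch: how encoder code is expected to drive the
 * powergating helper above. The mask picks the lanes to keep powered
 * (0xf = all four), and the override is dropped again once the port is
 * fully disabled so the lanes can power down on their own. The call
 * sites shown here are hypothetical.
 */
#if 0
	/* port enable path: force all four lanes on */
	chv_phy_powergate_lanes(encoder, true, 0xf);

	/* port disable path: release the override, lanes power down */
	chv_phy_powergate_lanes(encoder, false, 0x0);
#endif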
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum i915_pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum i915_pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++)
			intel_power_well_enable(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well)
			intel_power_well_disable(dev_priv, power_well);
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}

#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))
#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};

static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->ops->is_enabled(dev_priv, power_well);

	return ret;
}

static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_D,
	},
};

static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
};

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	if (IS_SKYLAKE(dev_priv)) {
		DRM_DEBUG_KMS("Disabling display power well support\n");
		return 0;
	}

	return 1;
}

#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
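/*
 * With the per-platform tables above, installing the right set of wells is
 * a single set_power_wells() statement, e.g. (illustrative sketch, "foo"
 * again being a stand-in platform):
 *
 *	set_power_wells(power_domains, foo_power_wells);
 *
 * The macro records both the array pointer and its element count, so the
 * power well iterators always agree with whichever table was installed.
 */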
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
						     i915.disable_power_well);

	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);

	lockinit(&power_domains->lock, "i915pl", 0, LK_CANRECURSE);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv->dev)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_BROXTON(dev_priv->dev)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}

static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
{
#if 0
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	if (!intel_enable_rc6(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
#endif
}

/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(dev_priv);

	/*
	 * The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload.
	 */
	intel_display_set_init_power(dev_priv, true);
}

static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}
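/*
 * After intel_power_domains_resume() the cached ->hw_enabled flags are back
 * in sync with the hardware, i.e. for every well the following would hold
 * (a sketch of the invariant, not driver code):
 *
 *	WARN_ON(power_well->hw_enabled !=
 *		power_well->ops->is_enabled(dev_priv, power_well));
 */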
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
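/*
 * Every later update to the PHY control value is expected to follow the
 * same write-only pattern as above (illustrative sketch): modify the
 * shadow copy, then write it out in full, never reading the register back:
 *
 *	dev_priv->chv_phy_control |=
 *		PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
 *	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
 */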
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might already be active, skip this. */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
#if 0
	struct device *device = &dev->pdev->dev;
#endif

	if (!HAS_RUNTIME_PM(dev))
		return;

#if 0
	pm_runtime_get_sync(device);
#endif
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code, where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
#if 0
	struct device *device = &dev->pdev->dev;
#endif

	if (!HAS_RUNTIME_PM(dev))
		return;

	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
#if 0
	pm_runtime_get_noresume(device);
#endif
}

/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
#if 0
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_mark_last_busy(device);
	pm_runtime_put_autosuspend(device);
#endif
}
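/*
 * Typical usage of the get/put pair above (illustrative sketch): any code
 * that may run while the device is otherwise idle brackets its hardware
 * access with a runtime pm reference:
 *
 *	intel_runtime_pm_get(dev_priv);
 *
 *	... access registers, the device is guaranteed to be powered ...
 *
 *	intel_runtime_pm_put(dev_priv);
 */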
/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does not currently enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
#if 0
	struct device *device = &dev->pdev->dev;
#endif

	if (!HAS_RUNTIME_PM(dev))
		return;

	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		return;
	}

#if 0
	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);
	pm_runtime_use_autosuspend(device);

	pm_runtime_put_autosuspend(device);
#endif
}
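/*
 * Once the (currently stubbed out) autosuspend setup above is active, the
 * device would be allowed to suspend roughly 10 seconds after the last
 * intel_runtime_pm_put() rather than immediately.
 */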