/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/module.h>
#include <machine/clock.h>
#include <drm/i915_powerwell.h>

/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, where RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)

/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reduce the power envelope.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.enable_fbc parameter.
 */
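/*
 * Usage note (illustrative, not from the original source): with the driver
 * built as a module, the parameter above can typically be set at load time,
 * e.g. on the kernel command line:
 *
 *   i915.enable_fbc=1     force-enable FBC
 *   i915.enable_fbc=0     force-disable FBC
 *   i915.enable_fbc=-1    use the per-chip default (see intel_update_fbc())
 *
 * Older trees spelled this i915.i915_enable_fbc; check the tree in use.
 */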
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}

static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	if (IS_GEN4(dev)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, crtc->y);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}

static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
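/*
 * Worked example for the CFB pitch programming in i8xx_enable_fbc() above
 * (illustrative numbers, not from the original source): assume the
 * framebuffer stride fb->pitches[0] is 2048 bytes and is the smaller of the
 * two candidate pitches. The FBC_CONTROL stride field then holds
 * "units minus one":
 *
 *   gen2:  cfb_pitch = (2048 / 32) - 1 = 63   (32-byte units)
 *   gen3+: cfb_pitch = (2048 / 64) - 1 = 31   (64-byte units)
 */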
static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */

	/* Blitter is part of Media powerwell on VLV. No impact of
	 * this param in other platforms for now */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);

	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}

static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev))
		dpfc_ctl |= obj->fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
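/*
 * Note on sandybridge_blit_fbc_update() above (explanatory, based on the
 * usual i915 masked-register convention; treat the register semantics as an
 * assumption): ECOSKPD appears to latch a bit only while the same bit is
 * set in the upper (mask) half selected by GEN6_BLITTER_LOCK_SHIFT. Hence
 * the three writes: arm the mask bit, set the payload bit, then clear the
 * mask bit again so that later writes do not disturb it.
 */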
static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

static void gen7_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_IVYBRIDGE(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

	sandybridge_blit_fbc_update(dev);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}

static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->primary->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc);

			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
			dev_priv->fbc.y = work->crtc->y;
		}

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc.fbc_work = NULL;
}

static void intel_enable_fbc(struct drm_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		dev_priv->display.enable_fbc(crtc);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}

void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->fbc.plane = -1;
}

static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      enum no_fbc_reason reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return false;

	dev_priv->fbc.no_fbc_reason = reason;
	return true;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time. We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one. It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

	if (!HAS_FBC(dev)) {
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
		return;
	}

	if (!i915.powersave) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		return;
	}

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	for_each_crtc(dev, tmp_crtc) {
		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
			if (crtc) {
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->primary->fb == NULL) {
		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
			DRM_DEBUG_KMS("no output, disabling\n");
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->primary->fb;
	obj = intel_fb_obj(fb);
	adjusted_mode = &intel_crtc->config.adjusted_mode;

	if (i915.enable_fbc < 0) {
		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
			DRM_DEBUG_KMS("disabled per chip default\n");
		goto out_disable;
	}
	if (!i915.enable_fbc) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		goto out_disable;
	}
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("mode incompatible with compression, "
				      "disabling\n");
		goto out_disable;
	}

	if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
		max_width = 4096;
		max_height = 4096;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		max_width = 4096;
		max_height = 2048;
	} else {
		max_width = 2048;
		max_height = 1536;
	}
	if (intel_crtc->config.pipe_src_w > max_width ||
	    intel_crtc->config.pipe_src_h > max_height) {
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		goto out_disable;
	}
	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
	    intel_crtc->plane != PLANE_A) {
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
		goto out_disable;
	}
	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
#ifdef DDB
	if (in_dbg_master())
		goto out_disable;
#endif

	if (i915_gem_stolen_setup_compression(dev, obj->base.size,
					      drm_format_plane_cpp(fb->pixel_format, 0))) {
		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.plane == intel_crtc->plane &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc);
	dev_priv->fbc.no_fbc_reason = FBC_OK;
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
	i915_gem_stolen_cleanup_compression(dev);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
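/*
 * Worked decode example for i915_ironlake_get_mem_freq() above
 * (illustrative): ddrpll & 0xff == 0x10 yields mem_freq = 1066, and
 * csipll & 0x3ff == 0x00e yields fsb_freq = 3733; since 3200 < 3733 <= 4800,
 * ips.c_m ends up as 1.
 */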
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}
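/*
 * Example lookup (illustrative): a desktop part (is_desktop == 1) with
 * DDR2 memory (is_ddr3 == 0), fsb_freq == 800 and mem_freq == 667 matches
 * the second row of cxsr_latency_table above, so intel_get_cxsr_latency()
 * returns that row's latencies (in ns) for pineview_update_wm() to program.
 */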
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	.fifo_size = VALLEYVIEW_FIFO_SIZE,
	.max_wm = VALLEYVIEW_MAX_WM,
	.default_wm = VALLEYVIEW_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO available to this plane
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past
 * the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
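/*
 * Worked example for intel_calculate_wm() above (illustrative numbers):
 * with clock_in_khz = 100000 (100 MHz pixel clock), pixel_size = 4 and
 * latency_ns = 5000,
 *
 *   entries_required = (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes
 *
 * which with a 64-byte cacheline is DIV_ROUND_UP(2000, 64) = 32 FIFO
 * entries, so wm_size = fifo_size - (32 + wm->guard_size), then clamped to
 * max_wm and falling back to default_wm if it would be zero or negative.
 */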
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}

/*
 * Check the wm result.
 *
 * If any calculated watermark values is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

static bool vlv_compute_drain_latency(struct drm_device *dev,
				      int plane,
				      int *plane_prec_mult,
				      int *plane_dl,
				      int *cursor_prec_mult,
				      int *cursor_dl)
{
	struct drm_crtc *crtc;
	int clock, pixel_size;
	int entries;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc))
		return false;

	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */

	entries = (clock / 1000) * pixel_size;
	*plane_prec_mult = (entries > 128) ?
		DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
	*plane_dl = (64 * (*plane_prec_mult) * 4) / entries;

	entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
	*cursor_prec_mult = (entries > 128) ?
		DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / entries;

	return true;
}
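/*
 * Worked example for vlv_compute_drain_latency() above (illustrative, and
 * assuming DRAIN_LATENCY_PRECISION_64 evaluates to 64): a 148500 kHz pixel
 * clock with 4 bytes per pixel gives entries = (148500 / 1000) * 4 = 592,
 * which exceeds 128, so the 64x precision multiplier is chosen and
 * plane_dl = (64 * 64 * 4) / 592 = 27.
 */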
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */

static void vlv_update_drain_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_prec, planea_dl, planeb_prec, planeb_dl;
	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
	int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
						  either 32 or 64 */

	/* For plane A, Cursor A */
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
				      &cursor_prec_mult, &cursora_dl)) {
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_64;
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_64;

		I915_WRITE(VLV_DDL1, cursora_prec |
			   (cursora_dl << DDL_CURSORA_SHIFT) |
			   planea_prec | planea_dl);
	}

	/* For plane B, Cursor B */
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
				      &cursor_prec_mult, &cursorb_dl)) {
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_64;
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_64;

		I915_WRITE(VLV_DDL2, cursorb_prec |
			   (cursorb_dl << DDL_CURSORB_SHIFT) |
			   planeb_prec | planeb_dl);
	}
}

#define single_plane_enabled(mask) is_power_of_2(mask)

static void valleyview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(dev);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(crtc)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * to_intel_crtc(crtc)->cursor_width;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
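/*
 * Worked example for the self-refresh math in i965_update_wm() above
 * (illustrative, and assuming I965_FIFO_SIZE == 512): for a 1920x1080 mode
 * with htotal = 2200 and a 148500 kHz clock, line_time_us =
 * 2200 * 1000 / 148500 = 14. With sr_latency_ns = 12000 the latency spans
 * ((12000 / 14) + 1000) / 1000 = 1 line, so entries = 1 * 4 * 1920 =
 * 7680 bytes = 120 cachelines of 64 bytes, and srwm = 512 - 120 = 392.
 */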
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(enabled)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
		int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
				    struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pixel_rate;

	pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (intel_crtc->config.pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = intel_crtc->config.pch_pfit.size;

		pipe_w = intel_crtc->config.pipe_src_w;
		pipe_h = intel_crtc->config.pipe_src_h;
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
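/*
 * Worked example for ilk_pipe_pixel_rate() above (illustrative): a
 * 1920x1080 pipe downscaled by the panel fitter to 1280x720 keeps
 * pipe_w/pipe_h at 1920/1080 (they already exceed the pfit size), so the
 * effective rate becomes pixel_rate * (1920 * 1080) / (1280 * 720), i.e.
 * 2.25x the mode clock; downscaling makes the pipe fetch pixels faster
 * than the dotclock alone would suggest.
 */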
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;
	return ret;
}

static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}

struct ilk_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate;
	struct intel_plane_wm_parameters pri;
	struct intel_plane_wm_parameters spr;
	struct intel_plane_wm_parameters cur;
};

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;

	if (!params->active || !params->pri.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->pri.bytes_per_pixel,
				 mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->pri.horiz_pixels,
				 params->pri.bytes_per_pixel,
				 mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	uint32_t method1, method2;

	if (!params->active || !params->spr.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->spr.bytes_per_pixel,
				 mem_value);
	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->spr.horiz_pixels,
				 params->spr.bytes_per_pixel,
				 mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	if (!params->active || !params->cur.enabled)
		return 0;

	return ilk_wm_method2(params->pixel_rate,
			      params->pipe_htotal,
			      params->cur.horiz_pixels,
			      params->cur.bytes_per_pixel,
			      mem_value);
}
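/*
 * Worked example for the two watermark methods above (illustrative
 * numbers): with pixel_rate = 148500 kHz, bytes_per_pixel = 4 and a
 * latency of 35 (3.5us in 0.1us units), method 1 gives
 *
 *   DIV_ROUND_UP(148500 * 4 * 35, 64 * 10000) + 2 = 33 + 2 = 35
 *
 * FIFO lines. Method 2 instead counts how many full scanlines fit in the
 * latency window and multiplies by the line size; WM_LP levels take the
 * smaller of the two results.
 */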
*/ 1846 static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params, 1847 uint32_t pri_val) 1848 { 1849 if (!params->active || !params->pri.enabled) 1850 return 0; 1851 1852 return ilk_wm_fbc(pri_val, 1853 params->pri.horiz_pixels, 1854 params->pri.bytes_per_pixel); 1855 } 1856 1857 static unsigned int ilk_display_fifo_size(const struct drm_device *dev) 1858 { 1859 if (INTEL_INFO(dev)->gen >= 8) 1860 return 3072; 1861 else if (INTEL_INFO(dev)->gen >= 7) 1862 return 768; 1863 else 1864 return 512; 1865 } 1866 1867 static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev, 1868 int level, bool is_sprite) 1869 { 1870 if (INTEL_INFO(dev)->gen >= 8) 1871 /* BDW primary/sprite plane watermarks */ 1872 return level == 0 ? 255 : 2047; 1873 else if (INTEL_INFO(dev)->gen >= 7) 1874 /* IVB/HSW primary/sprite plane watermarks */ 1875 return level == 0 ? 127 : 1023; 1876 else if (!is_sprite) 1877 /* ILK/SNB primary plane watermarks */ 1878 return level == 0 ? 127 : 511; 1879 else 1880 /* ILK/SNB sprite plane watermarks */ 1881 return level == 0 ? 63 : 255; 1882 } 1883 1884 static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev, 1885 int level) 1886 { 1887 if (INTEL_INFO(dev)->gen >= 7) 1888 return level == 0 ? 63 : 255; 1889 else 1890 return level == 0 ? 31 : 63; 1891 } 1892 1893 static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev) 1894 { 1895 if (INTEL_INFO(dev)->gen >= 8) 1896 return 31; 1897 else 1898 return 15; 1899 } 1900 1901 /* Calculate the maximum primary/sprite plane watermark */ 1902 static unsigned int ilk_plane_wm_max(const struct drm_device *dev, 1903 int level, 1904 const struct intel_wm_config *config, 1905 enum intel_ddb_partitioning ddb_partitioning, 1906 bool is_sprite) 1907 { 1908 unsigned int fifo_size = ilk_display_fifo_size(dev); 1909 1910 /* if sprites aren't enabled, sprites get nothing */ 1911 if (is_sprite && !config->sprites_enabled) 1912 return 0; 1913 1914 /* HSW allows LP1+ watermarks even with multiple pipes */ 1915 if (level == 0 || config->num_pipes_active > 1) { 1916 fifo_size /= INTEL_INFO(dev)->num_pipes; 1917 1918 /* 1919 * For some reason the non self refresh 1920 * FIFO size is only half of the self 1921 * refresh FIFO size on ILK/SNB. 
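 *
 * E.g. on SNB with two active pipes, a level 0 plane starts from
 * 512 entries, gets 512 / 2 = 256 just above, is halved again to 128
 * here, and is finally clamped to the 127-entry register maximum
 * below.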
1922 */ 1923 if (INTEL_INFO(dev)->gen <= 6) 1924 fifo_size /= 2; 1925 } 1926 1927 if (config->sprites_enabled) { 1928 /* level 0 is always calculated with 1:1 split */ 1929 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) { 1930 if (is_sprite) 1931 fifo_size *= 5; 1932 fifo_size /= 6; 1933 } else { 1934 fifo_size /= 2; 1935 } 1936 } 1937 1938 /* clamp to max that the registers can hold */ 1939 return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite)); 1940 } 1941 1942 /* Calculate the maximum cursor plane watermark */ 1943 static unsigned int ilk_cursor_wm_max(const struct drm_device *dev, 1944 int level, 1945 const struct intel_wm_config *config) 1946 { 1947 /* HSW LP1+ watermarks w/ multiple pipes */ 1948 if (level > 0 && config->num_pipes_active > 1) 1949 return 64; 1950 1951 /* otherwise just report max that registers can hold */ 1952 return ilk_cursor_wm_reg_max(dev, level); 1953 } 1954 1955 static void ilk_compute_wm_maximums(const struct drm_device *dev, 1956 int level, 1957 const struct intel_wm_config *config, 1958 enum intel_ddb_partitioning ddb_partitioning, 1959 struct ilk_wm_maximums *max) 1960 { 1961 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); 1962 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); 1963 max->cur = ilk_cursor_wm_max(dev, level, config); 1964 max->fbc = ilk_fbc_wm_reg_max(dev); 1965 } 1966 1967 static void ilk_compute_wm_reg_maximums(struct drm_device *dev, 1968 int level, 1969 struct ilk_wm_maximums *max) 1970 { 1971 max->pri = ilk_plane_wm_reg_max(dev, level, false); 1972 max->spr = ilk_plane_wm_reg_max(dev, level, true); 1973 max->cur = ilk_cursor_wm_reg_max(dev, level); 1974 max->fbc = ilk_fbc_wm_reg_max(dev); 1975 } 1976 1977 static bool ilk_validate_wm_level(int level, 1978 const struct ilk_wm_maximums *max, 1979 struct intel_wm_level *result) 1980 { 1981 bool ret; 1982 1983 /* already determined to be invalid? */ 1984 if (!result->enable) 1985 return false; 1986 1987 result->enable = result->pri_val <= max->pri && 1988 result->spr_val <= max->spr && 1989 result->cur_val <= max->cur; 1990 1991 ret = result->enable; 1992 1993 /* 1994 * HACK until we can pre-compute everything, 1995 * and thus fail gracefully if LP0 watermarks 1996 * are exceeded... 
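 *
 * Until then, an out-of-range LP0 value is clamped to the register
 * maximum below and logged, rather than rejecting the configuration
 * outright.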
1997 */ 1998 if (level == 0 && !result->enable) { 1999 if (result->pri_val > max->pri) 2000 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n", 2001 level, result->pri_val, max->pri); 2002 if (result->spr_val > max->spr) 2003 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n", 2004 level, result->spr_val, max->spr); 2005 if (result->cur_val > max->cur) 2006 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n", 2007 level, result->cur_val, max->cur); 2008 2009 result->pri_val = min_t(uint32_t, result->pri_val, max->pri); 2010 result->spr_val = min_t(uint32_t, result->spr_val, max->spr); 2011 result->cur_val = min_t(uint32_t, result->cur_val, max->cur); 2012 result->enable = true; 2013 } 2014 2015 return ret; 2016 } 2017 2018 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, 2019 int level, 2020 const struct ilk_pipe_wm_parameters *p, 2021 struct intel_wm_level *result) 2022 { 2023 uint16_t pri_latency = dev_priv->wm.pri_latency[level]; 2024 uint16_t spr_latency = dev_priv->wm.spr_latency[level]; 2025 uint16_t cur_latency = dev_priv->wm.cur_latency[level]; 2026 2027 /* WM1+ latency values stored in 0.5us units */ 2028 if (level > 0) { 2029 pri_latency *= 5; 2030 spr_latency *= 5; 2031 cur_latency *= 5; 2032 } 2033 2034 result->pri_val = ilk_compute_pri_wm(p, pri_latency, level); 2035 result->spr_val = ilk_compute_spr_wm(p, spr_latency); 2036 result->cur_val = ilk_compute_cur_wm(p, cur_latency); 2037 result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val); 2038 result->enable = true; 2039 } 2040 2041 static uint32_t 2042 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc) 2043 { 2044 struct drm_i915_private *dev_priv = dev->dev_private; 2045 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2046 struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode; 2047 u32 linetime, ips_linetime; 2048 2049 if (!intel_crtc_active(crtc)) 2050 return 0; 2051 2052 /* The watermarks are computed based on how long it takes to fill a single 2053 * row at the given clock rate, multiplied by 8.
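 *
 * Hedged example with an assumed 1080p mode: crtc_htotal = 2200 and
 * crtc_clock = 148500 (kHz) give DIV_ROUND_CLOSEST(2200 * 1000 * 8,
 * 148500) = 119, i.e. a ~14.8us line time expressed in 1/8 us units.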
2054 * */ 2055 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8, 2056 mode->crtc_clock); 2057 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8, 2058 intel_ddi_get_cdclk_freq(dev_priv)); 2059 2060 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | 2061 PIPE_WM_LINETIME_TIME(linetime); 2062 } 2063 2064 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5]) 2065 { 2066 struct drm_i915_private *dev_priv = dev->dev_private; 2067 2068 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 2069 uint64_t sskpd = I915_READ64(MCH_SSKPD); 2070 2071 wm[0] = (sskpd >> 56) & 0xFF; 2072 if (wm[0] == 0) 2073 wm[0] = sskpd & 0xF; 2074 wm[1] = (sskpd >> 4) & 0xFF; 2075 wm[2] = (sskpd >> 12) & 0xFF; 2076 wm[3] = (sskpd >> 20) & 0x1FF; 2077 wm[4] = (sskpd >> 32) & 0x1FF; 2078 } else if (INTEL_INFO(dev)->gen >= 6) { 2079 uint32_t sskpd = I915_READ(MCH_SSKPD); 2080 2081 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK; 2082 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK; 2083 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK; 2084 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK; 2085 } else if (INTEL_INFO(dev)->gen >= 5) { 2086 uint32_t mltr = I915_READ(MLTR_ILK); 2087 2088 /* ILK primary LP0 latency is 700 ns */ 2089 wm[0] = 7; 2090 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK; 2091 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK; 2092 } 2093 } 2094 2095 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5]) 2096 { 2097 /* ILK sprite LP0 latency is 1300 ns */ 2098 if (INTEL_INFO(dev)->gen == 5) 2099 wm[0] = 13; 2100 } 2101 2102 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5]) 2103 { 2104 /* ILK cursor LP0 latency is 1300 ns */ 2105 if (INTEL_INFO(dev)->gen == 5) 2106 wm[0] = 13; 2107 2108 /* WaDoubleCursorLP3Latency:ivb */ 2109 if (IS_IVYBRIDGE(dev)) 2110 wm[3] *= 2; 2111 } 2112 2113 int ilk_wm_max_level(const struct drm_device *dev) 2114 { 2115 /* how many WM levels are we expecting */ 2116 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2117 return 4; 2118 else if (INTEL_INFO(dev)->gen >= 6) 2119 return 3; 2120 else 2121 return 2; 2122 } 2123 2124 static void intel_print_wm_latency(struct drm_device *dev, 2125 const char *name, 2126 const uint16_t wm[5]) 2127 { 2128 int level, max_level = ilk_wm_max_level(dev); 2129 2130 for (level = 0; level <= max_level; level++) { 2131 unsigned int latency = wm[level]; 2132 2133 if (latency == 0) { 2134 DRM_ERROR("%s WM%d latency not provided\n", 2135 name, level); 2136 continue; 2137 } 2138 2139 /* WM1+ latency values in 0.5us units */ 2140 if (level > 0) 2141 latency *= 5; 2142 2143 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n", 2144 name, level, wm[level], 2145 latency / 10, latency % 10); 2146 } 2147 } 2148 2149 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, 2150 uint16_t wm[5], uint16_t min) 2151 { 2152 int level, max_level = ilk_wm_max_level(dev_priv->dev); 2153 2154 if (wm[0] >= min) 2155 return false; 2156 2157 wm[0] = max(wm[0], min); 2158 for (level = 1; level <= max_level; level++) 2159 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5)); 2160 2161 return true; 2162 } 2163 2164 static void snb_wm_latency_quirk(struct drm_device *dev) 2165 { 2166 struct drm_i915_private *dev_priv = dev->dev_private; 2167 bool changed; 2168 2169 /* 2170 * The BIOS provided WM memory latency values are often 2171 * inadequate for high resolution displays. Adjust them. 
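 *
 * Concretely, the calls below raise WM0 to at least 12 (1.2us in
 * 0.1us units) and floor WM1+ at DIV_ROUND_UP(12, 5) = 3 (1.5us in
 * 0.5us units).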
2172 */ 2173 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | 2174 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | 2175 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); 2176 2177 if (!changed) 2178 return; 2179 2180 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n"); 2181 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); 2182 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); 2183 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); 2184 } 2185 2186 static void ilk_setup_wm_latency(struct drm_device *dev) 2187 { 2188 struct drm_i915_private *dev_priv = dev->dev_private; 2189 2190 intel_read_wm_latency(dev, dev_priv->wm.pri_latency); 2191 2192 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency, 2193 sizeof(dev_priv->wm.pri_latency)); 2194 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency, 2195 sizeof(dev_priv->wm.pri_latency)); 2196 2197 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency); 2198 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency); 2199 2200 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); 2201 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); 2202 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); 2203 2204 if (IS_GEN6(dev)) 2205 snb_wm_latency_quirk(dev); 2206 } 2207 2208 static void ilk_compute_wm_parameters(struct drm_crtc *crtc, 2209 struct ilk_pipe_wm_parameters *p) 2210 { 2211 struct drm_device *dev = crtc->dev; 2212 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2213 enum i915_pipe pipe = intel_crtc->pipe; 2214 struct drm_plane *plane; 2215 2216 if (!intel_crtc_active(crtc)) 2217 return; 2218 2219 p->active = true; 2220 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal; 2221 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); 2222 p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8; 2223 p->cur.bytes_per_pixel = 4; 2224 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w; 2225 p->cur.horiz_pixels = intel_crtc->cursor_width; 2226 /* TODO: for now, assume primary and cursor planes are always enabled. 
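 *
 * The cursor bytes_per_pixel of 4 set above likewise assumes a 32bpp
 * (ARGB) cursor format.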
*/ 2227 p->pri.enabled = true; 2228 p->cur.enabled = true; 2229 2230 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { 2231 struct intel_plane *intel_plane = to_intel_plane(plane); 2232 2233 if (intel_plane->pipe == pipe) { 2234 p->spr = intel_plane->wm; 2235 break; 2236 } 2237 } 2238 } 2239 2240 static void ilk_compute_wm_config(struct drm_device *dev, 2241 struct intel_wm_config *config) 2242 { 2243 struct intel_crtc *intel_crtc; 2244 2245 /* Compute the currently _active_ config */ 2246 for_each_intel_crtc(dev, intel_crtc) { 2247 const struct intel_pipe_wm *wm = &intel_crtc->wm.active; 2248 2249 if (!wm->pipe_enabled) 2250 continue; 2251 2252 config->sprites_enabled |= wm->sprites_enabled; 2253 config->sprites_scaled |= wm->sprites_scaled; 2254 config->num_pipes_active++; 2255 } 2256 } 2257 2258 /* Compute new watermarks for the pipe */ 2259 static bool intel_compute_pipe_wm(struct drm_crtc *crtc, 2260 const struct ilk_pipe_wm_parameters *params, 2261 struct intel_pipe_wm *pipe_wm) 2262 { 2263 struct drm_device *dev = crtc->dev; 2264 const struct drm_i915_private *dev_priv = dev->dev_private; 2265 int level, max_level = ilk_wm_max_level(dev); 2266 /* LP0 watermark maximums depend on this pipe alone */ 2267 struct intel_wm_config config = { 2268 .num_pipes_active = 1, 2269 .sprites_enabled = params->spr.enabled, 2270 .sprites_scaled = params->spr.scaled, 2271 }; 2272 struct ilk_wm_maximums max; 2273 2274 pipe_wm->pipe_enabled = params->active; 2275 pipe_wm->sprites_enabled = params->spr.enabled; 2276 pipe_wm->sprites_scaled = params->spr.scaled; 2277 2278 /* ILK/SNB: LP2+ watermarks only w/o sprites */ 2279 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled) 2280 max_level = 1; 2281 2282 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ 2283 if (params->spr.scaled) 2284 max_level = 0; 2285 2286 ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]); 2287 2288 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2289 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc); 2290 2291 /* LP0 watermarks always use 1/2 DDB partitioning */ 2292 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); 2293 2294 /* At least LP0 must be valid */ 2295 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) 2296 return false; 2297 2298 ilk_compute_wm_reg_maximums(dev, 1, &max); 2299 2300 for (level = 1; level <= max_level; level++) { 2301 struct intel_wm_level wm = {}; 2302 2303 ilk_compute_wm_level(dev_priv, level, params, &wm); 2304 2305 /* 2306 * Disable any watermark level that exceeds the 2307 * register maximums since such watermarks are 2308 * always invalid. 2309 */ 2310 if (!ilk_validate_wm_level(level, &max, &wm)) 2311 break; 2312 2313 pipe_wm->wm[level] = wm; 2314 } 2315 2316 return true; 2317 } 2318 2319 /* 2320 * Merge the watermarks from all active pipes for a specific level. 2321 */ 2322 static void ilk_merge_wm_level(struct drm_device *dev, 2323 int level, 2324 struct intel_wm_level *ret_wm) 2325 { 2326 struct intel_crtc *intel_crtc; 2327 2328 ret_wm->enable = true; 2329 2330 for_each_intel_crtc(dev, intel_crtc) { 2331 const struct intel_pipe_wm *active = &intel_crtc->wm.active; 2332 const struct intel_wm_level *wm = &active->wm[level]; 2333 2334 if (!active->pipe_enabled) 2335 continue; 2336 2337 /* 2338 * The watermark values may have been used in the past, 2339 * so we must maintain them in the registers for some 2340 * time even if the level is now disabled. 
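 *
 * Hence the merge below keeps the per-field maximum across all
 * active pipes, while the level is reported as enabled only if
 * every active pipe has it enabled.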
2341 */ 2342 if (!wm->enable) 2343 ret_wm->enable = false; 2344 2345 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val); 2346 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val); 2347 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val); 2348 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val); 2349 } 2350 } 2351 2352 /* 2353 * Merge all low power watermarks for all active pipes. 2354 */ 2355 static void ilk_wm_merge(struct drm_device *dev, 2356 const struct intel_wm_config *config, 2357 const struct ilk_wm_maximums *max, 2358 struct intel_pipe_wm *merged) 2359 { 2360 int level, max_level = ilk_wm_max_level(dev); 2361 int last_enabled_level = max_level; 2362 2363 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ 2364 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) && 2365 config->num_pipes_active > 1) 2366 return; 2367 2368 /* ILK: FBC WM must be disabled always */ 2369 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6; 2370 2371 /* merge each WM1+ level */ 2372 for (level = 1; level <= max_level; level++) { 2373 struct intel_wm_level *wm = &merged->wm[level]; 2374 2375 ilk_merge_wm_level(dev, level, wm); 2376 2377 if (level > last_enabled_level) 2378 wm->enable = false; 2379 else if (!ilk_validate_wm_level(level, max, wm)) 2380 /* make sure all following levels get disabled */ 2381 last_enabled_level = level - 1; 2382 2383 /* 2384 * The spec says it is preferred to disable 2385 * FBC WMs instead of disabling a WM level. 2386 */ 2387 if (wm->fbc_val > max->fbc) { 2388 if (wm->enable) 2389 merged->fbc_wm_enabled = false; 2390 wm->fbc_val = 0; 2391 } 2392 } 2393 2394 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ 2395 /* 2396 * FIXME this is racy. FBC might get enabled later. 2397 * What we should check here is whether FBC can be 2398 * enabled sometime later. 2399 */ 2400 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) { 2401 for (level = 2; level <= max_level; level++) { 2402 struct intel_wm_level *wm = &merged->wm[level]; 2403 2404 wm->enable = false; 2405 } 2406 } 2407 } 2408 2409 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) 2410 { 2411 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */ 2412 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); 2413 } 2414 2415 /* The value we need to program into the WM_LPx latency field */ 2416 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level) 2417 { 2418 struct drm_i915_private *dev_priv = dev->dev_private; 2419 2420 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2421 return 2 * level; 2422 else 2423 return dev_priv->wm.pri_latency[level]; 2424 } 2425 2426 static void ilk_compute_wm_results(struct drm_device *dev, 2427 const struct intel_pipe_wm *merged, 2428 enum intel_ddb_partitioning partitioning, 2429 struct ilk_wm_values *results) 2430 { 2431 struct intel_crtc *intel_crtc; 2432 int level, wm_lp; 2433 2434 results->enable_fbc_wm = merged->fbc_wm_enabled; 2435 results->partitioning = partitioning; 2436 2437 /* LP1+ register values */ 2438 for (wm_lp = 1; wm_lp <= 3; wm_lp++) { 2439 const struct intel_wm_level *r; 2440 2441 level = ilk_wm_lp_to_level(wm_lp, merged); 2442 2443 r = &merged->wm[level]; 2444 2445 /* 2446 * Maintain the watermark values even if the level is 2447 * disabled. Doing otherwise could cause underruns. 
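 *
 * The latency, primary and cursor fields are therefore packed
 * unconditionally; only the WM1_LP_SR_EN bit below reflects
 * whether the level is actually enabled.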
2448 */ 2449 results->wm_lp[wm_lp - 1] = 2450 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) | 2451 (r->pri_val << WM1_LP_SR_SHIFT) | 2452 r->cur_val; 2453 2454 if (r->enable) 2455 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN; 2456 2457 if (INTEL_INFO(dev)->gen >= 8) 2458 results->wm_lp[wm_lp - 1] |= 2459 r->fbc_val << WM1_LP_FBC_SHIFT_BDW; 2460 else 2461 results->wm_lp[wm_lp - 1] |= 2462 r->fbc_val << WM1_LP_FBC_SHIFT; 2463 2464 /* 2465 * Always set WM1S_LP_EN when spr_val != 0, even if the 2466 * level is disabled. Doing otherwise could cause underruns. 2467 */ 2468 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) { 2469 WARN_ON(wm_lp != 1); 2470 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val; 2471 } else 2472 results->wm_lp_spr[wm_lp - 1] = r->spr_val; 2473 } 2474 2475 /* LP0 register values */ 2476 for_each_intel_crtc(dev, intel_crtc) { 2477 enum i915_pipe pipe = intel_crtc->pipe; 2478 const struct intel_wm_level *r = 2479 &intel_crtc->wm.active.wm[0]; 2480 2481 if (WARN_ON(!r->enable)) 2482 continue; 2483 2484 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime; 2485 2486 results->wm_pipe[pipe] = 2487 (r->pri_val << WM0_PIPE_PLANE_SHIFT) | 2488 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) | 2489 r->cur_val; 2490 } 2491 } 2492 2493 /* Find the result with the highest level enabled. Check for enable_fbc_wm in 2494 * case both are at the same level. Prefer r1 in case they're the same. */ 2495 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev, 2496 struct intel_pipe_wm *r1, 2497 struct intel_pipe_wm *r2) 2498 { 2499 int level, max_level = ilk_wm_max_level(dev); 2500 int level1 = 0, level2 = 0; 2501 2502 for (level = 1; level <= max_level; level++) { 2503 if (r1->wm[level].enable) 2504 level1 = level; 2505 if (r2->wm[level].enable) 2506 level2 = level; 2507 } 2508 2509 if (level1 == level2) { 2510 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled) 2511 return r2; 2512 else 2513 return r1; 2514 } else if (level1 > level2) { 2515 return r1; 2516 } else { 2517 return r2; 2518 } 2519 } 2520 2521 /* dirty bits used to track which watermarks need changes */ 2522 #define WM_DIRTY_PIPE(pipe) (1 << (pipe)) 2523 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe))) 2524 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp))) 2525 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3)) 2526 #define WM_DIRTY_FBC (1 << 24) 2527 #define WM_DIRTY_DDB (1 << 25) 2528 2529 static unsigned int ilk_compute_wm_dirty(struct drm_device *dev, 2530 const struct ilk_wm_values *old, 2531 const struct ilk_wm_values *new) 2532 { 2533 unsigned int dirty = 0; 2534 enum i915_pipe pipe; 2535 int wm_lp; 2536 2537 for_each_pipe(pipe) { 2538 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) { 2539 dirty |= WM_DIRTY_LINETIME(pipe); 2540 /* Must disable LP1+ watermarks too */ 2541 dirty |= WM_DIRTY_LP_ALL; 2542 } 2543 2544 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) { 2545 dirty |= WM_DIRTY_PIPE(pipe); 2546 /* Must disable LP1+ watermarks too */ 2547 dirty |= WM_DIRTY_LP_ALL; 2548 } 2549 } 2550 2551 if (old->enable_fbc_wm != new->enable_fbc_wm) { 2552 dirty |= WM_DIRTY_FBC; 2553 /* Must disable LP1+ watermarks too */ 2554 dirty |= WM_DIRTY_LP_ALL; 2555 } 2556 2557 if (old->partitioning != new->partitioning) { 2558 dirty |= WM_DIRTY_DDB; 2559 /* Must disable LP1+ watermarks too */ 2560 dirty |= WM_DIRTY_LP_ALL; 2561 } 2562 2563 /* LP1+ watermarks already deemed dirty, no need to continue */ 2564 if (dirty & WM_DIRTY_LP_ALL) 2565 return dirty; 2566 2567 /* Find the lowest numbered 
LP1+ watermark in need of an update... */ 2568 for (wm_lp = 1; wm_lp <= 3; wm_lp++) { 2569 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] || 2570 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1]) 2571 break; 2572 } 2573 2574 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */ 2575 for (; wm_lp <= 3; wm_lp++) 2576 dirty |= WM_DIRTY_LP(wm_lp); 2577 2578 return dirty; 2579 } 2580 2581 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, 2582 unsigned int dirty) 2583 { 2584 struct ilk_wm_values *previous = &dev_priv->wm.hw; 2585 bool changed = false; 2586 2587 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) { 2588 previous->wm_lp[2] &= ~WM1_LP_SR_EN; 2589 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]); 2590 changed = true; 2591 } 2592 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) { 2593 previous->wm_lp[1] &= ~WM1_LP_SR_EN; 2594 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]); 2595 changed = true; 2596 } 2597 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) { 2598 previous->wm_lp[0] &= ~WM1_LP_SR_EN; 2599 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]); 2600 changed = true; 2601 } 2602 2603 /* 2604 * Don't touch WM1S_LP_EN here. 2605 * Doing so could cause underruns. 2606 */ 2607 2608 return changed; 2609 } 2610 2611 /* 2612 * The spec says we shouldn't write when we don't need, because every write 2613 * causes WMs to be re-evaluated, expending some power. 2614 */ 2615 static void ilk_write_wm_values(struct drm_i915_private *dev_priv, 2616 struct ilk_wm_values *results) 2617 { 2618 struct drm_device *dev = dev_priv->dev; 2619 struct ilk_wm_values *previous = &dev_priv->wm.hw; 2620 unsigned int dirty; 2621 uint32_t val; 2622 2623 dirty = ilk_compute_wm_dirty(dev, previous, results); 2624 if (!dirty) 2625 return; 2626 2627 _ilk_disable_lp_wm(dev_priv, dirty); 2628 2629 if (dirty & WM_DIRTY_PIPE(PIPE_A)) 2630 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]); 2631 if (dirty & WM_DIRTY_PIPE(PIPE_B)) 2632 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]); 2633 if (dirty & WM_DIRTY_PIPE(PIPE_C)) 2634 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]); 2635 2636 if (dirty & WM_DIRTY_LINETIME(PIPE_A)) 2637 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]); 2638 if (dirty & WM_DIRTY_LINETIME(PIPE_B)) 2639 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]); 2640 if (dirty & WM_DIRTY_LINETIME(PIPE_C)) 2641 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]); 2642 2643 if (dirty & WM_DIRTY_DDB) { 2644 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 2645 val = I915_READ(WM_MISC); 2646 if (results->partitioning == INTEL_DDB_PART_1_2) 2647 val &= ~WM_MISC_DATA_PARTITION_5_6; 2648 else 2649 val |= WM_MISC_DATA_PARTITION_5_6; 2650 I915_WRITE(WM_MISC, val); 2651 } else { 2652 val = I915_READ(DISP_ARB_CTL2); 2653 if (results->partitioning == INTEL_DDB_PART_1_2) 2654 val &= ~DISP_DATA_PARTITION_5_6; 2655 else 2656 val |= DISP_DATA_PARTITION_5_6; 2657 I915_WRITE(DISP_ARB_CTL2, val); 2658 } 2659 } 2660 2661 if (dirty & WM_DIRTY_FBC) { 2662 val = I915_READ(DISP_ARB_CTL); 2663 if (results->enable_fbc_wm) 2664 val &= ~DISP_FBC_WM_DIS; 2665 else 2666 val |= DISP_FBC_WM_DIS; 2667 I915_WRITE(DISP_ARB_CTL, val); 2668 } 2669 2670 if (dirty & WM_DIRTY_LP(1) && 2671 previous->wm_lp_spr[0] != results->wm_lp_spr[0]) 2672 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); 2673 2674 if (INTEL_INFO(dev)->gen >= 7) { 2675 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) 2676 I915_WRITE(WM2S_LP_IVB, 
results->wm_lp_spr[1]); 2677 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) 2678 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]); 2679 } 2680 2681 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0]) 2682 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); 2683 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1]) 2684 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); 2685 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) 2686 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); 2687 2688 dev_priv->wm.hw = *results; 2689 } 2690 2691 static bool ilk_disable_lp_wm(struct drm_device *dev) 2692 { 2693 struct drm_i915_private *dev_priv = dev->dev_private; 2694 2695 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); 2696 } 2697 2698 static void ilk_update_wm(struct drm_crtc *crtc) 2699 { 2700 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2701 struct drm_device *dev = crtc->dev; 2702 struct drm_i915_private *dev_priv = dev->dev_private; 2703 struct ilk_wm_maximums max; 2704 struct ilk_pipe_wm_parameters params = {}; 2705 struct ilk_wm_values results = {}; 2706 enum intel_ddb_partitioning partitioning; 2707 struct intel_pipe_wm pipe_wm = {}; 2708 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; 2709 struct intel_wm_config config = {}; 2710 2711 ilk_compute_wm_parameters(crtc, &params); 2712 2713 intel_compute_pipe_wm(crtc, &params, &pipe_wm); 2714 2715 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm))) 2716 return; 2717 2718 intel_crtc->wm.active = pipe_wm; 2719 2720 ilk_compute_wm_config(dev, &config); 2721 2722 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); 2723 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2); 2724 2725 /* 5/6 split only in single pipe config on IVB+ */ 2726 if (INTEL_INFO(dev)->gen >= 7 && 2727 config.num_pipes_active == 1 && config.sprites_enabled) { 2728 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); 2729 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6); 2730 2731 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); 2732 } else { 2733 best_lp_wm = &lp_wm_1_2; 2734 } 2735 2736 partitioning = (best_lp_wm == &lp_wm_1_2) ? 2737 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; 2738 2739 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results); 2740 2741 ilk_write_wm_values(dev_priv, &results); 2742 } 2743 2744 static void 2745 ilk_update_sprite_wm(struct drm_plane *plane, 2746 struct drm_crtc *crtc, 2747 uint32_t sprite_width, uint32_t sprite_height, 2748 int pixel_size, bool enabled, bool scaled) 2749 { 2750 struct drm_device *dev = plane->dev; 2751 struct intel_plane *intel_plane = to_intel_plane(plane); 2752 2753 intel_plane->wm.enabled = enabled; 2754 intel_plane->wm.scaled = scaled; 2755 intel_plane->wm.horiz_pixels = sprite_width; 2756 intel_plane->wm.vert_pixels = sprite_height; 2757 intel_plane->wm.bytes_per_pixel = pixel_size; 2758 2759 /* 2760 * IVB workaround: must disable low power watermarks for at least 2761 * one frame before enabling scaling. LP watermarks can be re-enabled 2762 * when scaling is disabled.
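 * The vblank wait below gives the newly disabled LP watermarks a
 * full frame to take effect before the scaled sprite is enabled.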
2763 * 2764 * WaCxSRDisabledForSpriteScaling:ivb 2765 */ 2766 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev)) 2767 intel_wait_for_vblank(dev, intel_plane->pipe); 2768 2769 ilk_update_wm(crtc); 2770 } 2771 2772 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) 2773 { 2774 struct drm_device *dev = crtc->dev; 2775 struct drm_i915_private *dev_priv = dev->dev_private; 2776 struct ilk_wm_values *hw = &dev_priv->wm.hw; 2777 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2778 struct intel_pipe_wm *active = &intel_crtc->wm.active; 2779 enum i915_pipe pipe = intel_crtc->pipe; 2780 static const unsigned int wm0_pipe_reg[] = { 2781 [PIPE_A] = WM0_PIPEA_ILK, 2782 [PIPE_B] = WM0_PIPEB_ILK, 2783 [PIPE_C] = WM0_PIPEC_IVB, 2784 }; 2785 2786 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]); 2787 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2788 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); 2789 2790 active->pipe_enabled = intel_crtc_active(crtc); 2791 2792 if (active->pipe_enabled) { 2793 u32 tmp = hw->wm_pipe[pipe]; 2794 2795 /* 2796 * For active pipes LP0 watermark is marked as 2797 * enabled, and LP1+ watermarks as disabled since 2798 * we can't really reverse compute them in case 2799 * multiple pipes are active. 2800 */ 2801 active->wm[0].enable = true; 2802 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT; 2803 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT; 2804 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK; 2805 active->linetime = hw->wm_linetime[pipe]; 2806 } else { 2807 int level, max_level = ilk_wm_max_level(dev); 2808 2809 /* 2810 * For inactive pipes, all watermark levels 2811 * should be marked as enabled but zeroed, 2812 * which is what we'd compute them to be. 2813 */ 2814 for (level = 0; level <= max_level; level++) 2815 active->wm[level].enable = true; 2816 } 2817 } 2818 2819 void ilk_wm_get_hw_state(struct drm_device *dev) 2820 { 2821 struct drm_i915_private *dev_priv = dev->dev_private; 2822 struct ilk_wm_values *hw = &dev_priv->wm.hw; 2823 struct drm_crtc *crtc; 2824 2825 for_each_crtc(dev, crtc) 2826 ilk_pipe_wm_get_hw_state(crtc); 2827 2828 hw->wm_lp[0] = I915_READ(WM1_LP_ILK); 2829 hw->wm_lp[1] = I915_READ(WM2_LP_ILK); 2830 hw->wm_lp[2] = I915_READ(WM3_LP_ILK); 2831 2832 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK); 2833 if (INTEL_INFO(dev)->gen >= 7) { 2834 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); 2835 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); 2836 } 2837 2838 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2839 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? 2840 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 2841 else if (IS_IVYBRIDGE(dev)) 2842 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? 2843 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 2844 2845 hw->enable_fbc_wm = 2846 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); 2847 } 2848 2849 /** 2850 * intel_update_watermarks - update FIFO watermark values based on current modes 2851 * 2852 * Calculate watermark values for the various WM regs based on current mode 2853 * and plane configuration. 2854 * 2855 * There are several cases to deal with here: 2856 * - normal (i.e.
non-self-refresh) 2857 * - self-refresh (SR) mode 2858 * - lines are large relative to FIFO size (buffer can hold up to 2) 2859 * - lines are small relative to FIFO size (buffer can hold more than 2 2860 * lines), so need to account for TLB latency 2861 * 2862 * The normal calculation is: 2863 * watermark = dotclock * bytes per pixel * latency 2864 * where latency is platform & configuration dependent (we assume pessimal 2865 * values here). 2866 * 2867 * The SR calculation is: 2868 * watermark = (trunc(latency/line time)+1) * surface width * 2869 * bytes per pixel 2870 * where 2871 * line time = htotal / dotclock 2872 * surface width = hdisplay for normal plane and 64 for cursor 2873 * and latency is assumed to be high, as above. 2874 * 2875 * The final value programmed to the register should always be rounded up, 2876 * and include an extra 2 entries to account for clock crossings. 2877 * 2878 * We don't use the sprite, so we can ignore that. And on Crestline we have 2879 * to set the non-SR watermarks to 8. 2880 */ 2881 void intel_update_watermarks(struct drm_crtc *crtc) 2882 { 2883 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 2884 2885 if (dev_priv->display.update_wm) 2886 dev_priv->display.update_wm(crtc); 2887 } 2888 2889 void intel_update_sprite_watermarks(struct drm_plane *plane, 2890 struct drm_crtc *crtc, 2891 uint32_t sprite_width, 2892 uint32_t sprite_height, 2893 int pixel_size, 2894 bool enabled, bool scaled) 2895 { 2896 struct drm_i915_private *dev_priv = plane->dev->dev_private; 2897 2898 if (dev_priv->display.update_sprite_wm) 2899 dev_priv->display.update_sprite_wm(plane, crtc, 2900 sprite_width, sprite_height, 2901 pixel_size, enabled, scaled); 2902 } 2903 2904 static struct drm_i915_gem_object * 2905 intel_alloc_context_page(struct drm_device *dev) 2906 { 2907 struct drm_i915_gem_object *ctx; 2908 int ret; 2909 2910 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 2911 2912 ctx = i915_gem_alloc_object(dev, 4096); 2913 if (!ctx) { 2914 DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); 2915 return NULL; 2916 } 2917 2918 ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0); 2919 if (ret) { 2920 DRM_ERROR("failed to pin power context: %d\n", ret); 2921 goto err_unref; 2922 } 2923 2924 ret = i915_gem_object_set_to_gtt_domain(ctx, 1); 2925 if (ret) { 2926 DRM_ERROR("failed to set-domain on power context: %d\n", ret); 2927 goto err_unpin; 2928 } 2929 2930 return ctx; 2931 2932 err_unpin: 2933 i915_gem_object_ggtt_unpin(ctx); 2934 err_unref: 2935 drm_gem_object_unreference(&ctx->base); 2936 return NULL; 2937 } 2938 2939 /** 2940 * Lock protecting IPS related data structures 2941 */ 2942 struct lock mchdev_lock; 2943 LOCK_SYSINIT(mchdev, &mchdev_lock, "mchdev", LK_CANRECURSE); 2944 2945 /* Global for IPS driver to get at the current i915 device. Protected by 2946 * mchdev_lock. 
*/ 2947 static struct drm_i915_private *i915_mch_dev; 2948 2949 bool ironlake_set_drps(struct drm_device *dev, u8 val) 2950 { 2951 struct drm_i915_private *dev_priv = dev->dev_private; 2952 u16 rgvswctl; 2953 2954 assert_spin_locked(&mchdev_lock); 2955 2956 rgvswctl = I915_READ16(MEMSWCTL); 2957 if (rgvswctl & MEMCTL_CMD_STS) { 2958 DRM_DEBUG("gpu busy, RCS change rejected\n"); 2959 return false; /* still busy with another command */ 2960 } 2961 2962 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 2963 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; 2964 I915_WRITE16(MEMSWCTL, rgvswctl); 2965 POSTING_READ16(MEMSWCTL); 2966 2967 rgvswctl |= MEMCTL_CMD_STS; 2968 I915_WRITE16(MEMSWCTL, rgvswctl); 2969 2970 return true; 2971 } 2972 2973 static void ironlake_enable_drps(struct drm_device *dev) 2974 { 2975 struct drm_i915_private *dev_priv = dev->dev_private; 2976 u32 rgvmodectl = I915_READ(MEMMODECTL); 2977 u8 fmax, fmin, fstart, vstart; 2978 2979 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 2980 2981 /* Enable temp reporting */ 2982 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); 2983 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); 2984 2985 /* 100ms RC evaluation intervals */ 2986 I915_WRITE(RCUPEI, 100000); 2987 I915_WRITE(RCDNEI, 100000); 2988 2989 /* Set max/min thresholds to 90ms and 80ms respectively */ 2990 I915_WRITE(RCBMAXAVG, 90000); 2991 I915_WRITE(RCBMINAVG, 80000); 2992 2993 I915_WRITE(MEMIHYST, 1); 2994 2995 /* Set up min, max, and cur for interrupt handling */ 2996 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; 2997 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 2998 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 2999 MEMMODE_FSTART_SHIFT; 3000 3001 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> 3002 PXVFREQ_PX_SHIFT; 3003 3004 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */ 3005 dev_priv->ips.fstart = fstart; 3006 3007 dev_priv->ips.max_delay = fstart; 3008 dev_priv->ips.min_delay = fmin; 3009 dev_priv->ips.cur_delay = fstart; 3010 3011 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", 3012 fmax, fmin, fstart); 3013 3014 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 3015 3016 /* 3017 * Interrupts will be enabled in ironlake_irq_postinstall 3018 */ 3019 3020 I915_WRITE(VIDSTART, vstart); 3021 POSTING_READ(VIDSTART); 3022 3023 rgvmodectl |= MEMMODE_SWMODE_EN; 3024 I915_WRITE(MEMMODECTL, rgvmodectl); 3025 3026 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) 3027 DRM_ERROR("stuck trying to change perf mode\n"); 3028 mdelay(1); 3029 3030 ironlake_set_drps(dev, fstart); 3031 3032 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + 3033 I915_READ(0x112e0); 3034 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); 3035 dev_priv->ips.last_count2 = I915_READ(0x112f4); 3036 getrawmonotonic(&dev_priv->ips.last_time2); 3037 3038 lockmgr(&mchdev_lock, LK_RELEASE); 3039 } 3040 3041 static void ironlake_disable_drps(struct drm_device *dev) 3042 { 3043 struct drm_i915_private *dev_priv = dev->dev_private; 3044 u16 rgvswctl; 3045 3046 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 3047 3048 rgvswctl = I915_READ16(MEMSWCTL); 3049 3050 /* Ack interrupts, disable EFC interrupt */ 3051 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); 3052 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); 3053 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); 3054 I915_WRITE(DEIIR, DE_PCU_EVENT); 3055 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); 3056 3057 /* Go back to the starting frequency */ 3058 
ironlake_set_drps(dev, dev_priv->ips.fstart); 3059 mdelay(1); 3060 rgvswctl |= MEMCTL_CMD_STS; 3061 I915_WRITE(MEMSWCTL, rgvswctl); 3062 mdelay(1); 3063 3064 lockmgr(&mchdev_lock, LK_RELEASE); 3065 } 3066 3067 /* There's a funny hw issue where the hw returns all 0 when reading from 3068 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value 3069 * ourselves, instead of doing a rmw cycle (which might result in us clearing 3070 * all limits and the gpu stuck at whatever frequency it is at atm). 3071 */ 3072 static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val) 3073 { 3074 u32 limits; 3075 3076 /* Only set the down limit when we've reached the lowest level to avoid 3077 * getting more interrupts, otherwise leave this clear. This prevents a 3078 * race in the hw when coming out of rc6: There's a tiny window where 3079 * the hw runs at the minimal clock before selecting the desired 3080 * frequency, if the down threshold expires in that window we will not 3081 * receive a down interrupt. */ 3082 limits = dev_priv->rps.max_freq_softlimit << 24; 3083 if (val <= dev_priv->rps.min_freq_softlimit) 3084 limits |= dev_priv->rps.min_freq_softlimit << 16; 3085 3086 return limits; 3087 } 3088 3089 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) 3090 { 3091 int new_power; 3092 3093 new_power = dev_priv->rps.power; 3094 switch (dev_priv->rps.power) { 3095 case LOW_POWER: 3096 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq) 3097 new_power = BETWEEN; 3098 break; 3099 3100 case BETWEEN: 3101 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq) 3102 new_power = LOW_POWER; 3103 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq) 3104 new_power = HIGH_POWER; 3105 break; 3106 3107 case HIGH_POWER: 3108 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq) 3109 new_power = BETWEEN; 3110 break; 3111 } 3112 /* Max/min bins are special */ 3113 if (val == dev_priv->rps.min_freq_softlimit) 3114 new_power = LOW_POWER; 3115 if (val == dev_priv->rps.max_freq_softlimit) 3116 new_power = HIGH_POWER; 3117 if (new_power == dev_priv->rps.power) 3118 return; 3119 3120 /* Note the units here are not exactly 1us, but 1280ns. 
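 *
 * E.g. the LOW_POWER up interval below is 12500 * 1280ns = 16ms and
 * the down interval 25000 * 1280ns = 32ms, which is what the
 * busy-percentage comments on each write refer to.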
*/ 3121 switch (new_power) { 3122 case LOW_POWER: 3123 /* Upclock if more than 95% busy over 16ms */ 3124 I915_WRITE(GEN6_RP_UP_EI, 12500); 3125 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800); 3126 3127 /* Downclock if less than 85% busy over 32ms */ 3128 I915_WRITE(GEN6_RP_DOWN_EI, 25000); 3129 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250); 3130 3131 I915_WRITE(GEN6_RP_CONTROL, 3132 GEN6_RP_MEDIA_TURBO | 3133 GEN6_RP_MEDIA_HW_NORMAL_MODE | 3134 GEN6_RP_MEDIA_IS_GFX | 3135 GEN6_RP_ENABLE | 3136 GEN6_RP_UP_BUSY_AVG | 3137 GEN6_RP_DOWN_IDLE_AVG); 3138 break; 3139 3140 case BETWEEN: 3141 /* Upclock if more than 90% busy over 13ms */ 3142 I915_WRITE(GEN6_RP_UP_EI, 10250); 3143 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225); 3144 3145 /* Downclock if less than 75% busy over 32ms */ 3146 I915_WRITE(GEN6_RP_DOWN_EI, 25000); 3147 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750); 3148 3149 I915_WRITE(GEN6_RP_CONTROL, 3150 GEN6_RP_MEDIA_TURBO | 3151 GEN6_RP_MEDIA_HW_NORMAL_MODE | 3152 GEN6_RP_MEDIA_IS_GFX | 3153 GEN6_RP_ENABLE | 3154 GEN6_RP_UP_BUSY_AVG | 3155 GEN6_RP_DOWN_IDLE_AVG); 3156 break; 3157 3158 case HIGH_POWER: 3159 /* Upclock if more than 85% busy over 10ms */ 3160 I915_WRITE(GEN6_RP_UP_EI, 8000); 3161 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800); 3162 3163 /* Downclock if less than 60% busy over 32ms */ 3164 I915_WRITE(GEN6_RP_DOWN_EI, 25000); 3165 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000); 3166 3167 I915_WRITE(GEN6_RP_CONTROL, 3168 GEN6_RP_MEDIA_TURBO | 3169 GEN6_RP_MEDIA_HW_NORMAL_MODE | 3170 GEN6_RP_MEDIA_IS_GFX | 3171 GEN6_RP_ENABLE | 3172 GEN6_RP_UP_BUSY_AVG | 3173 GEN6_RP_DOWN_IDLE_AVG); 3174 break; 3175 } 3176 3177 dev_priv->rps.power = new_power; 3178 dev_priv->rps.last_adj = 0; 3179 } 3180 3181 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) 3182 { 3183 u32 mask = 0; 3184 3185 if (val > dev_priv->rps.min_freq_softlimit) 3186 mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; 3187 if (val < dev_priv->rps.max_freq_softlimit) 3188 mask |= GEN6_PM_RP_UP_THRESHOLD; 3189 3190 mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED); 3191 mask &= dev_priv->pm_rps_events; 3192 3193 /* IVB and SNB hard hangs on looping batchbuffer 3194 * if GEN6_PM_UP_EI_EXPIRED is masked. 3195 */ 3196 if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev)) 3197 mask |= GEN6_PM_RP_UP_EI_EXPIRED; 3198 3199 if (IS_GEN8(dev_priv->dev)) 3200 mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP; 3201 3202 return ~mask; 3203 } 3204 3205 /* gen6_set_rps is called to update the frequency request, but should also be 3206 * called when the range (min_delay and max_delay) is modified so that we can 3207 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ 3208 void gen6_set_rps(struct drm_device *dev, u8 val) 3209 { 3210 struct drm_i915_private *dev_priv = dev->dev_private; 3211 3212 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3213 WARN_ON(val > dev_priv->rps.max_freq_softlimit); 3214 WARN_ON(val < dev_priv->rps.min_freq_softlimit); 3215 3216 /* min/max delay may still have been modified so be sure to 3217 * write the limits value. 
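 *
 * gen6_rps_limits() above packs max_freq_softlimit into bits 31:24
 * and, only once val has reached the floor, min_freq_softlimit into
 * bits 23:16.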
3218 */ 3219 if (val != dev_priv->rps.cur_freq) { 3220 gen6_set_rps_thresholds(dev_priv, val); 3221 3222 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 3223 I915_WRITE(GEN6_RPNSWREQ, 3224 HSW_FREQUENCY(val)); 3225 else 3226 I915_WRITE(GEN6_RPNSWREQ, 3227 GEN6_FREQUENCY(val) | 3228 GEN6_OFFSET(0) | 3229 GEN6_AGGRESSIVE_TURBO); 3230 } 3231 3232 /* Make sure we continue to get interrupts 3233 * until we hit the minimum or maximum frequencies. 3234 */ 3235 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val)); 3236 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); 3237 3238 POSTING_READ(GEN6_RPNSWREQ); 3239 3240 dev_priv->rps.cur_freq = val; 3241 trace_intel_gpu_freq_change(val * 50); 3242 } 3243 3244 /* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down 3245 * 3246 * If Gfx is Idle, then 3247 * 1. Mask Turbo interrupts 3248 * 2. Bring up Gfx clock 3249 * 3. Change the freq to Rpn and wait till P-Unit updates freq 3250 * 4. Clear the Force GFX CLK ON bit so that Gfx clock can go down 3251 * 5. Unmask Turbo interrupts 3252 */ 3253 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) 3254 { 3255 int revision; 3256 3257 struct drm_device *dev = dev_priv->dev; 3258 3259 /* Latest VLV doesn't need to force the gfx clock */ 3260 revision = pci_read_config(dev->dev, PCIR_REVID, 1); 3261 if (revision >= 0xd) { 3262 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); 3263 return; 3264 } 3265 3266 /* 3267 * When we are idle, drop to min voltage state. 3268 */ 3269 3270 if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit) 3271 return; 3272 3273 /* Mask turbo interrupts so that they will not come in between */ 3274 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 3275 3276 vlv_force_gfx_clock(dev_priv, true); 3277 3278 dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit; 3279 3280 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, 3281 dev_priv->rps.min_freq_softlimit); 3282 3283 if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) 3284 & GENFREQSTATUS) == 0, 5)) 3285 DRM_ERROR("timed out waiting for Punit\n"); 3286 3287 vlv_force_gfx_clock(dev_priv, false); 3288 3289 I915_WRITE(GEN6_PMINTRMSK, 3290 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); 3291 } 3292 3293 void gen6_rps_idle(struct drm_i915_private *dev_priv) 3294 { 3295 struct drm_device *dev = dev_priv->dev; 3296 3297 mutex_lock(&dev_priv->rps.hw_lock); 3298 if (dev_priv->rps.enabled) { 3299 if (IS_CHERRYVIEW(dev)) 3300 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); 3301 else if (IS_VALLEYVIEW(dev)) 3302 vlv_set_rps_idle(dev_priv); 3303 else 3304 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); 3305 dev_priv->rps.last_adj = 0; 3306 } 3307 mutex_unlock(&dev_priv->rps.hw_lock); 3308 } 3309 3310 void gen6_rps_boost(struct drm_i915_private *dev_priv) 3311 { 3312 struct drm_device *dev = dev_priv->dev; 3313 3314 mutex_lock(&dev_priv->rps.hw_lock); 3315 if (dev_priv->rps.enabled) { 3316 if (IS_VALLEYVIEW(dev)) 3317 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit); 3318 else 3319 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit); 3320 dev_priv->rps.last_adj = 0; 3321 } 3322 mutex_unlock(&dev_priv->rps.hw_lock); 3323 } 3324 3325 void valleyview_set_rps(struct drm_device *dev, u8 val) 3326 { 3327 struct drm_i915_private *dev_priv = dev->dev_private; 3328 3329 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3330 WARN_ON(val > dev_priv->rps.max_freq_softlimit); 3331 WARN_ON(val <
dev_priv->rps.min_freq_softlimit); 3332 3333 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n", 3334 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq), 3335 dev_priv->rps.cur_freq, 3336 vlv_gpu_freq(dev_priv, val), val); 3337 3338 if (val != dev_priv->rps.cur_freq) 3339 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); 3340 3341 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); 3342 3343 dev_priv->rps.cur_freq = val; 3344 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val)); 3345 } 3346 3347 static void gen8_disable_rps_interrupts(struct drm_device *dev) 3348 { 3349 struct drm_i915_private *dev_priv = dev->dev_private; 3350 3351 I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP); 3352 I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) & 3353 ~dev_priv->pm_rps_events); 3354 /* Complete PM interrupt masking here doesn't race with the rps work 3355 * item again unmasking PM interrupts because that is using a different 3356 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in 3357 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which 3358 * gen8_enable_rps will clean up. */ 3359 3360 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3361 dev_priv->rps.pm_iir = 0; 3362 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3363 3364 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events); 3365 } 3366 3367 static void gen6_disable_rps_interrupts(struct drm_device *dev) 3368 { 3369 struct drm_i915_private *dev_priv = dev->dev_private; 3370 3371 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 3372 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & 3373 ~dev_priv->pm_rps_events); 3374 /* Complete PM interrupt masking here doesn't race with the rps work 3375 * item again unmasking PM interrupts because that is using a different 3376 * register (PMIMR) to mask PM interrupts. The only risk is in leaving 3377 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ 3378 3379 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3380 dev_priv->rps.pm_iir = 0; 3381 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3382 3383 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events); 3384 } 3385 3386 static void gen6_disable_rps(struct drm_device *dev) 3387 { 3388 struct drm_i915_private *dev_priv = dev->dev_private; 3389 3390 I915_WRITE(GEN6_RC_CONTROL, 0); 3391 I915_WRITE(GEN6_RPNSWREQ, 1 << 31); 3392 3393 if (IS_BROADWELL(dev)) 3394 gen8_disable_rps_interrupts(dev); 3395 else 3396 gen6_disable_rps_interrupts(dev); 3397 } 3398 3399 static void cherryview_disable_rps(struct drm_device *dev) 3400 { 3401 struct drm_i915_private *dev_priv = dev->dev_private; 3402 3403 I915_WRITE(GEN6_RC_CONTROL, 0); 3404 3405 gen8_disable_rps_interrupts(dev); 3406 } 3407 3408 static void valleyview_disable_rps(struct drm_device *dev) 3409 { 3410 struct drm_i915_private *dev_priv = dev->dev_private; 3411 3412 I915_WRITE(GEN6_RC_CONTROL, 0); 3413 3414 gen6_disable_rps_interrupts(dev); 3415 } 3416 3417 static void intel_print_rc6_info(struct drm_device *dev, u32 mode) 3418 { 3419 if (IS_VALLEYVIEW(dev)) { 3420 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1))) 3421 mode = GEN6_RC_CTL_RC6_ENABLE; 3422 else 3423 mode = 0; 3424 } 3425 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", 3426 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", 3427 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", 3428 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? 
"on" : "off"); 3429 } 3430 3431 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) 3432 { 3433 /* No RC6 before Ironlake */ 3434 if (INTEL_INFO(dev)->gen < 5) 3435 return 0; 3436 3437 /* RC6 is only on Ironlake mobile not on desktop */ 3438 if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev)) 3439 return 0; 3440 3441 /* Respect the kernel parameter if it is set */ 3442 if (enable_rc6 >= 0) { 3443 int mask; 3444 3445 if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) 3446 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | 3447 INTEL_RC6pp_ENABLE; 3448 else 3449 mask = INTEL_RC6_ENABLE; 3450 3451 if ((enable_rc6 & mask) != enable_rc6) 3452 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n", 3453 enable_rc6 & mask, enable_rc6, mask); 3454 3455 return enable_rc6 & mask; 3456 } 3457 3458 /* Disable RC6 on Ironlake */ 3459 if (INTEL_INFO(dev)->gen == 5) 3460 return 0; 3461 3462 if (IS_IVYBRIDGE(dev)) 3463 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 3464 3465 return INTEL_RC6_ENABLE; 3466 } 3467 3468 int intel_enable_rc6(const struct drm_device *dev) 3469 { 3470 return i915.enable_rc6; 3471 } 3472 3473 static void gen8_enable_rps_interrupts(struct drm_device *dev) 3474 { 3475 struct drm_i915_private *dev_priv = dev->dev_private; 3476 3477 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3478 WARN_ON(dev_priv->rps.pm_iir); 3479 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 3480 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events); 3481 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3482 } 3483 3484 static void gen6_enable_rps_interrupts(struct drm_device *dev) 3485 { 3486 struct drm_i915_private *dev_priv = dev->dev_private; 3487 3488 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3489 WARN_ON(dev_priv->rps.pm_iir); 3490 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 3491 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events); 3492 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3493 } 3494 3495 static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap) 3496 { 3497 /* All of these values are in units of 50MHz */ 3498 dev_priv->rps.cur_freq = 0; 3499 /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */ 3500 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; 3501 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; 3502 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff; 3503 /* XXX: only BYT has a special efficient freq */ 3504 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; 3505 /* hw_max = RP0 until we check for overclocking */ 3506 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; 3507 3508 /* Preserve min/max settings in case of re-init */ 3509 if (dev_priv->rps.max_freq_softlimit == 0) 3510 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 3511 3512 if (dev_priv->rps.min_freq_softlimit == 0) 3513 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; 3514 } 3515 3516 static void gen8_enable_rps(struct drm_device *dev) 3517 { 3518 struct drm_i915_private *dev_priv = dev->dev_private; 3519 struct intel_engine_cs *ring; 3520 uint32_t rc6_mask = 0, rp_state_cap; 3521 int unused; 3522 3523 /* 1a: Software RC state - RC0 */ 3524 I915_WRITE(GEN6_RC_STATE, 0); 3525 3526 /* 1c & 1d: Get forcewake during program sequence. Although the driver 3527 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ 3528 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 3529 3530 /* 2a: Disable RC states. 
*/ 3531 I915_WRITE(GEN6_RC_CONTROL, 0); 3532 3533 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 3534 parse_rp_state_cap(dev_priv, rp_state_cap); 3535 3536 /* 2b: Program RC6 thresholds.*/ 3537 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); 3538 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ 3539 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ 3540 for_each_ring(ring, dev_priv, unused) 3541 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 3542 I915_WRITE(GEN6_RC_SLEEP, 0); 3543 if (IS_BROADWELL(dev)) 3544 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ 3545 else 3546 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ 3547 3548 /* 3: Enable RC6 */ 3549 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 3550 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 3551 intel_print_rc6_info(dev, rc6_mask); 3552 if (IS_BROADWELL(dev)) 3553 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 3554 GEN7_RC_CTL_TO_MODE | 3555 rc6_mask); 3556 else 3557 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 3558 GEN6_RC_CTL_EI_MODE(1) | 3559 rc6_mask); 3560 3561 /* 4 Program defaults and thresholds for RPS*/ 3562 I915_WRITE(GEN6_RPNSWREQ, 3563 HSW_FREQUENCY(dev_priv->rps.rp1_freq)); 3564 I915_WRITE(GEN6_RC_VIDEO_FREQ, 3565 HSW_FREQUENCY(dev_priv->rps.rp1_freq)); 3566 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */ 3567 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */ 3568 3569 /* Docs recommend 900MHz, and 300 MHz respectively */ 3570 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 3571 dev_priv->rps.max_freq_softlimit << 24 | 3572 dev_priv->rps.min_freq_softlimit << 16); 3573 3574 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */ 3575 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/ 3576 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */ 3577 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */ 3578 3579 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 3580 3581 /* 5: Enable RPS */ 3582 I915_WRITE(GEN6_RP_CONTROL, 3583 GEN6_RP_MEDIA_TURBO | 3584 GEN6_RP_MEDIA_HW_NORMAL_MODE | 3585 GEN6_RP_MEDIA_IS_GFX | 3586 GEN6_RP_ENABLE | 3587 GEN6_RP_UP_BUSY_AVG | 3588 GEN6_RP_DOWN_IDLE_AVG); 3589 3590 /* 6: Ring frequency + overclocking (our driver does this later */ 3591 3592 gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8); 3593 3594 gen8_enable_rps_interrupts(dev); 3595 3596 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 3597 } 3598 3599 static void gen6_enable_rps(struct drm_device *dev) 3600 { 3601 struct drm_i915_private *dev_priv = dev->dev_private; 3602 struct intel_engine_cs *ring; 3603 u32 rp_state_cap; 3604 u32 gt_perf_status; 3605 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; 3606 u32 gtfifodbg; 3607 int rc6_mode; 3608 int i, ret; 3609 3610 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3611 3612 /* Here begins a magic sequence of register writes to enable 3613 * auto-downclocking. 3614 * 3615 * Perhaps there might be some value in exposing these to 3616 * userspace... 
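 *
 * Note that the frequency values used throughout this sequence are
 * in 50MHz units (see parse_rp_state_cap()); e.g. an RP0 field of 22
 * corresponds to 1100MHz.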
3617 */ 3618 I915_WRITE(GEN6_RC_STATE, 0); 3619 3620 /* Clear the DBG now so we don't confuse earlier errors */ 3621 if ((gtfifodbg = I915_READ(GTFIFODBG))) { 3622 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); 3623 I915_WRITE(GTFIFODBG, gtfifodbg); 3624 } 3625 3626 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 3627 3628 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 3629 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 3630 3631 parse_rp_state_cap(dev_priv, rp_state_cap); 3632 3633 /* disable the counters and set deterministic thresholds */ 3634 I915_WRITE(GEN6_RC_CONTROL, 0); 3635 3636 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); 3637 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); 3638 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); 3639 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); 3640 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); 3641 3642 for_each_ring(ring, dev_priv, i) 3643 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 3644 3645 I915_WRITE(GEN6_RC_SLEEP, 0); 3646 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 3647 if (IS_IVYBRIDGE(dev)) 3648 I915_WRITE(GEN6_RC6_THRESHOLD, 125000); 3649 else 3650 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); 3651 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000); 3652 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 3653 3654 /* Check if we are enabling RC6 */ 3655 rc6_mode = intel_enable_rc6(dev_priv->dev); 3656 if (rc6_mode & INTEL_RC6_ENABLE) 3657 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; 3658 3659 /* We don't use those on Haswell */ 3660 if (!IS_HASWELL(dev)) { 3661 if (rc6_mode & INTEL_RC6p_ENABLE) 3662 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; 3663 3664 if (rc6_mode & INTEL_RC6pp_ENABLE) 3665 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; 3666 } 3667 3668 intel_print_rc6_info(dev, rc6_mask); 3669 3670 I915_WRITE(GEN6_RC_CONTROL, 3671 rc6_mask | 3672 GEN6_RC_CTL_EI_MODE(1) | 3673 GEN6_RC_CTL_HW_ENABLE); 3674 3675 /* Power down if completely idle for over 50ms */ 3676 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000); 3677 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 3678 3679 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); 3680 if (ret) 3681 DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); 3682 3683 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); 3684 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */ 3685 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n", 3686 (dev_priv->rps.max_freq_softlimit & 0xff) * 50, 3687 (pcu_mbox & 0xff) * 50); 3688 dev_priv->rps.max_freq = pcu_mbox & 0xff; 3689 } 3690 3691 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 3692 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); 3693 3694 gen6_enable_rps_interrupts(dev); 3695 3696 rc6vids = 0; 3697 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 3698 if (IS_GEN6(dev) && ret) { 3699 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); 3700 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { 3701 DRM_DEBUG_DRIVER("You should update your BIOS. 
Correcting minimum rc6 voltage (%dmV->%dmV)\n", 3702 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); 3703 rc6vids &= 0xffff00; 3704 rc6vids |= GEN6_ENCODE_RC6_VID(450); 3705 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); 3706 if (ret) 3707 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); 3708 } 3709 3710 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 3711 } 3712 3713 static void __gen6_update_ring_freq(struct drm_device *dev) 3714 { 3715 struct drm_i915_private *dev_priv = dev->dev_private; 3716 int min_freq = 15; 3717 unsigned int gpu_freq; 3718 unsigned int max_ia_freq, min_ring_freq; 3719 int scaling_factor = 180; 3720 3721 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3722 3723 #if 0 3724 policy = cpufreq_cpu_get(0); 3725 if (policy) { 3726 max_ia_freq = policy->cpuinfo.max_freq; 3727 cpufreq_cpu_put(policy); 3728 } else { 3729 /* 3730 * Default to measured freq if none found, PCU will ensure we 3731 * don't go over 3732 */ 3733 max_ia_freq = tsc_khz; 3734 } 3735 #else 3736 max_ia_freq = tsc_frequency / 1000; 3737 #endif 3738 3739 /* Convert from kHz to MHz */ 3740 max_ia_freq /= 1000; 3741 3742 min_ring_freq = I915_READ(DCLK) & 0xf; 3743 /* convert DDR frequency from units of 266.6MHz to bandwidth */ 3744 min_ring_freq = mult_frac(min_ring_freq, 8, 3); 3745 3746 /* 3747 * For each potential GPU frequency, load a ring frequency we'd like 3748 * to use for memory access. We do this by specifying the IA frequency 3749 * the PCU should use as a reference to determine the ring frequency. 3750 */ 3751 for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit; 3752 gpu_freq--) { 3753 int diff = dev_priv->rps.max_freq_softlimit - gpu_freq; 3754 unsigned int ia_freq = 0, ring_freq = 0; 3755 3756 if (INTEL_INFO(dev)->gen >= 8) { 3757 /* max(2 * GT, DDR). NB: GT is 50MHz units */ 3758 ring_freq = max(min_ring_freq, gpu_freq); 3759 } else if (IS_HASWELL(dev)) { 3760 ring_freq = mult_frac(gpu_freq, 5, 4); 3761 ring_freq = max(min_ring_freq, ring_freq); 3762 /* leave ia_freq as the default, chosen by cpufreq */ 3763 } else { 3764 /* On older processors, there is no separate ring 3765 * clock domain, so in order to boost the bandwidth 3766 * of the ring, we need to upclock the CPU (ia_freq). 3767 * 3768 * For GPU frequencies less than 750MHz, 3769 * just use the lowest ring freq. 
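* As a worked example with made-up numbers: for max_ia_freq = 3000 MHz and a GPU frequency 4 steps below the soft maximum (diff = 4), ia_freq = 3000 - (4 * 180) / 2 = 2640 MHz, which DIV_ROUND_CLOSEST() below encodes as 26 for the PCU (units of 100 MHz).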
3770 */ 3771 if (gpu_freq < min_freq) 3772 ia_freq = 800; 3773 else 3774 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); 3775 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); 3776 } 3777 3778 sandybridge_pcode_write(dev_priv, 3779 GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 3780 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT | 3781 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT | 3782 gpu_freq); 3783 } 3784 } 3785 3786 void gen6_update_ring_freq(struct drm_device *dev) 3787 { 3788 struct drm_i915_private *dev_priv = dev->dev_private; 3789 3790 if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev)) 3791 return; 3792 3793 mutex_lock(&dev_priv->rps.hw_lock); 3794 __gen6_update_ring_freq(dev); 3795 mutex_unlock(&dev_priv->rps.hw_lock); 3796 } 3797 3798 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) 3799 { 3800 u32 val, rp0; 3801 3802 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG); 3803 rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK; 3804 3805 return rp0; 3806 } 3807 3808 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv) 3809 { 3810 u32 val, rpe; 3811 3812 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG); 3813 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK; 3814 3815 return rpe; 3816 } 3817 3818 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv) 3819 { 3820 u32 val, rp1; 3821 3822 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 3823 rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK; 3824 3825 return rp1; 3826 } 3827 3828 static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv) 3829 { 3830 u32 val, rpn; 3831 3832 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG); 3833 rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK; 3834 return rpn; 3835 } 3836 3837 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv) 3838 { 3839 u32 val, rp1; 3840 3841 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE); 3842 3843 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT; 3844 3845 return rp1; 3846 } 3847 3848 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv) 3849 { 3850 u32 val, rp0; 3851 3852 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE); 3853 3854 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT; 3855 /* Clamp to max */ 3856 rp0 = min_t(u32, rp0, 0xea); 3857 3858 return rp0; 3859 } 3860 3861 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv) 3862 { 3863 u32 val, rpe; 3864 3865 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO); 3866 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT; 3867 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI); 3868 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5; 3869 3870 return rpe; 3871 } 3872 3873 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) 3874 { 3875 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; 3876 } 3877 3878 /* Check that the pctx buffer wasn't moved under us. 
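* VLV_PCBR holds the physical address the GPU uses for its power context; the WARN below fires if that address no longer matches the stolen-memory offset of vlv_pctx.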
*/ 3879 static void valleyview_check_pctx(struct drm_i915_private *dev_priv) 3880 { 3881 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095; 3882 3883 /* DragonFly - if EDID fails vlv_pctx can wind up NULL */ 3884 if (WARN_ON(!dev_priv->vlv_pctx)) 3885 return; 3886 3887 WARN_ON(pctx_addr != dev_priv->mm.stolen_base + 3888 dev_priv->vlv_pctx->stolen->start); 3889 } 3890 3891 3892 /* Check that the pcbr address is not empty. */ 3893 static void cherryview_check_pctx(struct drm_i915_private *dev_priv) 3894 { 3895 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095; 3896 3897 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0); 3898 } 3899 3900 static void cherryview_setup_pctx(struct drm_device *dev) 3901 { 3902 struct drm_i915_private *dev_priv = dev->dev_private; 3903 unsigned long pctx_paddr, paddr; 3904 struct i915_gtt *gtt = &dev_priv->gtt; 3905 u32 pcbr; 3906 int pctx_size = 32*1024; 3907 3908 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 3909 3910 pcbr = I915_READ(VLV_PCBR); 3911 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { 3912 paddr = (dev_priv->mm.stolen_base + 3913 (gtt->stolen_size - pctx_size)); 3914 3915 pctx_paddr = (paddr & (~4095)); 3916 I915_WRITE(VLV_PCBR, pctx_paddr); 3917 } 3918 } 3919 3920 static void valleyview_setup_pctx(struct drm_device *dev) 3921 { 3922 struct drm_i915_private *dev_priv = dev->dev_private; 3923 struct drm_i915_gem_object *pctx; 3924 unsigned long pctx_paddr; 3925 u32 pcbr; 3926 int pctx_size = 24*1024; 3927 3928 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 3929 3930 pcbr = I915_READ(VLV_PCBR); 3931 if (pcbr) { 3932 /* BIOS set it up already, grab the pre-alloc'd space */ 3933 int pcbr_offset; 3934 3935 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base; 3936 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev, 3937 pcbr_offset, 3938 I915_GTT_OFFSET_NONE, 3939 pctx_size); 3940 goto out; 3941 } 3942 3943 /* 3944 * From the Gunit register HAS: 3945 * The Gfx driver is expected to program this register and ensure 3946 * proper allocation within Gfx stolen memory. For example, this 3947 * register should be programmed such that the PCBR range does not 3948 * overlap with other ranges, such as the frame buffer, protected 3949 * memory, or any other relevant ranges. 
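* To satisfy this, the fallback below carves pctx_size bytes out of stolen memory with i915_gem_object_create_stolen() and writes the 4K-aligned physical address into VLV_PCBR.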
3950 */ 3951 pctx = i915_gem_object_create_stolen(dev, pctx_size); 3952 if (!pctx) { 3953 DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); 3954 return; 3955 } 3956 3957 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start; 3958 I915_WRITE(VLV_PCBR, pctx_paddr); 3959 3960 out: 3961 dev_priv->vlv_pctx = pctx; 3962 } 3963 3964 static void valleyview_cleanup_pctx(struct drm_device *dev) 3965 { 3966 struct drm_i915_private *dev_priv = dev->dev_private; 3967 3968 if (WARN_ON(!dev_priv->vlv_pctx)) 3969 return; 3970 3971 drm_gem_object_unreference(&dev_priv->vlv_pctx->base); 3972 dev_priv->vlv_pctx = NULL; 3973 } 3974 3975 static void valleyview_init_gt_powersave(struct drm_device *dev) 3976 { 3977 struct drm_i915_private *dev_priv = dev->dev_private; 3978 3979 valleyview_setup_pctx(dev); 3980 3981 mutex_lock(&dev_priv->rps.hw_lock); 3982 3983 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv); 3984 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 3985 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 3986 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq), 3987 dev_priv->rps.max_freq); 3988 3989 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv); 3990 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 3991 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 3992 dev_priv->rps.efficient_freq); 3993 3994 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv); 3995 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n", 3996 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), 3997 dev_priv->rps.rp1_freq); 3998 3999 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv); 4000 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 4001 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq), 4002 dev_priv->rps.min_freq); 4003 4004 /* Preserve min/max settings in case of re-init */ 4005 if (dev_priv->rps.max_freq_softlimit == 0) 4006 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 4007 4008 if (dev_priv->rps.min_freq_softlimit == 0) 4009 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; 4010 4011 mutex_unlock(&dev_priv->rps.hw_lock); 4012 } 4013 4014 static void cherryview_init_gt_powersave(struct drm_device *dev) 4015 { 4016 struct drm_i915_private *dev_priv = dev->dev_private; 4017 4018 cherryview_setup_pctx(dev); 4019 4020 mutex_lock(&dev_priv->rps.hw_lock); 4021 4022 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv); 4023 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 4024 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 4025 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq), 4026 dev_priv->rps.max_freq); 4027 4028 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv); 4029 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 4030 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 4031 dev_priv->rps.efficient_freq); 4032 4033 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv); 4034 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n", 4035 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), 4036 dev_priv->rps.rp1_freq); 4037 4038 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv); 4039 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 4040 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq), 4041 dev_priv->rps.min_freq); 4042 4043 /* Preserve min/max settings in case of re-init */ 4044 if (dev_priv->rps.max_freq_softlimit == 0) 4045 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 4046 4047 if (dev_priv->rps.min_freq_softlimit == 0) 4048 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; 4049 4050 
mutex_unlock(&dev_priv->rps.hw_lock); 4051 } 4052 4053 static void valleyview_cleanup_gt_powersave(struct drm_device *dev) 4054 { 4055 valleyview_cleanup_pctx(dev); 4056 } 4057 4058 static void cherryview_enable_rps(struct drm_device *dev) 4059 { 4060 struct drm_i915_private *dev_priv = dev->dev_private; 4061 struct intel_engine_cs *ring; 4062 u32 gtfifodbg, val, rc6_mode = 0, pcbr; 4063 int i; 4064 4065 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4066 4067 gtfifodbg = I915_READ(GTFIFODBG); 4068 if (gtfifodbg) { 4069 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", 4070 gtfifodbg); 4071 I915_WRITE(GTFIFODBG, gtfifodbg); 4072 } 4073 4074 cherryview_check_pctx(dev_priv); 4075 4076 /* 1a & 1b: Get forcewake during program sequence. Although the driver 4077 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ 4078 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 4079 4080 /* 2a: Program RC6 thresholds.*/ 4081 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); 4082 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ 4083 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ 4084 4085 for_each_ring(ring, dev_priv, i) 4086 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 4087 I915_WRITE(GEN6_RC_SLEEP, 0); 4088 4089 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ 4090 4091 /* allows RC6 residency counter to work */ 4092 I915_WRITE(VLV_COUNTER_CONTROL, 4093 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | 4094 VLV_MEDIA_RC6_COUNT_EN | 4095 VLV_RENDER_RC6_COUNT_EN)); 4096 4097 /* For now we assume BIOS is allocating and populating the PCBR */ 4098 pcbr = I915_READ(VLV_PCBR); 4099 4100 DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr); 4101 4102 /* 3: Enable RC6 */ 4103 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && 4104 (pcbr >> VLV_PCBR_ADDR_SHIFT)) 4105 rc6_mode = GEN6_RC_CTL_EI_MODE(1); 4106 4107 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 4108 4109 /* 4 Program defaults and thresholds for RPS*/ 4110 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); 4111 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); 4112 I915_WRITE(GEN6_RP_UP_EI, 66000); 4113 I915_WRITE(GEN6_RP_DOWN_EI, 350000); 4114 4115 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 4116 4117 /* WaDisablePwrmtrEvent:chv (pre-production hw) */ 4118 I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff); 4119 I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00); 4120 4121 /* 5: Enable RPS */ 4122 I915_WRITE(GEN6_RP_CONTROL, 4123 GEN6_RP_MEDIA_HW_NORMAL_MODE | 4124 GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */ 4125 GEN6_RP_ENABLE | 4126 GEN6_RP_UP_BUSY_AVG | 4127 GEN6_RP_DOWN_IDLE_AVG); 4128 4129 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 4130 4131 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? 
"yes" : "no"); 4132 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 4133 4134 dev_priv->rps.cur_freq = (val >> 8) & 0xff; 4135 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", 4136 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq), 4137 dev_priv->rps.cur_freq); 4138 4139 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", 4140 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 4141 dev_priv->rps.efficient_freq); 4142 4143 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); 4144 4145 gen8_enable_rps_interrupts(dev); 4146 4147 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 4148 } 4149 4150 static void valleyview_enable_rps(struct drm_device *dev) 4151 { 4152 struct drm_i915_private *dev_priv = dev->dev_private; 4153 struct intel_engine_cs *ring; 4154 u32 gtfifodbg, val, rc6_mode = 0; 4155 int i; 4156 4157 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4158 4159 valleyview_check_pctx(dev_priv); 4160 4161 if ((gtfifodbg = I915_READ(GTFIFODBG))) { 4162 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", 4163 gtfifodbg); 4164 I915_WRITE(GTFIFODBG, gtfifodbg); 4165 } 4166 4167 /* If VLV, Forcewake all wells, else re-direct to regular path */ 4168 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 4169 4170 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); 4171 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); 4172 I915_WRITE(GEN6_RP_UP_EI, 66000); 4173 I915_WRITE(GEN6_RP_DOWN_EI, 350000); 4174 4175 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 4176 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240); 4177 4178 I915_WRITE(GEN6_RP_CONTROL, 4179 GEN6_RP_MEDIA_TURBO | 4180 GEN6_RP_MEDIA_HW_NORMAL_MODE | 4181 GEN6_RP_MEDIA_IS_GFX | 4182 GEN6_RP_ENABLE | 4183 GEN6_RP_UP_BUSY_AVG | 4184 GEN6_RP_DOWN_IDLE_CONT); 4185 4186 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000); 4187 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); 4188 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); 4189 4190 for_each_ring(ring, dev_priv, i) 4191 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 4192 4193 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557); 4194 4195 /* allows RC6 residency counter to work */ 4196 I915_WRITE(VLV_COUNTER_CONTROL, 4197 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN | 4198 VLV_RENDER_RC0_COUNT_EN | 4199 VLV_MEDIA_RC6_COUNT_EN | 4200 VLV_RENDER_RC6_COUNT_EN)); 4201 4202 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 4203 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; 4204 4205 intel_print_rc6_info(dev, rc6_mode); 4206 4207 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 4208 4209 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 4210 4211 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? 
"yes" : "no"); 4212 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 4213 4214 dev_priv->rps.cur_freq = (val >> 8) & 0xff; 4215 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", 4216 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq), 4217 dev_priv->rps.cur_freq); 4218 4219 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", 4220 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 4221 dev_priv->rps.efficient_freq); 4222 4223 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); 4224 4225 gen6_enable_rps_interrupts(dev); 4226 4227 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 4228 } 4229 4230 void ironlake_teardown_rc6(struct drm_device *dev) 4231 { 4232 struct drm_i915_private *dev_priv = dev->dev_private; 4233 4234 if (dev_priv->ips.renderctx) { 4235 i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx); 4236 drm_gem_object_unreference(&dev_priv->ips.renderctx->base); 4237 dev_priv->ips.renderctx = NULL; 4238 } 4239 4240 if (dev_priv->ips.pwrctx) { 4241 i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx); 4242 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base); 4243 dev_priv->ips.pwrctx = NULL; 4244 } 4245 } 4246 4247 static void ironlake_disable_rc6(struct drm_device *dev) 4248 { 4249 struct drm_i915_private *dev_priv = dev->dev_private; 4250 4251 if (I915_READ(PWRCTXA)) { 4252 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ 4253 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); 4254 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), 4255 50); 4256 4257 I915_WRITE(PWRCTXA, 0); 4258 POSTING_READ(PWRCTXA); 4259 4260 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 4261 POSTING_READ(RSTDBYCTL); 4262 } 4263 } 4264 4265 static int ironlake_setup_rc6(struct drm_device *dev) 4266 { 4267 struct drm_i915_private *dev_priv = dev->dev_private; 4268 4269 if (dev_priv->ips.renderctx == NULL) 4270 dev_priv->ips.renderctx = intel_alloc_context_page(dev); 4271 if (!dev_priv->ips.renderctx) 4272 return -ENOMEM; 4273 4274 if (dev_priv->ips.pwrctx == NULL) 4275 dev_priv->ips.pwrctx = intel_alloc_context_page(dev); 4276 if (!dev_priv->ips.pwrctx) { 4277 ironlake_teardown_rc6(dev); 4278 return -ENOMEM; 4279 } 4280 4281 return 0; 4282 } 4283 4284 static void ironlake_enable_rc6(struct drm_device *dev) 4285 { 4286 struct drm_i915_private *dev_priv = dev->dev_private; 4287 struct intel_engine_cs *ring = &dev_priv->ring[RCS]; 4288 bool was_interruptible; 4289 int ret; 4290 4291 /* rc6 disabled by default due to repeated reports of hanging during 4292 * boot and resume. 4293 */ 4294 if (!intel_enable_rc6(dev)) 4295 return; 4296 4297 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 4298 4299 ret = ironlake_setup_rc6(dev); 4300 if (ret) 4301 return; 4302 4303 was_interruptible = dev_priv->mm.interruptible; 4304 dev_priv->mm.interruptible = false; 4305 4306 /* 4307 * GPU can automatically power down the render unit if given a page 4308 * to save state. 
4309 */ 4310 ret = intel_ring_begin(ring, 6); 4311 if (ret) { 4312 ironlake_teardown_rc6(dev); 4313 dev_priv->mm.interruptible = was_interruptible; 4314 return; 4315 } 4316 4317 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); 4318 intel_ring_emit(ring, MI_SET_CONTEXT); 4319 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) | 4320 MI_MM_SPACE_GTT | 4321 MI_SAVE_EXT_STATE_EN | 4322 MI_RESTORE_EXT_STATE_EN | 4323 MI_RESTORE_INHIBIT); 4324 intel_ring_emit(ring, MI_SUSPEND_FLUSH); 4325 intel_ring_emit(ring, MI_NOOP); 4326 intel_ring_emit(ring, MI_FLUSH); 4327 intel_ring_advance(ring); 4328 4329 /* 4330 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW 4331 * does an implicit flush, combined with MI_FLUSH above, it should be 4332 * safe to assume that renderctx is valid 4333 */ 4334 ret = intel_ring_idle(ring); 4335 dev_priv->mm.interruptible = was_interruptible; 4336 if (ret) { 4337 DRM_ERROR("failed to enable ironlake power savings\n"); 4338 ironlake_teardown_rc6(dev); 4339 return; 4340 } 4341 4342 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN); 4343 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 4344 4345 intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE); 4346 } 4347 4348 static unsigned long intel_pxfreq(u32 vidfreq) 4349 { 4350 unsigned long freq; 4351 int div = (vidfreq & 0x3f0000) >> 16; 4352 int post = (vidfreq & 0x3000) >> 12; 4353 int pre = (vidfreq & 0x7); 4354 4355 if (!pre) 4356 return 0; 4357 4358 freq = ((div * 133333) / ((1<<post) * pre)); 4359 4360 return freq; 4361 } 4362 4363 static const struct cparams { 4364 u16 i; 4365 u16 t; 4366 u16 m; 4367 u16 c; 4368 } cparams[] = { 4369 { 1, 1333, 301, 28664 }, 4370 { 1, 1066, 294, 24460 }, 4371 { 1, 800, 294, 25192 }, 4372 { 0, 1333, 276, 27605 }, 4373 { 0, 1066, 276, 27605 }, 4374 { 0, 800, 231, 23784 }, 4375 }; 4376 4377 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) 4378 { 4379 u64 total_count, diff, ret; 4380 u32 count1, count2, count3, m = 0, c = 0; 4381 unsigned long now = jiffies_to_msecs(jiffies), diff1; 4382 int i; 4383 4384 assert_spin_locked(&mchdev_lock); 4385 4386 diff1 = now - dev_priv->ips.last_time1; 4387 4388 /* Prevent division-by-zero if we are asking too fast. 4389 * Also, we don't get interesting results if we are polling 4390 * faster than once in 10ms, so just return the saved value 4391 * in such cases. 
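* Below, the three energy counters (DMIEC, DDREC, CSIEC) are summed, the wrap-safe delta since the last sample is scaled by the (m, c) coefficients picked out of cparams[] by the stored c_m/r_t values, and the result is cached in chipset_power.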
4392 */ 4393 if (diff1 <= 10) 4394 return dev_priv->ips.chipset_power; 4395 4396 count1 = I915_READ(DMIEC); 4397 count2 = I915_READ(DDREC); 4398 count3 = I915_READ(CSIEC); 4399 4400 total_count = count1 + count2 + count3; 4401 4402 /* FIXME: handle per-counter overflow */ 4403 if (total_count < dev_priv->ips.last_count1) { 4404 diff = ~0UL - dev_priv->ips.last_count1; 4405 diff += total_count; 4406 } else { 4407 diff = total_count - dev_priv->ips.last_count1; 4408 } 4409 4410 for (i = 0; i < ARRAY_SIZE(cparams); i++) { 4411 if (cparams[i].i == dev_priv->ips.c_m && 4412 cparams[i].t == dev_priv->ips.r_t) { 4413 m = cparams[i].m; 4414 c = cparams[i].c; 4415 break; 4416 } 4417 } 4418 4419 diff = div_u64(diff, diff1); 4420 ret = ((m * diff) + c); 4421 ret = div_u64(ret, 10); 4422 4423 dev_priv->ips.last_count1 = total_count; 4424 dev_priv->ips.last_time1 = now; 4425 4426 dev_priv->ips.chipset_power = ret; 4427 4428 return ret; 4429 } 4430 4431 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 4432 { 4433 struct drm_device *dev = dev_priv->dev; 4434 unsigned long val; 4435 4436 if (INTEL_INFO(dev)->gen != 5) 4437 return 0; 4438 4439 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 4440 4441 val = __i915_chipset_val(dev_priv); 4442 4443 lockmgr(&mchdev_lock, LK_RELEASE); 4444 4445 return val; 4446 } 4447 4448 unsigned long i915_mch_val(struct drm_i915_private *dev_priv) 4449 { 4450 unsigned long m, x, b; 4451 u32 tsfs; 4452 4453 tsfs = I915_READ(TSFS); 4454 4455 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); 4456 x = I915_READ8(TR1); 4457 4458 b = tsfs & TSFS_INTR_MASK; 4459 4460 return ((m * x) / 127) - b; 4461 } 4462 4463 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 4464 { 4465 struct drm_device *dev = dev_priv->dev; 4466 static const struct v_table { 4467 u16 vd; /* in .1 mil */ 4468 u16 vm; /* in .1 mil */ 4469 } v_table[] = { 4470 { 0, 0, }, 4471 { 375, 0, }, 4472 { 500, 0, }, 4473 { 625, 0, }, 4474 { 750, 0, }, 4475 { 875, 0, }, 4476 { 1000, 0, }, 4477 { 1125, 0, }, 4478 { 4125, 3000, }, 4479 { 4125, 3000, }, 4480 { 4125, 3000, }, 4481 { 4125, 3000, }, 4482 { 4125, 3000, }, 4483 { 4125, 3000, }, 4484 { 4125, 3000, }, 4485 { 4125, 3000, }, 4486 { 4125, 3000, }, 4487 { 4125, 3000, }, 4488 { 4125, 3000, }, 4489 { 4125, 3000, }, 4490 { 4125, 3000, }, 4491 { 4125, 3000, }, 4492 { 4125, 3000, }, 4493 { 4125, 3000, }, 4494 { 4125, 3000, }, 4495 { 4125, 3000, }, 4496 { 4125, 3000, }, 4497 { 4125, 3000, }, 4498 { 4125, 3000, }, 4499 { 4125, 3000, }, 4500 { 4125, 3000, }, 4501 { 4125, 3000, }, 4502 { 4250, 3125, }, 4503 { 4375, 3250, }, 4504 { 4500, 3375, }, 4505 { 4625, 3500, }, 4506 { 4750, 3625, }, 4507 { 4875, 3750, }, 4508 { 5000, 3875, }, 4509 { 5125, 4000, }, 4510 { 5250, 4125, }, 4511 { 5375, 4250, }, 4512 { 5500, 4375, }, 4513 { 5625, 4500, }, 4514 { 5750, 4625, }, 4515 { 5875, 4750, }, 4516 { 6000, 4875, }, 4517 { 6125, 5000, }, 4518 { 6250, 5125, }, 4519 { 6375, 5250, }, 4520 { 6500, 5375, }, 4521 { 6625, 5500, }, 4522 { 6750, 5625, }, 4523 { 6875, 5750, }, 4524 { 7000, 5875, }, 4525 { 7125, 6000, }, 4526 { 7250, 6125, }, 4527 { 7375, 6250, }, 4528 { 7500, 6375, }, 4529 { 7625, 6500, }, 4530 { 7750, 6625, }, 4531 { 7875, 6750, }, 4532 { 8000, 6875, }, 4533 { 8125, 7000, }, 4534 { 8250, 7125, }, 4535 { 8375, 7250, }, 4536 { 8500, 7375, }, 4537 { 8625, 7500, }, 4538 { 8750, 7625, }, 4539 { 8875, 7750, }, 4540 { 9000, 7875, }, 4541 { 9125, 8000, }, 4542 { 9250, 8125, }, 4543 { 9375, 8250, }, 4544 { 9500, 8375, }, 4545 { 9625, 8500, }, 4546 { 9750, 8625, }, 
4547 { 9875, 8750, }, 4548 { 10000, 8875, }, 4549 { 10125, 9000, }, 4550 { 10250, 9125, }, 4551 { 10375, 9250, }, 4552 { 10500, 9375, }, 4553 { 10625, 9500, }, 4554 { 10750, 9625, }, 4555 { 10875, 9750, }, 4556 { 11000, 9875, }, 4557 { 11125, 10000, }, 4558 { 11250, 10125, }, 4559 { 11375, 10250, }, 4560 { 11500, 10375, }, 4561 { 11625, 10500, }, 4562 { 11750, 10625, }, 4563 { 11875, 10750, }, 4564 { 12000, 10875, }, 4565 { 12125, 11000, }, 4566 { 12250, 11125, }, 4567 { 12375, 11250, }, 4568 { 12500, 11375, }, 4569 { 12625, 11500, }, 4570 { 12750, 11625, }, 4571 { 12875, 11750, }, 4572 { 13000, 11875, }, 4573 { 13125, 12000, }, 4574 { 13250, 12125, }, 4575 { 13375, 12250, }, 4576 { 13500, 12375, }, 4577 { 13625, 12500, }, 4578 { 13750, 12625, }, 4579 { 13875, 12750, }, 4580 { 14000, 12875, }, 4581 { 14125, 13000, }, 4582 { 14250, 13125, }, 4583 { 14375, 13250, }, 4584 { 14500, 13375, }, 4585 { 14625, 13500, }, 4586 { 14750, 13625, }, 4587 { 14875, 13750, }, 4588 { 15000, 13875, }, 4589 { 15125, 14000, }, 4590 { 15250, 14125, }, 4591 { 15375, 14250, }, 4592 { 15500, 14375, }, 4593 { 15625, 14500, }, 4594 { 15750, 14625, }, 4595 { 15875, 14750, }, 4596 { 16000, 14875, }, 4597 { 16125, 15000, }, 4598 }; 4599 if (INTEL_INFO(dev)->is_mobile) 4600 return v_table[pxvid].vm; 4601 else 4602 return v_table[pxvid].vd; 4603 } 4604 4605 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) 4606 { 4607 struct timespec now, diff1; 4608 u64 diff; 4609 unsigned long diffms; 4610 u32 count; 4611 4612 assert_spin_locked(&mchdev_lock); 4613 4614 getrawmonotonic(&now); 4615 diff1 = timespec_sub(now, dev_priv->ips.last_time2); 4616 4617 /* Don't divide by 0 */ 4618 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; 4619 if (!diffms) 4620 return; 4621 4622 count = I915_READ(GFXEC); 4623 4624 if (count < dev_priv->ips.last_count2) { 4625 diff = ~0UL - dev_priv->ips.last_count2; 4626 diff += count; 4627 } else { 4628 diff = count - dev_priv->ips.last_count2; 4629 } 4630 4631 dev_priv->ips.last_count2 = count; 4632 dev_priv->ips.last_time2 = now; 4633 4634 /* More magic constants... 
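* The GFXEC count delta is scaled by 1181 and divided by (elapsed ms * 10); the result is cached in gfx_power, which __i915_gfx_val() then adds to its voltage-corrected reading.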
*/ 4635 diff = diff * 1181; 4636 diff = div_u64(diff, diffms * 10); 4637 dev_priv->ips.gfx_power = diff; 4638 } 4639 4640 void i915_update_gfx_val(struct drm_i915_private *dev_priv) 4641 { 4642 struct drm_device *dev = dev_priv->dev; 4643 4644 if (INTEL_INFO(dev)->gen != 5) 4645 return; 4646 4647 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 4648 4649 __i915_update_gfx_val(dev_priv); 4650 4651 lockmgr(&mchdev_lock, LK_RELEASE); 4652 } 4653 4654 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) 4655 { 4656 unsigned long t, corr, state1, corr2, state2; 4657 u32 pxvid, ext_v; 4658 4659 assert_spin_locked(&mchdev_lock); 4660 4661 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4)); 4662 pxvid = (pxvid >> 24) & 0x7f; 4663 ext_v = pvid_to_extvid(dev_priv, pxvid); 4664 4665 state1 = ext_v; 4666 4667 t = i915_mch_val(dev_priv); 4668 4669 /* Revel in the empirically derived constants */ 4670 4671 /* Correction factor in 1/100000 units */ 4672 if (t > 80) 4673 corr = ((t * 2349) + 135940); 4674 else if (t >= 50) 4675 corr = ((t * 964) + 29317); 4676 else /* < 50 */ 4677 corr = ((t * 301) + 1004); 4678 4679 corr = corr * ((150142 * state1) / 10000 - 78642); 4680 corr /= 100000; 4681 corr2 = (corr * dev_priv->ips.corr); 4682 4683 state2 = (corr2 * state1) / 10000; 4684 state2 /= 100; /* convert to mW */ 4685 4686 __i915_update_gfx_val(dev_priv); 4687 4688 return dev_priv->ips.gfx_power + state2; 4689 } 4690 4691 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 4692 { 4693 struct drm_device *dev = dev_priv->dev; 4694 unsigned long val; 4695 4696 if (INTEL_INFO(dev)->gen != 5) 4697 return 0; 4698 4699 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 4700 4701 val = __i915_gfx_val(dev_priv); 4702 4703 lockmgr(&mchdev_lock, LK_RELEASE); 4704 4705 return val; 4706 } 4707 4708 /** 4709 * i915_read_mch_val - return value for IPS use 4710 * 4711 * Calculate and return a value for the IPS driver to use when deciding whether 4712 * we have thermal and power headroom to increase CPU or GPU power budget. 4713 */ 4714 unsigned long i915_read_mch_val(void) 4715 { 4716 struct drm_i915_private *dev_priv; 4717 unsigned long chipset_val, graphics_val, ret = 0; 4718 4719 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 4720 if (!i915_mch_dev) 4721 goto out_unlock; 4722 dev_priv = i915_mch_dev; 4723 4724 chipset_val = __i915_chipset_val(dev_priv); 4725 graphics_val = __i915_gfx_val(dev_priv); 4726 4727 ret = chipset_val + graphics_val; 4728 4729 out_unlock: 4730 lockmgr(&mchdev_lock, LK_RELEASE); 4731 4732 return ret; 4733 } 4734 4735 /** 4736 * i915_gpu_raise - raise GPU frequency limit 4737 * 4738 * Raise the limit; IPS indicates we have thermal headroom. 4739 */ 4740 bool i915_gpu_raise(void) 4741 { 4742 struct drm_i915_private *dev_priv; 4743 bool ret = true; 4744 4745 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 4746 if (!i915_mch_dev) { 4747 ret = false; 4748 goto out_unlock; 4749 } 4750 dev_priv = i915_mch_dev; 4751 4752 if (dev_priv->ips.max_delay > dev_priv->ips.fmax) 4753 dev_priv->ips.max_delay--; 4754 4755 out_unlock: 4756 lockmgr(&mchdev_lock, LK_RELEASE); 4757 4758 return ret; 4759 } 4760 4761 /** 4762 * i915_gpu_lower - lower GPU frequency limit 4763 * 4764 * IPS indicates we're close to a thermal limit, so throttle back the GPU 4765 * frequency maximum. 
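* Note that max_delay counts delay steps, so a larger value means a lower frequency; incrementing it below therefore lowers the cap.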
4766 */ 4767 bool i915_gpu_lower(void) 4768 { 4769 struct drm_i915_private *dev_priv; 4770 bool ret = true; 4771 4772 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 4773 if (!i915_mch_dev) { 4774 ret = false; 4775 goto out_unlock; 4776 } 4777 dev_priv = i915_mch_dev; 4778 4779 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay) 4780 dev_priv->ips.max_delay++; 4781 4782 out_unlock: 4783 lockmgr(&mchdev_lock, LK_RELEASE); 4784 4785 return ret; 4786 } 4787 4788 /** 4789 * i915_gpu_busy - indicate GPU busyness to IPS 4790 * 4791 * Tell the IPS driver whether or not the GPU is busy. 4792 */ 4793 bool i915_gpu_busy(void) 4794 { 4795 struct drm_i915_private *dev_priv; 4796 struct intel_engine_cs *ring; 4797 bool ret = false; 4798 int i; 4799 4800 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 4801 if (!i915_mch_dev) 4802 goto out_unlock; 4803 dev_priv = i915_mch_dev; 4804 4805 for_each_ring(ring, dev_priv, i) 4806 ret |= !list_empty(&ring->request_list); 4807 4808 out_unlock: 4809 lockmgr(&mchdev_lock, LK_RELEASE); 4810 4811 return ret; 4812 } 4813 4814 /** 4815 * i915_gpu_turbo_disable - disable graphics turbo 4816 * 4817 * Disable graphics turbo by resetting the max frequency and setting the 4818 * current frequency to the default. 4819 */ 4820 bool i915_gpu_turbo_disable(void) 4821 { 4822 struct drm_i915_private *dev_priv; 4823 bool ret = true; 4824 4825 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 4826 if (!i915_mch_dev) { 4827 ret = false; 4828 goto out_unlock; 4829 } 4830 dev_priv = i915_mch_dev; 4831 4832 dev_priv->ips.max_delay = dev_priv->ips.fstart; 4833 4834 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart)) 4835 ret = false; 4836 4837 out_unlock: 4838 lockmgr(&mchdev_lock, LK_RELEASE); 4839 4840 return ret; 4841 } 4842 4843 #if 0 4844 /** 4845 * Tells the intel_ips driver that the i915 driver is now loaded, if 4846 * IPS got loaded first. 4847 * 4848 * This awkward dance is so that neither module has to depend on the 4849 * other in order for IPS to do the appropriate communication of 4850 * GPU turbo limits to i915. 4851 */ 4852 static void 4853 ips_ping_for_i915_load(void) 4854 { 4855 void (*link)(void); 4856 4857 link = symbol_get(ips_link_to_i915_driver); 4858 if (link) { 4859 link(); 4860 symbol_put(ips_link_to_i915_driver); 4861 } 4862 } 4863 #endif 4864 4865 void intel_gpu_ips_init(struct drm_i915_private *dev_priv) 4866 { 4867 /* We only register the i915 ips part with intel-ips once everything is 4868 * set up, to avoid intel-ips sneaking in and reading bogus values. 
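* i915_mch_dev is the handle the i915_read_mch_val()/i915_gpu_*() entry points above check before touching the hardware; publishing it under mchdev_lock makes registration atomic with respect to those callers.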
*/ 4869 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 4870 i915_mch_dev = dev_priv; 4871 lockmgr(&mchdev_lock, LK_RELEASE); 4872 } 4873 4874 void intel_gpu_ips_teardown(void) 4875 { 4876 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 4877 i915_mch_dev = NULL; 4878 lockmgr(&mchdev_lock, LK_RELEASE); 4879 } 4880 4881 static void intel_init_emon(struct drm_device *dev) 4882 { 4883 struct drm_i915_private *dev_priv = dev->dev_private; 4884 u32 lcfuse; 4885 u8 pxw[16]; 4886 int i; 4887 4888 /* Disable to program */ 4889 I915_WRITE(ECR, 0); 4890 POSTING_READ(ECR); 4891 4892 /* Program energy weights for various events */ 4893 I915_WRITE(SDEW, 0x15040d00); 4894 I915_WRITE(CSIEW0, 0x007f0000); 4895 I915_WRITE(CSIEW1, 0x1e220004); 4896 I915_WRITE(CSIEW2, 0x04000004); 4897 4898 for (i = 0; i < 5; i++) 4899 I915_WRITE(PEW + (i * 4), 0); 4900 for (i = 0; i < 3; i++) 4901 I915_WRITE(DEW + (i * 4), 0); 4902 4903 /* Program P-state weights to account for frequency power adjustment */ 4904 for (i = 0; i < 16; i++) { 4905 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); 4906 unsigned long freq = intel_pxfreq(pxvidfreq); 4907 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> 4908 PXVFREQ_PX_SHIFT; 4909 unsigned long val; 4910 4911 val = vid * vid; 4912 val *= (freq / 1000); 4913 val *= 255; 4914 val /= (127*127*900); 4915 if (val > 0xff) 4916 DRM_ERROR("bad pxval: %ld\n", val); 4917 pxw[i] = val; 4918 } 4919 /* Render standby states get 0 weight */ 4920 pxw[14] = 0; 4921 pxw[15] = 0; 4922 4923 for (i = 0; i < 4; i++) { 4924 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | 4925 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); 4926 I915_WRITE(PXW + (i * 4), val); 4927 } 4928 4929 /* Adjust magic regs to magic values (more experimental results) */ 4930 I915_WRITE(OGW0, 0); 4931 I915_WRITE(OGW1, 0); 4932 I915_WRITE(EG0, 0x00007f00); 4933 I915_WRITE(EG1, 0x0000000e); 4934 I915_WRITE(EG2, 0x000e0000); 4935 I915_WRITE(EG3, 0x68000300); 4936 I915_WRITE(EG4, 0x42000000); 4937 I915_WRITE(EG5, 0x00140031); 4938 I915_WRITE(EG6, 0); 4939 I915_WRITE(EG7, 0); 4940 4941 for (i = 0; i < 8; i++) 4942 I915_WRITE(PXWL + (i * 4), 0); 4943 4944 /* Enable PMON + select events */ 4945 I915_WRITE(ECR, 0x80000019); 4946 4947 lcfuse = I915_READ(LCFUSE02); 4948 4949 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); 4950 } 4951 4952 void intel_init_gt_powersave(struct drm_device *dev) 4953 { 4954 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); 4955 4956 if (IS_CHERRYVIEW(dev)) 4957 cherryview_init_gt_powersave(dev); 4958 else if (IS_VALLEYVIEW(dev)) 4959 valleyview_init_gt_powersave(dev); 4960 } 4961 4962 void intel_cleanup_gt_powersave(struct drm_device *dev) 4963 { 4964 if (IS_CHERRYVIEW(dev)) 4965 return; 4966 else if (IS_VALLEYVIEW(dev)) 4967 valleyview_cleanup_gt_powersave(dev); 4968 } 4969 4970 /** 4971 * intel_suspend_gt_powersave - suspend PM work and helper threads 4972 * @dev: drm device 4973 * 4974 * We don't want to disable RC6 or other features here, we just want 4975 * to make sure any work we've queued has finished and won't bother 4976 * us while we're suspended. 4977 */ 4978 void intel_suspend_gt_powersave(struct drm_device *dev) 4979 { 4980 struct drm_i915_private *dev_priv = dev->dev_private; 4981 4982 /* Interrupts should be disabled already to avoid re-arming. 
*/ 4983 WARN_ON(intel_irqs_enabled(dev_priv)); 4984 4985 #if 0 4986 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4987 #endif 4988 4989 cancel_work_sync(&dev_priv->rps.work); 4990 4991 /* Force GPU to min freq during suspend */ 4992 gen6_rps_idle(dev_priv); 4993 } 4994 4995 void intel_disable_gt_powersave(struct drm_device *dev) 4996 { 4997 struct drm_i915_private *dev_priv = dev->dev_private; 4998 4999 /* Interrupts should be disabled already to avoid re-arming. */ 5000 WARN_ON(intel_irqs_enabled(dev_priv)); 5001 5002 if (IS_IRONLAKE_M(dev)) { 5003 ironlake_disable_drps(dev); 5004 ironlake_disable_rc6(dev); 5005 } else if (INTEL_INFO(dev)->gen >= 6) { 5006 intel_suspend_gt_powersave(dev); 5007 5008 mutex_lock(&dev_priv->rps.hw_lock); 5009 if (IS_CHERRYVIEW(dev)) 5010 cherryview_disable_rps(dev); 5011 else if (IS_VALLEYVIEW(dev)) 5012 valleyview_disable_rps(dev); 5013 else 5014 gen6_disable_rps(dev); 5015 dev_priv->rps.enabled = false; 5016 mutex_unlock(&dev_priv->rps.hw_lock); 5017 } 5018 } 5019 5020 static void intel_gen6_powersave_work(struct work_struct *work) 5021 { 5022 struct drm_i915_private *dev_priv = 5023 container_of(work, struct drm_i915_private, 5024 rps.delayed_resume_work.work); 5025 struct drm_device *dev = dev_priv->dev; 5026 5027 mutex_lock(&dev_priv->rps.hw_lock); 5028 5029 if (IS_CHERRYVIEW(dev)) { 5030 cherryview_enable_rps(dev); 5031 } else if (IS_VALLEYVIEW(dev)) { 5032 valleyview_enable_rps(dev); 5033 } else if (IS_BROADWELL(dev)) { 5034 gen8_enable_rps(dev); 5035 __gen6_update_ring_freq(dev); 5036 } else { 5037 gen6_enable_rps(dev); 5038 __gen6_update_ring_freq(dev); 5039 } 5040 dev_priv->rps.enabled = true; 5041 mutex_unlock(&dev_priv->rps.hw_lock); 5042 5043 intel_runtime_pm_put(dev_priv); 5044 } 5045 5046 void intel_enable_gt_powersave(struct drm_device *dev) 5047 { 5048 struct drm_i915_private *dev_priv = dev->dev_private; 5049 5050 if (IS_IRONLAKE_M(dev)) { 5051 mutex_lock(&dev->struct_mutex); 5052 ironlake_enable_drps(dev); 5053 ironlake_enable_rc6(dev); 5054 intel_init_emon(dev); 5055 mutex_unlock(&dev->struct_mutex); 5056 } else if (INTEL_INFO(dev)->gen >= 6) { 5057 /* 5058 * PCU communication is slow and this doesn't need to be 5059 * done at any specific time, so do this out of our fast path 5060 * to make resume and init faster. 5061 * 5062 * We depend on the HW RC6 power context save/restore 5063 * mechanism when entering D3 through runtime PM suspend. So 5064 * disable RPM until RPS/RC6 is properly setup. We can only 5065 * get here via the driver load/system resume/runtime resume 5066 * paths, so the _noresume version is enough (and in case of 5067 * runtime resume it's necessary). 5068 */ 5069 if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work, 5070 round_jiffies_up_relative(HZ))) 5071 intel_runtime_pm_get_noresume(dev_priv); 5072 } 5073 } 5074 5075 void intel_reset_gt_powersave(struct drm_device *dev) 5076 { 5077 struct drm_i915_private *dev_priv = dev->dev_private; 5078 5079 dev_priv->rps.enabled = false; 5080 intel_enable_gt_powersave(dev); 5081 } 5082 5083 static void ibx_init_clock_gating(struct drm_device *dev) 5084 { 5085 struct drm_i915_private *dev_priv = dev->dev_private; 5086 5087 /* 5088 * On Ibex Peak and Cougar Point, we need to disable clock 5089 * gating for the panel power sequencer or it will fail to 5090 * start up when no ports are active. 
5091 */ 5092 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 5093 } 5094 5095 static void g4x_disable_trickle_feed(struct drm_device *dev) 5096 { 5097 struct drm_i915_private *dev_priv = dev->dev_private; 5098 int pipe; 5099 5100 for_each_pipe(pipe) { 5101 I915_WRITE(DSPCNTR(pipe), 5102 I915_READ(DSPCNTR(pipe)) | 5103 DISPPLANE_TRICKLE_FEED_DISABLE); 5104 intel_flush_primary_plane(dev_priv, pipe); 5105 } 5106 } 5107 5108 static void ilk_init_lp_watermarks(struct drm_device *dev) 5109 { 5110 struct drm_i915_private *dev_priv = dev->dev_private; 5111 5112 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); 5113 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); 5114 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); 5115 5116 /* 5117 * Don't touch WM1S_LP_EN here. 5118 * Doing so could cause underruns. 5119 */ 5120 } 5121 5122 static void ironlake_init_clock_gating(struct drm_device *dev) 5123 { 5124 struct drm_i915_private *dev_priv = dev->dev_private; 5125 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 5126 5127 /* 5128 * Required for FBC 5129 * WaFbcDisableDpfcClockGating:ilk 5130 */ 5131 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | 5132 ILK_DPFCUNIT_CLOCK_GATE_DISABLE | 5133 ILK_DPFDUNIT_CLOCK_GATE_ENABLE; 5134 5135 I915_WRITE(PCH_3DCGDIS0, 5136 MARIUNIT_CLOCK_GATE_DISABLE | 5137 SVSMUNIT_CLOCK_GATE_DISABLE); 5138 I915_WRITE(PCH_3DCGDIS1, 5139 VFMUNIT_CLOCK_GATE_DISABLE); 5140 5141 /* 5142 * According to the spec the following bits should be set in 5143 * order to enable memory self-refresh 5144 * The bit 22/21 of 0x42004 5145 * The bit 5 of 0x42020 5146 * The bit 15 of 0x45000 5147 */ 5148 I915_WRITE(ILK_DISPLAY_CHICKEN2, 5149 (I915_READ(ILK_DISPLAY_CHICKEN2) | 5150 ILK_DPARB_GATE | ILK_VSDPFD_FULL)); 5151 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; 5152 I915_WRITE(DISP_ARB_CTL, 5153 (I915_READ(DISP_ARB_CTL) | 5154 DISP_FBC_WM_DIS)); 5155 5156 ilk_init_lp_watermarks(dev); 5157 5158 /* 5159 * Based on the hardware documentation, the following bits 5160 * should be set unconditionally in order to enable FBC. 5161 * The bit 22 of 0x42000 5162 * The bit 22 of 0x42004 5163 * The bit 7,8,9 of 0x42020. 5164 */ 5165 if (IS_IRONLAKE_M(dev)) { 5166 /* WaFbcAsynchFlipDisableFbcQueue:ilk */ 5167 I915_WRITE(ILK_DISPLAY_CHICKEN1, 5168 I915_READ(ILK_DISPLAY_CHICKEN1) | 5169 ILK_FBCQ_DIS); 5170 I915_WRITE(ILK_DISPLAY_CHICKEN2, 5171 I915_READ(ILK_DISPLAY_CHICKEN2) | 5172 ILK_DPARB_GATE); 5173 } 5174 5175 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 5176 5177 I915_WRITE(ILK_DISPLAY_CHICKEN2, 5178 I915_READ(ILK_DISPLAY_CHICKEN2) | 5179 ILK_ELPIN_409_SELECT); 5180 I915_WRITE(_3D_CHICKEN2, 5181 _3D_CHICKEN2_WM_READ_PIPELINED << 16 | 5182 _3D_CHICKEN2_WM_READ_PIPELINED); 5183 5184 /* WaDisableRenderCachePipelinedFlush:ilk */ 5185 I915_WRITE(CACHE_MODE_0, 5186 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); 5187 5188 /* WaDisable_RenderCache_OperationalFlush:ilk */ 5189 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 5190 5191 g4x_disable_trickle_feed(dev); 5192 5193 ibx_init_clock_gating(dev); 5194 } 5195 5196 static void cpt_init_clock_gating(struct drm_device *dev) 5197 { 5198 struct drm_i915_private *dev_priv = dev->dev_private; 5199 int pipe; 5200 uint32_t val; 5201 5202 /* 5203 * On Ibex Peak and Cougar Point, we need to disable clock 5204 * gating for the panel power sequencer or it will fail to 5205 * start up when no ports are active. 
5206 */ 5207 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE | 5208 PCH_DPLUNIT_CLOCK_GATE_DISABLE | 5209 PCH_CPUNIT_CLOCK_GATE_DISABLE); 5210 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | 5211 DPLS_EDP_PPS_FIX_DIS); 5212 /* This fixes the weird display corruption (a few pixels shifted 5213 * downward) seen only on the LVDS panels of some HP laptops with Ivy Bridge. 5214 */ 5215 for_each_pipe(pipe) { 5216 val = I915_READ(TRANS_CHICKEN2(pipe)); 5217 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 5218 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 5219 if (dev_priv->vbt.fdi_rx_polarity_inverted) 5220 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 5221 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; 5222 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; 5223 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH; 5224 I915_WRITE(TRANS_CHICKEN2(pipe), val); 5225 } 5226 /* WADP0ClockGatingDisable */ 5227 for_each_pipe(pipe) { 5228 I915_WRITE(TRANS_CHICKEN1(pipe), 5229 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 5230 } 5231 } 5232 5233 static void gen6_check_mch_setup(struct drm_device *dev) 5234 { 5235 struct drm_i915_private *dev_priv = dev->dev_private; 5236 uint32_t tmp; 5237 5238 tmp = I915_READ(MCH_SSKPD); 5239 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) 5240 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x. This can cause underruns.\n", 5241 tmp); 5242 } 5243 5244 static void gen6_init_clock_gating(struct drm_device *dev) 5245 { 5246 struct drm_i915_private *dev_priv = dev->dev_private; 5247 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 5248 5249 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 5250 5251 I915_WRITE(ILK_DISPLAY_CHICKEN2, 5252 I915_READ(ILK_DISPLAY_CHICKEN2) | 5253 ILK_ELPIN_409_SELECT); 5254 5255 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */ 5256 I915_WRITE(_3D_CHICKEN, 5257 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); 5258 5259 /* WaSetupGtModeTdRowDispatch:snb */ 5260 if (IS_SNB_GT1(dev)) 5261 I915_WRITE(GEN6_GT_MODE, 5262 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); 5263 5264 /* WaDisable_RenderCache_OperationalFlush:snb */ 5265 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 5266 5267 /* 5268 * BSpec recommends 8x4 when MSAA is used, 5269 * however in practice 16x4 seems fastest. 5270 * 5271 * Note that PS/WM thread counts depend on the WIZ hashing 5272 * disable bit, which we don't touch here, but it's good 5273 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 5274 */ 5275 I915_WRITE(GEN6_GT_MODE, 5276 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); 5277 5278 ilk_init_lp_watermarks(dev); 5279 5280 I915_WRITE(CACHE_MODE_0, 5281 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 5282 5283 I915_WRITE(GEN6_UCGCTL1, 5284 I915_READ(GEN6_UCGCTL1) | 5285 GEN6_BLBUNIT_CLOCK_GATE_DISABLE | 5286 GEN6_CSUNIT_CLOCK_GATE_DISABLE); 5287 5288 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 5289 * gating disable must be set. Failure to set it results in 5290 * flickering pixels due to Z write ordering failures after 5291 * some amount of runtime in the Mesa "fire" demo, and Unigine 5292 * Sanctuary and Tropics, and apparently anything else with 5293 * alpha test or pixel discard. 5294 * 5295 * According to the spec, bit 11 (RCCUNIT) must also be set, 5296 * but we didn't debug actual testcases to find it out. 
5297 * 5298 * WaDisableRCCUnitClockGating:snb 5299 * WaDisableRCPBUnitClockGating:snb 5300 */ 5301 I915_WRITE(GEN6_UCGCTL2, 5302 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | 5303 GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 5304 5305 /* WaStripsFansDisableFastClipPerformanceFix:snb */ 5306 I915_WRITE(_3D_CHICKEN3, 5307 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL)); 5308 5309 /* 5310 * Bspec says: 5311 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and 5312 * 3DSTATE_SF number of SF output attributes is more than 16." 5313 */ 5314 I915_WRITE(_3D_CHICKEN3, 5315 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH)); 5316 5317 /* 5318 * According to the spec the following bits should be 5319 * set in order to enable memory self-refresh and fbc: 5320 * The bit21 and bit22 of 0x42000 5321 * The bit21 and bit22 of 0x42004 5322 * The bit5 and bit7 of 0x42020 5323 * The bit14 of 0x70180 5324 * The bit14 of 0x71180 5325 * 5326 * WaFbcAsynchFlipDisableFbcQueue:snb 5327 */ 5328 I915_WRITE(ILK_DISPLAY_CHICKEN1, 5329 I915_READ(ILK_DISPLAY_CHICKEN1) | 5330 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); 5331 I915_WRITE(ILK_DISPLAY_CHICKEN2, 5332 I915_READ(ILK_DISPLAY_CHICKEN2) | 5333 ILK_DPARB_GATE | ILK_VSDPFD_FULL); 5334 I915_WRITE(ILK_DSPCLK_GATE_D, 5335 I915_READ(ILK_DSPCLK_GATE_D) | 5336 ILK_DPARBUNIT_CLOCK_GATE_ENABLE | 5337 ILK_DPFDUNIT_CLOCK_GATE_ENABLE); 5338 5339 g4x_disable_trickle_feed(dev); 5340 5341 cpt_init_clock_gating(dev); 5342 5343 gen6_check_mch_setup(dev); 5344 } 5345 5346 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) 5347 { 5348 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); 5349 5350 /* 5351 * WaVSThreadDispatchOverride:ivb,vlv 5352 * 5353 * This actually overrides the dispatch 5354 * mode for all thread types. 5355 */ 5356 reg &= ~GEN7_FF_SCHED_MASK; 5357 reg |= GEN7_FF_TS_SCHED_HW; 5358 reg |= GEN7_FF_VS_SCHED_HW; 5359 reg |= GEN7_FF_DS_SCHED_HW; 5360 5361 I915_WRITE(GEN7_FF_THREAD_MODE, reg); 5362 } 5363 5364 static void lpt_init_clock_gating(struct drm_device *dev) 5365 { 5366 struct drm_i915_private *dev_priv = dev->dev_private; 5367 5368 /* 5369 * TODO: this bit should only be enabled when really needed, then 5370 * disabled when not needed anymore in order to save power. 5371 */ 5372 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) 5373 I915_WRITE(SOUTH_DSPCLK_GATE_D, 5374 I915_READ(SOUTH_DSPCLK_GATE_D) | 5375 PCH_LP_PARTITION_LEVEL_DISABLE); 5376 5377 /* WADPOClockGatingDisable:hsw */ 5378 I915_WRITE(_TRANSA_CHICKEN1, 5379 I915_READ(_TRANSA_CHICKEN1) | 5380 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 5381 } 5382 5383 static void lpt_suspend_hw(struct drm_device *dev) 5384 { 5385 struct drm_i915_private *dev_priv = dev->dev_private; 5386 5387 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 5388 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D); 5389 5390 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 5391 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 5392 } 5393 } 5394 5395 static void gen8_init_clock_gating(struct drm_device *dev) 5396 { 5397 struct drm_i915_private *dev_priv = dev->dev_private; 5398 enum i915_pipe pipe; 5399 5400 I915_WRITE(WM3_LP_ILK, 0); 5401 I915_WRITE(WM2_LP_ILK, 0); 5402 I915_WRITE(WM1_LP_ILK, 0); 5403 5404 /* FIXME(BDW): Check all the w/a, some might only apply to 5405 * pre-production hw. 
*/ 5406 5407 /* WaDisablePartialInstShootdown:bdw */ 5408 I915_WRITE(GEN8_ROW_CHICKEN, 5409 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE)); 5410 5411 /* WaDisableThreadStallDopClockGating:bdw */ 5412 /* FIXME: Unclear whether we really need this on production bdw. */ 5413 I915_WRITE(GEN8_ROW_CHICKEN, 5414 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE)); 5415 5416 /* 5417 * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for 5418 * pre-production hardware 5419 */ 5420 I915_WRITE(HALF_SLICE_CHICKEN3, 5421 _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS)); 5422 I915_WRITE(HALF_SLICE_CHICKEN3, 5423 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS)); 5424 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE)); 5425 5426 I915_WRITE(_3D_CHICKEN3, 5427 _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2))); 5428 5429 I915_WRITE(COMMON_SLICE_CHICKEN2, 5430 _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE)); 5431 5432 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 5433 _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE)); 5434 5435 /* WaDisableDopClockGating:bdw May not be needed for production */ 5436 I915_WRITE(GEN7_ROW_CHICKEN2, 5437 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 5438 5439 /* WaSwitchSolVfFArbitrationPriority:bdw */ 5440 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 5441 5442 /* WaPsrDPAMaskVBlankInSRD:bdw */ 5443 I915_WRITE(CHICKEN_PAR1_1, 5444 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); 5445 5446 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ 5447 for_each_pipe(pipe) { 5448 I915_WRITE(CHICKEN_PIPESL_1(pipe), 5449 I915_READ(CHICKEN_PIPESL_1(pipe)) | 5450 BDW_DPRS_MASK_VBLANK_SRD); 5451 } 5452 5453 /* Use Force Non-Coherent whenever executing a 3D context. This is a 5454 * workaround for a possible hang in the unlikely event a TLB 5455 * invalidation occurs during a PSD flush. 5456 */ 5457 I915_WRITE(HDC_CHICKEN0, 5458 I915_READ(HDC_CHICKEN0) | 5459 _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT)); 5460 5461 /* WaVSRefCountFullforceMissDisable:bdw */ 5462 /* WaDSRefCountFullforceMissDisable:bdw */ 5463 I915_WRITE(GEN7_FF_THREAD_MODE, 5464 I915_READ(GEN7_FF_THREAD_MODE) & 5465 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); 5466 5467 /* 5468 * BSpec recommends 8x4 when MSAA is used, 5469 * however in practice 16x4 seems fastest. 5470 * 5471 * Note that PS/WM thread counts depend on the WIZ hashing 5472 * disable bit, which we don't touch here, but it's good 5473 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 5474 */ 5475 I915_WRITE(GEN7_GT_MODE, 5476 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); 5477 5478 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, 5479 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); 5480 5481 /* WaDisableSDEUnitClockGating:bdw */ 5482 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 5483 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 5484 5485 /* Wa4x4STCOptimizationDisable:bdw */ 5486 I915_WRITE(CACHE_MODE_1, 5487 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE)); 5488 } 5489 5490 static void haswell_init_clock_gating(struct drm_device *dev) 5491 { 5492 struct drm_i915_private *dev_priv = dev->dev_private; 5493 5494 ilk_init_lp_watermarks(dev); 5495 5496 /* L3 caching of data atomics doesn't work -- disable it. 
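* Both disable bits are written below: HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE in HSW_SCRATCH1 and the matching HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE in HSW_ROW_CHICKEN3.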
*/ 5497 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); 5498 I915_WRITE(HSW_ROW_CHICKEN3, 5499 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE)); 5500 5501 /* This is required by WaCatErrorRejectionIssue:hsw */ 5502 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 5503 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 5504 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 5505 5506 /* WaVSRefCountFullforceMissDisable:hsw */ 5507 I915_WRITE(GEN7_FF_THREAD_MODE, 5508 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME); 5509 5510 /* WaDisable_RenderCache_OperationalFlush:hsw */ 5511 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 5512 5513 /* enable HiZ Raw Stall Optimization */ 5514 I915_WRITE(CACHE_MODE_0_GEN7, 5515 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); 5516 5517 /* WaDisable4x2SubspanOptimization:hsw */ 5518 I915_WRITE(CACHE_MODE_1, 5519 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 5520 5521 /* 5522 * BSpec recommends 8x4 when MSAA is used, 5523 * however in practice 16x4 seems fastest. 5524 * 5525 * Note that PS/WM thread counts depend on the WIZ hashing 5526 * disable bit, which we don't touch here, but it's good 5527 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 5528 */ 5529 I915_WRITE(GEN7_GT_MODE, 5530 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); 5531 5532 /* WaSwitchSolVfFArbitrationPriority:hsw */ 5533 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 5534 5535 /* WaRsPkgCStateDisplayPMReq:hsw */ 5536 I915_WRITE(CHICKEN_PAR1_1, 5537 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); 5538 5539 lpt_init_clock_gating(dev); 5540 } 5541 5542 static void ivybridge_init_clock_gating(struct drm_device *dev) 5543 { 5544 struct drm_i915_private *dev_priv = dev->dev_private; 5545 uint32_t snpcr; 5546 5547 ilk_init_lp_watermarks(dev); 5548 5549 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); 5550 5551 /* WaDisableEarlyCull:ivb */ 5552 I915_WRITE(_3D_CHICKEN3, 5553 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); 5554 5555 /* WaDisableBackToBackFlipFix:ivb */ 5556 I915_WRITE(IVB_CHICKEN3, 5557 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 5558 CHICKEN3_DGMG_DONE_FIX_DISABLE); 5559 5560 /* WaDisablePSDDualDispatchEnable:ivb */ 5561 if (IS_IVB_GT1(dev)) 5562 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 5563 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 5564 5565 /* WaDisable_RenderCache_OperationalFlush:ivb */ 5566 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 5567 5568 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */ 5569 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 5570 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 5571 5572 /* WaApplyL3ControlAndL3ChickenMode:ivb */ 5573 I915_WRITE(GEN7_L3CNTLREG1, 5574 GEN7_WA_FOR_GEN7_L3_CONTROL); 5575 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, 5576 GEN7_WA_L3_CHICKEN_MODE); 5577 if (IS_IVB_GT1(dev)) 5578 I915_WRITE(GEN7_ROW_CHICKEN2, 5579 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 5580 else { 5581 /* must write both registers */ 5582 I915_WRITE(GEN7_ROW_CHICKEN2, 5583 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 5584 I915_WRITE(GEN7_ROW_CHICKEN2_GT2, 5585 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 5586 } 5587 5588 /* WaForceL3Serialization:ivb */ 5589 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & 5590 ~L3SQ_URB_READ_CAM_MATCH_DISABLE); 5591 5592 /* 5593 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 5594 * This implements the WaDisableRCZUnitClockGating:ivb workaround. 

static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	mutex_unlock(&dev_priv->rps.hw_lock);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on VLV too.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating - MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
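
/*
 * Worked decode of the PUNIT_REG_GPU_FREQ_STS read at the top of
 * valleyview_init_clock_gating(), matching the switch above (bits 7:6
 * carry the DDR speed; the raw values are hypothetical):
 *
 *	val = 0x00000080  ->  (val >> 6) & 3 == 2  ->  mem_freq = 1066
 *	val = 0x000000c0  ->  (val >> 6) & 3 == 3  ->  mem_freq = 1333
 *
 * mem_freq later drives the byt_gpu_freq()/byt_freq_opcode() conversions
 * near the end of this file.
 */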

static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
	mutex_unlock(&dev_priv->rps.hw_lock);
	switch ((val >> 2) & 0x7) {
	case 0:
	case 1:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
		dev_priv->mem_freq = 1600;
		break;
	case 2:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
		dev_priv->mem_freq = 1600;
		break;
	case 3:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
		dev_priv->mem_freq = 2000;
		break;
	case 4:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
		dev_priv->mem_freq = 1600;
		break;
	case 5:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/* WaDisablePartialInstShootdown:chv */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));

	/* WaDisableThreadStallDopClockGating:chv */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSamplerPowerBypass:chv (pre-production hw) */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));

	/* WaDisableGunitClockGating:chv (pre-production hw) */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
		   GINT_DIS);

	/* WaDisableFfDopClockGating:chv (pre-production hw) */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));

	/* WaDisableDopClockGating:chv (pre-production hw) */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}

void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);
}

void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			   \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);  \
	     i--)							   \
		if ((power_well)->domains & (domain_mask))
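
/*
 * These iterators walk the platform's power well array in index order
 * (or reverse order for the _rev variant) and use the trailing
 * if-statement to filter on the requested domain mask.  A minimal usage
 * sketch with hypothetical locals (real callers also hold
 * power_domains->lock, as the functions below do):
 *
 *	struct i915_power_well *well;
 *	int i;
 *
 *	for_each_power_well(i, well, BIT(POWER_DOMAIN_AUDIO), power_domains)
 *		DRM_DEBUG_KMS("%s serves the audio domain\n", well->name);
 *
 * Because the filter is a dangling if, an else after the macro body
 * would bind to it; keep the body a simple statement or block.
 */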

/**
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
					  enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = intel_display_power_enabled_unlocked(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So we touch the VGA MSR register here, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
#if 0
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif

	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv);
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
		}

		hsw_power_well_post_enable(dev_priv);
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}
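
/*
 * For reference, the "4 registers" mentioned in the comment above are
 * per-requester instances of the same request/state pair: BIOS, driver,
 * KVMr and debug (only HSW_PWR_WELL_BIOS and HSW_PWR_WELL_DRIVER are
 * touched in this file).  The well stays powered while any requester has
 * its enable-request bit set, and the STATE bit reflects the combined
 * result, which is why hsw_set_power_well() above can find the well
 * already enabled without the driver having requested it.
 */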

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
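
/*
 * A worked example of the Punit handshake above, assuming the usual
 * two-bits-per-well layout behind PUNIT_PWRGT_MASK()/_PWR_ON()/
 * _PWR_GATE() (the authoritative encoding lives with the register
 * definitions, so treat the numbers as illustrative):
 *
 *	power_well_id == 3  ->  mask  == 3 << 6 == 0x000000c0
 *	                        state == enable ? 0 : (2 << 6)
 *
 * i.e. we read-modify-write only our well's two-bit field in
 * PUNIT_REG_PWRGT_CTRL, then poll PUNIT_REG_PWRGT_STATUS until that same
 * field reports the requested state.
 */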

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	valleyview_enable_display_irqs(dev_priv);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be re-initialized explicitly
	 * anyway.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv->dev);

	i915_redisable_vga_power_on(dev_priv->dev);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	valleyview_disable_display_irqs(dev_priv);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *	b.	The other bits such as sfr settings / modesel may all
	 *		be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;
	enum i915_pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void check_power_well_state(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	bool enabled = power_well->ops->is_enabled(dev_priv, power_well);

	if (power_well->always_on || !i915.disable_power_well) {
		if (!enabled)
			goto mismatch;

		return;
	}

	if (enabled != (power_well->count > 0))
		goto mismatch;

	return;

mismatch:
	WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d)\n",
	     power_well->name, power_well->always_on, enabled,
	     power_well->count, i915.disable_power_well);
}

void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
			power_well->hw_enabled = true;
		}

		check_power_well_state(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}

void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well) {
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
			power_well->hw_enabled = false;
			power_well->ops->disable(dev_priv, power_well);
		}

		check_power_well_state(dev_priv, power_well);
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}
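
/*
 * The get/put pair above is plain reference counting per well and per
 * domain: a well powers up on the 0 -> 1 transition of its use count and
 * powers down on 1 -> 0 (wells are enabled lowest index first and
 * disabled in reverse).  A minimal usage sketch for a caller that needs
 * a domain powered across some register access:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
 *	// ... touch registers that need the wells backing the
 *	// audio domain ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
 *
 * Every get must be balanced by exactly one put, or the well (and the
 * runtime PM reference taken in intel_display_power_get()) leaks.
 */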

static struct i915_power_domains *hsw_pwr;

/* Display audio driver power well request */
int i915_request_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
	return 0;
}

/* Display audio driver power well release */
int i915_release_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
	return 0;
}

/*
 * Private interface for the audio driver to get CDCLK in kHz.
 *
 * Caller must request power well using i915_request_power_well() prior to
 * making the call.
 */
int i915_get_cdclk_freq(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);

	return intel_ddi_get_cdclk_freq(dev_priv);
}
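
/*
 * A sketch of how an external audio driver is expected to use the three
 * entry points above (error handling elided; the calling driver is
 * hypothetical here):
 *
 *	int cdclk;
 *
 *	if (i915_request_power_well() == 0) {
 *		cdclk = i915_get_cdclk_freq();	// valid while requested
 *		// ... program the display audio hardware ...
 *		i915_release_power_well();
 *	}
 *
 * The -ENODEV return covers the case where i915 isn't loaded (or isn't
 * driving HSW/BDW), since hsw_pwr is only set in
 * intel_power_domains_init().
 */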

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
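
/*
 * How these tables compose: a domain may be served by several wells, and
 * a get on that domain powers all of them up.  For example, per the
 * vlv_power_wells[] table above, an
 * intel_display_power_get(dev_priv, POWER_DOMAIN_PORT_CRT) powers up
 * both "display" (VLV_DISPLAY_POWER_DOMAINS covers every domain) and
 * "dpio-common", while the "dpio-tx-*" wells are skipped because
 * POWER_DOMAIN_PORT_CRT is not in their masks.
 */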

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 enum punit_power_well power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})

int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	lockinit(&power_domains->lock, "i915pl", 0, LK_CANRECURSE);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}

void intel_power_domains_remove(struct drm_i915_private *dev_priv)
{
	hsw_pwr = NULL;
}

static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* nothing to do if common lane is already off */
	if (!cmn->ops->is_enabled(dev_priv, cmn))
		return;

	/* If the display might be already active skip this */
	if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}

void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}

void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}

void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
#if 0
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_get_sync(device);
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
#endif
}

void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
#if 0
	struct device *device = &dev->pdev->dev;
#endif

	if (!HAS_RUNTIME_PM(dev))
		return;

	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
#if 0
	pm_runtime_get_noresume(device);
#endif
}

void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
#if 0
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_mark_last_busy(device);
	pm_runtime_put_autosuspend(device);
#endif
}

void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
#if 0
	struct device *device = &dev->pdev->dev;
#endif

	if (!HAS_RUNTIME_PM(dev))
		return;

#if 0
	pm_runtime_set_active(device);

	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		return;
	}

	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);
	pm_runtime_use_autosuspend(device);

	pm_runtime_put_autosuspend(device);
#endif
}

void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
{
#if 0
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	if (!intel_enable_rc6(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
	pm_runtime_disable(device);
#endif
}
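
/*
 * On this port most of the runtime PM plumbing is stubbed out (#if 0),
 * but the intended usage contract is the same as on Linux: any code path
 * that touches hardware while the device might be runtime suspended
 * brackets the access with a reference, e.g.
 *
 *	intel_runtime_pm_get(dev_priv);	// wakes the device if needed
 *	// ... MMIO access is safe here ...
 *	intel_runtime_pm_put(dev_priv);	// rearms autosuspend
 *
 * This is only the expected calling convention; with the stubs above it
 * currently compiles down to no-ops.
 */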

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FBC(dev)) {
		if (INTEL_INFO(dev)->gen >= 7) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = gen7_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (INTEL_INFO(dev)->gen >= 5) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;

			/* This value was pulled out of someone's hat */
			I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
		}
	}

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.update_wm = ilk_update_wm;
			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disabling CxSR\n");
		}

		if (IS_GEN5(dev))
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		else if (IS_GEN6(dev))
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		else if (IS_IVYBRIDGE(dev))
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		else if (IS_HASWELL(dev))
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		else if (INTEL_INFO(dev)->gen == 8)
			dev_priv->display.init_clock_gating = gen8_init_clock_gating;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.init_clock_gating =
			cherryview_init_clock_gating;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}

		if (IS_I85X(dev) || IS_I865G(dev))
			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		else
			dev_priv->display.init_clock_gating = i830_init_clock_gating;
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

/*
 * GT "pcode" mailbox accessors.  The caller must hold rps.hw_lock; note
 * that the read variant first sends *val as the request payload before
 * overwriting it with the reply.
 */
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

/*
 * GPU frequency <-> Punit opcode conversions for BYT/CHV.  The constant
 * 0xbd rebases the raw Punit opcode before scaling by the memory clock.
 * Worked example, assuming mem_freq == 1066 (the div == 12 case):
 *
 *	byt_gpu_freq(0xbd)   == DIV_ROUND_CLOSEST(1066 * 6, 48) == 133 MHz
 *	byt_freq_opcode(133) == DIV_ROUND_CLOSEST(48 * 133, 1066) + 0xbd - 6
 *			     == 0xbd
 *
 * The pair is only an approximate inverse because of the rounding.
 */
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		div = 10;
		break;
	case 1066:
		div = 12;
		break;
	case 1333:
		div = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		mul = 10;
		break;
	case 1066:
		mul = 12;
		break;
	case 1333:
		mul = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}

static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, freq;

	switch (dev_priv->rps.cz_freq) {
	case 200:
		div = 5;
		break;
	case 267:
		div = 6;
		break;
	case 320:
	case 333:
	case 400:
		div = 8;
		break;
	default:
		return -1;
	}

	freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);

	return freq;
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, opcode;

	switch (dev_priv->rps.cz_freq) {
	case 200:
		mul = 5;
		break;
	case 267:
		mul = 6;
		break;
	case 320:
	case 333:
	case 400:
		mul = 8;
		break;
	default:
		return -1;
	}

	opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);

	return opcode;
}

int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int ret = -1;

	if (IS_CHERRYVIEW(dev_priv->dev))
		ret = chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		ret = byt_gpu_freq(dev_priv, val);

	return ret;
}

int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int ret = -1;

	if (IS_CHERRYVIEW(dev_priv->dev))
		ret = chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		ret = byt_freq_opcode(dev_priv, val);

	return ret;
}

void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockinit(&dev_priv->rps.hw_lock, "i915 rps.hw_lock", 0, LK_CANRECURSE);

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);

	dev_priv->pm.suspended = false;
	dev_priv->pm._irqs_disabled = false;
}
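
/*
 * A quick round-trip sanity check for vlv_gpu_freq()/vlv_freq_opcode()
 * above, e.g. from debugfs code (the usage site is hypothetical; numbers
 * follow the BYT worked example documented before byt_gpu_freq()):
 *
 *	int mhz = vlv_gpu_freq(dev_priv, 0xbd);		// 133 on BYT @ 1066
 *	int opcode = vlv_freq_opcode(dev_priv, mhz);	// back to 0xbd
 *
 * Both helpers return -1 if mem_freq/cz_freq was never set to a
 * recognized value by the clock gating init earlier in this file.
 */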