/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <drm/drmP.h>
#include "intel_drv.h"
#include "i915_drv.h"

void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (_intel_wait_for(dev,
	    (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10,
	    1, "915fbd")) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}

void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 64B units */
	cfb_pitch = (cfb_pitch / 64) - 1;
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
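	/*
	 * Illustrative example (figures assumed, not taken from any
	 * particular platform): a 4096-byte scanout pitch programs a
	 * stride field of 4096 / 64 - 1 = 63, so the control word built
	 * below ends up as FBC_CTL_EN | FBC_CTL_PERIODIC |
	 * (63 << FBC_CTL_STRIDE_SHIFT) | (interval << FBC_CTL_INTERVAL_SHIFT)
	 * | fence number.
	 */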
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
		      cfb_pitch, crtc->y, intel_crtc->plane);
}

bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}

void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}

static void intel_fbc_work_fn(void *arg, int pending)
{
	struct intel_fbc_work *work = arg;
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_LOCK(dev);
	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}
	DRM_UNLOCK(dev);

	drm_free(work, DRM_MEM_KMS);
}

static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	u_int pending;

	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->fbc_work->task,
	    &pending) == 0)
		/* tasklet was killed before being run, clean up */
		drm_free(dev_priv->fbc_work, DRM_MEM_KMS);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
}

void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kmalloc(sizeof(*work), DRM_MEM_KMS, M_WAITOK | M_ZERO);
	work->crtc = crtc;
	work->fb = crtc->fb;
	work->interval = interval;
	TIMEOUT_TASK_INIT(dev_priv->tq, &work->task, 0, intel_fbc_work_fn,
	    work);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
	taskqueue_enqueue_timeout(dev_priv->tq, &work->task,
	    msecs_to_jiffies(50));
}

void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->cfb_plane = -1;
}

/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time. We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one. It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;

	DRM_DEBUG_KMS("\n");

	if (!i915_powersave)
		return;

	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled && tmp_crtc->fb) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 6)
			enable_fbc = 0;
	}
	if (!enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

#ifdef DDB
	/* If the kernel debugger is active, always disable compression */
	if (db_active)
		goto out_disable;
#endif

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}

void i915_ironlake_get_mem_freq(struct drm_device *dev);
void i915_pineview_get_mem_freq(struct drm_device *dev);

void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG("unknown memory frequency 0x%02x\n",
			  ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG("unknown fsb frequency 0x%04x\n",
			  csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->c_m = 1;
	} else {
		dev_priv->c_m = 2;
	}
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_HPLLOFF_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
	G4X_FIFO_SIZE,
	G4X_MAX_WM,
	G4X_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	I945_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
	I915_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
	I855GM_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
	I830_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};

static const struct intel_watermark_params ironlake_display_wm_info = {
	ILK_DISPLAY_FIFO,
	ILK_DISPLAY_MAXWM,
	ILK_DISPLAY_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
	ILK_CURSOR_FIFO,
	ILK_CURSOR_MAXWM,
	ILK_CURSOR_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
	ILK_DISPLAY_SR_FIFO,
	ILK_DISPLAY_MAX_SRWM,
	ILK_DISPLAY_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
	ILK_CURSOR_SR_FIFO,
	ILK_CURSOR_MAX_SRWM,
	ILK_CURSOR_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};

static const struct intel_watermark_params sandybridge_display_wm_info = {
	SNB_DISPLAY_FIFO,
	SNB_DISPLAY_MAXWM,
	SNB_DISPLAY_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
	SNB_CURSOR_FIFO,
	SNB_CURSOR_MAXWM,
	SNB_CURSOR_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
	SNB_DISPLAY_SR_FIFO,
	SNB_DISPLAY_MAX_SRWM,
	SNB_DISPLAY_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
	SNB_CURSOR_SR_FIFO,
	SNB_CURSOR_MAX_SRWM,
	SNB_CURSOR_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};
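
/*
 * Descriptive note (based on how intel_calculate_wm() consumes these
 * tables): the positional initializers above are assumed to fill the
 * intel_watermark_params fields in the order fifo_size, max_wm,
 * default_wm, guard_size, cacheline_size.
 */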

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = howmany(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
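
/*
 * Worked example for intel_calculate_wm(), with assumed figures: a
 * 100000 kHz dot clock at 4 bytes/pixel and a 5000 ns latency needs
 * (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes, i.e. 32 cachelines
 * of 64 bytes; the watermark is then fifo_size - (32 + guard_size).
 */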

struct cxsr_latency {
	int is_desktop;
	int is_ddr3;
	unsigned long fsb_freq;
	unsigned long mem_freq;
	unsigned long display_sr;
	unsigned long display_hpll_disable;
	unsigned long cursor_sr;
	unsigned long cursor_hpll_disable;
};

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
						  int is_ddr3,
						  int fsb,
						  int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < DRM_ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;

int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}

int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (crtc->enabled && crtc->fb) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}

static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = howmany(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = howmany(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = howmany(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = howmany(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

#define single_plane_enabled(mask) ((mask) != 0 && powerof2(mask))
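
/*
 * Descriptive note: with the pipe bitmask used below, e.g.
 * single_plane_enabled(0x1) and single_plane_enabled(0x2) are true,
 * while single_plane_enabled(0x3) is false because two planes are
 * active (illustrative values).
 */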

void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}

void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = howmany(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = howmany(entries, i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}

void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (crtc->enabled && crtc->fb) {
		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (crtc->enabled && crtc->fb) {
		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size,
					       crtc->fb->bits_per_pixel / 8,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = howmany(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}

void i830_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       crtc->fb->bits_per_pixel / 8,
				       latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

#define ILK_LP0_PLANE_LATENCY		700
#define ILK_LP0_CURSOR_LATENCY		1300

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool ironlake_check_srwm(struct drm_device *dev, int level,
				int fbc_wm, int display_wm, int cursor_wm,
				const struct intel_watermark_params *display,
				const struct intel_watermark_params *cursor)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);

	if (fbc_wm > SNB_FBC_MAX_SRWM) {
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
			      fbc_wm, SNB_FBC_MAX_SRWM, level);

		/* fbc has its own way to disable FBC WM */
		I915_WRITE(DISP_ARB_CTL,
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
		return false;
	}

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
		return false;
	}

	if (!(fbc_wm || display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
		return false;
	}

	return true;
}

/*
 * Compute watermark values of WM[1-3].
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
				  int latency_ns,
				  const struct intel_watermark_params *display,
				  const struct intel_watermark_params *cursor,
				  int *fbc_wm, int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int hdisplay, htotal, pixel_size, clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*fbc_wm = *display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = howmany(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/*
	 * Spec says:
	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
	 */
	*fbc_wm = howmany(*display_wm * 64, line_size) + 2;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = howmany(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return ironlake_check_srwm(dev, level,
				   *fbc_wm, *display_wm, *cursor_wm,
				   display, cursor);
}

void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}

void sandybridge_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEA_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEA_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEB_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEB_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/* IVB has 3 pipes */
	if (IS_IVYBRIDGE(dev) &&
	    g4x_compute_wm0(dev, 2,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEC_IVB);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEC_IVB, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 3;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 *
	 * SNB supports 3 levels of watermarks.
	 *
	 * WM1/WM2/WM3 watermarks have to be enabled in the ascending order,
	 * and disabled in the descending order
	 *
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled) ||
	    dev_priv->sprite_scaling_enabled)
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   SNB_READ_WM1_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   SNB_READ_WM2_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM3 */
	if (!ironlake_compute_srwm(dev, 3, enabled,
				   SNB_READ_WM3_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM3_LP_ILK,
		   WM3_LP_EN |
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);
}

static bool
sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
			      uint32_t sprite_width, int pixel_size,
			      const struct intel_watermark_params *display,
			      int display_latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	int clock;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*sprite_wm = display->guard_size;
		return false;
	}

	clock = crtc->mode.clock;

	/* Use the small buffer method to calculate the sprite watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size -
		sprite_width * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = howmany(entries, display->cacheline_size);
	*sprite_wm = entries + display->guard_size;
	if (*sprite_wm > (int)display->max_wm)
		*sprite_wm = display->max_wm;

	return true;
}

static bool
sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
				uint32_t sprite_width, int pixel_size,
				const struct intel_watermark_params *display,
				int latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*sprite_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	clock = crtc->mode.clock;
	if (!clock) {
		*sprite_wm = 0;
		return false;
	}
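
	/*
	 * Descriptive note: the line time below is approximated from the
	 * sprite width rather than the full htotal. With illustrative
	 * (assumed) figures, a 1920-wide sprite on a 150000 kHz clock
	 * gives 1920 * 1000 / 150000 = 12 us per line.
	 */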
	line_time_us = (sprite_width * 1000) / clock;
	if (!line_time_us) {
		*sprite_wm = 0;
		return false;
	}

	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = sprite_width * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = howmany(min(small, large), display->cacheline_size);
	*sprite_wm = entries + display->guard_size;

	return *sprite_wm > 0x3ff ? false : true;
}

void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
				  uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int sprite_wm, reg;
	int ret;

	switch (pipe) {
	case 0:
		reg = WM0_PIPEA_ILK;
		break;
	case 1:
		reg = WM0_PIPEB_ILK;
		break;
	case 2:
		reg = WM0_PIPEC_IVB;
		break;
	default:
		return; /* bad pipe */
	}

	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
					    &sandybridge_display_wm_info,
					    latency, &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
			      pipe);
		return;
	}

	val = I915_READ(reg);
	val &= ~WM0_PIPE_SPRITE_MASK;
	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);


	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM1_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM1S_LP_ILK, sprite_wm);

	/* Only IVB has two more LP watermarks for sprite */
	if (!IS_IVYBRIDGE(dev))
		return;

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM2_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM2S_LP_IVB, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM3_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
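
/*
 * Worked example for the self-refresh formula documented below, with
 * assumed figures: a 12 us latency and a 15 us line time give
 * trunc(12 / 15) + 1 = 1 line, so a 1920-wide plane at 4 bytes/pixel
 * needs roughly 1 * 1920 * 4 = 7680 bytes (about 120 cachelines).
 */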
1917 * 1918 * The SR calculation is: 1919 * watermark = (trunc(latency/line time)+1) * surface width * 1920 * bytes per pixel 1921 * where 1922 * line time = htotal / dotclock 1923 * surface width = hdisplay for normal plane and 64 for cursor 1924 * and latency is assumed to be high, as above. 1925 * 1926 * The final value programmed to the register should always be rounded up, 1927 * and include an extra 2 entries to account for clock crossings. 1928 * 1929 * We don't use the sprite, so we can ignore that. And on Crestline we have 1930 * to set the non-SR watermarks to 8. 1931 */ 1932 void intel_update_watermarks(struct drm_device *dev) 1933 { 1934 struct drm_i915_private *dev_priv = dev->dev_private; 1935 1936 if (dev_priv->display.update_wm) 1937 dev_priv->display.update_wm(dev); 1938 } 1939 1940 void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, 1941 uint32_t sprite_width, int pixel_size) 1942 { 1943 struct drm_i915_private *dev_priv = dev->dev_private; 1944 1945 if (dev_priv->display.update_sprite_wm) 1946 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width, 1947 pixel_size); 1948 } 1949 1950 static struct drm_i915_gem_object * 1951 intel_alloc_context_page(struct drm_device *dev) 1952 { 1953 struct drm_i915_gem_object *ctx; 1954 int ret; 1955 1956 DRM_LOCK_ASSERT(dev); 1957 1958 ctx = i915_gem_alloc_object(dev, 4096); 1959 if (!ctx) { 1960 DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); 1961 return NULL; 1962 } 1963 1964 ret = i915_gem_object_pin(ctx, 4096, true); 1965 if (ret) { 1966 DRM_ERROR("failed to pin power context: %d\n", ret); 1967 goto err_unref; 1968 } 1969 1970 ret = i915_gem_object_set_to_gtt_domain(ctx, 1); 1971 if (ret) { 1972 DRM_ERROR("failed to set-domain on power context: %d\n", ret); 1973 goto err_unpin; 1974 } 1975 1976 return ctx; 1977 1978 err_unpin: 1979 i915_gem_object_unpin(ctx); 1980 err_unref: 1981 drm_gem_object_unreference(&ctx->base); 1982 DRM_UNLOCK(dev); 1983 return NULL; 1984 } 1985 1986 /** 1987 * Lock protecting IPS related data structures 1988 */ 1989 struct lock mchdev_lock; 1990 LOCK_SYSINIT(mchdev, &mchdev_lock, "mchdev", LK_CANRECURSE); 1991 1992 /* Global for IPS driver to get at the current i915 device. Protected by 1993 * mchdev_lock. 
*/ 1994 struct drm_i915_private *i915_mch_dev; 1995 1996 bool ironlake_set_drps(struct drm_device *dev, u8 val) 1997 { 1998 struct drm_i915_private *dev_priv = dev->dev_private; 1999 u16 rgvswctl; 2000 2001 rgvswctl = I915_READ16(MEMSWCTL); 2002 if (rgvswctl & MEMCTL_CMD_STS) { 2003 DRM_DEBUG("gpu busy, RCS change rejected\n"); 2004 return false; /* still busy with another command */ 2005 } 2006 2007 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 2008 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; 2009 I915_WRITE16(MEMSWCTL, rgvswctl); 2010 POSTING_READ16(MEMSWCTL); 2011 2012 rgvswctl |= MEMCTL_CMD_STS; 2013 I915_WRITE16(MEMSWCTL, rgvswctl); 2014 2015 return true; 2016 } 2017 2018 void ironlake_enable_drps(struct drm_device *dev) 2019 { 2020 struct drm_i915_private *dev_priv = dev->dev_private; 2021 u32 rgvmodectl = I915_READ(MEMMODECTL); 2022 u8 fmax, fmin, fstart, vstart; 2023 2024 /* Enable temp reporting */ 2025 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); 2026 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); 2027 2028 /* 100ms RC evaluation intervals */ 2029 I915_WRITE(RCUPEI, 100000); 2030 I915_WRITE(RCDNEI, 100000); 2031 2032 /* Set max/min thresholds to 90ms and 80ms respectively */ 2033 I915_WRITE(RCBMAXAVG, 90000); 2034 I915_WRITE(RCBMINAVG, 80000); 2035 2036 I915_WRITE(MEMIHYST, 1); 2037 2038 /* Set up min, max, and cur for interrupt handling */ 2039 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; 2040 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 2041 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 2042 MEMMODE_FSTART_SHIFT; 2043 2044 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> 2045 PXVFREQ_PX_SHIFT; 2046 2047 dev_priv->fmax = fmax; /* IPS callback will increase this */ 2048 dev_priv->fstart = fstart; 2049 2050 dev_priv->max_delay = fstart; 2051 dev_priv->min_delay = fmin; 2052 dev_priv->cur_delay = fstart; 2053 2054 DRM_DEBUG("fmax: %d, fmin: %d, fstart: %d\n", 2055 fmax, fmin, fstart); 2056 2057 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 2058 2059 /* 2060 * Interrupts will be enabled in ironlake_irq_postinstall 2061 */ 2062 2063 I915_WRITE(VIDSTART, vstart); 2064 POSTING_READ(VIDSTART); 2065 2066 rgvmodectl |= MEMMODE_SWMODE_EN; 2067 I915_WRITE(MEMMODECTL, rgvmodectl); 2068 2069 if (_intel_wait_for(dev, 2070 (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10, 2071 1, "915per")) 2072 DRM_ERROR("stuck trying to change perf mode\n"); 2073 DELAY(1000); 2074 2075 ironlake_set_drps(dev, fstart); 2076 2077 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + 2078 I915_READ(0x112e0); 2079 dev_priv->last_time1 = jiffies_to_msecs(jiffies); 2080 dev_priv->last_count2 = I915_READ(0x112f4); 2081 nanotime(&dev_priv->last_time2); 2082 } 2083 2084 void ironlake_disable_drps(struct drm_device *dev) 2085 { 2086 struct drm_i915_private *dev_priv = dev->dev_private; 2087 u16 rgvswctl = I915_READ16(MEMSWCTL); 2088 2089 /* Ack interrupts, disable EFC interrupt */ 2090 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); 2091 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); 2092 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); 2093 I915_WRITE(DEIIR, DE_PCU_EVENT); 2094 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); 2095 2096 /* Go back to the starting frequency */ 2097 ironlake_set_drps(dev, dev_priv->fstart); 2098 DELAY(1000); 2099 rgvswctl |= MEMCTL_CMD_STS; 2100 I915_WRITE(MEMSWCTL, rgvswctl); 2101 DELAY(1000); 2102 2103 } 2104 2105 void gen6_set_rps(struct drm_device *dev, u8 val) 2106 { 2107 struct 
drm_i915_private *dev_priv = dev->dev_private; 2108 u32 swreq; 2109 2110 swreq = (val & 0x3ff) << 25; 2111 I915_WRITE(GEN6_RPNSWREQ, swreq); 2112 } 2113 2114 void gen6_disable_rps(struct drm_device *dev) 2115 { 2116 struct drm_i915_private *dev_priv = dev->dev_private; 2117 2118 I915_WRITE(GEN6_RPNSWREQ, 1 << 31); 2119 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 2120 I915_WRITE(GEN6_PMIER, 0); 2121 /* Complete PM interrupt masking here doesn't race with the rps work 2122 * item again unmasking PM interrupts because that is using a different 2123 * register (PMIMR) to mask PM interrupts. The only risk is in leaving 2124 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ 2125 2126 lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE); 2127 dev_priv->pm_iir = 0; 2128 lockmgr(&dev_priv->rps_lock, LK_RELEASE); 2129 2130 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2131 } 2132 2133 static unsigned long intel_pxfreq(u32 vidfreq) 2134 { 2135 unsigned long freq; 2136 int div = (vidfreq & 0x3f0000) >> 16; 2137 int post = (vidfreq & 0x3000) >> 12; 2138 int pre = (vidfreq & 0x7); 2139 2140 if (!pre) 2141 return 0; 2142 2143 freq = ((div * 133333) / ((1<<post) * pre)); 2144 2145 return freq; 2146 } 2147 2148 static const struct cparams { 2149 u16 i; 2150 u16 t; 2151 u16 m; 2152 u16 c; 2153 } cparams[] = { 2154 { 1, 1333, 301, 28664 }, 2155 { 1, 1066, 294, 24460 }, 2156 { 1, 800, 294, 25192 }, 2157 { 0, 1333, 276, 27605 }, 2158 { 0, 1066, 276, 27605 }, 2159 { 0, 800, 231, 23784 }, 2160 }; 2161 2162 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 2163 { 2164 u64 total_count, diff, ret; 2165 u32 count1, count2, count3, m = 0, c = 0; 2166 unsigned long now = jiffies_to_msecs(jiffies), diff1; 2167 int i; 2168 2169 diff1 = now - dev_priv->last_time1; 2170 /* 2171 * sysctl(8) reads the value of sysctl twice in rapid 2172 * succession. There is high chance that it happens in the 2173 * same timer tick. Use the cached value to not divide by 2174 * zero and give the hw a chance to gather more samples. 
2175 */ 2176 if (diff1 <= 10) 2177 return (dev_priv->chipset_power); 2178 2179 count1 = I915_READ(DMIEC); 2180 count2 = I915_READ(DDREC); 2181 count3 = I915_READ(CSIEC); 2182 2183 total_count = count1 + count2 + count3; 2184 2185 /* FIXME: handle per-counter overflow */ 2186 if (total_count < dev_priv->last_count1) { 2187 diff = ~0UL - dev_priv->last_count1; 2188 diff += total_count; 2189 } else { 2190 diff = total_count - dev_priv->last_count1; 2191 } 2192 2193 for (i = 0; i < DRM_ARRAY_SIZE(cparams); i++) { 2194 if (cparams[i].i == dev_priv->c_m && 2195 cparams[i].t == dev_priv->r_t) { 2196 m = cparams[i].m; 2197 c = cparams[i].c; 2198 break; 2199 } 2200 } 2201 2202 diff = diff / diff1; 2203 ret = ((m * diff) + c); 2204 ret = ret / 10; 2205 2206 dev_priv->last_count1 = total_count; 2207 dev_priv->last_time1 = now; 2208 2209 dev_priv->chipset_power = ret; 2210 return (ret); 2211 } 2212 2213 unsigned long i915_mch_val(struct drm_i915_private *dev_priv) 2214 { 2215 unsigned long m, x, b; 2216 u32 tsfs; 2217 2218 tsfs = I915_READ(TSFS); 2219 2220 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); 2221 x = I915_READ8(TR1); 2222 2223 b = tsfs & TSFS_INTR_MASK; 2224 2225 return ((m * x) / 127) - b; 2226 } 2227 2228 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 2229 { 2230 static const struct v_table { 2231 u16 vd; /* in .1 mil */ 2232 u16 vm; /* in .1 mil */ 2233 } v_table[] = { 2234 { 0, 0, }, 2235 { 375, 0, }, 2236 { 500, 0, }, 2237 { 625, 0, }, 2238 { 750, 0, }, 2239 { 875, 0, }, 2240 { 1000, 0, }, 2241 { 1125, 0, }, 2242 { 4125, 3000, }, 2243 { 4125, 3000, }, 2244 { 4125, 3000, }, 2245 { 4125, 3000, }, 2246 { 4125, 3000, }, 2247 { 4125, 3000, }, 2248 { 4125, 3000, }, 2249 { 4125, 3000, }, 2250 { 4125, 3000, }, 2251 { 4125, 3000, }, 2252 { 4125, 3000, }, 2253 { 4125, 3000, }, 2254 { 4125, 3000, }, 2255 { 4125, 3000, }, 2256 { 4125, 3000, }, 2257 { 4125, 3000, }, 2258 { 4125, 3000, }, 2259 { 4125, 3000, }, 2260 { 4125, 3000, }, 2261 { 4125, 3000, }, 2262 { 4125, 3000, }, 2263 { 4125, 3000, }, 2264 { 4125, 3000, }, 2265 { 4125, 3000, }, 2266 { 4250, 3125, }, 2267 { 4375, 3250, }, 2268 { 4500, 3375, }, 2269 { 4625, 3500, }, 2270 { 4750, 3625, }, 2271 { 4875, 3750, }, 2272 { 5000, 3875, }, 2273 { 5125, 4000, }, 2274 { 5250, 4125, }, 2275 { 5375, 4250, }, 2276 { 5500, 4375, }, 2277 { 5625, 4500, }, 2278 { 5750, 4625, }, 2279 { 5875, 4750, }, 2280 { 6000, 4875, }, 2281 { 6125, 5000, }, 2282 { 6250, 5125, }, 2283 { 6375, 5250, }, 2284 { 6500, 5375, }, 2285 { 6625, 5500, }, 2286 { 6750, 5625, }, 2287 { 6875, 5750, }, 2288 { 7000, 5875, }, 2289 { 7125, 6000, }, 2290 { 7250, 6125, }, 2291 { 7375, 6250, }, 2292 { 7500, 6375, }, 2293 { 7625, 6500, }, 2294 { 7750, 6625, }, 2295 { 7875, 6750, }, 2296 { 8000, 6875, }, 2297 { 8125, 7000, }, 2298 { 8250, 7125, }, 2299 { 8375, 7250, }, 2300 { 8500, 7375, }, 2301 { 8625, 7500, }, 2302 { 8750, 7625, }, 2303 { 8875, 7750, }, 2304 { 9000, 7875, }, 2305 { 9125, 8000, }, 2306 { 9250, 8125, }, 2307 { 9375, 8250, }, 2308 { 9500, 8375, }, 2309 { 9625, 8500, }, 2310 { 9750, 8625, }, 2311 { 9875, 8750, }, 2312 { 10000, 8875, }, 2313 { 10125, 9000, }, 2314 { 10250, 9125, }, 2315 { 10375, 9250, }, 2316 { 10500, 9375, }, 2317 { 10625, 9500, }, 2318 { 10750, 9625, }, 2319 { 10875, 9750, }, 2320 { 11000, 9875, }, 2321 { 11125, 10000, }, 2322 { 11250, 10125, }, 2323 { 11375, 10250, }, 2324 { 11500, 10375, }, 2325 { 11625, 10500, }, 2326 { 11750, 10625, }, 2327 { 11875, 10750, }, 2328 { 12000, 10875, }, 2329 { 12125, 11000, }, 2330 { 12250, 11125, }, 
2331 { 12375, 11250, }, 2332 { 12500, 11375, }, 2333 { 12625, 11500, }, 2334 { 12750, 11625, }, 2335 { 12875, 11750, }, 2336 { 13000, 11875, }, 2337 { 13125, 12000, }, 2338 { 13250, 12125, }, 2339 { 13375, 12250, }, 2340 { 13500, 12375, }, 2341 { 13625, 12500, }, 2342 { 13750, 12625, }, 2343 { 13875, 12750, }, 2344 { 14000, 12875, }, 2345 { 14125, 13000, }, 2346 { 14250, 13125, }, 2347 { 14375, 13250, }, 2348 { 14500, 13375, }, 2349 { 14625, 13500, }, 2350 { 14750, 13625, }, 2351 { 14875, 13750, }, 2352 { 15000, 13875, }, 2353 { 15125, 14000, }, 2354 { 15250, 14125, }, 2355 { 15375, 14250, }, 2356 { 15500, 14375, }, 2357 { 15625, 14500, }, 2358 { 15750, 14625, }, 2359 { 15875, 14750, }, 2360 { 16000, 14875, }, 2361 { 16125, 15000, }, 2362 }; 2363 if (dev_priv->info->is_mobile) 2364 return v_table[pxvid].vm; 2365 else 2366 return v_table[pxvid].vd; 2367 } 2368 2369 void i915_update_gfx_val(struct drm_i915_private *dev_priv) 2370 { 2371 struct timespec now, diff1; 2372 u64 diff; 2373 unsigned long diffms; 2374 u32 count; 2375 2376 if (dev_priv->info->gen != 5) 2377 return; 2378 2379 nanotime(&now); 2380 diff1 = now; 2381 timespecsub(&diff1, &dev_priv->last_time2); 2382 2383 /* Don't divide by 0 */ 2384 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; 2385 if (!diffms) 2386 return; 2387 2388 count = I915_READ(GFXEC); 2389 2390 if (count < dev_priv->last_count2) { 2391 diff = ~0UL - dev_priv->last_count2; 2392 diff += count; 2393 } else { 2394 diff = count - dev_priv->last_count2; 2395 } 2396 2397 dev_priv->last_count2 = count; 2398 dev_priv->last_time2 = now; 2399 2400 /* More magic constants... */ 2401 diff = diff * 1181; 2402 diff = diff / (diffms * 10); 2403 dev_priv->gfx_power = diff; 2404 } 2405 2406 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 2407 { 2408 unsigned long t, corr, state1, corr2, state2; 2409 u32 pxvid, ext_v; 2410 2411 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); 2412 pxvid = (pxvid >> 24) & 0x7f; 2413 ext_v = pvid_to_extvid(dev_priv, pxvid); 2414 2415 state1 = ext_v; 2416 2417 t = i915_mch_val(dev_priv); 2418 2419 /* Revel in the empirically derived constants */ 2420 2421 /* Correction factor in 1/100000 units */ 2422 if (t > 80) 2423 corr = ((t * 2349) + 135940); 2424 else if (t >= 50) 2425 corr = ((t * 964) + 29317); 2426 else /* < 50 */ 2427 corr = ((t * 301) + 1004); 2428 2429 corr = corr * ((150142 * state1) / 10000 - 78642); 2430 corr /= 100000; 2431 corr2 = (corr * dev_priv->corr); 2432 2433 state2 = (corr2 * state1) / 10000; 2434 state2 /= 100; /* convert to mW */ 2435 2436 i915_update_gfx_val(dev_priv); 2437 2438 return dev_priv->gfx_power + state2; 2439 } 2440 2441 /** 2442 * i915_read_mch_val - return value for IPS use 2443 * 2444 * Calculate and return a value for the IPS driver to use when deciding whether 2445 * we have thermal and power headroom to increase CPU or GPU power budget. 2446 */ 2447 unsigned long i915_read_mch_val(void) 2448 { 2449 struct drm_i915_private *dev_priv; 2450 unsigned long chipset_val, graphics_val, ret = 0; 2451 2452 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 2453 if (!i915_mch_dev) 2454 goto out_unlock; 2455 dev_priv = i915_mch_dev; 2456 2457 chipset_val = i915_chipset_val(dev_priv); 2458 graphics_val = i915_gfx_val(dev_priv); 2459 2460 ret = chipset_val + graphics_val; 2461 2462 out_unlock: 2463 lockmgr(&mchdev_lock, LK_RELEASE); 2464 2465 return ret; 2466 } 2467 2468 /** 2469 * i915_gpu_raise - raise GPU frequency limit 2470 * 2471 * Raise the limit; IPS indicates we have thermal headroom. 
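 *
 * Note that the DRPS delay values run opposite to frequency: fmax is
 * numerically the smallest setting, so raising the limit means stepping
 * max_delay down toward fmax, which is what the code below does.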
2472 */ 2473 bool i915_gpu_raise(void) 2474 { 2475 struct drm_i915_private *dev_priv; 2476 bool ret = true; 2477 2478 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 2479 if (!i915_mch_dev) { 2480 ret = false; 2481 goto out_unlock; 2482 } 2483 dev_priv = i915_mch_dev; 2484 2485 if (dev_priv->max_delay > dev_priv->fmax) 2486 dev_priv->max_delay--; 2487 2488 out_unlock: 2489 lockmgr(&mchdev_lock, LK_RELEASE); 2490 2491 return ret; 2492 } 2493 2494 /** 2495 * i915_gpu_lower - lower GPU frequency limit 2496 * 2497 * IPS indicates we're close to a thermal limit, so throttle back the GPU 2498 * frequency maximum. 2499 */ 2500 bool i915_gpu_lower(void) 2501 { 2502 struct drm_i915_private *dev_priv; 2503 bool ret = true; 2504 2505 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 2506 if (!i915_mch_dev) { 2507 ret = false; 2508 goto out_unlock; 2509 } 2510 dev_priv = i915_mch_dev; 2511 2512 if (dev_priv->max_delay < dev_priv->min_delay) 2513 dev_priv->max_delay++; 2514 2515 out_unlock: 2516 lockmgr(&mchdev_lock, LK_RELEASE); 2517 2518 return ret; 2519 } 2520 2521 /** 2522 * i915_gpu_busy - indicate GPU business to IPS 2523 * 2524 * Tell the IPS driver whether or not the GPU is busy. 2525 */ 2526 bool i915_gpu_busy(void) 2527 { 2528 struct drm_i915_private *dev_priv; 2529 bool ret = false; 2530 2531 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 2532 if (!i915_mch_dev) 2533 goto out_unlock; 2534 dev_priv = i915_mch_dev; 2535 2536 ret = dev_priv->busy; 2537 2538 out_unlock: 2539 lockmgr(&mchdev_lock, LK_RELEASE); 2540 2541 return ret; 2542 } 2543 2544 /** 2545 * i915_gpu_turbo_disable - disable graphics turbo 2546 * 2547 * Disable graphics turbo by resetting the max frequency and setting the 2548 * current frequency to the default. 2549 */ 2550 bool i915_gpu_turbo_disable(void) 2551 { 2552 struct drm_i915_private *dev_priv; 2553 bool ret = true; 2554 2555 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 2556 if (!i915_mch_dev) { 2557 ret = false; 2558 goto out_unlock; 2559 } 2560 dev_priv = i915_mch_dev; 2561 2562 dev_priv->max_delay = dev_priv->fstart; 2563 2564 if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) 2565 ret = false; 2566 2567 out_unlock: 2568 lockmgr(&mchdev_lock, LK_RELEASE); 2569 2570 return ret; 2571 } 2572 2573 void intel_init_emon(struct drm_device *dev) 2574 { 2575 struct drm_i915_private *dev_priv = dev->dev_private; 2576 u32 lcfuse; 2577 u8 pxw[16]; 2578 int i; 2579 2580 /* Disable to program */ 2581 I915_WRITE(ECR, 0); 2582 POSTING_READ(ECR); 2583 2584 /* Program energy weights for various events */ 2585 I915_WRITE(SDEW, 0x15040d00); 2586 I915_WRITE(CSIEW0, 0x007f0000); 2587 I915_WRITE(CSIEW1, 0x1e220004); 2588 I915_WRITE(CSIEW2, 0x04000004); 2589 2590 for (i = 0; i < 5; i++) 2591 I915_WRITE(PEW + (i * 4), 0); 2592 for (i = 0; i < 3; i++) 2593 I915_WRITE(DEW + (i * 4), 0); 2594 2595 /* Program P-state weights to account for frequency power adjustment */ 2596 for (i = 0; i < 16; i++) { 2597 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); 2598 unsigned long freq = intel_pxfreq(pxvidfreq); 2599 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> 2600 PXVFREQ_PX_SHIFT; 2601 unsigned long val; 2602 2603 val = vid * vid; 2604 val *= (freq / 1000); 2605 val *= 255; 2606 val /= (127*127*900); 2607 if (val > 0xff) 2608 DRM_ERROR("bad pxval: %ld\n", val); 2609 pxw[i] = val; 2610 } 2611 /* Render standby states get 0 weight */ 2612 pxw[14] = 0; 2613 pxw[15] = 0; 2614 2615 for (i = 0; i < 4; i++) { 2616 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | 2617 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); 2618 I915_WRITE(PXW + (i * 
4), val); 2619 } 2620 2621 /* Adjust magic regs to magic values (more experimental results) */ 2622 I915_WRITE(OGW0, 0); 2623 I915_WRITE(OGW1, 0); 2624 I915_WRITE(EG0, 0x00007f00); 2625 I915_WRITE(EG1, 0x0000000e); 2626 I915_WRITE(EG2, 0x000e0000); 2627 I915_WRITE(EG3, 0x68000300); 2628 I915_WRITE(EG4, 0x42000000); 2629 I915_WRITE(EG5, 0x00140031); 2630 I915_WRITE(EG6, 0); 2631 I915_WRITE(EG7, 0); 2632 2633 for (i = 0; i < 8; i++) 2634 I915_WRITE(PXWL + (i * 4), 0); 2635 2636 /* Enable PMON + select events */ 2637 I915_WRITE(ECR, 0x80000019); 2638 2639 lcfuse = I915_READ(LCFUSE02); 2640 2641 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); 2642 } 2643 2644 static int intel_enable_rc6(struct drm_device *dev) 2645 { 2646 /* 2647 * Respect the kernel parameter if it is set 2648 */ 2649 if (i915_enable_rc6 >= 0) 2650 return i915_enable_rc6; 2651 2652 /* 2653 * Disable RC6 on Ironlake 2654 */ 2655 if (INTEL_INFO(dev)->gen == 5) 2656 return 0; 2657 2658 /* 2659 * Enable rc6 on Sandybridge if DMA remapping is disabled 2660 */ 2661 if (INTEL_INFO(dev)->gen == 6) { 2662 DRM_DEBUG_DRIVER( 2663 "Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n", 2664 intel_iommu_enabled ? "true" : "false", 2665 !intel_iommu_enabled ? "en" : "dis"); 2666 return (intel_iommu_enabled ? 0 : INTEL_RC6_ENABLE); 2667 } 2668 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n"); 2669 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 2670 } 2671 2672 void gen6_enable_rps(struct drm_i915_private *dev_priv) 2673 { 2674 struct drm_device *dev = dev_priv->dev; 2675 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 2676 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 2677 u32 pcu_mbox, rc6_mask = 0; 2678 u32 gtfifodbg; 2679 int cur_freq, min_freq, max_freq; 2680 int rc6_mode; 2681 int i; 2682 2683 /* Here begins a magic sequence of register writes to enable 2684 * auto-downclocking. 2685 * 2686 * Perhaps there might be some value in exposing these to 2687 * userspace... 2688 */ 2689 I915_WRITE(GEN6_RC_STATE, 0); 2690 DRM_LOCK(dev); 2691 2692 /* Clear the DBG now so we don't confuse earlier errors */ 2693 if ((gtfifodbg = I915_READ(GTFIFODBG))) { 2694 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); 2695 I915_WRITE(GTFIFODBG, gtfifodbg); 2696 } 2697 2698 gen6_gt_force_wake_get(dev_priv); 2699 2700 /* disable the counters and set deterministic thresholds */ 2701 I915_WRITE(GEN6_RC_CONTROL, 0); 2702 2703 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); 2704 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); 2705 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); 2706 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); 2707 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); 2708 2709 for (i = 0; i < I915_NUM_RINGS; i++) 2710 I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10); 2711 2712 I915_WRITE(GEN6_RC_SLEEP, 0); 2713 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 2714 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); 2715 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); 2716 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 2717 2718 rc6_mode = intel_enable_rc6(dev_priv->dev); 2719 if (rc6_mode & INTEL_RC6_ENABLE) 2720 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; 2721 2722 if (rc6_mode & INTEL_RC6p_ENABLE) 2723 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; 2724 2725 if (rc6_mode & INTEL_RC6pp_ENABLE) 2726 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; 2727 2728 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", 2729 (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off", 2730 (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off", 2731 (rc6_mode & INTEL_RC6pp_ENABLE) ? 
"on" : "off"); 2732 2733 I915_WRITE(GEN6_RC_CONTROL, 2734 rc6_mask | 2735 GEN6_RC_CTL_EI_MODE(1) | 2736 GEN6_RC_CTL_HW_ENABLE); 2737 2738 I915_WRITE(GEN6_RPNSWREQ, 2739 GEN6_FREQUENCY(10) | 2740 GEN6_OFFSET(0) | 2741 GEN6_AGGRESSIVE_TURBO); 2742 I915_WRITE(GEN6_RC_VIDEO_FREQ, 2743 GEN6_FREQUENCY(12)); 2744 2745 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); 2746 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 2747 18 << 24 | 2748 6 << 16); 2749 I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); 2750 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); 2751 I915_WRITE(GEN6_RP_UP_EI, 100000); 2752 I915_WRITE(GEN6_RP_DOWN_EI, 5000000); 2753 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 2754 I915_WRITE(GEN6_RP_CONTROL, 2755 GEN6_RP_MEDIA_TURBO | 2756 GEN6_RP_MEDIA_HW_MODE | 2757 GEN6_RP_MEDIA_IS_GFX | 2758 GEN6_RP_ENABLE | 2759 GEN6_RP_UP_BUSY_AVG | 2760 GEN6_RP_DOWN_IDLE_CONT); 2761 2762 if (_intel_wait_for(dev, 2763 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500, 2764 1, "915pr1")) 2765 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); 2766 2767 I915_WRITE(GEN6_PCODE_DATA, 0); 2768 I915_WRITE(GEN6_PCODE_MAILBOX, 2769 GEN6_PCODE_READY | 2770 GEN6_PCODE_WRITE_MIN_FREQ_TABLE); 2771 if (_intel_wait_for(dev, 2772 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500, 2773 1, "915pr2")) 2774 DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); 2775 2776 min_freq = (rp_state_cap & 0xff0000) >> 16; 2777 max_freq = rp_state_cap & 0xff; 2778 cur_freq = (gt_perf_status & 0xff00) >> 8; 2779 2780 /* Check for overclock support */ 2781 if (_intel_wait_for(dev, 2782 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500, 2783 1, "915pr3")) 2784 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); 2785 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); 2786 pcu_mbox = I915_READ(GEN6_PCODE_DATA); 2787 if (_intel_wait_for(dev, 2788 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500, 2789 1, "915pr4")) 2790 DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); 2791 if (pcu_mbox & (1<<31)) { /* OC supported */ 2792 max_freq = pcu_mbox & 0xff; 2793 DRM_DEBUG("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); 2794 } 2795 2796 /* In units of 100MHz */ 2797 dev_priv->max_delay = max_freq; 2798 dev_priv->min_delay = min_freq; 2799 dev_priv->cur_delay = cur_freq; 2800 2801 /* requires MSI enabled */ 2802 I915_WRITE(GEN6_PMIER, 2803 GEN6_PM_MBOX_EVENT | 2804 GEN6_PM_THERMAL_EVENT | 2805 GEN6_PM_RP_DOWN_TIMEOUT | 2806 GEN6_PM_RP_UP_THRESHOLD | 2807 GEN6_PM_RP_DOWN_THRESHOLD | 2808 GEN6_PM_RP_UP_EI_EXPIRED | 2809 GEN6_PM_RP_DOWN_EI_EXPIRED); 2810 lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE); 2811 if (dev_priv->pm_iir != 0) 2812 kprintf("pm_iir %x\n", dev_priv->pm_iir); 2813 I915_WRITE(GEN6_PMIMR, 0); 2814 lockmgr(&dev_priv->rps_lock, LK_RELEASE); 2815 /* enable all PM interrupts */ 2816 I915_WRITE(GEN6_PMINTRMSK, 0); 2817 2818 gen6_gt_force_wake_put(dev_priv); 2819 DRM_UNLOCK(dev); 2820 } 2821 2822 void gen6_update_ring_freq(struct drm_i915_private *dev_priv) 2823 { 2824 struct drm_device *dev; 2825 int min_freq = 15; 2826 int gpu_freq, ia_freq, max_ia_freq; 2827 int scaling_factor = 180; 2828 uint64_t tsc_freq; 2829 2830 dev = dev_priv->dev; 2831 #if 0 2832 max_ia_freq = cpufreq_quick_get_max(0); 2833 /* 2834 * Default to measured freq if none found, PCU will ensure we don't go 2835 * over 2836 */ 2837 if (!max_ia_freq) 2838 max_ia_freq = tsc_freq; 2839 2840 /* Convert from Hz to MHz */ 2841 max_ia_freq /= 1000; 2842 #else 2843 tsc_freq = 
atomic_load_acq_64(&tsc_freq); 2844 max_ia_freq = tsc_freq / 1000 / 1000; 2845 #endif 2846 2847 DRM_LOCK(dev); 2848 2849 /* 2850 * For each potential GPU frequency, load a ring frequency we'd like 2851 * to use for memory access. We do this by specifying the IA frequency 2852 * the PCU should use as a reference to determine the ring frequency. 2853 */ 2854 for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay; 2855 gpu_freq--) { 2856 int diff = dev_priv->max_delay - gpu_freq; 2857 int d; 2858 2859 /* 2860 * For GPU frequencies less than 750MHz, just use the lowest 2861 * ring freq. 2862 */ 2863 if (gpu_freq < min_freq) 2864 ia_freq = 800; 2865 else 2866 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); 2867 d = 100; 2868 ia_freq = (ia_freq + d / 2) / d; 2869 2870 I915_WRITE(GEN6_PCODE_DATA, 2871 (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | 2872 gpu_freq); 2873 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | 2874 GEN6_PCODE_WRITE_MIN_FREQ_TABLE); 2875 if (_intel_wait_for(dev, 2876 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 2877 10, 1, "915frq")) { 2878 DRM_ERROR("pcode write of freq table timed out\n"); 2879 continue; 2880 } 2881 } 2882 2883 DRM_UNLOCK(dev); 2884 } 2885 2886 void ironlake_init_clock_gating(struct drm_device *dev) 2887 { 2888 struct drm_i915_private *dev_priv = dev->dev_private; 2889 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 2890 2891 /* Required for FBC */ 2892 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | 2893 ILK_DPFCUNIT_CLOCK_GATE_DISABLE | 2894 ILK_DPFDUNIT_CLOCK_GATE_ENABLE; 2895 2896 I915_WRITE(PCH_3DCGDIS0, 2897 MARIUNIT_CLOCK_GATE_DISABLE | 2898 SVSMUNIT_CLOCK_GATE_DISABLE); 2899 I915_WRITE(PCH_3DCGDIS1, 2900 VFMUNIT_CLOCK_GATE_DISABLE); 2901 2902 /* 2903 * According to the spec the following bits should be set in 2904 * order to enable memory self-refresh 2905 * The bit 22/21 of 0x42004 2906 * The bit 5 of 0x42020 2907 * The bit 15 of 0x45000 2908 */ 2909 I915_WRITE(ILK_DISPLAY_CHICKEN2, 2910 (I915_READ(ILK_DISPLAY_CHICKEN2) | 2911 ILK_DPARB_GATE | ILK_VSDPFD_FULL)); 2912 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; 2913 I915_WRITE(DISP_ARB_CTL, 2914 (I915_READ(DISP_ARB_CTL) | 2915 DISP_FBC_WM_DIS)); 2916 I915_WRITE(WM3_LP_ILK, 0); 2917 I915_WRITE(WM2_LP_ILK, 0); 2918 I915_WRITE(WM1_LP_ILK, 0); 2919 2920 /* 2921 * Based on the document from hardware guys the following bits 2922 * should be set unconditionally in order to enable FBC. 2923 * The bit 22 of 0x42000 2924 * The bit 22 of 0x42004 2925 * The bit 7,8,9 of 0x42020. 
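 * (Those offsets correspond to the ILK_DISPLAY_CHICKEN1, ILK_DISPLAY_CHICKEN2
 * and ILK_DSPCLK_GATE_D writes below.)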
2926 */ 2927 if (IS_IRONLAKE_M(dev)) { 2928 I915_WRITE(ILK_DISPLAY_CHICKEN1, 2929 I915_READ(ILK_DISPLAY_CHICKEN1) | 2930 ILK_FBCQ_DIS); 2931 I915_WRITE(ILK_DISPLAY_CHICKEN2, 2932 I915_READ(ILK_DISPLAY_CHICKEN2) | 2933 ILK_DPARB_GATE); 2934 } 2935 2936 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 2937 2938 I915_WRITE(ILK_DISPLAY_CHICKEN2, 2939 I915_READ(ILK_DISPLAY_CHICKEN2) | 2940 ILK_ELPIN_409_SELECT); 2941 I915_WRITE(_3D_CHICKEN2, 2942 _3D_CHICKEN2_WM_READ_PIPELINED << 16 | 2943 _3D_CHICKEN2_WM_READ_PIPELINED); 2944 2945 /* WaDisableRenderCachePipelinedFlush */ 2946 I915_WRITE(CACHE_MODE_0, 2947 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); 2948 2949 ibx_init_clock_gating(dev); 2950 } 2951 2952 void cpt_init_clock_gating(struct drm_device *dev) 2953 { 2954 struct drm_i915_private *dev_priv = dev->dev_private; 2955 int pipe; 2956 2957 /* 2958 * On Ibex Peak and Cougar Point, we need to disable clock 2959 * gating for the panel power sequencer or it will fail to 2960 * start up when no ports are active. 2961 */ 2962 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 2963 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | 2964 DPLS_EDP_PPS_FIX_DIS); 2965 /* The below fixes the weird display corruption, a few pixels shifted 2966 * downward, on (only) LVDS of some HP laptops with IVY. 2967 */ 2968 for_each_pipe(pipe) 2969 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE); 2970 /* WADP0ClockGatingDisable */ 2971 for_each_pipe(pipe) { 2972 I915_WRITE(TRANS_CHICKEN1(pipe), 2973 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 2974 } 2975 } 2976 2977 void gen6_init_clock_gating(struct drm_device *dev) 2978 { 2979 struct drm_i915_private *dev_priv = dev->dev_private; 2980 int pipe; 2981 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 2982 2983 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 2984 2985 I915_WRITE(ILK_DISPLAY_CHICKEN2, 2986 I915_READ(ILK_DISPLAY_CHICKEN2) | 2987 ILK_ELPIN_409_SELECT); 2988 2989 /* WaDisableHiZPlanesWhenMSAAEnabled */ 2990 I915_WRITE(_3D_CHICKEN, 2991 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); 2992 2993 /* WaSetupGtModeTdRowDispatch */ 2994 if (IS_SNB_GT1(dev)) 2995 I915_WRITE(GEN6_GT_MODE, 2996 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); 2997 2998 I915_WRITE(WM3_LP_ILK, 0); 2999 I915_WRITE(WM2_LP_ILK, 0); 3000 I915_WRITE(WM1_LP_ILK, 0); 3001 3002 I915_WRITE(CACHE_MODE_0, 3003 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 3004 3005 I915_WRITE(GEN6_UCGCTL1, 3006 I915_READ(GEN6_UCGCTL1) | 3007 GEN6_BLBUNIT_CLOCK_GATE_DISABLE | 3008 GEN6_CSUNIT_CLOCK_GATE_DISABLE); 3009 3010 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 3011 * gating disable must be set. Failure to set it results in 3012 * flickering pixels due to Z write ordering failures after 3013 * some amount of runtime in the Mesa "fire" demo, and Unigine 3014 * Sanctuary and Tropics, and apparently anything else with 3015 * alpha test or pixel discard. 3016 * 3017 * According to the spec, bit 11 (RCCUNIT) must also be set, 3018 * but we didn't debug actual testcases to find it out. 3019 * 3020 * Also apply WaDisableVDSUnitClockGating and 3021 * WaDisableRCPBUnitClockGating. 3022 */ 3023 I915_WRITE(GEN6_UCGCTL2, 3024 GEN7_VDSUNIT_CLOCK_GATE_DISABLE | 3025 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | 3026 GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 3027 3028 /* Bspec says we need to always set all mask bits. 
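 * The upper 16 bits of the register act as a per-bit write enable for the
 * lower 16, which is why the write below ORs in 0xFFFF << 16.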
*/ 3029 I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) | 3030 _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL); 3031 3032 /* 3033 * According to the spec the following bits should be 3034 * set in order to enable memory self-refresh and fbc: 3035 * The bit21 and bit22 of 0x42000 3036 * The bit21 and bit22 of 0x42004 3037 * The bit5 and bit7 of 0x42020 3038 * The bit14 of 0x70180 3039 * The bit14 of 0x71180 3040 */ 3041 I915_WRITE(ILK_DISPLAY_CHICKEN1, 3042 I915_READ(ILK_DISPLAY_CHICKEN1) | 3043 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); 3044 I915_WRITE(ILK_DISPLAY_CHICKEN2, 3045 I915_READ(ILK_DISPLAY_CHICKEN2) | 3046 ILK_DPARB_GATE | ILK_VSDPFD_FULL); 3047 I915_WRITE(ILK_DSPCLK_GATE_D, 3048 I915_READ(ILK_DSPCLK_GATE_D) | 3049 ILK_DPARBUNIT_CLOCK_GATE_ENABLE | 3050 ILK_DPFDUNIT_CLOCK_GATE_ENABLE); 3051 3052 /* WaMbcDriverBootEnable */ 3053 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | 3054 GEN6_MBCTL_ENABLE_BOOT_FETCH); 3055 3056 for_each_pipe(pipe) { 3057 I915_WRITE(DSPCNTR(pipe), 3058 I915_READ(DSPCNTR(pipe)) | 3059 DISPPLANE_TRICKLE_FEED_DISABLE); 3060 intel_flush_display_plane(dev_priv, pipe); 3061 } 3062 3063 /* The default value should be 0x200 according to docs, but the two 3064 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */ 3065 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff)); 3066 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI)); 3067 3068 cpt_init_clock_gating(dev); 3069 } 3070 3071 void ivybridge_init_clock_gating(struct drm_device *dev) 3072 { 3073 struct drm_i915_private *dev_priv = dev->dev_private; 3074 int pipe; 3075 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; 3076 3077 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 3078 3079 I915_WRITE(WM3_LP_ILK, 0); 3080 I915_WRITE(WM2_LP_ILK, 0); 3081 I915_WRITE(WM1_LP_ILK, 0); 3082 3083 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. 3084 * This implements the WaDisableRCZUnitClockGating workaround. 3085 */ 3086 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 3087 3088 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); 3089 3090 I915_WRITE(IVB_CHICKEN3, 3091 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 3092 CHICKEN3_DGMG_DONE_FIX_DISABLE); 3093 3094 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. 
*/ 3095 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 3096 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 3097 3098 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ 3099 I915_WRITE(GEN7_L3CNTLREG1, 3100 GEN7_WA_FOR_GEN7_L3_CONTROL); 3101 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, 3102 GEN7_WA_L3_CHICKEN_MODE); 3103 3104 /* This is required by WaCatErrorRejectionIssue */ 3105 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 3106 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 3107 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 3108 3109 for_each_pipe(pipe) { 3110 I915_WRITE(DSPCNTR(pipe), 3111 I915_READ(DSPCNTR(pipe)) | 3112 DISPPLANE_TRICKLE_FEED_DISABLE); 3113 intel_flush_display_plane(dev_priv, pipe); 3114 } 3115 } 3116 3117 void g4x_init_clock_gating(struct drm_device *dev) 3118 { 3119 struct drm_i915_private *dev_priv = dev->dev_private; 3120 uint32_t dspclk_gate; 3121 3122 I915_WRITE(RENCLK_GATE_D1, 0); 3123 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | 3124 GS_UNIT_CLOCK_GATE_DISABLE | 3125 CL_UNIT_CLOCK_GATE_DISABLE); 3126 I915_WRITE(RAMCLK_GATE_D, 0); 3127 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | 3128 OVRUNIT_CLOCK_GATE_DISABLE | 3129 OVCUNIT_CLOCK_GATE_DISABLE; 3130 if (IS_GM45(dev)) 3131 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; 3132 I915_WRITE(DSPCLK_GATE_D, dspclk_gate); 3133 } 3134 3135 void crestline_init_clock_gating(struct drm_device *dev) 3136 { 3137 struct drm_i915_private *dev_priv = dev->dev_private; 3138 3139 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); 3140 I915_WRITE(RENCLK_GATE_D2, 0); 3141 I915_WRITE(DSPCLK_GATE_D, 0); 3142 I915_WRITE(RAMCLK_GATE_D, 0); 3143 I915_WRITE16(DEUC, 0); 3144 } 3145 3146 void broadwater_init_clock_gating(struct drm_device *dev) 3147 { 3148 struct drm_i915_private *dev_priv = dev->dev_private; 3149 3150 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | 3151 I965_RCC_CLOCK_GATE_DISABLE | 3152 I965_RCPB_CLOCK_GATE_DISABLE | 3153 I965_ISC_CLOCK_GATE_DISABLE | 3154 I965_FBC_CLOCK_GATE_DISABLE); 3155 I915_WRITE(RENCLK_GATE_D2, 0); 3156 } 3157 3158 void gen3_init_clock_gating(struct drm_device *dev) 3159 { 3160 struct drm_i915_private *dev_priv = dev->dev_private; 3161 u32 dstate = I915_READ(D_STATE); 3162 3163 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 3164 DSTATE_DOT_CLOCK_GATING; 3165 I915_WRITE(D_STATE, dstate); 3166 } 3167 3168 void i85x_init_clock_gating(struct drm_device *dev) 3169 { 3170 struct drm_i915_private *dev_priv = dev->dev_private; 3171 3172 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 3173 } 3174 3175 void i830_init_clock_gating(struct drm_device *dev) 3176 { 3177 struct drm_i915_private *dev_priv = dev->dev_private; 3178 3179 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 3180 } 3181 3182 void ibx_init_clock_gating(struct drm_device *dev) 3183 { 3184 struct drm_i915_private *dev_priv = dev->dev_private; 3185 3186 /* 3187 * On Ibex Peak and Cougar Point, we need to disable clock 3188 * gating for the panel power sequencer or it will fail to 3189 * start up when no ports are active. 
3190 */ 3191 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 3192 } 3193 3194 static void ironlake_teardown_rc6(struct drm_device *dev) 3195 { 3196 struct drm_i915_private *dev_priv = dev->dev_private; 3197 3198 if (dev_priv->renderctx) { 3199 i915_gem_object_unpin(dev_priv->renderctx); 3200 drm_gem_object_unreference(&dev_priv->renderctx->base); 3201 dev_priv->renderctx = NULL; 3202 } 3203 3204 if (dev_priv->pwrctx) { 3205 i915_gem_object_unpin(dev_priv->pwrctx); 3206 drm_gem_object_unreference(&dev_priv->pwrctx->base); 3207 dev_priv->pwrctx = NULL; 3208 } 3209 } 3210 3211 void ironlake_disable_rc6(struct drm_device *dev) 3212 { 3213 struct drm_i915_private *dev_priv = dev->dev_private; 3214 3215 if (I915_READ(PWRCTXA)) { 3216 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ 3217 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); 3218 (void)_intel_wait_for(dev, 3219 ((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), 3220 50, 1, "915pro"); 3221 3222 I915_WRITE(PWRCTXA, 0); 3223 POSTING_READ(PWRCTXA); 3224 3225 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 3226 POSTING_READ(RSTDBYCTL); 3227 } 3228 3229 ironlake_teardown_rc6(dev); 3230 } 3231 3232 static int ironlake_setup_rc6(struct drm_device *dev) 3233 { 3234 struct drm_i915_private *dev_priv = dev->dev_private; 3235 3236 if (dev_priv->renderctx == NULL) 3237 dev_priv->renderctx = intel_alloc_context_page(dev); 3238 if (!dev_priv->renderctx) 3239 return -ENOMEM; 3240 3241 if (dev_priv->pwrctx == NULL) 3242 dev_priv->pwrctx = intel_alloc_context_page(dev); 3243 if (!dev_priv->pwrctx) { 3244 ironlake_teardown_rc6(dev); 3245 return -ENOMEM; 3246 } 3247 3248 return 0; 3249 } 3250 3251 void ironlake_enable_rc6(struct drm_device *dev) 3252 { 3253 struct drm_i915_private *dev_priv = dev->dev_private; 3254 int ret; 3255 3256 /* rc6 disabled by default due to repeated reports of hanging during 3257 * boot and resume. 3258 */ 3259 if (!intel_enable_rc6(dev)) 3260 return; 3261 3262 DRM_LOCK(dev); 3263 ret = ironlake_setup_rc6(dev); 3264 if (ret) { 3265 DRM_UNLOCK(dev); 3266 return; 3267 } 3268 3269 /* 3270 * GPU can automatically power down the render unit if given a page 3271 * to save state. 3272 */ 3273 ret = BEGIN_LP_RING(6); 3274 if (ret) { 3275 ironlake_teardown_rc6(dev); 3276 DRM_UNLOCK(dev); 3277 return; 3278 } 3279 3280 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); 3281 OUT_RING(MI_SET_CONTEXT); 3282 OUT_RING(dev_priv->renderctx->gtt_offset | 3283 MI_MM_SPACE_GTT | 3284 MI_SAVE_EXT_STATE_EN | 3285 MI_RESTORE_EXT_STATE_EN | 3286 MI_RESTORE_INHIBIT); 3287 OUT_RING(MI_SUSPEND_FLUSH); 3288 OUT_RING(MI_NOOP); 3289 OUT_RING(MI_FLUSH); 3290 ADVANCE_LP_RING(); 3291 3292 /* 3293 * Wait for the command parser to advance past MI_SET_CONTEXT. 
The HW 3294 * does an implicit flush, combined with MI_FLUSH above, it should be 3295 * safe to assume that renderctx is valid 3296 */ 3297 ret = intel_wait_ring_idle(LP_RING(dev_priv)); 3298 if (ret) { 3299 DRM_ERROR("failed to enable ironlake power savings\n"); 3300 ironlake_teardown_rc6(dev); 3301 DRM_UNLOCK(dev); 3302 return; 3303 } 3304 3305 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); 3306 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 3307 DRM_UNLOCK(dev); 3308 } 3309 3310 void intel_init_clock_gating(struct drm_device *dev) 3311 { 3312 struct drm_i915_private *dev_priv = dev->dev_private; 3313 3314 dev_priv->display.init_clock_gating(dev); 3315 3316 if (dev_priv->display.init_pch_clock_gating) 3317 dev_priv->display.init_pch_clock_gating(dev); 3318 } 3319
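
/*
 * intel_init_clock_gating() above assumes the per-platform hooks were filled
 * in earlier during driver setup.  The sketch below is illustrative only: the
 * helper name and the exact platform checks are hypothetical, and the real
 * hook assignment lives elsewhere in the driver.  It merely shows how the
 * routines in this file plug into dev_priv->display.
 */
#if 0
static void example_setup_clock_gating_hooks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Pick the per-generation clock gating routine defined in this file. */
	if (IS_IVYBRIDGE(dev))
		dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
	else if (IS_GEN6(dev))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN5(dev))
		dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
	else if (IS_G4X(dev))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_CRESTLINE(dev))
		dev_priv->display.init_clock_gating = crestline_init_clock_gating;
	else if (IS_GEN3(dev))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
}
#endif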