/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns: the savings come from keeping the memory
 * footprint small and having fewer memory pages opened and accessed for
 * refreshing the display.
 *
 * i915 is responsible for reserving stolen memory for FBC and for
 * programming its offset into the proper registers. The hardware takes care
 * of all the compression/decompression. However, there are many known cases
 * where we have to forcibly disable it to allow proper screen updates.
 */

#include "intel_drv.h"
#include "i915_drv.h"

static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
        return HAS_FBC(dev_priv);
}

static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
{
        return IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8;
}

static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
{
        return INTEL_GEN(dev_priv) < 4;
}

static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
{
        return INTEL_GEN(dev_priv) <= 3;
}

/*
 * On some platforms, the CRTC's x:0/y:0 coordinates don't match the
 * frontbuffer's x:0/y:0 coordinates, so we lie to the hardware about the
 * plane's origin so that the x and y offsets can actually fit in the
 * registers. As a consequence, the fence doesn't really start exactly at the
 * display plane address we program, because it starts at the real start of
 * the buffer, so we have to take this into consideration here.
 */
static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
{
        return crtc->base.y - crtc->adjusted_y;
}
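
/*
 * Worked example for the offset above, with purely illustrative values: if
 * the plane scans out starting 64 lines into the fenced buffer and none of
 * that offset was folded into the programmed plane address (crtc->base.y ==
 * 64, crtc->adjusted_y == 0), the fence Y offset handed to the hardware is
 * 64. Had the whole offset been folded into the plane address instead
 * (crtc->adjusted_y == 64), the fence Y offset would be 0.
 */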

/*
 * For SKL+, the plane source size used by the hardware is based on the value
 * we write to the PLANE_SIZE register. For BDW-, the hardware looks at the
 * value we wrote to PIPESRC.
 */
static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
                                            int *width, int *height)
{
        if (width)
                *width = cache->plane.src_w;
        if (height)
                *height = cache->plane.src_h;
}

static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
                                        struct intel_fbc_state_cache *cache)
{
        int lines;

        intel_fbc_get_plane_source_size(cache, NULL, &lines);
        if (INTEL_GEN(dev_priv) == 7)
                lines = min(lines, 2048);
        else if (INTEL_GEN(dev_priv) >= 8)
                lines = min(lines, 2560);

        /* Hardware needs the full buffer stride, not just the active area. */
        return lines * cache->fb.stride;
}
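
/*
 * Worked example for the sizing above (illustrative numbers only): a
 * 1920x1080 plane with a 32bpp framebuffer has a stride of at least
 * 1920 * 4 = 7680 bytes, so on gen8+ the CFB must cover
 * min(1080, 2560) * 7680 = 8294400 bytes. The compression threshold chosen
 * at allocation time may let the actual stolen-memory node be a fraction of
 * that.
 */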

static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
        u32 fbc_ctl;

        /* Disable compression */
        fbc_ctl = I915_READ(FBC_CONTROL);
        if ((fbc_ctl & FBC_CTL_EN) == 0)
                return;

        fbc_ctl &= ~FBC_CTL_EN;
        I915_WRITE(FBC_CONTROL, fbc_ctl);

        /* Wait for compressing bit to clear */
        if (intel_wait_for_register(dev_priv,
                                    FBC_STATUS, FBC_STAT_COMPRESSING, 0,
                                    10)) {
                DRM_DEBUG_KMS("FBC idle timed out\n");
                return;
        }
}

static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
        int cfb_pitch;
        int i;
        u32 fbc_ctl;

        /* Note: fbc.threshold == 1 for i8xx */
        cfb_pitch = params->cfb_size / FBC_LL_SIZE;
        if (params->fb.stride < cfb_pitch)
                cfb_pitch = params->fb.stride;

        /* FBC_CTL wants 32B or 64B units */
        if (IS_GEN2(dev_priv))
                cfb_pitch = (cfb_pitch / 32) - 1;
        else
                cfb_pitch = (cfb_pitch / 64) - 1;

        /* Clear old tags */
        for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
                I915_WRITE(FBC_TAG(i), 0);

        if (IS_GEN4(dev_priv)) {
                u32 fbc_ctl2;

                /* Set it up... */
                fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
                fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane);
                I915_WRITE(FBC_CONTROL2, fbc_ctl2);
                I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
        }

        /* enable it... */
        fbc_ctl = I915_READ(FBC_CONTROL);
        fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
        fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
        if (IS_I945GM(dev_priv))
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
        fbc_ctl |= params->vma->fence->id;
        I915_WRITE(FBC_CONTROL, fbc_ctl);
}

static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
        return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
        u32 dpfc_ctl;

        dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN;
        if (params->fb.format->cpp[0] == 2)
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
        else
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;

        if (params->vma->fence) {
                dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
                I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
        } else {
                I915_WRITE(DPFC_FENCE_YOFF, 0);
        }

        /* enable it... */
        I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}

static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
{
        u32 dpfc_ctl;

        /* Disable compression */
        dpfc_ctl = I915_READ(DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
                dpfc_ctl &= ~DPFC_CTL_EN;
                I915_WRITE(DPFC_CONTROL, dpfc_ctl);
        }
}

static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
        return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
        I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
        POSTING_READ(MSG_FBC_REND_STATE);
}

static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
        u32 dpfc_ctl;
        int threshold = dev_priv->fbc.threshold;

        dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
        if (params->fb.format->cpp[0] == 2)
                threshold++;

        switch (threshold) {
        case 4:
        case 3:
                dpfc_ctl |= DPFC_CTL_LIMIT_4X;
                break;
        case 2:
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
                break;
        case 1:
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
                break;
        }

        if (params->vma->fence) {
                dpfc_ctl |= DPFC_CTL_FENCE_EN;
                if (IS_GEN5(dev_priv))
                        dpfc_ctl |= params->vma->fence->id;
                if (IS_GEN6(dev_priv)) {
                        I915_WRITE(SNB_DPFC_CTL_SA,
                                   SNB_CPU_FENCE_ENABLE |
                                   params->vma->fence->id);
                        I915_WRITE(DPFC_CPU_FENCE_OFFSET,
                                   params->crtc.fence_y_offset);
                }
        } else {
                if (IS_GEN6(dev_priv)) {
                        I915_WRITE(SNB_DPFC_CTL_SA, 0);
                        I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
                }
        }

        I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
        I915_WRITE(ILK_FBC_RT_BASE,
                   i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        intel_fbc_recompress(dev_priv);
}
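
/*
 * The threshold switch in ilk_fbc_activate() above maps fbc.threshold onto
 * the DPFC limit field: 1 -> 1x, 2 -> 2x, and 3 or 4 -> 4x, with 16bpp
 * formats (cpp == 2) bumped up one step first. gen7_fbc_activate() below
 * applies the same mapping.
 */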

static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
{
        u32 dpfc_ctl;

        /* Disable compression */
        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
                dpfc_ctl &= ~DPFC_CTL_EN;
                I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
        }
}

static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
        return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
        u32 dpfc_ctl;
        int threshold = dev_priv->fbc.threshold;

        dpfc_ctl = 0;
        if (IS_IVYBRIDGE(dev_priv))
                dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);

        if (params->fb.format->cpp[0] == 2)
                threshold++;

        switch (threshold) {
        case 4:
        case 3:
                dpfc_ctl |= DPFC_CTL_LIMIT_4X;
                break;
        case 2:
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
                break;
        case 1:
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
                break;
        }

        if (params->vma->fence) {
                dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
                I915_WRITE(SNB_DPFC_CTL_SA,
                           SNB_CPU_FENCE_ENABLE |
                           params->vma->fence->id);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
        } else {
                I915_WRITE(SNB_DPFC_CTL_SA, 0);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
        }

        if (dev_priv->fbc.false_color)
                dpfc_ctl |= FBC_CTL_FALSE_COLOR;

        if (IS_IVYBRIDGE(dev_priv)) {
                /* WaFbcAsynchFlipDisableFbcQueue:ivb */
                I915_WRITE(ILK_DISPLAY_CHICKEN1,
                           I915_READ(ILK_DISPLAY_CHICKEN1) |
                           ILK_FBCQ_DIS);
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
                I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
                           I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
                           HSW_FBCQ_DIS);
        }

        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        intel_fbc_recompress(dev_priv);
}

static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
        if (INTEL_GEN(dev_priv) >= 5)
                return ilk_fbc_is_active(dev_priv);
        else if (IS_GM45(dev_priv))
                return g4x_fbc_is_active(dev_priv);
        else
                return i8xx_fbc_is_active(dev_priv);
}

static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        fbc->active = true;

        if (INTEL_GEN(dev_priv) >= 7)
                gen7_fbc_activate(dev_priv);
        else if (INTEL_GEN(dev_priv) >= 5)
                ilk_fbc_activate(dev_priv);
        else if (IS_GM45(dev_priv))
                g4x_fbc_activate(dev_priv);
        else
                i8xx_fbc_activate(dev_priv);
}

static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        fbc->active = false;

        if (INTEL_GEN(dev_priv) >= 5)
                ilk_fbc_deactivate(dev_priv);
        else if (IS_GM45(dev_priv))
                g4x_fbc_deactivate(dev_priv);
        else
                i8xx_fbc_deactivate(dev_priv);
}
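
/*
 * Note the asymmetry in the dispatchers above: gen7+ activates through
 * gen7_fbc_activate(), but status checks and deactivation use the ILK
 * helpers for everything gen5+, since all of those generations share the
 * ILK_DPFC_CONTROL enable bit (gen7_fbc_activate() itself sets it there).
 */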

/**
 * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 *
 * FIXME: This should be tracked in the plane config eventually
 * instead of queried at runtime for most callers.
 */
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
        return dev_priv->fbc.active;
}

static void intel_fbc_work_fn(struct work_struct *__work)
{
        struct drm_i915_private *dev_priv =
                container_of(__work, struct drm_i915_private, fbc.work.work);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_work *work = &fbc->work;
        struct intel_crtc *crtc = fbc->crtc;
        struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];

        if (drm_crtc_vblank_get(&crtc->base)) {
                DRM_ERROR("vblank not available for FBC on pipe %c\n",
                          pipe_name(crtc->pipe));

                mutex_lock(&fbc->lock);
                work->scheduled = false;
                mutex_unlock(&fbc->lock);
                return;
        }

retry:
        /* Delay the actual enabling to let pageflipping cease and the
         * display settle before starting the compression. Note that this
         * delay also serves a second purpose: it allows for a vblank to pass
         * after disabling the FBC before we attempt to modify the control
         * registers.
         *
         * WaFbcWaitForVBlankBeforeEnable:ilk,snb
         *
         * It is also worth mentioning that since work->scheduled_vblank can
         * be updated multiple times by the other threads, hitting the timeout
         * is not an error condition. We'll just end up hitting the
         * "goto retry" case below.
         */
        wait_event_timeout(vblank->queue,
                drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
                msecs_to_jiffies(50));

        mutex_lock(&fbc->lock);

        /* Were we cancelled? */
        if (!work->scheduled)
                goto out;

        /* Were we delayed again while this function was sleeping? */
        if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
                mutex_unlock(&fbc->lock);
                goto retry;
        }

        intel_fbc_hw_activate(dev_priv);

        work->scheduled = false;

out:
        mutex_unlock(&fbc->lock);
        drm_crtc_vblank_put(&crtc->base);
}

static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_work *work = &fbc->work;

        WARN_ON(!mutex_is_locked(&fbc->lock));

        if (drm_crtc_vblank_get(&crtc->base)) {
                DRM_ERROR("vblank not available for FBC on pipe %c\n",
                          pipe_name(crtc->pipe));
                return;
        }

        /* It is useless to call intel_fbc_cancel_work() or cancel_work() in
         * this function since we're not releasing fbc.lock, so it won't have
         * an opportunity to grab it to discover that it was cancelled. So we
         * just update the expected vblank count. */
        work->scheduled = true;
        work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
        drm_crtc_vblank_put(&crtc->base);

        schedule_work(&work->work);
}
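
/*
 * Putting the two helpers above together, the deferred activation flow is:
 * intel_fbc_schedule_activation() samples the current vblank count and
 * queues the work item; intel_fbc_work_fn() then sleeps until the vblank
 * count has moved past the sampled value (going back to sleep whenever the
 * 50ms timeout fires without progress) before it finally writes the enable
 * registers, per WaFbcWaitForVBlankBeforeEnable:ilk,snb.
 */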

static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        WARN_ON(!mutex_is_locked(&fbc->lock));

        /* Calling cancel_work() here won't help due to the fact that the work
         * function grabs fbc->lock. Just set scheduled to false so the work
         * function can know it was cancelled. */
        fbc->work.scheduled = false;

        if (fbc->active)
                intel_fbc_hw_deactivate(dev_priv);
}

static bool multiple_pipes_ok(struct intel_crtc *crtc,
                              struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        enum i915_pipe pipe = crtc->pipe;

        /* Don't even bother tracking anything we don't need. */
        if (!no_fbc_on_multiple_pipes(dev_priv))
                return true;

        if (plane_state->base.visible)
                fbc->visible_pipes_mask |= (1 << pipe);
        else
                fbc->visible_pipes_mask &= ~(1 << pipe);

        /* FBC is only OK when no pipe other than ours is visible. */
        return (fbc->visible_pipes_mask & ~(1 << pipe)) == 0;
}

static int find_compression_threshold(struct drm_i915_private *dev_priv,
                                      struct drm_mm_node *node,
                                      int size,
                                      int fb_cpp)
{
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        int compression_threshold = 1;
        int ret;
        u64 end;

        /* The FBC hardware for BDW/SKL doesn't have access to the stolen
         * reserved range size, so it always assumes the maximum (8mb) is
         * used. If we enable FBC using a CFB on that memory range we'll get
         * FIFO underruns, even if that range is not reserved by the BIOS. */
        if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
                end = ggtt->stolen_size - 8 * 1024 * 1024;
        else
                end = U64_MAX;

        /* HACK: This code depends on what we will do in *_enable_fbc. If that
         * code changes, this code needs to change as well.
         *
         * The enable_fbc code will attempt to use one of our 2 compression
         * thresholds, therefore, in that case, we only have 1 resort.
         */

        /* Try to over-allocate to reduce reallocations and fragmentation. */
        ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
                                                   4096, 0, end);
        if (ret == 0)
                return compression_threshold;

again:
        /* HW's ability to limit the CFB is 1:4 */
        if (compression_threshold > 4 ||
            (fb_cpp == 2 && compression_threshold == 2))
                return 0;

        ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
                                                   4096, 0, end);
        if (ret && INTEL_GEN(dev_priv) <= 4) {
                return 0;
        } else if (ret) {
                compression_threshold <<= 1;
                goto again;
        } else {
                return compression_threshold;
        }
}
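
/*
 * Illustrative allocation ladder for find_compression_threshold() with a
 * request of size N: try 2 * N at threshold 1 (the over-allocation), then
 * N at threshold 1, N/2 at threshold 2 and N/4 at threshold 4 before giving
 * up. Gen4 and older bail out as soon as the first fallback fails, and
 * 16bpp framebuffers are not allowed past threshold 1.
 */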

static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct drm_mm_node *compressed_llb = NULL;
        int size, fb_cpp, ret;

        WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));

        size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
        fb_cpp = fbc->state_cache.fb.format->cpp[0];

        ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
                                         size, fb_cpp);
        if (!ret)
                goto err_llb;
        else if (ret > 1) {
                DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size buffer. Try to increase stolen memory size if available in BIOS.\n");
        }

        fbc->threshold = ret;

        if (INTEL_GEN(dev_priv) >= 5)
                I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
        else if (IS_GM45(dev_priv)) {
                I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
        } else {
                compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
                if (!compressed_llb)
                        goto err_fb;

                ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
                                                  4096, 4096);
                if (ret)
                        goto err_fb;

                fbc->compressed_llb = compressed_llb;

                I915_WRITE(FBC_CFB_BASE,
                           dev_priv->mm.stolen_base + fbc->compressed_fb.start);
                I915_WRITE(FBC_LL_BASE,
                           dev_priv->mm.stolen_base + compressed_llb->start);
        }

        DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
                      fbc->compressed_fb.size, fbc->threshold);

        return 0;

err_fb:
        kfree(compressed_llb);
        i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
        if (drm_mm_initialized(&dev_priv->mm.stolen))
                pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
        return -ENOSPC;
}

static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (drm_mm_node_allocated(&fbc->compressed_fb))
                i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);

        if (fbc->compressed_llb) {
                i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
                kfree(fbc->compressed_llb);
        }
}

void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        mutex_lock(&fbc->lock);
        __intel_fbc_cleanup_cfb(dev_priv);
        mutex_unlock(&fbc->lock);
}

static bool stride_is_valid(struct drm_i915_private *dev_priv,
                            unsigned int stride)
{
        /* These should have been caught earlier. */
        WARN_ON(stride < 512);
        WARN_ON((stride & (64 - 1)) != 0);

        /* Below are the additional FBC restrictions. */

        if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
                return stride == 4096 || stride == 8192;

        if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
                return false;

        if (stride > 16384)
                return false;

        return true;
}

static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
                                  uint32_t pixel_format)
{
        switch (pixel_format) {
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_XBGR8888:
                return true;
        case DRM_FORMAT_XRGB1555:
        case DRM_FORMAT_RGB565:
                /* 16bpp not supported on gen2 */
                if (IS_GEN2(dev_priv))
                        return false;
                /* WaFbcOnly1to1Ratio:ctg */
                if (IS_G4X(dev_priv))
                        return false;
                return true;
        default:
                return false;
        }
}
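
/*
 * Example of the format rules above: DRM_FORMAT_RGB565 compresses fine on
 * gen3, but is rejected on gen2 (no 16bpp support) and on g4x
 * (WaFbcOnly1to1Ratio:ctg), while the 32bpp XRGB/XBGR variants are accepted
 * on every platform that has FBC.
 */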

/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
 * variables instead of just looking at the pipe/plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        unsigned int effective_w, effective_h, max_w, max_h;

        if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
                max_w = 4096;
                max_h = 4096;
        } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
                max_w = 4096;
                max_h = 2048;
        } else {
                max_w = 2048;
                max_h = 1536;
        }

        intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
                                        &effective_h);
        effective_w += crtc->adjusted_x;
        effective_h += crtc->adjusted_y;

        return effective_w <= max_w && effective_h <= max_h;
}

static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
                                         struct intel_crtc_state *crtc_state,
                                         struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_state_cache *cache = &fbc->state_cache;
        struct drm_framebuffer *fb = plane_state->base.fb;

        cache->vma = NULL;

        cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;

        cache->plane.rotation = plane_state->base.rotation;
        /*
         * Src coordinates are already rotated by 270 degrees for
         * the 90/270 degree plane rotation cases (to match the
         * GTT mapping), hence no need to account for rotation here.
         */
        cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
        cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
        cache->plane.visible = plane_state->base.visible;

        if (!cache->plane.visible)
                return;

        cache->fb.format = fb->format;
        cache->fb.stride = fb->pitches[0];

        cache->vma = plane_state->vma;
}
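
/*
 * Everything the activation checks below consume is snapshotted into the
 * state cache above while the atomic state is known to be valid; by the
 * time intel_fbc_can_activate() runs (possibly from the deferred work), the
 * live plane/CRTC state may already belong to a later commit.
 */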

static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_state_cache *cache = &fbc->state_cache;

        /* We don't need to use a state cache here since this information is
         * global for all CRTCs.
         */
        if (fbc->underrun_detected) {
                fbc->no_fbc_reason = "underrun detected";
                return false;
        }

        if (!cache->vma) {
                fbc->no_fbc_reason = "primary plane not visible";
                return false;
        }

        if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) ||
            (cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) {
                fbc->no_fbc_reason = "incompatible mode";
                return false;
        }

        if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
                fbc->no_fbc_reason = "mode too large for compression";
                return false;
        }

        /* The use of a CPU fence is mandatory in order to detect writes
         * by the CPU to the scanout and trigger updates to the FBC.
         *
         * Note that it is possible for a tiled surface to be unmappable (and
         * so have no fence associated with it) due to aperture constraints
         * at the time of pinning.
         */
        if (!cache->vma->fence) {
                fbc->no_fbc_reason = "framebuffer not tiled or fenced";
                return false;
        }

        if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
            cache->plane.rotation != DRM_ROTATE_0) {
                fbc->no_fbc_reason = "rotation unsupported";
                return false;
        }

        if (!stride_is_valid(dev_priv, cache->fb.stride)) {
                fbc->no_fbc_reason = "framebuffer stride not supported";
                return false;
        }

        if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
                fbc->no_fbc_reason = "pixel format is invalid";
                return false;
        }

        /* WaFbcExceedCdClockThreshold:hsw,bdw */
        if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
            cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
                fbc->no_fbc_reason = "pixel rate is too big";
                return false;
        }

        /* It is possible for the required CFB size to change without a
         * crtc->disable + crtc->enable since it is possible to change the
         * stride without triggering a full modeset. Since we try to
         * over-allocate the CFB, there's a chance we may keep FBC enabled
         * even if this happens, but if we exceed the current CFB size we'll
         * have to disable FBC. Notice that it would be possible to disable
         * FBC, wait for a frame, free the stolen node, then try to reenable
         * FBC in case we didn't get any invalidate/deactivate calls, but this
         * would require a lot of tracking just for a specific case. If we
         * conclude it's an important case, we can implement it later. */
        if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
            fbc->compressed_fb.size * fbc->threshold) {
                fbc->no_fbc_reason = "CFB requirements changed";
                return false;
        }

        return true;
}

static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (intel_vgpu_active(dev_priv)) {
                fbc->no_fbc_reason = "VGPU is active";
                return false;
        }

        if (!i915.enable_fbc) {
                fbc->no_fbc_reason = "disabled per module param or by default";
                return false;
        }

        if (fbc->underrun_detected) {
                fbc->no_fbc_reason = "underrun detected";
                return false;
        }

        return true;
}

static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
                                     struct intel_fbc_reg_params *params)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_state_cache *cache = &fbc->state_cache;

        /* Since all our fields are integer types, use memset here so the
         * comparison function can rely on memcmp because the padding will be
         * zero. */
        memset(params, 0, sizeof(*params));

        params->vma = cache->vma;

        params->crtc.pipe = crtc->pipe;
        params->crtc.plane = crtc->plane;
        params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);

        params->fb.format = cache->fb.format;
        params->fb.stride = cache->fb.stride;

        params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
}
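
/*
 * A general C note on why the memset in intel_fbc_get_reg_params() is
 * load-bearing for the comparison below: padding bytes inside a struct are
 * indeterminate after plain member assignment, so two logically identical
 * parameter sets could compare unequal under memcmp() without it.
 */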

static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
                                       struct intel_fbc_reg_params *params2)
{
        /* We can use this since intel_fbc_get_reg_params() does a memset. */
        return memcmp(params1, params2, sizeof(*params1)) == 0;
}

void intel_fbc_pre_update(struct intel_crtc *crtc,
                          struct intel_crtc_state *crtc_state,
                          struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        mutex_lock(&fbc->lock);

        if (!multiple_pipes_ok(crtc, plane_state)) {
                fbc->no_fbc_reason = "more than one pipe active";
                goto deactivate;
        }

        if (!fbc->enabled || fbc->crtc != crtc)
                goto unlock;

        intel_fbc_update_state_cache(crtc, crtc_state, plane_state);

deactivate:
        intel_fbc_deactivate(dev_priv);
unlock:
        mutex_unlock(&fbc->lock);
}

static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_reg_params old_params;

        WARN_ON(!mutex_is_locked(&fbc->lock));

        if (!fbc->enabled || fbc->crtc != crtc)
                return;

        if (!intel_fbc_can_activate(crtc)) {
                WARN_ON(fbc->active);
                return;
        }

        old_params = fbc->params;
        intel_fbc_get_reg_params(crtc, &fbc->params);

        /* If the scanout has not changed, don't modify the FBC settings.
         * Note that we make the fundamental assumption that the fb->obj
         * cannot be unpinned (and have its GTT offset and fence revoked)
         * without first being decoupled from the scanout and FBC disabled.
         */
        if (fbc->active &&
            intel_fbc_reg_params_equal(&old_params, &fbc->params))
                return;

        intel_fbc_deactivate(dev_priv);
        intel_fbc_schedule_activation(crtc);
        fbc->no_fbc_reason = "FBC enabled (active or scheduled)";
}

void intel_fbc_post_update(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        mutex_lock(&fbc->lock);
        __intel_fbc_post_update(crtc);
        mutex_unlock(&fbc->lock);
}

static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
        if (fbc->enabled)
                return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
        else
                return fbc->possible_framebuffer_bits;
}

void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
                          unsigned int frontbuffer_bits,
                          enum fb_op_origin origin)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
                return;

        mutex_lock(&fbc->lock);

        fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;

        if (fbc->enabled && fbc->busy_bits)
                intel_fbc_deactivate(dev_priv);

        mutex_unlock(&fbc->lock);
}

void intel_fbc_flush(struct drm_i915_private *dev_priv,
                     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        mutex_lock(&fbc->lock);

        fbc->busy_bits &= ~frontbuffer_bits;

        if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
                goto out;

        if (!fbc->busy_bits && fbc->enabled &&
            (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
                if (fbc->active)
                        intel_fbc_recompress(dev_priv);
                else
                        __intel_fbc_post_update(fbc->crtc);
        }

out:
        mutex_unlock(&fbc->lock);
}
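
/*
 * Illustrative frontbuffer-tracking sequence for the two entry points
 * above: a CPU write to the scanout raises the plane's frontbuffer bit via
 * intel_fbc_invalidate(), which deactivates FBC; the matching
 * intel_fbc_flush() clears the bit and then either forces a recompression
 * (if FBC stayed active) or schedules reactivation. ORIGIN_GTT and
 * ORIGIN_FLIP bypass both paths: flips are already covered by the
 * pre/post-update hooks, and GTT writes are caught by the hardware's own
 * tracking (the CPU fence is only mandatory for CPU writes).
 */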

/**
 * intel_fbc_choose_crtc - select a CRTC to enable FBC on
 * @dev_priv: i915 device instance
 * @state: the atomic state structure
 *
 * This function looks at the proposed state for CRTCs and planes, then
 * chooses which pipe is going to have FBC by setting
 * intel_crtc_state->enable_fbc to true.
 *
 * Later, intel_fbc_enable is going to look for state->enable_fbc and then
 * maybe enable FBC for the chosen CRTC. If it does, it will set
 * dev_priv->fbc.crtc.
 */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
                           struct drm_atomic_state *state)
{
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct drm_plane *plane;
        struct drm_plane_state *plane_state;
        bool crtc_chosen = false;
        int i;

        mutex_lock(&fbc->lock);

        /* Does this atomic commit involve the CRTC currently tied to FBC? */
        if (fbc->crtc &&
            !drm_atomic_get_existing_crtc_state(state, &fbc->crtc->base))
                goto out;

        if (!intel_fbc_can_enable(dev_priv))
                goto out;

        /* Simply choose the first CRTC that is compatible and has a visible
         * plane. We could go for fancier schemes such as checking the plane
         * size, but this would just affect the few platforms that don't tie
         * FBC to pipe or plane A. */
        for_each_new_plane_in_state(state, plane, plane_state, i) {
                struct intel_plane_state *intel_plane_state =
                        to_intel_plane_state(plane_state);
                struct intel_crtc_state *intel_crtc_state;
                struct intel_crtc *crtc = to_intel_crtc(plane_state->crtc);

                if (!intel_plane_state->base.visible)
                        continue;

                if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
                        continue;

                if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
                        continue;

                intel_crtc_state = to_intel_crtc_state(
                        drm_atomic_get_existing_crtc_state(state, &crtc->base));

                intel_crtc_state->enable_fbc = true;
                crtc_chosen = true;
                break;
        }

        if (!crtc_chosen)
                fbc->no_fbc_reason = "no suitable CRTC for FBC";

out:
        mutex_unlock(&fbc->lock);
}
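
/*
 * Example outcome of the loop above (illustrative): on HSW, where
 * fbc_on_pipe_a_only() is true, an atomic commit with visible primary
 * planes on pipes A and B sets enable_fbc only in pipe A's CRTC state;
 * pipe B is never considered.
 */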

/**
 * intel_fbc_enable: tries to enable FBC on the CRTC
 * @crtc: the CRTC
 * @crtc_state: corresponding &drm_crtc_state for @crtc
 * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
 *
 * This function checks if the given CRTC was chosen for FBC, then enables it
 * if possible. Notice that it doesn't activate FBC. It is valid to call
 * intel_fbc_enable multiple times for the same pipe without an
 * intel_fbc_disable in the middle, as long as it is deactivated.
 */
void intel_fbc_enable(struct intel_crtc *crtc,
                      struct intel_crtc_state *crtc_state,
                      struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        mutex_lock(&fbc->lock);

        if (fbc->enabled) {
                WARN_ON(fbc->crtc == NULL);
                if (fbc->crtc == crtc) {
                        WARN_ON(!crtc_state->enable_fbc);
                        WARN_ON(fbc->active);
                }
                goto out;
        }

        if (!crtc_state->enable_fbc)
                goto out;

        WARN_ON(fbc->active);
        WARN_ON(fbc->crtc != NULL);

        intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
        if (intel_fbc_alloc_cfb(crtc)) {
                fbc->no_fbc_reason = "not enough stolen memory";
                goto out;
        }

        DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
        fbc->no_fbc_reason = "FBC enabled but not active yet";

        fbc->enabled = true;
        fbc->crtc = crtc;
out:
        mutex_unlock(&fbc->lock);
}

/**
 * __intel_fbc_disable - disable FBC
 * @dev_priv: i915 device instance
 *
 * This is the low level function that actually disables FBC. Callers should
 * grab the FBC lock.
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_crtc *crtc = fbc->crtc;

        WARN_ON(!mutex_is_locked(&fbc->lock));
        WARN_ON(!fbc->enabled);
        WARN_ON(fbc->active);
        WARN_ON(crtc->active);

        DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));

        __intel_fbc_cleanup_cfb(dev_priv);

        fbc->enabled = false;
        fbc->crtc = NULL;
}

/**
 * intel_fbc_disable - disable FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
void intel_fbc_disable(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        mutex_lock(&fbc->lock);
        if (fbc->crtc == crtc)
                __intel_fbc_disable(dev_priv);
        mutex_unlock(&fbc->lock);

        cancel_work_sync(&fbc->work.work);
}

/**
 * intel_fbc_global_disable - globally disable FBC
 * @dev_priv: i915 device instance
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        mutex_lock(&fbc->lock);
        if (fbc->enabled)
                __intel_fbc_disable(dev_priv);
        mutex_unlock(&fbc->lock);

        cancel_work_sync(&fbc->work.work);
}
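
/*
 * Lifecycle of the entry points above: enable/disable decide which CRTC
 * owns FBC and manage the CFB allocation, while activate/deactivate only
 * toggle the hardware enable bit. The cancel_work_sync() calls run after
 * fbc->lock is dropped because the work function itself takes the lock.
 */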

static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, fbc.underrun_work);
        struct intel_fbc *fbc = &dev_priv->fbc;

        mutex_lock(&fbc->lock);

        /* Maybe we were scheduled twice. */
        if (fbc->underrun_detected)
                goto out;

        DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
        fbc->underrun_detected = true;

        intel_fbc_deactivate(dev_priv);
out:
        mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
 * @dev_priv: i915 device instance
 *
 * Without FBC, most underruns are harmless and don't really cause too many
 * problems, except for an annoying message on dmesg. With FBC, underruns can
 * become black screens or even worse, especially when paired with bad
 * watermarks. So in order for us to be on the safe side, completely disable
 * FBC in case we ever detect a FIFO underrun on any pipe. An underrun on any
 * pipe already suggests that watermarks may be bad, so try to be as safe as
 * possible.
 *
 * This function is called from the IRQ handler.
 */
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        /* There's no guarantee that underrun_detected won't be set to true
         * right after this check and before the work is scheduled, but that's
         * not a problem since we'll check it again under the work function
         * while FBC is locked. This check here is just to prevent us from
         * unnecessarily scheduling the work, and it relies on the fact that
         * we never switch underrun_detected back to false after it's true. */
        if (READ_ONCE(fbc->underrun_detected))
                return;

        schedule_work(&fbc->underrun_work);
}

/**
 * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
 * @dev_priv: i915 device instance
 *
 * The FBC code needs to track CRTC visibility since the older platforms can't
 * have FBC enabled while multiple pipes are used. This function does the
 * initial setup at driver load to make sure FBC is matching the real
 * hardware.
 */
void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
{
        struct intel_crtc *crtc;

        /* Don't even bother tracking anything if we don't need it. */
        if (!no_fbc_on_multiple_pipes(dev_priv))
                return;

        for_each_intel_crtc(&dev_priv->drm, crtc)
                if (intel_crtc_active(crtc) &&
                    crtc->base.primary->state->visible)
                        dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}
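
/*
 * Example of the tracking above (illustrative): on a gen3 part that boots
 * with pipes A and B both driving visible planes, visible_pipes_mask comes
 * up as 0x3, so multiple_pipes_ok() keeps FBC deactivated until one of the
 * pipes is shut down.
 */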

/*
 * The DDX driver changes its behavior depending on the value it reads from
 * i915.enable_fbc, so sanitize it by translating the default value into
 * either 0 or 1 in order to allow it to know what's going on.
 *
 * Notice that this is done at driver initialization and we still allow user
 * space to change the value during runtime without sanitizing it again. IGT
 * relies on being able to change i915.enable_fbc at runtime.
 */
static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
{
        if (i915.enable_fbc >= 0)
                return !!i915.enable_fbc;

        if (!HAS_FBC(dev_priv))
                return 0;

        if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
                return 1;

        return 0;
}

static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_INTEL_IOMMU
        /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
        if (intel_iommu_gfx_mapped &&
            (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
                DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
                return true;
        }
#endif

        return false;
}

/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
        struct intel_fbc *fbc = &dev_priv->fbc;
        enum i915_pipe pipe;

        INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
        INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
        lockinit(&fbc->lock, "i915fl", 0, LK_CANRECURSE);
        fbc->enabled = false;
        fbc->active = false;
        fbc->work.scheduled = false;

        if (need_fbc_vtd_wa(dev_priv))
                mkwrite_device_info(dev_priv)->has_fbc = false;

        i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
        DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);

        if (!HAS_FBC(dev_priv)) {
                fbc->no_fbc_reason = "unsupported by this chipset";
                return;
        }

        for_each_pipe(dev_priv, pipe) {
                fbc->possible_framebuffer_bits |=
                        INTEL_FRONTBUFFER_PRIMARY(pipe);

                if (fbc_on_pipe_a_only(dev_priv))
                        break;
        }

        /* This value was pulled out of someone's hat */
        if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
                I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);

        /* We still don't have any sort of hardware state readout for FBC, so
         * deactivate it in case the BIOS activated it to make sure software
         * matches the hardware state. */
        if (intel_fbc_hw_is_active(dev_priv))
                intel_fbc_hw_deactivate(dev_priv);
}