/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns. It comes from keeping the memory footprint small
 * and having fewer memory pages opened and accessed for refreshing the display.
 *
 * i915 is responsible for reserving stolen memory for FBC and configuring its
 * offset in the proper registers. The hardware takes care of all the
 * compression/decompression. However, there are many known cases where we have
 * to forcibly disable it to allow proper screen updates.
 */

#include "intel_drv.h"
#include "i915_drv.h"

/* FBC is usable only when the platform advertises hardware support. */
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
	return HAS_FBC(dev_priv);
}

/* HSW and gen8+ hardware can only compress pipe A. */
static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
{
	return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
}

/* Pre-gen4 hardware can only compress plane A. */
static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen < 4;
}

/* Gen2/3 must keep FBC disabled whenever more than one pipe is active. */
static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen <= 3;
}

/*
 * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
 * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
 * origin so the x and y offsets can actually fit the registers. As a
 * consequence, the fence doesn't really start exactly at the display plane
 * address we program because it starts at the real start of the buffer, so we
 * have to take this into consideration here.
 */
static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
{
	return crtc->base.y - crtc->adjusted_y;
}

/*
 * For SKL+, the plane source size used by the hardware is based on the value we
 * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
 * we wrote to PIPESRC.
 */
static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
					    int *width, int *height)
{
	int w, h;

	/* A 90/270 degree plane rotation swaps the roles of src_w and src_h. */
	if (intel_rotation_90_or_270(cache->plane.rotation)) {
		w = cache->plane.src_h;
		h = cache->plane.src_w;
	} else {
		w = cache->plane.src_w;
		h = cache->plane.src_h;
	}

	/* Either output pointer may be NULL when the caller doesn't need it. */
	if (width)
		*width = w;
	if (height)
		*height = h;
}

/* Compute the number of CFB bytes required for the cached plane state. */
static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
					struct intel_fbc_state_cache *cache)
{
	int lines;

	intel_fbc_get_plane_source_size(cache, NULL, &lines);
	if (INTEL_INFO(dev_priv)->gen >= 7)
		lines = min(lines, 2048);

	/* Hardware needs the full buffer stride, not just the active area. */
	return lines * cache->fb.stride;
}

/* Turn off gen2-4 (non-GM45) FBC and wait for in-flight compression to stop. */
static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (intel_wait_for_register(dev_priv,
				    FBC_STATUS, FBC_STAT_COMPRESSING, 0,
				    10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}
}

/* Program and enable FBC on gen2-4 (non-GM45) hardware. */
static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	/* Note: fbc.threshold == 1 for i8xx */
	cfb_pitch = params->cfb_size / FBC_LL_SIZE;
	if (params->fb.stride < cfb_pitch)
		cfb_pitch = params->fb.stride;

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev_priv))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG(i), 0);

	if (IS_GEN4(dev_priv)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	/* Keep only the interval field; everything else is reprogrammed. */
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= params->fb.fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);
}

/* Is gen2-4 FBC currently enabled in hardware? */
static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

/* Program and enable FBC on GM45 (DPFC) hardware. */
static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN;
	/* 16bpp formats use the 2x compression limit. */
	if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}

/* Disable GM45 FBC if it is currently enabled. */
static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
	}
}

/* Is GM45 FBC currently enabled in hardware? */
static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

/* This function forces a CFB recompression through the nuke operation.
 */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
	POSTING_READ(MSG_FBC_REND_STATE);
}

/* Program and enable FBC on ILK/SNB hardware. */
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
	/* 16bpp formats need one extra compression threshold step. */
	if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev_priv))
		dpfc_ctl |= params->fb.fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev_priv)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
	}

	intel_fbc_recompress(dev_priv);
}

/* Disable ILK+ FBC if it is currently enabled. */
static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
	}
}

/* Is ILK+ FBC currently enabled in hardware? */
static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

/* Program and enable FBC on IVB+ hardware. */
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dpfc_ctl = 0;
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);

	/* 16bpp formats need one extra compression threshold step. */
	if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
			   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);

	intel_fbc_recompress(dev_priv);
}

/* Read the FBC enable bit from the per-platform control register. */
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen >= 5)
		return ilk_fbc_is_active(dev_priv);
	else if (IS_GM45(dev_priv))
		return g4x_fbc_is_active(dev_priv);
	else
		return i8xx_fbc_is_active(dev_priv);
}

/* Mark FBC active and dispatch to the per-platform activation routine. */
static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = true;

	if (INTEL_INFO(dev_priv)->gen >= 7)
		gen7_fbc_activate(dev_priv);
	else if (INTEL_INFO(dev_priv)->gen >= 5)
		ilk_fbc_activate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_activate(dev_priv);
	else
		i8xx_fbc_activate(dev_priv);
}

/* Mark FBC inactive and dispatch to the per-platform deactivation routine. */
static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = false;

	if (INTEL_INFO(dev_priv)->gen >= 5)
		ilk_fbc_deactivate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_deactivate(dev_priv);
	else
		i8xx_fbc_deactivate(dev_priv);
}

/**
 * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 *
 * FIXME: This should be tracked in the plane config eventually
 * instead of queried at runtime for most callers.
382 */ 383 bool intel_fbc_is_active(struct drm_i915_private *dev_priv) 384 { 385 return dev_priv->fbc.active; 386 } 387 388 static void intel_fbc_work_fn(struct work_struct *__work) 389 { 390 struct drm_i915_private *dev_priv = 391 container_of(__work, struct drm_i915_private, fbc.work.work); 392 struct intel_fbc *fbc = &dev_priv->fbc; 393 struct intel_fbc_work *work = &fbc->work; 394 struct intel_crtc *crtc = fbc->crtc; 395 struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe]; 396 397 if (drm_crtc_vblank_get(&crtc->base)) { 398 DRM_ERROR("vblank not available for FBC on pipe %c\n", 399 pipe_name(crtc->pipe)); 400 401 mutex_lock(&fbc->lock); 402 work->scheduled = false; 403 mutex_unlock(&fbc->lock); 404 return; 405 } 406 407 retry: 408 /* Delay the actual enabling to let pageflipping cease and the 409 * display to settle before starting the compression. Note that 410 * this delay also serves a second purpose: it allows for a 411 * vblank to pass after disabling the FBC before we attempt 412 * to modify the control registers. 413 * 414 * WaFbcWaitForVBlankBeforeEnable:ilk,snb 415 * 416 * It is also worth mentioning that since work->scheduled_vblank can be 417 * updated multiple times by the other threads, hitting the timeout is 418 * not an error condition. We'll just end up hitting the "goto retry" 419 * case below. 420 */ 421 wait_event_timeout(vblank->queue, 422 drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank, 423 msecs_to_jiffies(50)); 424 425 mutex_lock(&fbc->lock); 426 427 /* Were we cancelled? */ 428 if (!work->scheduled) 429 goto out; 430 431 /* Were we delayed again while this function was sleeping? 
*/ 432 if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) { 433 mutex_unlock(&fbc->lock); 434 goto retry; 435 } 436 437 intel_fbc_hw_activate(dev_priv); 438 439 work->scheduled = false; 440 441 out: 442 mutex_unlock(&fbc->lock); 443 drm_crtc_vblank_put(&crtc->base); 444 } 445 446 static void intel_fbc_schedule_activation(struct intel_crtc *crtc) 447 { 448 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 449 struct intel_fbc *fbc = &dev_priv->fbc; 450 struct intel_fbc_work *work = &fbc->work; 451 452 WARN_ON(!mutex_is_locked(&fbc->lock)); 453 454 if (drm_crtc_vblank_get(&crtc->base)) { 455 DRM_ERROR("vblank not available for FBC on pipe %c\n", 456 pipe_name(crtc->pipe)); 457 return; 458 } 459 460 /* It is useless to call intel_fbc_cancel_work() or cancel_work() in 461 * this function since we're not releasing fbc.lock, so it won't have an 462 * opportunity to grab it to discover that it was cancelled. So we just 463 * update the expected jiffy count. */ 464 work->scheduled = true; 465 work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base); 466 drm_crtc_vblank_put(&crtc->base); 467 468 schedule_work(&work->work); 469 } 470 471 static void intel_fbc_deactivate(struct drm_i915_private *dev_priv) 472 { 473 struct intel_fbc *fbc = &dev_priv->fbc; 474 475 WARN_ON(!mutex_is_locked(&fbc->lock)); 476 477 /* Calling cancel_work() here won't help due to the fact that the work 478 * function grabs fbc->lock. Just set scheduled to false so the work 479 * function can know it was cancelled. */ 480 fbc->work.scheduled = false; 481 482 if (fbc->active) 483 intel_fbc_hw_deactivate(dev_priv); 484 } 485 486 static bool multiple_pipes_ok(struct intel_crtc *crtc, 487 struct intel_plane_state *plane_state) 488 { 489 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 490 struct intel_fbc *fbc = &dev_priv->fbc; 491 enum i915_pipe pipe = crtc->pipe; 492 493 /* Don't even bother tracking anything we don't need. 
*/ 494 if (!no_fbc_on_multiple_pipes(dev_priv)) 495 return true; 496 497 if (plane_state->visible) 498 fbc->visible_pipes_mask |= (1 << pipe); 499 else 500 fbc->visible_pipes_mask &= ~(1 << pipe); 501 502 return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0; 503 } 504 505 static int find_compression_threshold(struct drm_i915_private *dev_priv, 506 struct drm_mm_node *node, 507 int size, 508 int fb_cpp) 509 { 510 struct i915_ggtt *ggtt = &dev_priv->ggtt; 511 int compression_threshold = 1; 512 int ret; 513 u64 end; 514 515 /* The FBC hardware for BDW/SKL doesn't have access to the stolen 516 * reserved range size, so it always assumes the maximum (8mb) is used. 517 * If we enable FBC using a CFB on that memory range we'll get FIFO 518 * underruns, even if that range is not reserved by the BIOS. */ 519 if (IS_BROADWELL(dev_priv) || 520 IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) 521 end = ggtt->stolen_size - 8 * 1024 * 1024; 522 else 523 end = ggtt->stolen_usable_size; 524 525 /* HACK: This code depends on what we will do in *_enable_fbc. If that 526 * code changes, this code needs to change as well. 527 * 528 * The enable_fbc code will attempt to use one of our 2 compression 529 * thresholds, therefore, in that case, we only have 1 resort. 530 */ 531 532 /* Try to over-allocate to reduce reallocations and fragmentation. 
*/ 533 ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1, 534 4096, 0, end); 535 if (ret == 0) 536 return compression_threshold; 537 538 again: 539 /* HW's ability to limit the CFB is 1:4 */ 540 if (compression_threshold > 4 || 541 (fb_cpp == 2 && compression_threshold == 2)) 542 return 0; 543 544 ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1, 545 4096, 0, end); 546 if (ret && INTEL_INFO(dev_priv)->gen <= 4) { 547 return 0; 548 } else if (ret) { 549 compression_threshold <<= 1; 550 goto again; 551 } else { 552 return compression_threshold; 553 } 554 } 555 556 static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) 557 { 558 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 559 struct intel_fbc *fbc = &dev_priv->fbc; 560 struct drm_mm_node *compressed_llb = NULL; 561 int size, fb_cpp, ret; 562 563 WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb)); 564 565 size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache); 566 fb_cpp = drm_format_plane_cpp(fbc->state_cache.fb.pixel_format, 0); 567 568 ret = find_compression_threshold(dev_priv, &fbc->compressed_fb, 569 size, fb_cpp); 570 if (!ret) 571 goto err_llb; 572 else if (ret > 1) { 573 DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. 
Try to increase stolen memory size if available in BIOS.\n"); 574 575 } 576 577 fbc->threshold = ret; 578 579 if (INTEL_INFO(dev_priv)->gen >= 5) 580 I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start); 581 else if (IS_GM45(dev_priv)) { 582 I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start); 583 } else { 584 compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); 585 if (!compressed_llb) 586 goto err_fb; 587 588 ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb, 589 4096, 4096); 590 if (ret) 591 goto err_fb; 592 593 fbc->compressed_llb = compressed_llb; 594 595 I915_WRITE(FBC_CFB_BASE, 596 dev_priv->mm.stolen_base + fbc->compressed_fb.start); 597 I915_WRITE(FBC_LL_BASE, 598 dev_priv->mm.stolen_base + compressed_llb->start); 599 } 600 601 DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n", 602 fbc->compressed_fb.size, fbc->threshold); 603 604 return 0; 605 606 err_fb: 607 kfree(compressed_llb); 608 i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); 609 err_llb: 610 pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. 
Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); 611 return -ENOSPC; 612 } 613 614 static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) 615 { 616 struct intel_fbc *fbc = &dev_priv->fbc; 617 618 if (drm_mm_node_allocated(&fbc->compressed_fb)) 619 i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); 620 621 if (fbc->compressed_llb) { 622 i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb); 623 kfree(fbc->compressed_llb); 624 } 625 } 626 627 void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) 628 { 629 struct intel_fbc *fbc = &dev_priv->fbc; 630 631 if (!fbc_supported(dev_priv)) 632 return; 633 634 mutex_lock(&fbc->lock); 635 __intel_fbc_cleanup_cfb(dev_priv); 636 mutex_unlock(&fbc->lock); 637 } 638 639 static bool stride_is_valid(struct drm_i915_private *dev_priv, 640 unsigned int stride) 641 { 642 /* These should have been caught earlier. */ 643 WARN_ON(stride < 512); 644 WARN_ON((stride & (64 - 1)) != 0); 645 646 /* Below are the additional FBC restrictions. */ 647 648 if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv)) 649 return stride == 4096 || stride == 8192; 650 651 if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048) 652 return false; 653 654 if (stride > 16384) 655 return false; 656 657 return true; 658 } 659 660 static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, 661 uint32_t pixel_format) 662 { 663 switch (pixel_format) { 664 case DRM_FORMAT_XRGB8888: 665 case DRM_FORMAT_XBGR8888: 666 return true; 667 case DRM_FORMAT_XRGB1555: 668 case DRM_FORMAT_RGB565: 669 /* 16bpp not supported on gen2 */ 670 if (IS_GEN2(dev_priv)) 671 return false; 672 /* WaFbcOnly1to1Ratio:ctg */ 673 if (IS_G4X(dev_priv)) 674 return false; 675 return true; 676 default: 677 return false; 678 } 679 } 680 681 /* 682 * For some reason, the hardware tracking starts looking at whatever we 683 * programmed as the display plane base address register. 
 * It does not look at
 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
 * variables instead of just looking at the pipe/plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	unsigned int effective_w, effective_h, max_w, max_h;

	/* Maximum surface size the hardware tracker can cover, per platform. */
	if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
					&effective_h);
	effective_w += crtc->adjusted_x;
	effective_h += crtc->adjusted_y;

	return effective_w <= max_w && effective_h <= max_h;
}

/* Snapshot the plane/fb state that FBC needs into fbc->state_cache. */
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
					 struct intel_crtc_state *crtc_state,
					 struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj;

	cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cache->crtc.hsw_bdw_pixel_rate =
			ilk_pipe_pixel_rate(crtc_state);

	cache->plane.rotation = plane_state->base.rotation;
	/* src coordinates are 16.16 fixed point; shift down to whole pixels. */
	cache->plane.src_w = drm_rect_width(&plane_state->src) >> 16;
	cache->plane.src_h = drm_rect_height(&plane_state->src) >> 16;
	cache->plane.visible = plane_state->visible;

	/* The fb fields below are only needed when the plane is visible. */
	if (!cache->plane.visible)
		return;

	obj = intel_fb_obj(fb);

	/* FIXME: We lack the proper locking here, so only run this on the
	 * platforms that need. */
	if (IS_GEN(dev_priv, 5, 6))
		cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
	cache->fb.pixel_format = fb->pixel_format;
	cache->fb.stride = fb->pitches[0];
	cache->fb.fence_reg = obj->fence_reg;
	cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
}

/* Check the cached state against every FBC activation constraint.
 * Sets fbc->no_fbc_reason on failure. Caller holds fbc->lock.
 */
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	if (!cache->plane.visible) {
		fbc->no_fbc_reason = "primary plane not visible";
		return false;
	}

	if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) ||
	    (cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) {
		fbc->no_fbc_reason = "incompatible mode";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
		fbc->no_fbc_reason = "mode too large for compression";
		return false;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (cache->fb.tiling_mode != I915_TILING_X ||
	    cache->fb.fence_reg == I915_FENCE_REG_NONE) {
		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
		return false;
	}
	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
	    cache->plane.rotation != DRM_ROTATE_0) {
		fbc->no_fbc_reason = "rotation unsupported";
		return false;
	}

	if (!stride_is_valid(dev_priv, cache->fb.stride)) {
		fbc->no_fbc_reason = "framebuffer stride not supported";
		return false;
	}

	if (!pixel_format_is_valid(dev_priv, cache->fb.pixel_format)) {
		fbc->no_fbc_reason = "pixel format is invalid";
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk_freq * 95 / 100) {
		fbc->no_fbc_reason = "pixel rate is too big";
		return false;
	}

	/* It is possible for the required CFB size change without a
	 * crtc->disable + crtc->enable since it is possible to change the
	 * stride without triggering a full modeset. Since we try to
	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
	 * if this happens, but if we exceed the current CFB size we'll have to
	 * disable FBC. Notice that it would be possible to disable FBC, wait
	 * for a frame, free the stolen node, then try to reenable FBC in case
	 * we didn't get any invalidate/deactivate calls, but this would require
	 * a lot of tracking just for a specific case. If we conclude it's an
	 * important case, we can implement it later.
	 */
	if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
	    fbc->compressed_fb.size * fbc->threshold) {
		fbc->no_fbc_reason = "CFB requirements changed";
		return false;
	}

	return true;
}

/* Early per-CRTC checks used when choosing which CRTC gets FBC. */
static bool intel_fbc_can_choose(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (intel_vgpu_active(dev_priv)) {
		fbc->no_fbc_reason = "VGPU is active";
		return false;
	}

	if (!i915.enable_fbc) {
		fbc->no_fbc_reason = "disabled per module param or by default";
		return false;
	}

	if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
		fbc->no_fbc_reason = "no enabled pipes can have FBC";
		return false;
	}

	if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) {
		fbc->no_fbc_reason = "no enabled planes can have FBC";
		return false;
	}

	return true;
}

/* Fill @params from the state cache for later register programming. */
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
				     struct intel_fbc_reg_params *params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* Since all our fields are integer types, use memset here so the
	 * comparison function can rely on memcmp because the padding will be
*/ 857 memset(params, 0, sizeof(*params)); 858 859 params->crtc.pipe = crtc->pipe; 860 params->crtc.plane = crtc->plane; 861 params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc); 862 863 params->fb.pixel_format = cache->fb.pixel_format; 864 params->fb.stride = cache->fb.stride; 865 params->fb.fence_reg = cache->fb.fence_reg; 866 867 params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); 868 869 params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset; 870 } 871 872 static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, 873 struct intel_fbc_reg_params *params2) 874 { 875 /* We can use this since intel_fbc_get_reg_params() does a memset. */ 876 return memcmp(params1, params2, sizeof(*params1)) == 0; 877 } 878 879 void intel_fbc_pre_update(struct intel_crtc *crtc, 880 struct intel_crtc_state *crtc_state, 881 struct intel_plane_state *plane_state) 882 { 883 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 884 struct intel_fbc *fbc = &dev_priv->fbc; 885 886 if (!fbc_supported(dev_priv)) 887 return; 888 889 mutex_lock(&fbc->lock); 890 891 if (!multiple_pipes_ok(crtc, plane_state)) { 892 fbc->no_fbc_reason = "more than one pipe active"; 893 goto deactivate; 894 } 895 896 if (!fbc->enabled || fbc->crtc != crtc) 897 goto unlock; 898 899 intel_fbc_update_state_cache(crtc, crtc_state, plane_state); 900 901 deactivate: 902 intel_fbc_deactivate(dev_priv); 903 unlock: 904 mutex_unlock(&fbc->lock); 905 } 906 907 static void __intel_fbc_post_update(struct intel_crtc *crtc) 908 { 909 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 910 struct intel_fbc *fbc = &dev_priv->fbc; 911 struct intel_fbc_reg_params old_params; 912 913 WARN_ON(!mutex_is_locked(&fbc->lock)); 914 915 if (!fbc->enabled || fbc->crtc != crtc) 916 return; 917 918 if (!intel_fbc_can_activate(crtc)) { 919 WARN_ON(fbc->active); 920 return; 921 } 922 923 old_params = fbc->params; 924 intel_fbc_get_reg_params(crtc, &fbc->params); 925 926 /* If the scanout has 
not changed, don't modify the FBC settings. 927 * Note that we make the fundamental assumption that the fb->obj 928 * cannot be unpinned (and have its GTT offset and fence revoked) 929 * without first being decoupled from the scanout and FBC disabled. 930 */ 931 if (fbc->active && 932 intel_fbc_reg_params_equal(&old_params, &fbc->params)) 933 return; 934 935 intel_fbc_deactivate(dev_priv); 936 intel_fbc_schedule_activation(crtc); 937 fbc->no_fbc_reason = "FBC enabled (active or scheduled)"; 938 } 939 940 void intel_fbc_post_update(struct intel_crtc *crtc) 941 { 942 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 943 struct intel_fbc *fbc = &dev_priv->fbc; 944 945 if (!fbc_supported(dev_priv)) 946 return; 947 948 mutex_lock(&fbc->lock); 949 __intel_fbc_post_update(crtc); 950 mutex_unlock(&fbc->lock); 951 } 952 953 static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) 954 { 955 if (fbc->enabled) 956 return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit; 957 else 958 return fbc->possible_framebuffer_bits; 959 } 960 961 void intel_fbc_invalidate(struct drm_i915_private *dev_priv, 962 unsigned int frontbuffer_bits, 963 enum fb_op_origin origin) 964 { 965 struct intel_fbc *fbc = &dev_priv->fbc; 966 967 if (!fbc_supported(dev_priv)) 968 return; 969 970 if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) 971 return; 972 973 mutex_lock(&fbc->lock); 974 975 fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits; 976 977 if (fbc->enabled && fbc->busy_bits) 978 intel_fbc_deactivate(dev_priv); 979 980 mutex_unlock(&fbc->lock); 981 } 982 983 void intel_fbc_flush(struct drm_i915_private *dev_priv, 984 unsigned int frontbuffer_bits, enum fb_op_origin origin) 985 { 986 struct intel_fbc *fbc = &dev_priv->fbc; 987 988 if (!fbc_supported(dev_priv)) 989 return; 990 991 mutex_lock(&fbc->lock); 992 993 fbc->busy_bits &= ~frontbuffer_bits; 994 995 if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) 996 goto out; 997 998 if 
(!fbc->busy_bits && fbc->enabled && 999 (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { 1000 if (fbc->active) 1001 intel_fbc_recompress(dev_priv); 1002 else 1003 __intel_fbc_post_update(fbc->crtc); 1004 } 1005 1006 out: 1007 mutex_unlock(&fbc->lock); 1008 } 1009 1010 /** 1011 * intel_fbc_choose_crtc - select a CRTC to enable FBC on 1012 * @dev_priv: i915 device instance 1013 * @state: the atomic state structure 1014 * 1015 * This function looks at the proposed state for CRTCs and planes, then chooses 1016 * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to 1017 * true. 1018 * 1019 * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe 1020 * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc. 1021 */ 1022 void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, 1023 struct drm_atomic_state *state) 1024 { 1025 struct intel_fbc *fbc = &dev_priv->fbc; 1026 struct drm_crtc *crtc; 1027 struct drm_crtc_state *crtc_state; 1028 struct drm_plane *plane; 1029 struct drm_plane_state *plane_state; 1030 bool fbc_crtc_present = false; 1031 int i, j; 1032 1033 mutex_lock(&fbc->lock); 1034 1035 for_each_crtc_in_state(state, crtc, crtc_state, i) { 1036 if (fbc->crtc == to_intel_crtc(crtc)) { 1037 fbc_crtc_present = true; 1038 break; 1039 } 1040 } 1041 /* This atomic commit doesn't involve the CRTC currently tied to FBC. */ 1042 if (!fbc_crtc_present && fbc->crtc != NULL) 1043 goto out; 1044 1045 /* Simply choose the first CRTC that is compatible and has a visible 1046 * plane. We could go for fancier schemes such as checking the plane 1047 * size, but this would just affect the few platforms that don't tie FBC 1048 * to pipe or plane A. 
*/ 1049 for_each_plane_in_state(state, plane, plane_state, i) { 1050 struct intel_plane_state *intel_plane_state = 1051 to_intel_plane_state(plane_state); 1052 1053 if (!intel_plane_state->visible) 1054 continue; 1055 1056 for_each_crtc_in_state(state, crtc, crtc_state, j) { 1057 struct intel_crtc_state *intel_crtc_state = 1058 to_intel_crtc_state(crtc_state); 1059 1060 if (plane_state->crtc != crtc) 1061 continue; 1062 1063 if (!intel_fbc_can_choose(to_intel_crtc(crtc))) 1064 break; 1065 1066 intel_crtc_state->enable_fbc = true; 1067 goto out; 1068 } 1069 } 1070 1071 out: 1072 mutex_unlock(&fbc->lock); 1073 } 1074 1075 /** 1076 * intel_fbc_enable: tries to enable FBC on the CRTC 1077 * @crtc: the CRTC 1078 * @crtc_state: corresponding &drm_crtc_state for @crtc 1079 * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc 1080 * 1081 * This function checks if the given CRTC was chosen for FBC, then enables it if 1082 * possible. Notice that it doesn't activate FBC. It is valid to call 1083 * intel_fbc_enable multiple times for the same pipe without an 1084 * intel_fbc_disable in the middle, as long as it is deactivated. 
1085 */ 1086 void intel_fbc_enable(struct intel_crtc *crtc, 1087 struct intel_crtc_state *crtc_state, 1088 struct intel_plane_state *plane_state) 1089 { 1090 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1091 struct intel_fbc *fbc = &dev_priv->fbc; 1092 1093 if (!fbc_supported(dev_priv)) 1094 return; 1095 1096 mutex_lock(&fbc->lock); 1097 1098 if (fbc->enabled) { 1099 WARN_ON(fbc->crtc == NULL); 1100 if (fbc->crtc == crtc) { 1101 WARN_ON(!crtc_state->enable_fbc); 1102 WARN_ON(fbc->active); 1103 } 1104 goto out; 1105 } 1106 1107 if (!crtc_state->enable_fbc) 1108 goto out; 1109 1110 WARN_ON(fbc->active); 1111 WARN_ON(fbc->crtc != NULL); 1112 1113 intel_fbc_update_state_cache(crtc, crtc_state, plane_state); 1114 if (intel_fbc_alloc_cfb(crtc)) { 1115 fbc->no_fbc_reason = "not enough stolen memory"; 1116 goto out; 1117 } 1118 1119 DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe)); 1120 fbc->no_fbc_reason = "FBC enabled but not active yet\n"; 1121 1122 fbc->enabled = true; 1123 fbc->crtc = crtc; 1124 out: 1125 mutex_unlock(&fbc->lock); 1126 } 1127 1128 /** 1129 * __intel_fbc_disable - disable FBC 1130 * @dev_priv: i915 device instance 1131 * 1132 * This is the low level function that actually disables FBC. Callers should 1133 * grab the FBC lock. 1134 */ 1135 static void __intel_fbc_disable(struct drm_i915_private *dev_priv) 1136 { 1137 struct intel_fbc *fbc = &dev_priv->fbc; 1138 struct intel_crtc *crtc = fbc->crtc; 1139 1140 WARN_ON(!mutex_is_locked(&fbc->lock)); 1141 WARN_ON(!fbc->enabled); 1142 WARN_ON(fbc->active); 1143 WARN_ON(crtc->active); 1144 1145 DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe)); 1146 1147 __intel_fbc_cleanup_cfb(dev_priv); 1148 1149 fbc->enabled = false; 1150 fbc->crtc = NULL; 1151 } 1152 1153 /** 1154 * intel_fbc_disable - disable FBC if it's associated with crtc 1155 * @crtc: the CRTC 1156 * 1157 * This function disables FBC if it's associated with the provided CRTC. 
 */
void intel_fbc_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	if (fbc->crtc == crtc)
		__intel_fbc_disable(dev_priv);
	mutex_unlock(&fbc->lock);

	/* Don't leave a scheduled activation work item running after
	 * disable; must be done outside fbc->lock (the work takes it). */
	cancel_work_sync(&fbc->work.work);
}

/**
 * intel_fbc_global_disable - globally disable FBC
 * @dev_priv: i915 device instance
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	if (fbc->enabled)
		__intel_fbc_disable(dev_priv);
	mutex_unlock(&fbc->lock);

	/* As in intel_fbc_disable(): flush pending activation work. */
	cancel_work_sync(&fbc->work.work);
}

/**
 * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
 * @dev_priv: i915 device instance
 *
 * The FBC code needs to track CRTC visibility since the older platforms can't
 * have FBC enabled while multiple pipes are used. This function does the
 * initial setup at driver load to make sure FBC is matching the real hardware.
 */
void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	/* Don't even bother tracking anything if we don't need to. */
	if (!no_fbc_on_multiple_pipes(dev_priv))
		return;

	/* Record every pipe whose primary plane is active and visible. */
	for_each_intel_crtc(&dev_priv->drm, crtc)
		if (intel_crtc_active(&crtc->base) &&
		    to_intel_plane_state(crtc->base.primary->state)->visible)
			dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}

/*
 * The DDX driver changes its behavior depending on the value it reads from
 * i915.enable_fbc, so sanitize it by translating the default value into either
 * 0 or 1 in order to allow it to know what's going on.
 *
 * Notice that this is done at driver initialization and we still allow user
 * space to change the value during runtime without sanitizing it again. IGT
 * relies on being able to change i915.enable_fbc at runtime.
 */
static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
{
	/* User explicitly set the option: just clamp it to 0/1. */
	if (i915.enable_fbc >= 0)
		return !!i915.enable_fbc;

	if (!HAS_FBC(dev_priv))
		return 0;

	/* Default policy: only Broadwell gets FBC on by default here. */
	if (IS_BROADWELL(dev_priv))
		return 1;

	return 0;
}

/*
 * Returns true when FBC must be disabled because the IOMMU is active on an
 * affected platform (screen flicker workaround). Compiled out when the
 * kernel has no Intel IOMMU support.
 */
static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_INTEL_IOMMU
	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
	if (intel_iommu_gfx_mapped &&
	    (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
		DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
		return true;
	}
#endif

	return false;
}

/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	enum i915_pipe pipe;

	INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
	/* DragonFly locking primitive standing in for mutex_init();
	 * LK_CANRECURSE presumably mirrors the original mutex semantics
	 * closely enough -- NOTE(review): port-specific, confirm. */
	lockinit(&fbc->lock, "i915fl", 0, LK_CANRECURSE);
	fbc->enabled = false;
	fbc->active = false;
	fbc->work.scheduled = false;

	if (need_fbc_vtd_wa(dev_priv))
		mkwrite_device_info(dev_priv)->has_fbc = false;

	i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);

	if (!HAS_FBC(dev_priv)) {
		fbc->no_fbc_reason = "unsupported by this chipset";
		return;
	}

	/* On platforms where FBC is tied to pipe A, only track pipe A's
	 * primary plane frontbuffer bit. */
	for_each_pipe(dev_priv, pipe) {
		fbc->possible_framebuffer_bits |=
			INTEL_FRONTBUFFER_PRIMARY(pipe);

		if (fbc_on_pipe_a_only(dev_priv))
			break;
	}

	/* This value was pulled out of someone's hat */
	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv))
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);

	/* We still don't have any sort of hardware state readout for FBC, so
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state. */
	if (intel_fbc_hw_is_active(dev_priv))
		intel_fbc_hw_deactivate(dev_priv);
}