/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns. It comes from keeping the memory footprint small
 * and having fewer memory pages opened and accessed for refreshing the display.
 *
 * i915 is responsible for reserving stolen memory for FBC and configuring its
 * offset on proper registers. The hardware takes care of all
 * compress/decompress. However there are many known cases where we have to
 * forcibly disable it to allow proper screen updates.
 */

#include "intel_drv.h"
#include "i915_drv.h"

/* FBC is supported iff intel_fbc_init() installed the per-platform vfuncs. */
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
	return dev_priv->fbc.activate != NULL;
}

/* HSW and gen8+ hardware can only compress the plane scanning out pipe A. */
static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
{
	return IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8;
}

/* Pre-gen4 hardware can only compress plane A. */
static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen < 4;
}

/*
 * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
 * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
 * origin so the x and y offsets can actually fit the registers. As a
 * consequence, the fence doesn't really start exactly at the display plane
 * address we program because it starts at the real start of the buffer, so we
 * have to take this into consideration here.
 */
static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
{
	return crtc->base.y - crtc->adjusted_y;
}

/*
 * For SKL+, the plane source size used by the hardware is based on the value we
 * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
 * we wrote to PIPESRC.
 */
static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
					    int *width, int *height)
{
	struct intel_plane_state *plane_state =
			to_intel_plane_state(crtc->base.primary->state);
	int w, h;

	/* A 90/270 rotated plane swaps width and height as seen by the HW. */
	if (intel_rotation_90_or_270(plane_state->base.rotation)) {
		w = drm_rect_height(&plane_state->src) >> 16;
		h = drm_rect_width(&plane_state->src) >> 16;
	} else {
		w = drm_rect_width(&plane_state->src) >> 16;
		h = drm_rect_height(&plane_state->src) >> 16;
	}

	/* Either output pointer may be NULL when the caller only wants one. */
	if (width)
		*width = w;
	if (height)
		*height = h;
}

/* Size in bytes of the CFB needed to compress @fb as scanned out by @crtc. */
static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc,
					struct drm_framebuffer *fb)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	int lines;

	intel_fbc_get_plane_source_size(crtc, NULL, &lines);
	/* Gen7+ FBC only tracks up to 2048 lines. */
	if (INTEL_INFO(dev_priv)->gen >= 7)
		lines = min(lines, 2048);

	/* Hardware needs the full buffer stride, not just the active area. */
	return lines * fb->pitches[0];
}

/* Gen2/3 (8xx-style) FBC disable: clear the enable bit and wait for idle. */
static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 fbc_ctl;

	dev_priv->fbc.active = false;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}
}

/* Gen2/3 (8xx-style) FBC enable: program pitch, tags, fence and control. */
static void i8xx_fbc_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	dev_priv->fbc.active = true;

	/* Note: fbc.threshold == 1 for i8xx */
	cfb_pitch = intel_fbc_calculate_cfb_size(crtc, fb) / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev_priv))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG(i), 0);

	if (IS_GEN4(dev_priv)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, get_crtc_fence_y_offset(crtc));
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	/* Preserve only the interval field of the old value. */
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);
}

static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

/* G4x-style FBC enable: single DPFC control register plus fence Y offset. */
static void g4x_fbc_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 dpfc_ctl;

	dev_priv->fbc.active = true;

	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
	/* 16bpp formats use the 2x compression limit. */
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, get_crtc_fence_y_offset(crtc));

	/* enable it...
 */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}

static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	dev_priv->fbc.active = false;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
	}
}

static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
	POSTING_READ(MSG_FBC_REND_STATE);
}

/* ILK/SNB FBC enable: program DPFC limit/fence, RT base, and on SNB the
 * CPU fence registers as well. */
static void ilk_fbc_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;
	unsigned int y_offset;

	dev_priv->fbc.active = true;

	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
	/* 16bpp needs the next compression ratio up. */
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev_priv))
		dpfc_ctl |= obj->fence_reg;

	y_offset = get_crtc_fence_y_offset(crtc);
	I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev_priv)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
	}

	intel_fbc_recompress(dev_priv);
}

static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	dev_priv->fbc.active = false;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
	}
}

static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

/* IVB+ FBC enable: shares the ILK DPFC control register but needs extra
 * chicken-bit workarounds and always uses the CPU fence registers. */
static void gen7_fbc_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dev_priv->fbc.active = true;

	dpfc_ctl = 0;
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);

	/* 16bpp needs the next compression ratio up. */
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	/* Debug aid: paint compressed vs uncompressed areas differently. */
	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));

	intel_fbc_recompress(dev_priv);
}

/**
 * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 * FIXME: This should be tracked in the plane config eventually
 * instead of queried at runtime for most callers.
 */
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->fbc.active;
}

/* Call the platform activate vfunc and record which fb/y offset was used,
 * so __intel_fbc_update() can detect an unchanged scanout later. */
static void intel_fbc_activate(const struct drm_framebuffer *fb)
{
	struct drm_i915_private *dev_priv = fb->dev->dev_private;
	struct intel_crtc *crtc = dev_priv->fbc.crtc;

	dev_priv->fbc.activate(crtc);

	dev_priv->fbc.fb_id = fb->base.id;
	dev_priv->fbc.y = crtc->base.y;
}

/* Deferred-activation worker: waits out the post-flip delay, then activates
 * FBC unless the work was cancelled or rescheduled in the meantime. */
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct drm_i915_private *dev_priv =
		container_of(__work, struct drm_i915_private, fbc.work.work);
	struct intel_fbc_work *work = &dev_priv->fbc.work;
	struct intel_crtc *crtc = dev_priv->fbc.crtc;
	int delay_ms = 50;

retry:
	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	wait_remaining_ms_from_jiffies(work->enable_jiffies, delay_ms);

	mutex_lock(&dev_priv->fbc.lock);

	/* Were we cancelled? */
	if (!work->scheduled)
		goto out;

	/* Were we delayed again while this function was sleeping? */
	if (time_after(work->enable_jiffies + msecs_to_jiffies(delay_ms),
		       jiffies)) {
		mutex_unlock(&dev_priv->fbc.lock);
		goto retry;
	}

	/* Only activate if the fb hasn't been swapped since scheduling. */
	if (crtc->base.primary->fb == work->fb)
		intel_fbc_activate(work->fb);

	work->scheduled = false;

out:
	mutex_unlock(&dev_priv->fbc.lock);
}

static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
{
	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
	dev_priv->fbc.work.scheduled = false;
}

static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc_work *work = &dev_priv->fbc.work;

	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

	/* It is useless to call intel_fbc_cancel_work() in this function since
	 * we're not releasing fbc.lock, so it won't have an opportunity to grab
	 * it to discover that it was cancelled. So we just update the expected
	 * jiffy count. */
	work->fb = crtc->base.primary->fb;
	work->scheduled = true;
	work->enable_jiffies = jiffies;

	schedule_work(&work->work);
}

/* Cancel any pending activation and deactivate FBC in hardware if needed.
 * Caller must hold fbc.lock. */
static void __intel_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

	intel_fbc_cancel_work(dev_priv);

	if (dev_priv->fbc.active)
		dev_priv->fbc.deactivate(dev_priv);
}

/*
 * intel_fbc_deactivate - deactivate FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * This function deactivates FBC if it's associated with the provided CRTC.
 */
void intel_fbc_deactivate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&dev_priv->fbc.lock);
	if (dev_priv->fbc.crtc == crtc)
		__intel_fbc_deactivate(dev_priv);
	mutex_unlock(&dev_priv->fbc.lock);
}

/* Record why FBC can't be used; only logs when the reason changes. */
static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      const char *reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return;

	dev_priv->fbc.no_fbc_reason = reason;
	DRM_DEBUG_KMS("Disabling FBC: %s\n", reason);
}

/* Does this CRTC's pipe/plane combination satisfy the platform limits? */
static bool crtc_can_fbc(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
		return false;

	if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
		return false;

	return true;
}

/* The CRTC must be active with a visible primary plane to be compressed. */
static bool crtc_is_valid(struct intel_crtc *crtc)
{
	if (!intel_crtc_active(&crtc->base))
		return false;

	if (!to_intel_plane_state(crtc->base.primary->state)->visible)
		return false;

	return true;
}

/* On gen4 and below, FBC requires that at most one pipe is active. */
static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
{
	enum i915_pipe pipe;
	int n_pipes = 0;
	struct drm_crtc *crtc;

	if (INTEL_INFO(dev_priv)->gen > 4)
		return true;

	for_each_pipe(dev_priv, pipe) {
		crtc = dev_priv->pipe_to_crtc_mapping[pipe];

		if (intel_crtc_active(crtc) &&
		    to_intel_plane_state(crtc->primary->state)->visible)
			n_pipes++;
	}

	return (n_pipes < 2);
}

/* Reserve a CFB node in stolen memory, halving the requested size (and
 * doubling the compression threshold) on each failure. Returns the threshold
 * in use, or 0 if no allocation succeeded. Note the size <<= 1 / size >>= 1
 * argument expressions intentionally update @size as a side effect. */
static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      int size,
				      int fb_cpp)
{
	int compression_threshold = 1;
	int ret;
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(dev_priv) ||
	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
	else
		end = dev_priv->gtt.stolen_usable_size;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}

/* Allocate the CFB (and, pre-G4x, the line-length buffer) from stolen memory
 * and program the hardware base registers. Returns 0 or -ENOSPC. */
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->state->fb;
	struct drm_mm_node *compressed_llb = NULL;
	int size, fb_cpp, ret;

	WARN_ON(drm_mm_node_allocated(&dev_priv->fbc.compressed_fb));

	size = intel_fbc_calculate_cfb_size(crtc, fb);
	fb_cpp = drm_format_plane_cpp(fb->pixel_format, 0);

	ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

	}

	dev_priv->fbc.threshold = ret;

	if (INTEL_INFO(dev_priv)->gen >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
	else if (IS_GM45(dev_priv)) {
		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
	} else {
		/* Pre-G4x also needs a separate line-length buffer. */
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		dev_priv->fbc.compressed_llb = compressed_llb;

		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->mm.stolen_base + compressed_llb->start);
	}

	DRM_DEBUG_KMS("reserved %lu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      dev_priv->fbc.compressed_fb.size,
		      dev_priv->fbc.threshold);

	return 0;

err_fb:
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
err_llb:
	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}

static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	if (drm_mm_node_allocated(&dev_priv->fbc.compressed_fb))
		i915_gem_stolen_remove_node(dev_priv,
					    &dev_priv->fbc.compressed_fb);

	if (dev_priv->fbc.compressed_llb) {
		i915_gem_stolen_remove_node(dev_priv,
					    dev_priv->fbc.compressed_llb);
		kfree(dev_priv->fbc.compressed_llb);
	}
}

void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&dev_priv->fbc.lock);
	__intel_fbc_cleanup_cfb(dev_priv);
	mutex_unlock(&dev_priv->fbc.lock);
}

static bool stride_is_valid(struct drm_i915_private *dev_priv,
			    unsigned int stride)
{
	/* These should have been caught earlier. */
	WARN_ON(stride < 512);
	WARN_ON((stride & (64 - 1)) != 0);

	/* Below are the additional FBC restrictions. */

	if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
		return stride == 4096 || stride == 8192;

	if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
		return false;

	if (stride > 16384)
		return false;

	return true;
}

static bool pixel_format_is_valid(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	switch (fb->pixel_format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_RGB565:
		/* 16bpp not supported on gen2 */
		if (IS_GEN2(dev))
			return false;
		/* WaFbcOnly1to1Ratio:ctg */
		if (IS_G4X(dev_priv))
			return false;
		return true;
	default:
		return false;
	}
}

/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register.
It does not look at 710 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y} 711 * variables instead of just looking at the pipe/plane size. 712 */ 713 static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) 714 { 715 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 716 unsigned int effective_w, effective_h, max_w, max_h; 717 718 if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) { 719 max_w = 4096; 720 max_h = 4096; 721 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { 722 max_w = 4096; 723 max_h = 2048; 724 } else { 725 max_w = 2048; 726 max_h = 1536; 727 } 728 729 intel_fbc_get_plane_source_size(crtc, &effective_w, &effective_h); 730 effective_w += crtc->adjusted_x; 731 effective_h += crtc->adjusted_y; 732 733 return effective_w <= max_w && effective_h <= max_h; 734 } 735 736 /** 737 * __intel_fbc_update - activate/deactivate FBC as needed, unlocked 738 * @crtc: the CRTC that triggered the update 739 * 740 * This function completely reevaluates the status of FBC, then activates, 741 * deactivates or maintains it on the same state. 
 */
static void __intel_fbc_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;

	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

	if (!multiple_pipes_ok(dev_priv)) {
		set_no_fbc_reason(dev_priv, "more than one pipe active");
		goto out_disable;
	}

	/* Nothing to reevaluate unless FBC is enabled on this very CRTC. */
	if (!dev_priv->fbc.enabled || dev_priv->fbc.crtc != crtc)
		return;

	if (!crtc_is_valid(crtc)) {
		set_no_fbc_reason(dev_priv, "no output");
		goto out_disable;
	}

	fb = crtc->base.primary->fb;
	obj = intel_fb_obj(fb);
	adjusted_mode = &crtc->config->base.adjusted_mode;

	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		set_no_fbc_reason(dev_priv, "incompatible mode");
		goto out_disable;
	}

	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
		set_no_fbc_reason(dev_priv, "mode too large for compression");
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		set_no_fbc_reason(dev_priv, "framebuffer not tiled or fenced");
		goto out_disable;
	}
	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
	    crtc->base.primary->state->rotation != BIT(DRM_ROTATE_0)) {
		set_no_fbc_reason(dev_priv, "rotation unsupported");
		goto out_disable;
	}

	if (!stride_is_valid(dev_priv, fb->pitches[0])) {
		set_no_fbc_reason(dev_priv, "framebuffer stride not supported");
		goto out_disable;
	}

	if (!pixel_format_is_valid(fb)) {
		set_no_fbc_reason(dev_priv, "pixel format is invalid");
		goto out_disable;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
	    ilk_pipe_pixel_rate(crtc->config) >=
	    dev_priv->cdclk_freq * 95 / 100) {
		set_no_fbc_reason(dev_priv, "pixel rate is too big");
		goto out_disable;
	}

	/* It is possible for the required CFB size change without a
	 * crtc->disable + crtc->enable since it is possible to change the
	 * stride without triggering a full modeset. Since we try to
	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
	 * if this happens, but if we exceed the current CFB size we'll have to
	 * disable FBC. Notice that it would be possible to disable FBC, wait
	 * for a frame, free the stolen node, then try to reenable FBC in case
	 * we didn't get any invalidate/deactivate calls, but this would require
	 * a lot of tracking just for a specific case. If we conclude it's an
	 * important case, we can implement it later. */
	if (intel_fbc_calculate_cfb_size(crtc, fb) >
	    dev_priv->fbc.compressed_fb.size * dev_priv->fbc.threshold) {
		set_no_fbc_reason(dev_priv, "CFB requirements changed");
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.crtc == crtc &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->base.y &&
	    dev_priv->fbc.active)
		return;

	if (intel_fbc_is_active(dev_priv)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("deactivating FBC for update\n");
		__intel_fbc_deactivate(dev_priv);
	}

	intel_fbc_schedule_activation(crtc);
	dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)";
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_is_active(dev_priv)) {
		DRM_DEBUG_KMS("unsupported config, deactivating FBC\n");
		__intel_fbc_deactivate(dev_priv);
	}
}

/*
 * intel_fbc_update - activate/deactivate FBC as needed
 * @crtc: the CRTC that triggered the update
 *
 * This function reevaluates the overall state and activates or deactivates FBC.
 */
void intel_fbc_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&dev_priv->fbc.lock);
	__intel_fbc_update(crtc);
	mutex_unlock(&dev_priv->fbc.lock);
}

/* Frontbuffer invalidate hook: remember which busy bits affect FBC and
 * deactivate compression while CPU writes are pending. */
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	unsigned int fbc_bits;

	if (!fbc_supported(dev_priv))
		return;

	/* GTT writes are caught by the fence, no need to react. */
	if (origin == ORIGIN_GTT)
		return;

	mutex_lock(&dev_priv->fbc.lock);

	if (dev_priv->fbc.enabled)
		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
	else
		fbc_bits = dev_priv->fbc.possible_framebuffer_bits;

	dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);

	if (dev_priv->fbc.busy_bits)
		__intel_fbc_deactivate(dev_priv);

	mutex_unlock(&dev_priv->fbc.lock);
}

/* Frontbuffer flush hook: clear busy bits and either recompress or do a
 * full update depending on the flush origin. */
void intel_fbc_flush(struct drm_i915_private *dev_priv,
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	if (!fbc_supported(dev_priv))
		return;

	if (origin == ORIGIN_GTT)
		return;

	mutex_lock(&dev_priv->fbc.lock);

	dev_priv->fbc.busy_bits &= ~frontbuffer_bits;

	if (!dev_priv->fbc.busy_bits &&
dev_priv->fbc.enabled) { 938 if (origin != ORIGIN_FLIP && dev_priv->fbc.active) { 939 intel_fbc_recompress(dev_priv); 940 } else { 941 __intel_fbc_deactivate(dev_priv); 942 __intel_fbc_update(dev_priv->fbc.crtc); 943 } 944 } 945 946 mutex_unlock(&dev_priv->fbc.lock); 947 } 948 949 /** 950 * intel_fbc_enable: tries to enable FBC on the CRTC 951 * @crtc: the CRTC 952 * 953 * This function checks if it's possible to enable FBC on the following CRTC, 954 * then enables it. Notice that it doesn't activate FBC. 955 */ 956 void intel_fbc_enable(struct intel_crtc *crtc) 957 { 958 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 959 960 if (!fbc_supported(dev_priv)) 961 return; 962 963 mutex_lock(&dev_priv->fbc.lock); 964 965 if (dev_priv->fbc.enabled) { 966 WARN_ON(dev_priv->fbc.crtc == crtc); 967 goto out; 968 } 969 970 WARN_ON(dev_priv->fbc.active); 971 WARN_ON(dev_priv->fbc.crtc != NULL); 972 973 if (intel_vgpu_active(dev_priv->dev)) { 974 set_no_fbc_reason(dev_priv, "VGPU is active"); 975 goto out; 976 } 977 978 if (i915.enable_fbc < 0) { 979 set_no_fbc_reason(dev_priv, "disabled per chip default"); 980 goto out; 981 } 982 983 if (!i915.enable_fbc) { 984 set_no_fbc_reason(dev_priv, "disabled per module param"); 985 goto out; 986 } 987 988 if (!crtc_can_fbc(crtc)) { 989 set_no_fbc_reason(dev_priv, "no enabled pipes can have FBC"); 990 goto out; 991 } 992 993 if (intel_fbc_alloc_cfb(crtc)) { 994 set_no_fbc_reason(dev_priv, "not enough stolen memory"); 995 goto out; 996 } 997 998 DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe)); 999 dev_priv->fbc.no_fbc_reason = "FBC enabled but not active yet\n"; 1000 1001 dev_priv->fbc.enabled = true; 1002 dev_priv->fbc.crtc = crtc; 1003 out: 1004 mutex_unlock(&dev_priv->fbc.lock); 1005 } 1006 1007 /** 1008 * __intel_fbc_disable - disable FBC 1009 * @dev_priv: i915 device instance 1010 * 1011 * This is the low level function that actually disables FBC. Callers should 1012 * grab the FBC lock. 
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc = dev_priv->fbc.crtc;

	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
	WARN_ON(!dev_priv->fbc.enabled);
	WARN_ON(dev_priv->fbc.active);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));

	__intel_fbc_cleanup_cfb(dev_priv);

	dev_priv->fbc.enabled = false;
	dev_priv->fbc.crtc = NULL;
}

/**
 * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
void intel_fbc_disable_crtc(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&dev_priv->fbc.lock);
	if (dev_priv->fbc.crtc == crtc) {
		WARN_ON(!dev_priv->fbc.enabled);
		WARN_ON(dev_priv->fbc.active);
		__intel_fbc_disable(dev_priv);
	}
	mutex_unlock(&dev_priv->fbc.lock);
}

/**
 * intel_fbc_disable - globally disable FBC
 * @dev_priv: i915 device instance
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
void intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&dev_priv->fbc.lock);
	if (dev_priv->fbc.enabled)
		__intel_fbc_disable(dev_priv);
	mutex_unlock(&dev_priv->fbc.lock);
}

/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	enum i915_pipe pipe;

	INIT_WORK(&dev_priv->fbc.work.work, intel_fbc_work_fn);
	lockinit(&dev_priv->fbc.lock, "i915fl", 0, LK_CANRECURSE);
	dev_priv->fbc.enabled = false;
	dev_priv->fbc.active = false;
	dev_priv->fbc.work.scheduled = false;

	if (!HAS_FBC(dev_priv)) {
		dev_priv->fbc.no_fbc_reason = "unsupported by this chipset";
		return;
	}

	/* Collect the frontbuffer bits that can affect FBC; stop at pipe A
	 * when that's the only pipe the hardware can compress. */
	for_each_pipe(dev_priv, pipe) {
		dev_priv->fbc.possible_framebuffer_bits |=
				INTEL_FRONTBUFFER_PRIMARY(pipe);

		if (fbc_on_pipe_a_only(dev_priv))
			break;
	}

	/* Install the per-generation vfuncs. Note gen7+ reuses the ILK
	 * is_active/deactivate hooks with a different activate hook. */
	if (INTEL_INFO(dev_priv)->gen >= 7) {
		dev_priv->fbc.is_active = ilk_fbc_is_active;
		dev_priv->fbc.activate = gen7_fbc_activate;
		dev_priv->fbc.deactivate = ilk_fbc_deactivate;
	} else if (INTEL_INFO(dev_priv)->gen >= 5) {
		dev_priv->fbc.is_active = ilk_fbc_is_active;
		dev_priv->fbc.activate = ilk_fbc_activate;
		dev_priv->fbc.deactivate = ilk_fbc_deactivate;
	} else if (IS_GM45(dev_priv)) {
		dev_priv->fbc.is_active = g4x_fbc_is_active;
		dev_priv->fbc.activate = g4x_fbc_activate;
		dev_priv->fbc.deactivate = g4x_fbc_deactivate;
	} else {
		dev_priv->fbc.is_active = i8xx_fbc_is_active;
		dev_priv->fbc.activate = i8xx_fbc_activate;
		dev_priv->fbc.deactivate = i8xx_fbc_deactivate;

		/* This value was pulled out of someone's hat */
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
	}

	/* We still don't have any sort of hardware state readout for FBC, so
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state. */
	if (dev_priv->fbc.is_active(dev_priv))
		dev_priv->fbc.deactivate(dev_priv);
}