/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <drm/drm_cache.h>
#include "radeon.h"
#include "radeon_trace.h"


static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON_ONCE(!list_empty(&bo->va));
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		/* Try placing BOs which don't need CPU access outside of the
		 * CPU accessible part of VRAM
		 */
		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
			rbo->placements[c].fpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_VRAM;
		}

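		/* The unrestricted placement below also covers the CPU
		 * visible part of VRAM and serves as the fallback.
		 */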
		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;
	}

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_TT;
		}
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;
	}

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !rbo->placements[i].fpfn)
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}
}

int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, u32 flags, struct sg_table *sg,
		     struct reservation_object *resv,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
	 */
	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

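	/* The #if ladder below masks out write-combining on configurations
	 * where it is known to be broken or cannot work at all.
	 */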
#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & RADEON_GEM_GTT_WC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
		/* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}

	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

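	/* ttm_bo_validate() performs the actual move; TTM_PL_FLAG_NO_EVICT
	 * set above keeps the BO resident once it has been placed.
	 */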
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* reserve PAT memory space to WC for VRAM */
#if 0
	arch_io_reserve_memtype_wc(rdev->mc.aper_base,
				   rdev->mc.aper_size);
#endif

	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
#if 0
	arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
#endif
}

/* Returns how many bytes TTM can move per IB.
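 * The result is used by radeon_bo_list_validate() below to throttle how
 * many bytes are moved during a single command submission.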
478 */ 479 static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev) 480 { 481 u64 real_vram_size = rdev->mc.real_vram_size; 482 u64 vram_usage = atomic64_read(&rdev->vram_usage); 483 484 /* This function is based on the current VRAM usage. 485 * 486 * - If all of VRAM is free, allow relocating the number of bytes that 487 * is equal to 1/4 of the size of VRAM for this IB. 488 489 * - If more than one half of VRAM is occupied, only allow relocating 490 * 1 MB of data for this IB. 491 * 492 * - From 0 to one half of used VRAM, the threshold decreases 493 * linearly. 494 * __________________ 495 * 1/4 of -|\ | 496 * VRAM | \ | 497 * | \ | 498 * | \ | 499 * | \ | 500 * | \ | 501 * | \ | 502 * | \________|1 MB 503 * |----------------| 504 * VRAM 0 % 100 % 505 * used used 506 * 507 * Note: It's a threshold, not a limit. The threshold must be crossed 508 * for buffer relocations to stop, so any buffer of an arbitrary size 509 * can be moved as long as the threshold isn't crossed before 510 * the relocation takes place. We don't want to disable buffer 511 * relocations completely. 512 * 513 * The idea is that buffers should be placed in VRAM at creation time 514 * and TTM should only do a minimum number of relocations during 515 * command submission. In practice, you need to submit at least 516 * a dozen IBs to move all buffers to VRAM if they are in GTT. 517 * 518 * Also, things can get pretty crazy under memory pressure and actual 519 * VRAM usage can change a lot, so playing safe even at 50% does 520 * consistently increase performance. 521 */ 522 523 u64 half_vram = real_vram_size >> 1; 524 u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage; 525 u64 bytes_moved_threshold = half_free_vram >> 1; 526 return max(bytes_moved_threshold, 1024*1024ull); 527 } 528 529 int radeon_bo_list_validate(struct radeon_device *rdev, 530 struct ww_acquire_ctx *ticket, 531 struct list_head *head, int ring) 532 { 533 struct radeon_bo_list *lobj; 534 struct list_head duplicates; 535 int r; 536 u64 bytes_moved = 0, initial_bytes_moved; 537 u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev); 538 539 INIT_LIST_HEAD(&duplicates); 540 r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates); 541 if (unlikely(r != 0)) { 542 return r; 543 } 544 545 list_for_each_entry(lobj, head, tv.head) { 546 struct radeon_bo *bo = lobj->robj; 547 if (!bo->pin_count) { 548 u32 domain = lobj->prefered_domains; 549 u32 allowed = lobj->allowed_domains; 550 u32 current_domain = 551 radeon_mem_type_to_domain(bo->tbo.mem.mem_type); 552 553 /* Check if this buffer will be moved and don't move it 554 * if we have moved too many buffers for this IB already. 555 * 556 * Note that this allows moving at least one buffer of 557 * any size, because it doesn't take the current "bo" 558 * into account. We don't want to disallow buffer moves 559 * completely. 
560 */ 561 if ((allowed & current_domain) != 0 && 562 (domain & current_domain) == 0 && /* will be moved */ 563 bytes_moved > bytes_moved_threshold) { 564 /* don't move it */ 565 domain = current_domain; 566 } 567 568 retry: 569 radeon_ttm_placement_from_domain(bo, domain); 570 if (ring == R600_RING_TYPE_UVD_INDEX) 571 radeon_uvd_force_into_uvd_segment(bo, allowed); 572 573 initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved); 574 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 575 bytes_moved += atomic64_read(&rdev->num_bytes_moved) - 576 initial_bytes_moved; 577 578 if (unlikely(r)) { 579 if (r != -ERESTARTSYS && 580 domain != lobj->allowed_domains) { 581 domain = lobj->allowed_domains; 582 goto retry; 583 } 584 ttm_eu_backoff_reservation(ticket, head); 585 return r; 586 } 587 } 588 lobj->gpu_offset = radeon_bo_gpu_offset(bo); 589 lobj->tiling_flags = bo->tiling_flags; 590 } 591 592 list_for_each_entry(lobj, &duplicates, tv.head) { 593 lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj); 594 lobj->tiling_flags = lobj->robj->tiling_flags; 595 } 596 597 return 0; 598 } 599 600 int radeon_bo_get_surface_reg(struct radeon_bo *bo) 601 { 602 struct radeon_device *rdev = bo->rdev; 603 struct radeon_surface_reg *reg; 604 struct radeon_bo *old_object; 605 int steal; 606 int i; 607 608 lockdep_assert_held(&bo->tbo.resv->lock.base); 609 610 if (!bo->tiling_flags) 611 return 0; 612 613 if (bo->surface_reg >= 0) { 614 reg = &rdev->surface_regs[bo->surface_reg]; 615 i = bo->surface_reg; 616 goto out; 617 } 618 619 steal = -1; 620 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { 621 622 reg = &rdev->surface_regs[i]; 623 if (!reg->bo) 624 break; 625 626 old_object = reg->bo; 627 if (old_object->pin_count == 0) 628 steal = i; 629 } 630 631 /* if we are all out */ 632 if (i == RADEON_GEM_MAX_SURFACES) { 633 if (steal == -1) 634 return -ENOMEM; 635 /* find someone with a surface reg and nuke their BO */ 636 reg = &rdev->surface_regs[steal]; 637 old_object = reg->bo; 638 /* blow away the mapping */ 639 DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); 640 ttm_bo_unmap_virtual(&old_object->tbo); 641 old_object->surface_reg = -1; 642 i = steal; 643 } 644 645 bo->surface_reg = i; 646 reg->bo = bo; 647 648 out: 649 radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch, 650 bo->tbo.mem.start << PAGE_SHIFT, 651 bo->tbo.num_pages << PAGE_SHIFT); 652 return 0; 653 } 654 655 static void radeon_bo_clear_surface_reg(struct radeon_bo *bo) 656 { 657 struct radeon_device *rdev = bo->rdev; 658 struct radeon_surface_reg *reg; 659 660 if (bo->surface_reg == -1) 661 return; 662 663 reg = &rdev->surface_regs[bo->surface_reg]; 664 radeon_clear_surface_reg(rdev, bo->surface_reg); 665 666 reg->bo = NULL; 667 bo->surface_reg = -1; 668 } 669 670 int radeon_bo_set_tiling_flags(struct radeon_bo *bo, 671 uint32_t tiling_flags, uint32_t pitch) 672 { 673 struct radeon_device *rdev = bo->rdev; 674 int r; 675 676 if (rdev->family >= CHIP_CEDAR) { 677 unsigned bankw, bankh, mtaspect, tilesplit, stilesplit; 678 679 bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK; 680 bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK; 681 mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK; 682 tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK; 683 stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & 
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (rbo->pin_count > 0)
		return -EINVAL;

	/* hurrah, the memory is not visible! */
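	/* Restrict the VRAM placements to the CPU visible aperture and
	 * revalidate; if VRAM is full, fall back to GTT below.
	 */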
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < rbo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
			rbo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &rbo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
	if (unlikely(r != 0))
		return r;
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;

	r = ttm_bo_wait(&bo->tbo, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, &fence->base);
	else
		reservation_object_add_excl_fence(resv, &fence->base);
}