/*	$NetBSD: radeon_object.c,v 1.5 2018/08/27 07:49:47 riastradh Exp $	*/

/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_object.c,v 1.5 2018/08/27 07:49:47 riastradh Exp $");

#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <drm/drm_cache.h>
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */
static void radeon_update_memory_usage(struct radeon_bo *bo,
                                       unsigned mem_type, int sign)
{
        struct radeon_device *rdev = bo->rdev;
        u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

        switch (mem_type) {
        case TTM_PL_TT:
                if (sign > 0)
                        atomic64_add(size, &rdev->gtt_usage);
                else
                        atomic64_sub(size, &rdev->gtt_usage);
                break;
        case TTM_PL_VRAM:
                if (sign > 0)
                        atomic64_add(size, &rdev->vram_usage);
                else
                        atomic64_sub(size, &rdev->vram_usage);
                break;
        }
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct radeon_bo *bo;

        bo = container_of(tbo, struct radeon_bo, tbo);

        radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

        mutex_lock(&bo->rdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&bo->rdev->gem.mutex);
        radeon_bo_clear_surface_reg(bo);
        WARN_ON(!list_empty(&bo->va));
        drm_gem_object_release(&bo->gem_base);
        kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &radeon_ttm_bo_destroy)
                return true;
        return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
        u32 c = 0, i;

        rbo->placement.placement = rbo->placements;
        rbo->placement.busy_placement = rbo->placements;
        if (domain & RADEON_GEM_DOMAIN_VRAM) {
                /* Try placing BOs which don't need CPU access outside of the
                 * CPU accessible part of VRAM
                 */
                if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
                    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
                        rbo->placements[c].fpfn =
                                rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                        rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                                     TTM_PL_FLAG_UNCACHED |
                                                     TTM_PL_FLAG_VRAM;
                }

                rbo->placements[c].fpfn = 0;
                rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                             TTM_PL_FLAG_UNCACHED |
                                             TTM_PL_FLAG_VRAM;
        }

        if (domain & RADEON_GEM_DOMAIN_GTT) {
                if (rbo->flags & RADEON_GEM_GTT_UC) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_TT;

                } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
                           (rbo->rdev->flags & RADEON_IS_AGP)) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_TT;
                } else {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
                                                     TTM_PL_FLAG_TT;
                }
        }

        if (domain & RADEON_GEM_DOMAIN_CPU) {
                if (rbo->flags & RADEON_GEM_GTT_UC) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_SYSTEM;

                } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
                    rbo->rdev->flags & RADEON_IS_AGP) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_SYSTEM;
                } else {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
                                                     TTM_PL_FLAG_SYSTEM;
                }
        }
        if (!c) {
                rbo->placements[c].fpfn = 0;
                rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
                                             TTM_PL_FLAG_SYSTEM;
        }

        rbo->placement.num_placement = c;
        rbo->placement.num_busy_placement = c;

        for (i = 0; i < c; ++i) {
                if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
                    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    !rbo->placements[i].fpfn)
                        rbo->placements[i].lpfn =
                                rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                else
                        rbo->placements[i].lpfn = 0;
        }
}

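/**
 * radeon_bo_create - allocate and initialize a radeon buffer object
 *
 * @rdev: radeon device the BO belongs to
 * @size: requested size in bytes; rounded up to page granularity below
 * @byte_align: required alignment in bytes
 * @kernel: true for kernel-internal BOs (allocated as ttm_bo_type_kernel)
 * @domain: initial RADEON_GEM_DOMAIN_* placement mask
 * @flags: RADEON_GEM_* creation flags (GTT caching hints etc.)
 * @sg: scatter/gather table for imported (prime) buffers, or NULL
 * @resv: reservation object to share, or NULL to allocate a private one
 * @bo_ptr: where the new BO is stored on success
 *
 * Returns 0 on success or a negative error code.  A minimal call sketch
 * (error handling elided):
 *
 *	struct radeon_bo *bo;
 *	r = radeon_bo_create(rdev, 4096, PAGE_SIZE, true,
 *			     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, &bo);
 */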
int radeon_bo_create(struct radeon_device *rdev,
                     unsigned long size, int byte_align, bool kernel,
                     u32 domain, u32 flags, struct sg_table *sg,
                     struct reservation_object *resv,
                     struct radeon_bo **bo_ptr)
{
        struct radeon_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size_t acc_size;
        int r;

#ifdef __NetBSD__               /* XXX ALIGN means something else.  */
        size = round_up(size, PAGE_SIZE);
#else
        size = ALIGN(size, PAGE_SIZE);
#endif

        if (kernel) {
                type = ttm_bo_type_kernel;
        } else if (sg) {
                type = ttm_bo_type_sg;
        } else {
                type = ttm_bo_type_device;
        }
        *bo_ptr = NULL;

        acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
                                       sizeof(struct radeon_bo));

        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->rdev = rdev;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
        INIT_LIST_HEAD(&bo->va);
        bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
                                       RADEON_GEM_DOMAIN_GTT |
                                       RADEON_GEM_DOMAIN_CPU);

        bo->flags = flags;
        /* PCI GART is always snooped */
        if (!(rdev->flags & RADEON_IS_PCIE))
                bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

        /* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
         * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
         */
        if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
                bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
        /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
         * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
         */
        bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
        /* Don't try to enable write-combining when it can't work, or things
         * may be slow
         * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
         */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
         thanks to write-combining
#endif

        if (bo->flags & RADEON_GEM_GTT_WC)
                DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
                              "better performance thanks to write-combining\n");
        bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#else
        /* For architectures that don't support WC memory,
         * mask out the WC flag from the BO
         */
        if (!drm_arch_can_wc_memory())
                bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocations are uninterruptible */
        down_read(&rdev->pm.mclk_lock);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, page_align, !kernel, NULL,
                        acc_size, sg, resv, &radeon_ttm_bo_destroy);
        up_read(&rdev->pm.mclk_lock);
        if (unlikely(r != 0)) {
                return r;
        }
        *bo_ptr = bo;

        trace_radeon_bo_create(bo);

        return 0;
}

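/**
 * radeon_bo_kmap - map a buffer object into kernel address space
 *
 * @bo: buffer object to map; the caller must hold its reservation
 * @ptr: optional return of the kernel virtual address
 *
 * The mapping is cached in bo->kptr, so repeated calls are cheap; it is
 * released again with radeon_bo_kunmap().  A minimal usage sketch:
 *
 *	if (radeon_bo_reserve(bo, false) == 0) {
 *		void *cpu_ptr;
 *		if (radeon_bo_kmap(bo, &cpu_ptr) == 0) {
 *			// ... access the buffer through cpu_ptr ...
 *			radeon_bo_kunmap(bo);
 *		}
 *		radeon_bo_unreserve(bo);
 *	}
 */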
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
        bool is_iomem;
        int r;

        if (bo->kptr) {
                if (ptr) {
                        *ptr = bo->kptr;
                }
                return 0;
        }
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r) {
                return r;
        }
        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr) {
                *ptr = bo->kptr;
        }
        radeon_bo_check_tiling(bo, 0, 0);
        return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        radeon_bo_check_tiling(bo, 0, 0);
        ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
        if (bo == NULL)
                return NULL;

        ttm_bo_reference(&bo->tbo);
        return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
        struct ttm_buffer_object *tbo;
        struct radeon_device *rdev __unused;

        if ((*bo) == NULL)
                return;
        rdev = (*bo)->rdev;
        tbo = &((*bo)->tbo);
        ttm_bo_unref(&tbo);
        if (tbo == NULL)
                *bo = NULL;
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
                             u64 *gpu_addr)
{
        int r, i;

        if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
                return -EPERM;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = radeon_bo_gpu_offset(bo);

                if (max_offset != 0) {
                        u64 domain_start;

                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain_start = bo->rdev->mc.vram_start;
                        else
                                domain_start = bo->rdev->mc.gtt_start;
                        WARN_ON_ONCE(max_offset <
                                     (radeon_bo_gpu_offset(bo) - domain_start));
                }

                return 0;
        }
        radeon_ttm_placement_from_domain(bo, domain);
        for (i = 0; i < bo->placement.num_placement; i++) {
                /* force to pin into visible video ram */
                if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
                    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
                        bo->placements[i].lpfn =
                                bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                else
                        bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }

        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = radeon_bo_gpu_offset(bo);
                if (domain == RADEON_GEM_DOMAIN_VRAM)
                        bo->rdev->vram_pin_size += radeon_bo_size(bo);
                else
                        bo->rdev->gart_pin_size += radeon_bo_size(bo);
        } else {
                dev_err(bo->rdev->dev, "%p pin failed\n", bo);
        }
        return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
        return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

int radeon_bo_unpin(struct radeon_bo *bo)
{
        int r, i;

        if (!bo->pin_count) {
                dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++) {
                bo->placements[i].lpfn = 0;
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        }
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                        bo->rdev->vram_pin_size -= radeon_bo_size(bo);
                else
                        bo->rdev->gart_pin_size -= radeon_bo_size(bo);
        } else {
                dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        }
        return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
        if (0 && (rdev->flags & RADEON_IS_IGP)) {
                if (rdev->mc.igp_sideport_enabled == false)
                        /* Useless to evict on IGP chips */
                        return 0;
        }
        return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

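/*
 * radeon_bo_force_delete - clean up GEM objects that userspace leaked
 *
 * Called on driver teardown; any object still on rdev->gem.objects at this
 * point indicates a leak, so report it and drop the last reference here.
 */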
void radeon_bo_force_delete(struct radeon_device *rdev)
{
        struct radeon_bo *bo, *n;

        if (list_empty(&rdev->gem.objects)) {
                return;
        }
        dev_err(rdev->dev, "Userspace still has active objects !\n");
        list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
                dev_err(rdev->dev, "%p %p %lu %lu force free\n",
                        &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
                        *((unsigned long *)&bo->gem_base.refcount));
                mutex_lock(&bo->rdev->gem.mutex);
                list_del_init(&bo->list);
                mutex_unlock(&bo->rdev->gem.mutex);
                /* this should unref the ttm bo */
                drm_gem_object_unreference_unlocked(&bo->gem_base);
        }
}

int radeon_bo_init(struct radeon_device *rdev)
{
        /* Add an MTRR for the VRAM */
        if (!rdev->fastfb_working) {
                rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
                                                      rdev->mc.aper_size);
        }
#ifdef __NetBSD__
        if (rdev->mc.aper_base)
                pmap_pv_track(rdev->mc.aper_base, rdev->mc.aper_size);
#endif
        DRM_INFO("Detected VRAM RAM=%"PRIx64"M, BAR=%lluM\n",
                rdev->mc.mc_vram_size >> 20,
                (unsigned long long)rdev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %cDR\n",
                        rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
        return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
        radeon_ttm_fini(rdev);
#ifdef __NetBSD__
        if (rdev->mc.aper_base)
                pmap_pv_untrack(rdev->mc.aper_base, rdev->mc.aper_size);
#endif
        arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
        u64 real_vram_size = rdev->mc.real_vram_size;
        u64 vram_usage = atomic64_read(&rdev->vram_usage);

        /* This function is based on the current VRAM usage.
         *
         * - If all of VRAM is free, allow relocating the number of bytes that
         *   is equal to 1/4 of the size of VRAM for this IB.
         *
         * - If more than one half of VRAM is occupied, only allow relocating
         *   1 MB of data for this IB.
         *
         * - From 0 to one half of used VRAM, the threshold decreases
         *   linearly.
         *        __________________
         * 1/4 of -|\               |
         * VRAM    | \              |
         *         |  \             |
         *         |   \            |
         *         |    \           |
         *         |     \          |
         *         |      \         |
         *         |       \________|1 MB
         *         |----------------|
         *    VRAM 0 %             100 %
         *         used            used
         *
         * Note: It's a threshold, not a limit. The threshold must be crossed
         * for buffer relocations to stop, so any buffer of an arbitrary size
         * can be moved as long as the threshold isn't crossed before
         * the relocation takes place. We don't want to disable buffer
         * relocations completely.
         *
         * The idea is that buffers should be placed in VRAM at creation time
         * and TTM should only do a minimum number of relocations during
         * command submission. In practice, you need to submit at least
         * a dozen IBs to move all buffers to VRAM if they are in GTT.
         *
         * Also, things can get pretty crazy under memory pressure and actual
         * VRAM usage can change a lot, so playing safe even at 50% does
         * consistently increase performance.
         */

        u64 half_vram = real_vram_size >> 1;
        u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
        u64 bytes_moved_threshold = half_free_vram >> 1;
        return max(bytes_moved_threshold, 1024*1024ull);
}

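/**
 * radeon_bo_list_validate - reserve and place all BOs of a command submission
 *
 * @rdev: radeon device
 * @ticket: ww_acquire context used for deadlock-free reservation
 * @head: list of radeon_bo_list entries to validate
 * @ring: ring index, used for the UVD placement quirk
 *
 * Reserves every buffer on the list, then validates each unpinned buffer
 * into its preferred domain, falling back to the allowed domains on error.
 * Moves are throttled by the threshold computed above so that a single
 * submission cannot relocate an unbounded amount of memory.
 */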
int radeon_bo_list_validate(struct radeon_device *rdev,
                            struct ww_acquire_ctx *ticket,
                            struct list_head *head, int ring)
{
        struct radeon_bo_list *lobj;
        struct list_head duplicates;
        int r;
        u64 bytes_moved = 0, initial_bytes_moved;
        u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

        INIT_LIST_HEAD(&duplicates);
        r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
        if (unlikely(r != 0)) {
                return r;
        }

        list_for_each_entry(lobj, head, tv.head) {
                struct radeon_bo *bo = lobj->robj;
                if (!bo->pin_count) {
                        u32 domain = lobj->prefered_domains;
                        u32 allowed = lobj->allowed_domains;
                        u32 current_domain =
                                radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

                        /* Check if this buffer will be moved and don't move it
                         * if we have moved too many buffers for this IB already.
                         *
                         * Note that this allows moving at least one buffer of
                         * any size, because it doesn't take the current "bo"
                         * into account. We don't want to disallow buffer moves
                         * completely.
                         */
                        if ((allowed & current_domain) != 0 &&
                            (domain & current_domain) == 0 && /* will be moved */
                            bytes_moved > bytes_moved_threshold) {
                                /* don't move it */
                                domain = current_domain;
                        }

                retry:
                        radeon_ttm_placement_from_domain(bo, domain);
                        if (ring == R600_RING_TYPE_UVD_INDEX)
                                radeon_uvd_force_into_uvd_segment(bo, allowed);

                        initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
                        bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
                                       initial_bytes_moved;

                        if (unlikely(r)) {
                                if (r != -ERESTARTSYS &&
                                    domain != lobj->allowed_domains) {
                                        domain = lobj->allowed_domains;
                                        goto retry;
                                }
                                ttm_eu_backoff_reservation(ticket, head);
                                return r;
                        }
                }
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
        }

        list_for_each_entry(lobj, &duplicates, tv.head) {
                lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
                lobj->tiling_flags = lobj->robj->tiling_flags;
        }

        return 0;
}

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;
        struct radeon_bo *old_object;
        int steal;
        int i;

        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (!bo->tiling_flags)
                return 0;

        if (bo->surface_reg >= 0) {
                reg = &rdev->surface_regs[bo->surface_reg];
                i = bo->surface_reg;
                goto out;
        }

        steal = -1;
        for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

                reg = &rdev->surface_regs[i];
                if (!reg->bo)
                        break;

                old_object = reg->bo;
                if (old_object->pin_count == 0)
                        steal = i;
        }

        /* if we are all out */
        if (i == RADEON_GEM_MAX_SURFACES) {
                if (steal == -1)
                        return -ENOMEM;
                /* find someone with a surface reg and nuke their BO */
                reg = &rdev->surface_regs[steal];
                old_object = reg->bo;
                /* blow away the mapping */
                DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
                ttm_bo_unmap_virtual(&old_object->tbo);
                old_object->surface_reg = -1;
                i = steal;
        }

        bo->surface_reg = i;
        reg->bo = bo;

out:
        radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
                               bo->tbo.mem.start << PAGE_SHIFT,
                               bo->tbo.num_pages << PAGE_SHIFT);
        return 0;
}

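/*
 * radeon_bo_clear_surface_reg - release the surface register assigned to
 * the BO, if any, and mark the BO as owning none.
 */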
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;

        if (bo->surface_reg == -1)
                return;

        reg = &rdev->surface_regs[bo->surface_reg];
        radeon_clear_surface_reg(rdev, bo->surface_reg);

        reg->bo = NULL;
        bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
                                uint32_t tiling_flags, uint32_t pitch)
{
        struct radeon_device *rdev = bo->rdev;
        int r;

        if (rdev->family >= CHIP_CEDAR) {
                unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

                bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
                bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
                mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
                tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
                stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
                switch (bankw) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (bankh) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (mtaspect) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                if (tilesplit > 6) {
                        return -EINVAL;
                }
                if (stilesplit > 6) {
                        return -EINVAL;
                }
        }
        r = radeon_bo_reserve(bo, false);
        if (unlikely(r != 0))
                return r;
        bo->tiling_flags = tiling_flags;
        bo->pitch = pitch;
        radeon_bo_unreserve(bo);
        return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                                uint32_t *tiling_flags,
                                uint32_t *pitch)
{
        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
        if (pitch)
                *pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                                bool force_drop)
{
        if (!force_drop)
                lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
                return 0;

        if (force_drop) {
                radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
                if (!has_moved)
                        return 0;

                if (bo->surface_reg >= 0)
                        radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if ((bo->surface_reg >= 0) && !has_moved)
                return 0;

        return radeon_bo_get_surface_reg(bo);
}

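/*
 * TTM move callback: drop the surface register (tiling state is only valid
 * while the BO is in VRAM), invalidate VM mappings pointing at the old
 * location, and update the per-domain memory usage counters.
 */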
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *new_mem)
{
        struct radeon_bo *rbo;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return;

        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 1);
        radeon_vm_bo_invalidate(rbo->rdev, rbo);

        /* update statistics */
        if (!new_mem)
                return;

        radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
        radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
        unsigned long offset, size, lpfn;
        int i, r;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return 0;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
        rdev = rbo->rdev;
        if (bo->mem.mem_type != TTM_PL_VRAM)
                return 0;

        size = bo->mem.num_pages << PAGE_SHIFT;
        offset = bo->mem.start << PAGE_SHIFT;
        if ((offset + size) <= rdev->mc.visible_vram_size)
                return 0;

        /* hurrah the memory is not visible ! */
        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
        lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
        for (i = 0; i < rbo->placement.num_placement; i++) {
                /* Force into visible VRAM */
                if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
                        rbo->placements[i].lpfn = lpfn;
        }
        r = ttm_bo_validate(bo, &rbo->placement, false, false);
        if (unlikely(r == -ENOMEM)) {
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
                return ttm_bo_validate(bo, &rbo->placement, false, false);
        } else if (unlikely(r != 0)) {
                return r;
        }

        offset = bo->mem.start << PAGE_SHIFT;
        /* this should never happen */
        if ((offset + size) > rdev->mc.visible_vram_size)
                return -EINVAL;

        return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
        if (unlikely(r != 0))
                return r;
        if (mem_type)
                *mem_type = bo->tbo.mem.mem_type;

        r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
        ttm_bo_unreserve(&bo->tbo);
        return r;
}

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
                     bool shared)
{
        struct reservation_object *resv = bo->tbo.resv;

        if (shared)
                reservation_object_add_shared_fence(resv, &fence->base);
        else
                reservation_object_add_excl_fence(resv, &fence->base);
}