/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_object.c 254885 2013-08-25 19:37:15Z dumbbell $
 */
#include <drm/drmP.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon.h"
#ifdef DUMBBELL_WIP
#include "radeon_trace.h"
#endif /* DUMBBELL_WIP */
#include <linux/io.h>


static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */
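/*
 * radeon_update_memory_usage - adjust the per-domain usage counters
 *
 * Adds (sign > 0) or subtracts (sign <= 0) the size of @bo to the device's
 * GTT or VRAM usage counter, depending on @mem_type.
 */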
static void radeon_update_memory_usage(struct radeon_bo *bo,
                                       unsigned mem_type, int sign)
{
        struct radeon_device *rdev = bo->rdev;
        u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

        switch (mem_type) {
        case TTM_PL_TT:
                if (sign > 0)
                        atomic64_add(size, &rdev->gtt_usage);
                else
                        atomic64_sub(size, &rdev->gtt_usage);
                break;
        case TTM_PL_VRAM:
                if (sign > 0)
                        atomic64_add(size, &rdev->vram_usage);
                else
                        atomic64_sub(size, &rdev->vram_usage);
                break;
        }
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct radeon_bo *bo;

        bo = container_of(tbo, struct radeon_bo, tbo);

        radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

        spin_lock(&bo->rdev->gem.mutex);
        list_del_init(&bo->list);
        spin_unlock(&bo->rdev->gem.mutex);
        radeon_bo_clear_surface_reg(bo);
        WARN_ON(!list_empty(&bo->va));
        drm_gem_object_release(&bo->gem_base);
        kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &radeon_ttm_bo_destroy)
                return true;
        return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
        u32 c = 0, i;

        rbo->placement.fpfn = 0;
        rbo->placement.lpfn = 0;
        rbo->placement.placement = rbo->placements;
        rbo->placement.busy_placement = rbo->placements;
        if (domain & RADEON_GEM_DOMAIN_VRAM)
                rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                                       TTM_PL_FLAG_VRAM;
        if (domain & RADEON_GEM_DOMAIN_GTT) {
                if (rbo->flags & RADEON_GEM_GTT_UC) {
                        rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
                } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
                           (rbo->rdev->flags & RADEON_IS_AGP)) {
                        rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                                               TTM_PL_FLAG_TT;
                } else {
                        rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
                }
        }
        if (domain & RADEON_GEM_DOMAIN_CPU) {
                if (rbo->flags & RADEON_GEM_GTT_UC) {
                        rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
                } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
                           (rbo->rdev->flags & RADEON_IS_AGP)) {
                        rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                                               TTM_PL_FLAG_SYSTEM;
                } else {
                        rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
                }
        }
        if (!c)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        rbo->placement.num_placement = c;
        rbo->placement.num_busy_placement = c;

        /*
         * Use two-ended allocation depending on the buffer size to
         * improve fragmentation quality.
         * 512kb was measured as the most optimal number.
         */
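        /*
         * TTM_PL_FLAG_TOPDOWN makes TTM allocate from the top of the
         * managed address range, so large buffers end up at one end of
         * the heap while small bottom-up allocations fill the other.
         */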
        if (rbo->tbo.mem.size > 512 * 1024) {
                for (i = 0; i < c; i++) {
                        rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN;
                }
        }
}

int radeon_bo_create(struct radeon_device *rdev,
                     unsigned long size, int byte_align, bool kernel, u32 domain,
                     u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
{
        struct radeon_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align = roundup2(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size_t acc_size;
        int r;

        size = ALIGN(size, PAGE_SIZE);

        if (kernel) {
                type = ttm_bo_type_kernel;
        } else if (sg) {
                type = ttm_bo_type_sg;
        } else {
                type = ttm_bo_type_device;
        }
        *bo_ptr = NULL;

        acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
                                       sizeof(struct radeon_bo));

        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->rdev = rdev;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
        INIT_LIST_HEAD(&bo->va);
        bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
                                       RADEON_GEM_DOMAIN_GTT |
                                       RADEON_GEM_DOMAIN_CPU);

        bo->flags = flags;
        /* PCI GART is always snooped */
        if (!(rdev->flags & RADEON_IS_PCIE))
                bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocations are uninterruptible */
        lockmgr(&rdev->pm.mclk_lock, LK_SHARED);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, page_align, !kernel, NULL,
                        acc_size, sg, &radeon_ttm_bo_destroy);
        lockmgr(&rdev->pm.mclk_lock, LK_RELEASE);
        if (unlikely(r != 0)) {
                return r;
        }
        *bo_ptr = bo;

#ifdef DUMBBELL_WIP
        trace_radeon_bo_create(bo);
#endif /* DUMBBELL_WIP */

        return 0;
}

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
        bool is_iomem;
        int r;

        if (bo->kptr) {
                if (ptr) {
                        *ptr = bo->kptr;
                }
                return 0;
        }
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r) {
                return r;
        }
        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr) {
                *ptr = bo->kptr;
        }
        radeon_bo_check_tiling(bo, 0, 0);
        return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        radeon_bo_check_tiling(bo, 0, 0);
        ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
        if (bo == NULL)
                return NULL;

        ttm_bo_reference(&bo->tbo);
        return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
        struct ttm_buffer_object *tbo;
        struct radeon_device *rdev;
        struct radeon_bo *rbo;

        if ((rbo = *bo) == NULL)
                return;
        *bo = NULL;
        rdev = rbo->rdev;
        tbo = &rbo->tbo;
        ttm_bo_unref(&tbo);
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
                             u64 *gpu_addr)
{
        int r, i;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = radeon_bo_gpu_offset(bo);

                if (max_offset != 0) {
                        u64 domain_start;

                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain_start = bo->rdev->mc.vram_start;
                        else
                                domain_start = bo->rdev->mc.gtt_start;
                        if (max_offset < (radeon_bo_gpu_offset(bo) - domain_start)) {
                                DRM_ERROR("radeon_bo_pin_restricted: "
"max_offset(%ju) < " 285 "(radeon_bo_gpu_offset(%ju) - " 286 "domain_start(%ju)", 287 (uintmax_t)max_offset, (uintmax_t)radeon_bo_gpu_offset(bo), 288 (uintmax_t)domain_start); 289 } 290 } 291 292 return 0; 293 } 294 radeon_ttm_placement_from_domain(bo, domain); 295 if (domain == RADEON_GEM_DOMAIN_VRAM) { 296 /* force to pin into visible video ram */ 297 bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; 298 } 299 if (max_offset) { 300 u64 lpfn = max_offset >> PAGE_SHIFT; 301 302 if (!bo->placement.lpfn) 303 bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT; 304 305 if (lpfn < bo->placement.lpfn) 306 bo->placement.lpfn = lpfn; 307 } 308 for (i = 0; i < bo->placement.num_placement; i++) 309 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 310 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 311 if (likely(r == 0)) { 312 bo->pin_count = 1; 313 if (gpu_addr != NULL) 314 *gpu_addr = radeon_bo_gpu_offset(bo); 315 if (domain == RADEON_GEM_DOMAIN_VRAM) 316 bo->rdev->vram_pin_size += radeon_bo_size(bo); 317 else 318 bo->rdev->gart_pin_size += radeon_bo_size(bo); 319 } else { 320 dev_err(bo->rdev->dev, "%p pin failed\n", bo); 321 } 322 return r; 323 } 324 325 int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) 326 { 327 return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr); 328 } 329 330 int radeon_bo_unpin(struct radeon_bo *bo) 331 { 332 int r, i; 333 334 if (!bo->pin_count) { 335 dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo); 336 return 0; 337 } 338 bo->pin_count--; 339 if (bo->pin_count) 340 return 0; 341 for (i = 0; i < bo->placement.num_placement; i++) 342 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 343 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 344 if (likely(r == 0)) { 345 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) 346 bo->rdev->vram_pin_size -= radeon_bo_size(bo); 347 else 348 bo->rdev->gart_pin_size -= radeon_bo_size(bo); 349 } else { 350 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); 351 } 352 return r; 353 } 354 355 int radeon_bo_evict_vram(struct radeon_device *rdev) 356 { 357 /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ 358 if (0 && (rdev->flags & RADEON_IS_IGP)) { 359 if (rdev->mc.igp_sideport_enabled == false) 360 /* Useless to evict on IGP chips */ 361 return 0; 362 } 363 return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); 364 } 365 366 void radeon_bo_force_delete(struct radeon_device *rdev) 367 { 368 struct radeon_bo *bo, *n; 369 370 if (list_empty(&rdev->gem.objects)) { 371 return; 372 } 373 dev_err(rdev->dev, "Userspace still has active objects !\n"); 374 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { 375 dev_err(rdev->dev, "%p %p %lu %lu force free\n", 376 &bo->gem_base, bo, (unsigned long)bo->gem_base.size, 377 *((unsigned long *)&bo->gem_base.refcount)); 378 spin_lock(&bo->rdev->gem.mutex); 379 list_del_init(&bo->list); 380 spin_unlock(&bo->rdev->gem.mutex); 381 /* this should unref the ttm bo */ 382 drm_gem_object_unreference(&bo->gem_base); 383 } 384 } 385 386 int radeon_bo_init(struct radeon_device *rdev) 387 { 388 /* Add an MTRR for the VRAM */ 389 if (!rdev->fastfb_working) { 390 rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base, 391 rdev->mc.aper_size); 392 } 393 DRM_INFO("Detected VRAM RAM=%juM, BAR=%juM\n", 394 rdev->mc.mc_vram_size >> 20, 395 (uintmax_t)rdev->mc.aper_size >> 20); 396 DRM_INFO("RAM width %dbits %cDR\n", 397 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 
                 'D' : 'S');
        return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
        radeon_ttm_fini(rdev);
        arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
        u64 real_vram_size = rdev->mc.real_vram_size;
        u64 vram_usage = atomic64_read(&rdev->vram_usage);

        /* This function is based on the current VRAM usage.
         *
         * - If all of VRAM is free, allow relocating the number of bytes that
         *   is equal to 1/4 of the size of VRAM for this IB.
         *
         * - If more than one half of VRAM is occupied, only allow relocating
         *   1 MB of data for this IB.
         *
         * - From 0 to one half of used VRAM, the threshold decreases
         *   linearly.
         *          __________________
         * 1/4 of  -|\               |
         * VRAM     | \              |
         *          |  \             |
         *          |   \            |
         *          |    \           |
         *          |     \          |
         *          |      \         |
         *          |       \________|1 MB
         *          |----------------|
         *     VRAM 0 %            100 %
         *          used           used
         *
         * Note: It's a threshold, not a limit. The threshold must be crossed
         * for buffer relocations to stop, so any buffer of an arbitrary size
         * can be moved as long as the threshold isn't crossed before
         * the relocation takes place. We don't want to disable buffer
         * relocations completely.
         *
         * The idea is that buffers should be placed in VRAM at creation time
         * and TTM should only do a minimum number of relocations during
         * command submission. In practice, you need to submit at least
         * a dozen IBs to move all buffers to VRAM if they are in GTT.
         *
         * Also, things can get pretty crazy under memory pressure and actual
         * VRAM usage can change a lot, so playing safe even at 50% does
         * consistently increase performance.
         */

        u64 half_vram = real_vram_size >> 1;
        u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
        u64 bytes_moved_threshold = half_free_vram >> 1;
        return max(bytes_moved_threshold, 1024*1024ull);
}

int radeon_bo_list_validate(struct radeon_device *rdev,
                            struct ww_acquire_ctx *ticket,
                            struct list_head *head, int ring)
{
        struct radeon_cs_reloc *lobj;
        struct radeon_bo *bo;
        int r;
        u64 bytes_moved = 0, initial_bytes_moved;
        u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

        r = ttm_eu_reserve_buffers(ticket, head);
        if (unlikely(r != 0)) {
                return r;
        }

        list_for_each_entry(lobj, head, tv.head) {
                bo = lobj->robj;
                if (!bo->pin_count) {
                        u32 domain = lobj->prefered_domains;
                        u32 current_domain =
                                radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

                        /* Check if this buffer will be moved and don't move it
                         * if we have moved too many buffers for this IB already.
                         *
                         * Note that this allows moving at least one buffer of
                         * any size, because it doesn't take the current "bo"
                         * into account. We don't want to disallow buffer moves
                         * completely.
                         */
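                        /* Example (illustrative): with 2 GiB of VRAM and
                         * 512 MiB already in use, the threshold computed
                         * above is (1 GiB - 512 MiB) / 2 = 256 MiB, so
                         * buffers keep being moved toward their preferred
                         * domain until this submission has already moved
                         * 256 MiB.
                         */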
                        if ((lobj->allowed_domains & current_domain) != 0 &&
                            (domain & current_domain) == 0 && /* will be moved */
                            bytes_moved > bytes_moved_threshold) {
                                /* don't move it */
                                domain = current_domain;
                        }

                retry:
                        radeon_ttm_placement_from_domain(bo, domain);
                        if (ring == R600_RING_TYPE_UVD_INDEX)
                                radeon_uvd_force_into_uvd_segment(bo);

                        initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
                        bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
                                       initial_bytes_moved;

                        if (unlikely(r)) {
                                if (r != -ERESTARTSYS &&
                                    domain != lobj->allowed_domains) {
                                        domain = lobj->allowed_domains;
                                        goto retry;
                                }
                                return r;
                        }
                }
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
        }
        return 0;
}

#ifdef DUMBBELL_WIP
int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
                         struct vm_area_struct *vma)
{
        return ttm_fbdev_mmap(vma, &bo->tbo);
}
#endif /* DUMBBELL_WIP */

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;
        struct radeon_bo *old_object;
        int steal;
        int i;

        KASSERT(radeon_bo_is_reserved(bo),
            ("radeon_bo_get_surface_reg: radeon_bo is not reserved"));

        if (!bo->tiling_flags)
                return 0;

        if (bo->surface_reg >= 0) {
                reg = &rdev->surface_regs[bo->surface_reg];
                i = bo->surface_reg;
                goto out;
        }

        steal = -1;
        for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

                reg = &rdev->surface_regs[i];
                if (!reg->bo)
                        break;

                old_object = reg->bo;
                if (old_object->pin_count == 0)
                        steal = i;
        }

        /* if we are all out */
        if (i == RADEON_GEM_MAX_SURFACES) {
                if (steal == -1)
                        return -ENOMEM;
                /* find someone with a surface reg and nuke their BO */
                reg = &rdev->surface_regs[steal];
                old_object = reg->bo;
                /* blow away the mapping */
                DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
                ttm_bo_unmap_virtual(&old_object->tbo);
                old_object->surface_reg = -1;
                i = steal;
        }

        bo->surface_reg = i;
        reg->bo = bo;

out:
        radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
                               bo->tbo.mem.start << PAGE_SHIFT,
                               bo->tbo.num_pages << PAGE_SHIFT);
        return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;

        if (bo->surface_reg == -1)
                return;

        reg = &rdev->surface_regs[bo->surface_reg];
        radeon_clear_surface_reg(rdev, bo->surface_reg);

        reg->bo = NULL;
        bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
                               uint32_t tiling_flags, uint32_t pitch)
{
        struct radeon_device *rdev = bo->rdev;
        int r;

        if (rdev->family >= CHIP_CEDAR) {
                unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

                bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
                bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
                mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
                tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
                stilesplit = (tiling_flags >>
                              RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
                switch (bankw) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (bankh) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (mtaspect) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                if (tilesplit > 6) {
                        return -EINVAL;
                }
                if (stilesplit > 6) {
                        return -EINVAL;
                }
        }
        r = radeon_bo_reserve(bo, false);
        if (unlikely(r != 0))
                return r;
        bo->tiling_flags = tiling_flags;
        bo->pitch = pitch;
        radeon_bo_unreserve(bo);
        return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                                uint32_t *tiling_flags,
                                uint32_t *pitch)
{
        KASSERT(radeon_bo_is_reserved(bo),
            ("radeon_bo_get_tiling_flags: radeon_bo is not reserved"));
        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
        if (pitch)
                *pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                           bool force_drop)
{
        KASSERT((radeon_bo_is_reserved(bo) || force_drop),
            ("radeon_bo_check_tiling: radeon_bo is not reserved && !force_drop"));

        if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
                return 0;

        if (force_drop) {
                radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
                if (!has_moved)
                        return 0;

                if (bo->surface_reg >= 0)
                        radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if ((bo->surface_reg >= 0) && !has_moved)
                return 0;

        return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *new_mem)
{
        struct radeon_bo *rbo;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return;

        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 1);
        radeon_vm_bo_invalidate(rbo->rdev, rbo);

        /* update statistics */
        if (!new_mem)
                return;

        radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
        radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
        unsigned long offset, size;
        int r;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return 0;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
        rdev = rbo->rdev;
        if (bo->mem.mem_type != TTM_PL_VRAM)
                return 0;

        size = bo->mem.num_pages << PAGE_SHIFT;
        offset = bo->mem.start << PAGE_SHIFT;
        if ((offset + size) <= rdev->mc.visible_vram_size)
                return 0;

        /* hurrah the memory is not visible ! */
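        /* Try to squeeze the BO into the CPU-visible part of VRAM first;
         * if there is no room there, fall back to GTT below.
         */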
        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
        rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
        r = ttm_bo_validate(bo, &rbo->placement, false, false);
        if (unlikely(r == -ENOMEM)) {
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
                return ttm_bo_validate(bo, &rbo->placement, false, false);
        } else if (unlikely(r != 0)) {
                return r;
        }

        offset = bo->mem.start << PAGE_SHIFT;
        /* this should never happen */
        if ((offset + size) > rdev->mc.visible_vram_size)
                return -EINVAL;

        return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
        if (unlikely(r != 0))
                return r;
        lockmgr(&bo->tbo.bdev->fence_lock, LK_EXCLUSIVE);
        if (mem_type)
                *mem_type = bo->tbo.mem.mem_type;
        if (bo->tbo.sync_obj)
                r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
        lockmgr(&bo->tbo.bdev->fence_lock, LK_RELEASE);
        ttm_bo_unreserve(&bo->tbo);
        return r;
}


/**
 * radeon_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
                return r;
        }
        return 0;
}
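
/*
 * Illustrative usage (not part of the driver itself): a kernel-internal BO
 * is typically created, pinned and mapped with the helpers above, with the
 * caller adding error handling around each step:
 *
 *	struct radeon_bo *bo;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *
 *	radeon_bo_create(rdev, 4096, PAGE_SIZE, true,
 *			 RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
 *	radeon_bo_reserve(bo, false);
 *	radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	radeon_bo_kmap(bo, &cpu_ptr);
 *	radeon_bo_unreserve(bo);
 */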