/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_object.c 254885 2013-08-25 19:37:15Z dumbbell $
 */
#include <drm/drmP.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon.h"
#ifdef DUMBBELL_WIP
#include "radeon_trace.h"
#endif /* DUMBBELL_WIP */
#include <linux/io.h>


static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */
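
/**
 * radeon_update_memory_usage - track per-domain memory usage
 * @bo:		buffer object whose backing memory is being accounted
 * @mem_type:	TTM memory type the BO occupies (TTM_PL_TT or TTM_PL_VRAM)
 * @sign:	+1 when the BO enters the domain, -1 when it leaves it
 *
 * Adds or subtracts the BO size from the device-wide GTT/VRAM usage
 * counters, which feed the byte-move threshold used at CS time.
 */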
static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	spin_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	spin_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON(!list_empty(&bo->va));
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_TT;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_TT;
		}
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_SYSTEM;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c)
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		rbo->placements[i].fpfn = 0;
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM))
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}

	/*
	 * Use two-ended allocation depending on the buffer size to
	 * improve fragmentation quality. 512KB was measured as the
	 * optimal threshold. CPU-accessible VRAM placements keep
	 * bottom-up allocation.
	 */
	if (rbo->tbo.mem.size > 512 * 1024) {
		for (i = 0; i < c; i++) {
			if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
			    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM))
				continue;
			rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
		}
	}
}
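
/**
 * radeon_bo_create - allocate and initialize a radeon buffer object
 * @rdev:	radeon device the BO belongs to
 * @size:	requested size in bytes, rounded up to a page multiple
 * @byte_align:	requested alignment in bytes
 * @kernel:	true for kernel-internal BOs
 * @domain:	initial placement domain(s) (VRAM, GTT and/or CPU)
 * @flags:	RADEON_GEM_* caching/access flags
 * @sg:		optional scatter-gather table for imported buffers
 * @bo_ptr:	on success, receives the new radeon_bo
 *
 * Creates the backing GEM object, sets up the initial TTM placement and
 * hands the object to ttm_bo_init(). Returns 0 on success or a negative
 * error code.
 */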
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup2(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	lockmgr(&rdev->pm.mclk_lock, LK_SHARED);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	lockmgr(&rdev->pm.mclk_lock, LK_RELEASE);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

#ifdef DUMBBELL_WIP
	trace_radeon_bo_create(bo);
#endif /* DUMBBELL_WIP */

	return 0;
}

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;
	struct radeon_bo *rbo;

	if ((rbo = *bo) == NULL)
		return;
	*bo = NULL;
	rdev = rbo->rdev;
	tbo = &rbo->tbo;
	ttm_bo_unref(&tbo);
}
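
/**
 * radeon_bo_pin_restricted - pin a BO into a domain below an offset limit
 * @bo:		buffer object to pin
 * @domain:	domain to pin the BO into (VRAM or GTT)
 * @max_offset:	highest acceptable GPU offset, or 0 for no restriction
 * @gpu_addr:	optional pointer that receives the resulting GPU address
 *
 * If the BO is already pinned, only the pin count is bumped (an error is
 * logged when the existing placement violates @max_offset). Otherwise the
 * BO is validated into the requested domain with TTM_PL_FLAG_NO_EVICT set.
 */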
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			if (max_offset < (radeon_bo_gpu_offset(bo) - domain_start)) {
				DRM_ERROR("radeon_bo_pin_restricted: "
				    "max_offset(%ju) < "
				    "(radeon_bo_gpu_offset(%ju) - "
				    "domain_start(%ju))\n",
				    (uintmax_t)max_offset, (uintmax_t)radeon_bo_gpu_offset(bo),
				    (uintmax_t)domain_start);
			}
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		spin_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		spin_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
	}
}
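
/**
 * radeon_bo_init - set up buffer object management for a device
 * @rdev:	radeon device
 *
 * Adds a write-combining MTRR for the VRAM aperture (unless fastfb is
 * active), prints the detected VRAM configuration and initializes TTM.
 */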
int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%juM, BAR=%juM\n",
		 (uintmax_t)rdev->mc.mc_vram_size >> 20,
		 (uintmax_t)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *         __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}
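
/**
 * radeon_bo_list_validate - reserve and place all BOs of a command submission
 * @rdev:	radeon device
 * @ticket:	ww_mutex acquire context used for the reservation
 * @head:	list of radeon_cs_reloc entries to validate
 * @ring:	ring the command submission targets
 *
 * Reserves every BO on the list and validates it into its preferred domain,
 * falling back to the allowed domains on failure. Buffer moves are throttled
 * against the byte threshold computed above so a single IB cannot trigger an
 * unbounded amount of migration traffic.
 */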
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_cs_reloc *lobj;
	struct radeon_bo *bo;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	r = ttm_eu_reserve_buffers(ticket, head);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

#ifdef DUMBBELL_WIP
int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}
#endif /* DUMBBELL_WIP */

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	KASSERT(radeon_bo_is_reserved(bo),
	    ("radeon_bo_get_surface_reg: radeon_bo is not reserved"));

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}
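
/**
 * radeon_bo_set_tiling_flags - validate and store tiling state for a BO
 * @bo:		buffer object
 * @tiling_flags:	RADEON_TILING_* flags, including the evergreen bank
 *			width/height, macro tile aspect and tile split fields
 * @pitch:	surface pitch
 *
 * On CEDAR and newer chips the encoded tiling parameters are range-checked
 * before the flags and pitch are stored on the reserved BO.
 */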
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	KASSERT(radeon_bo_is_reserved(bo),
	    ("radeon_bo_get_tiling_flags: radeon_bo is not reserved"));
	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	KASSERT((radeon_bo_is_reserved(bo) || force_drop),
	    ("radeon_bo_check_tiling: radeon_bo is not reserved && !force_drop"));

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}
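
/**
 * radeon_bo_fault_reserve_notify - handle a CPU fault on a VRAM BO
 * @bo:		TTM buffer object that faulted
 *
 * Called with the BO reserved when user space touches a mapping. If the BO
 * currently lives outside the CPU-visible part of VRAM it is revalidated
 * into visible VRAM, falling back to GTT when that runs out of space.
 */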
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* hurrah the memory is not visible ! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	r = ttm_bo_validate(bo, &rbo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0))
		return r;
	lockmgr(&bo->tbo.bdev->fence_lock, LK_EXCLUSIVE);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	lockmgr(&bo->tbo.bdev->fence_lock, LK_RELEASE);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}


/**
 * radeon_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}