/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#ifdef TRACE_TODO
#include "radeon_trace.h"
#endif
#include <linux/io.h>


static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON(!list_empty(&bo->va));
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

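/**
 * radeon_ttm_placement_from_domain - build the TTM placement list for a BO
 * @rbo: buffer object to set up
 * @domain: mask of RADEON_GEM_DOMAIN_{VRAM,GTT,CPU} bits the buffer may use
 *
 * Translates a radeon domain mask into TTM placement entries, honoring the
 * caching hints in rbo->flags (RADEON_GEM_GTT_UC / RADEON_GEM_GTT_WC).  If
 * no domain bit is set, a single system placement accepting any caching
 * mode is used as a fallback.  VRAM placements of buffers created with
 * RADEON_GEM_CPU_ACCESS are limited to the CPU-visible aperture, and
 * buffers larger than 512KB are placed top-down to reduce fragmentation
 * (except CPU-accessible VRAM buffers, which stay in the visible window).
 */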
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_TT;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_TT;
		}
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_SYSTEM;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c)
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		rbo->placements[i].fpfn = 0;
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM))
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}

	/*
	 * Use two-ended allocation depending on the buffer size to
	 * improve fragmentation quality.
	 * 512KB was measured as the optimal threshold.
	 *
	 * Skip top-down placement for CPU-accessible VRAM buffers; their
	 * range was already restricted to the visible window above.
	 */
	if (!((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
	      (domain & RADEON_GEM_DOMAIN_VRAM)) &&
	    rbo->tbo.mem.size > 512 * 1024) {
		for (i = 0; i < c; i++) {
			rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
		}
	}
}

int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup2(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	lockmgr(&rdev->pm.mclk_lock, LK_SHARED);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	lockmgr(&rdev->pm.mclk_lock, LK_RELEASE);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

#ifdef TRACE_TODO
	trace_radeon_bo_create(bo);
#endif

	return 0;
}

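/*
 * radeon_bo_kmap/radeon_bo_kunmap map a BO into kernel address space.
 * The mapping is cached in bo->kptr and reused on subsequent calls.
 *
 * Illustrative call sequence for a kernel-owned buffer (error handling
 * elided; everything outside the radeon_bo_* helpers is a placeholder):
 *
 *	struct radeon_bo *bo;
 *	void *cpu_ptr;
 *	u64 gpu_addr;
 *
 *	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
 *			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
 *	r = radeon_bo_reserve(bo, false);
 *	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	r = radeon_bo_kmap(bo, &cpu_ptr);
 *	... fill the buffer through cpu_ptr ...
 *	radeon_bo_kunmap(bo);
 *	radeon_bo_unpin(bo);
 *	radeon_bo_unreserve(bo);
 */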
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;
	struct radeon_bo *rbo;

	if ((rbo = *bo) == NULL)
		return;
	*bo = NULL;
	rdev = rbo->rdev;
	tbo = &rbo->tbo;
	ttm_bo_unref(&tbo);
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			if (max_offset < (radeon_bo_gpu_offset(bo) - domain_start)) {
				DRM_ERROR("radeon_bo_pin_restricted: "
					  "max_offset(%ju) < "
					  "(radeon_bo_gpu_offset(%ju) - "
					  "domain_start(%ju))\n",
					  (uintmax_t)max_offset,
					  (uintmax_t)radeon_bo_gpu_offset(bo),
					  (uintmax_t)domain_start);
			}
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

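/**
 * radeon_bo_evict_vram - evict all buffer objects from VRAM
 * @rdev: radeon device
 *
 * Asks TTM to move every buffer currently resident in VRAM to another
 * placement (typically GTT), e.g. before the device is powered down.
 * The IGP shortcut below is currently disabled until proper pm ops can
 * handle it.
 */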
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *        __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing it safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

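/*
 * Worked example of the threshold above (numbers are illustrative):
 * with 2 GiB of VRAM and 512 MiB currently used, half_vram is 1 GiB,
 * half_free_vram is 512 MiB and the per-IB threshold is 256 MiB.  Once
 * usage reaches or exceeds half of VRAM, half_free_vram is 0 and the
 * threshold is clamped to the 1 MiB floor.
 */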
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_cs_reloc *lobj;
	struct radeon_bo *bo;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	r = ttm_eu_reserve_buffers(ticket, head);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

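/*
 * radeon_bo_set_tiling_flags validates and stores the tiling description
 * for a BO.  On evergreen and newer (CHIP_CEDAR+) the bank width/height
 * and macro tile aspect must be 0, 1, 2, 4 or 8, and the (stencil) tile
 * split fields must not exceed 6.
 *
 * Illustrative packing of a flag word, mirroring the field extraction
 * below (the surrounding values are placeholders, not driver defaults):
 *
 *	tiling_flags = RADEON_TILING_MACRO |
 *		       ((bankw & RADEON_TILING_EG_BANKW_MASK) <<
 *			RADEON_TILING_EG_BANKW_SHIFT) |
 *		       ((bankh & RADEON_TILING_EG_BANKH_MASK) <<
 *			RADEON_TILING_EG_BANKH_SHIFT);
 */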
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

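/*
 * TTM fault handler hook: if a CPU fault hits a BO that currently lives
 * in VRAM beyond the CPU-visible aperture, revalidate it into visible
 * VRAM, falling back to GTT when visible VRAM is exhausted, so the fault
 * can be serviced.
 */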
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* hurrah the memory is not visible ! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	r = ttm_bo_validate(bo, &rbo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0))
		return r;
	lockmgr(&bo->tbo.bdev->fence_lock, LK_EXCLUSIVE);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	lockmgr(&bo->tbo.bdev->fence_lock, LK_RELEASE);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}


/**
 * radeon_bo_reserve - reserve bo
 * @bo: bo structure
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}
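
/*
 * Callers pair radeon_bo_reserve() with its radeon_bo_unreserve()
 * counterpart around any state change on the BO.  A minimal illustrative
 * sketch (error handling of the kmap result is omitted):
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	r = radeon_bo_kmap(bo, &ptr);
 *	radeon_bo_unreserve(bo);
 */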