/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int pos;

	pos = ffs(place->flags & TTM_PL_MASK_MEM);
	if (unlikely(!pos))
		return -EINVAL;

	*mem_type = pos - 1;
	return 0;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct drm_printer p = drm_debug_printer(TTM_PFX);

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
	pr_err("    size: %ju\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, &p);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
					      &mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i].flags, mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%d\n",
			atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type  = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(kref_read(&bo->list_kref));
	BUG_ON(kref_read(&bo->kref));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));
	ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	dma_fence_put(bo->moving);
	reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

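/*
 * Put a buffer object on the LRU lists it belongs to: the per-memory-type
 * LRU for its current placement and, when it has a populated ttm that is
 * neither SG-backed nor already swapped out, the global swap LRU. Each list
 * insertion takes a list_kref reference; NO_EVICT buffers stay off the lists.
 */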
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	lockdep_assert_held(&bo->resv->lock.base);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

#ifdef __DragonFly__
		if (WARN_ON(!list_empty(&bo->lru)))
			return;
#endif

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru[bo->priority]);
		kref_get(&bo->list_kref);

		if (bo->ttm && !(bo->ttm->page_flags &
				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
			list_add_tail(&bo->swap,
				      &bo->glob->swap_lru[bo->priority]);
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	lockmgr(&bo->glob->lru_lock, LK_EXCLUSIVE);
	ttm_bo_del_from_lru(bo);
	lockmgr(&bo->glob->lru_lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	lockdep_assert_held(&bo->resv->lock.base);

	ttm_bo_del_from_lru(bo);
	ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, evict, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, evict, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, false, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Can not flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Call bo::reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, false, NULL);

	ttm_tt_destroy(bo->ttm);
	bo->ttm = NULL;
	ttm_bo_mem_put(bo, &bo->mem);

	ww_mutex_unlock(&bo->resv->lock);
}

static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->resv == &bo->ttm_resv)
		return 0;

	BUG_ON(!reservation_object_trylock(&bo->ttm_resv));

	r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
	if (r)
		reservation_object_unlock(&bo->ttm_resv);

	return r;
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i;

	fobj = reservation_object_get_list(&bo->ttm_resv);
	fence = reservation_object_get_excl(&bo->ttm_resv);
	if (fence && !fence->ops->signaled)
		dma_fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
					reservation_object_held(bo->resv));

		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	ret = ttm_bo_individualize_resv(bo);
	if (ret) {
		/* Last resort, if we fail to allocate memory for the
		 * fences block for the BO to become idle
		 */
		reservation_object_wait_timeout_rcu(bo->resv, true, false,
						    30 * HZ);
		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		goto error;
	}

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ret = __ttm_bo_reserve(bo, false, true, NULL);
	if (!ret) {
		if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
			ttm_bo_del_from_lru(bo);
			lockmgr(&glob->lru_lock, LK_RELEASE);
			if (bo->resv != &bo->ttm_resv)
				reservation_object_unlock(&bo->ttm_resv);

			ttm_bo_cleanup_memtype_use(bo);
			return;
		}

		ttm_bo_flush_all_fences(bo);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		__ttm_bo_unreserve(bo);
	}
	if (bo->resv != &bo->ttm_resv)
		reservation_object_unlock(&bo->ttm_resv);

error:
	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * function ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bo->glob;
	struct reservation_object *resv;
	int ret;

	if (unlikely(list_empty(&bo->ddestroy)))
		resv = bo->resv;
	else
		resv = &bo->ttm_resv;

	if (reservation_object_test_signaled_rcu(resv, true))
		ret = 0;
	else
		ret = -EBUSY;

	if (ret && !no_wait_gpu) {
		long lret;
		ww_mutex_unlock(&bo->resv->lock);
		lockmgr(&glob->lru_lock, LK_RELEASE);

		lret = reservation_object_wait_timeout_rcu(resv, true,
							   interruptible,
							   30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		ret = __ttm_bo_reserve(bo, false, true, NULL);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			lockmgr(&glob->lru_lock, LK_RELEASE);
			return 0;
		}
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		__ttm_bo_unreserve(bo);
		lockmgr(&glob->lru_lock, LK_RELEASE);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	kref_put(&bo->list_kref, ttm_bo_ref_bug);

	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_bo_cleanup_memtype_use(bo);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = __ttm_bo_reserve(entry, false, true, NULL);
		if (remove_all && ret) {
			lockmgr(&glob->lru_lock, LK_RELEASE);
			ret = __ttm_bo_reserve(entry, false, false, NULL);
			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			lockmgr(&glob->lru_lock, LK_RELEASE);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	lockmgr(&glob->lru_lock, LK_RELEASE);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

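/*
 * Final kref release for a buffer object: drop the vma offset node and any
 * io-reserved VM mappings, then either destroy the object immediately or
 * queue it on the device's delayed-destroy list if fences are still pending.
 */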
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	lockdep_assert_held(&bo->resv->lock.base);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
			       no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	/* Don't evict this BO if it's outside of the
	 * requested placement range
	 */
	if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
	    (place->lpfn && place->lpfn <= bo->mem.start))
		return false;

	return true;
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);

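/*
 * Walk the LRU lists of a memory type in priority order, reserve the first
 * buffer the driver considers valuable to evict for @place, and evict it.
 * Buffers already queued for delayed destruction are cleaned up instead.
 */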
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       uint32_t mem_type,
			       const struct ttm_place *place,
			       bool interruptible,
			       bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	unsigned i;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &man->lru[i], lru) {
			ret = __ttm_bo_reserve(bo, false, true, NULL);
			if (ret)
				continue;

			if (place && !bdev->driver->eviction_valuable(bo,
								      place)) {
				__ttm_bo_unreserve(bo);
				ret = -EBUSY;
				continue;
			}

			break;
		}

		if (!ret)
			break;
	}

	if (ret) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	BUG_ON(ret != 0);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Add the last move fence to the BO and reserve a new shared slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_mem_type_manager *man,
				 struct ttm_mem_reg *mem)
{
	struct dma_fence *fence;
	int ret;

	lockmgr(&man->move_lock, LK_EXCLUSIVE);
	fence = dma_fence_get(man->move);
	lockmgr(&man->move_lock, LK_RELEASE);

	if (fence) {
		reservation_object_add_shared_fence(bo->resv, fence);

		ret = reservation_object_reserve_shared(bo->resv);
		if (unlikely(ret))
			return ret;

		dma_fence_put(bo->moving);
		bo->moving = fence;
	}

	return 0;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  uint32_t mem_type,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem,
				  bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type, place,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	mem->mem_type = mem_type;
	return ttm_bo_add_move_fence(bo, man, mem);
}

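/*
 * Pick caching flags for a new placement: keep the buffer's current caching
 * when the proposal allows it, otherwise fall back to the manager's default,
 * then to cached, write-combined and finally uncached.
 */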
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	ret = reservation_object_reserve_shared(bo->resv);
	if (unlikely(ret))
		return ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
					       &cur_flags);

		if (!type_ok)
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret))
			return ret;

		if (mem->mm_node) {
			ret = ttm_bo_add_move_fence(bo, man, mem);
			if (unlikely(ret)) {
				(*man->func->put_node)(man, mem);
				return ret;
			}
			break;
		}
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
					     interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}

	if (!type_found) {
		pr_err(TTM_PFX "No compatible memory type found\n");
		return -EINVAL;
	}

	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      bool interruptible,
			      bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	lockdep_assert_held(&bo->resv->lock.base);

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static bool ttm_bo_places_compat(const struct ttm_place *places,
				 unsigned num_placement,
				 struct ttm_mem_reg *mem,
				 uint32_t *new_flags)
{
	unsigned i;

	for (i = 0; i < num_placement; i++) {
		const struct ttm_place *heap = &places[i];

		if (mem->mm_node && (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
		    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
		     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
			return true;
	}
	return false;
}

bool ttm_bo_mem_compat(struct ttm_placement *placement,
		       struct ttm_mem_reg *mem,
		       uint32_t *new_flags)
{
	if (ttm_bo_places_compat(placement->placement, placement->num_placement,
				 mem, new_flags))
		return true;

	if ((placement->busy_placement != placement->placement ||
	     placement->num_busy_placement > placement->num_placement) &&
	    ttm_bo_places_compat(placement->busy_placement,
				 placement->num_busy_placement,
				 mem, new_flags))
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_bo_mem_compat);

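/*
 * Validate a buffer object against the given placement: if the current
 * memory region is already compatible only the placement flags are updated,
 * otherwise the buffer is moved via ttm_bo_move_buffer(). A TTM is allocated
 * for system-placed buffers that do not have one yet.
 */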
int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret;
	uint32_t new_flags;

	lockdep_assert_held(&bo->resv->lock.base);
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
			 struct ttm_buffer_object *bo,
			 unsigned long size,
			 enum ttm_bo_type type,
			 struct ttm_placement *placement,
			 uint32_t page_alignment,
			 bool interruptible,
			 struct vm_object *persistent_swap_storage,
			 size_t acc_size,
			 struct sg_table *sg,
			 struct reservation_object *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	lockinit(&bo->wu_mutex, "ttmbwm", 0, LK_CANRECURSE);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->moving = NULL;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	if (resv) {
		bo->resv = resv;
		lockdep_assert_held(&bo->resv->lock.base);
	} else {
		bo->resv = &bo->ttm_resv;
	}
	reservation_object_init(&bo->ttm_resv);
	atomic_inc(&bo->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);
	bo->priority = 0;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = ww_mutex_trylock(&bo->resv->lock);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, interruptible, false);

	if (unlikely(ret)) {
		if (!resv)
			ttm_bo_unreserve(bo);

		ttm_bo_unref(&bo);
		return ret;
	}

	if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		lockmgr(&bo->glob->lru_lock, LK_EXCLUSIVE);
		ttm_bo_add_to_lru(bo);
		lockmgr(&bo->glob->lru_lock, LK_RELEASE);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct vm_object *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		struct reservation_object *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
				   page_alignment, interruptible,
				   persistent_swap_storage, acc_size,
				   sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init);

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			bool interruptible,
			struct vm_object *persistent_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

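/*
 * Evict every buffer on the LRU lists of a memory type and then wait for the
 * manager's last move fence, so the type can be torn down or fully evicted.
 */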
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	struct dma_fence *fence;
	int ret;
	unsigned i;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		while (!list_empty(&man->lru[i])) {
			lockmgr(&glob->lru_lock, LK_RELEASE);
			ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
			if (ret)
				return ret;
			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		}
	}
	lockmgr(&glob->lru_lock, LK_RELEASE);

	lockmgr(&man->move_lock, LK_EXCLUSIVE);
	fence = dma_fence_get(man->move);
	lockmgr(&man->move_lock, LK_RELEASE);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ret = ttm_bo_force_list_clean(bdev, mem_type);
		if (ret) {
			pr_err("Cleanup eviction failed\n");
			return ret;
		}

		ret = (*man->func->takedown)(man);
	}

	dma_fence_put(man->move);
	man->move = NULL;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret;
	struct ttm_mem_type_manager *man;
	unsigned i;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	lockinit(&man->io_reserve_mutex, "ttmior", 0, 0);
	lockinit(&man->move_lock, "ttmml", 0, 0);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

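/*
 * Set up the shared buffer-object state: global locks, the dummy read page,
 * per-priority swap LRU lists, the swapout shrink callback and the sysfs
 * kobject that exposes bo_count.
 */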
int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;
	unsigned i;

	lockinit(&glob->device_list_mutex, "ttmdlm", 0, 0);
	lockinit(&glob->lru_lock, "ttmlru", 0, 0);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&glob->swap_lru[i]);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);


int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		if (list_empty(&bdev->man[0].lru[0]))
			TTM_DEBUG("Swap list %d was clean\n", i);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	drm_vma_offset_manager_destroy(&bdev->vma_manager);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

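/*
 * Register a new buffer-object device: initialize the mandatory system
 * memory type, the vma offset manager and the delayed-destroy work item,
 * then add the device to the global device list.
 */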
int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	/*
	 * XXX DRAGONFLY - dev_mapping NULL atm, find other XXX DRAGONFLY
	 * lines and fix when it no longer is in later API change.
	 */
	bdev->dev_mapping = mapping;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

#ifdef __DragonFly__

/*
 * XXX DRAGONFLY - device_mapping not yet implemented so
 * file_mapping is basically always NULL. We have to properly
 * release the mmap, etc.
 */
void ttm_bo_release_mmap(struct ttm_buffer_object *bo);

#endif

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
//	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_bo_release_mmap(bo);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}


EXPORT_SYMBOL(ttm_bo_unmap_virtual);

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool interruptible, bool no_wait)
{
	long timeout = 15 * HZ;

	if (no_wait) {
		if (reservation_object_test_signaled_rcu(bo->resv, true))
			return 0;
		else
			return -EBUSY;
	}

	timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
						      interruptible, timeout);
	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	reservation_object_add_excl_fence(bo->resv, NULL);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
	if (unlikely(ret != 0))
		return ret;
	ret = ttm_bo_wait(bo, true, no_wait);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	unsigned i;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
			ret = __ttm_bo_reserve(bo, false, true, NULL);
			if (!ret)
				break;
		}
		if (!ret)
			break;
	}

	if (ret) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	/**
	 * Move to system cached
	 */

	if (bo->mem.mem_type != TTM_PL_SYSTEM ||
	    bo->ttm->caching_state != tt_cached) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	/**
	 * Make sure BO is idle.
	 */

	ret = ttm_bo_wait(bo, false, false);
	if (unlikely(ret != 0))
		goto out;

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 *
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	__ttm_bo_unreserve(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);

/**
 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
 * unreserved
 *
 * @bo: Pointer to buffer
 */
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * In the absence of a wait_unlocked API,
	 * use the bo::wu_mutex to avoid triggering livelocks due to
	 * concurrent use of this function. Note that this use of
	 * bo::wu_mutex can go away if we change locking order to
	 * mmap_sem -> bo::reserve.
	 */
	ret = mutex_lock_interruptible(&bo->wu_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;
	if (!ww_mutex_is_locked(&bo->resv->lock))
		goto out_unlock;
	ret = __ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0))
		goto out_unlock;
	__ttm_bo_unreserve(bo);

out_unlock:
	mutex_unlock(&bo->wu_mutex);
	return ret;
}