/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++) {
		if (place->flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
	pr_err("    size: %ju\n", (uintmax_t)man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
					      &mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i].flags, mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type  = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->resv == &bo->ttm_resv)
		reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
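/*
 * Note on reference counting (a descriptive sketch of the scheme as
 * implemented above, not an API guarantee): each buffer object carries
 * two krefs. bo::kref counts user references, and its release
 * (ttm_bo_release) queues the object for destruction, while
 * bo::list_kref counts membership on the LRU, swap and delayed-destroy
 * lists, and its release (ttm_bo_release_list) frees the object. A BO
 * is therefore only freed once it has left all lists.
 */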
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	lockdep_assert_held(&bo->resv->lock.base);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRUs here.
	 */

	return put_count;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	kref_sub(&bo->list_kref, count,
		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	int put_count;

	lockmgr(&bo->glob->lru_lock, LK_EXCLUSIVE);
	put_count = ttm_bo_del_from_lru(bo);
	lockmgr(&bo->glob->lru_lock, LK_RELEASE);
	ttm_bo_list_ref_sub(bo, put_count, true);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

/*
 * Call with bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through */
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
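/*
 * Descriptive note (a sketch of the flow implemented below, not a
 * contract): ttm_bo_handle_move_mem() first drops CPU mappings when
 * the move crosses a PCI aperture or changes caching, then makes sure
 * a populated and bound ttm exists for non-fixed destinations, and
 * finally picks a move mechanism in order of preference: a simple
 * ttm rebind, the driver's accelerated move hook, or a memcpy
 * fallback.
 */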
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Can not flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
/**
 * Call with the bo reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	ww_mutex_unlock(&bo->resv->lock);
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	void *sync_obj = NULL;
	int put_count;
	int ret;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ret = __ttm_bo_reserve(bo, false, true, false, 0);

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	(void) ttm_bo_wait(bo, false, false, true);
	if (!ret && !bo->sync_obj) {
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		put_count = ttm_bo_del_from_lru(bo);

		lockmgr(&glob->lru_lock, LK_RELEASE);
		ttm_bo_cleanup_memtype_use(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);

		return;
	}
	if (bo->sync_obj)
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	lockmgr(&bdev->fence_lock, LK_RELEASE);

	if (!ret) {

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		__ttm_bo_unreserve(bo);
	}

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	if (sync_obj) {
		driver->sync_obj_flush(sync_obj);
		driver->sync_obj_unref(&sync_obj);
	}
	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
/**
 * ttm_bo_cleanup_refs_and_unlock - cleanup refs of a bo queued for destruction
 *
 * If the bo is idle, remove it from the delayed-destroy and lru lists
 * and drop the list references. If it is not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held; this function
 * will drop both before returning.
 *
 * @interruptible: Any sleeps should occur interruptibly.
 * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, false, true);

	if (ret && !no_wait_gpu) {
		void *sync_obj;

		/*
		 * Take a reference to the fence and unreserve,
		 * at this point the buffer should be dead, so
		 * no new sync objects can be attached.
		 */
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		lockmgr(&bdev->fence_lock, LK_RELEASE);

		__ttm_bo_unreserve(bo);
		lockmgr(&glob->lru_lock, LK_RELEASE);

		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
		driver->sync_obj_unref(&sync_obj);
		if (ret)
			return ret;

		/*
		 * Remove the sync_obj with ttm_bo_wait; the wait should be
		 * finished, and no new wait object should have been added.
		 */
		lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		ret = ttm_bo_wait(bo, false, false, true);
		WARN_ON(ret);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (ret)
			return ret;

		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		ret = __ttm_bo_reserve(bo, false, true, false, 0);

		/*
		 * We raced, and lost: someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if that is not the case, because we finished waiting
		 * any delayed destruction would succeed, so just return
		 * success here.
		 */
		if (ret) {
			lockmgr(&glob->lru_lock, LK_RELEASE);
			return 0;
		}
	} else
		lockmgr(&bdev->fence_lock, LK_RELEASE);

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		__ttm_bo_unreserve(bo);
		lockmgr(&glob->lru_lock, LK_RELEASE);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = __ttm_bo_reserve(entry, false, true, false, 0);
		if (remove_all && ret) {
			lockmgr(&glob->lru_lock, LK_RELEASE);
			ret = __ttm_bo_reserve(entry, false, false,
					       false, 0);
			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			lockmgr(&glob->lru_lock, LK_RELEASE);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	lockmgr(&glob->lru_lock, LK_RELEASE);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}
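/*
 * Note (descriptive, based on the scheduling calls in this file): the
 * delayed-destroy worker re-arms itself roughly every 10 ms
 * (HZ / 100 jiffies, clamped to at least one jiffy) for as long as
 * ttm_bo_delayed_delete() reports buffers it could not yet reap.
 */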
static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	lockmgr(&bdev->fence_lock, LK_RELEASE);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	lockdep_assert_held(&bo->resv->lock.base);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
			       no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}
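/*
 * Descriptive note (a sketch of the logic below): ttm_mem_evict_first()
 * walks the LRU of @mem_type and picks the first buffer it can reserve
 * without blocking. Buffers already queued for delayed destruction are
 * reaped through ttm_bo_cleanup_refs_and_unlock(); everything else is
 * evicted with ttm_bo_evict().
 */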
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       uint32_t mem_type,
			       bool interruptible,
			       bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = __ttm_bo_reserve(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  uint32_t mem_type,
				  struct ttm_placement *placement,
				  struct ttm_mem_reg *mem,
				  bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, placement, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}
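/*
 * Example (a hypothetical sketch for illustration; the chosen domains
 * and flags are driver policy, not prescribed by this file): a driver
 * asking for VRAM with a system-memory fallback could pass
 * ttm_bo_mem_space() a placement like
 *
 *	struct ttm_place places[] = {
 *		{ .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC },
 *		{ .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED },
 *	};
 *	struct ttm_placement placement = {
 *		.num_placement = 2,
 *		.placement = places,
 *		.num_busy_placement = 2,
 *		.busy_placement = places,
 *	};
 *
 * Entries are tried in array order, so preferred domains come first.
 */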
/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible,
		     bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
					      &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
					       mem_type,
					       &placement->placement[i],
					       &cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to update the current flags.
		 */
		ttm_flag_masked(&cur_flags, placement->placement[i].flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = (*man->func->get_node)(man, bo, placement, mem);
			if (unlikely(ret))
				return ret;
		}
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_place(&placement->busy_placement[i],
					      &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man,
					  mem_type,
					  &placement->busy_placement[i],
					  &cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to update the current flags.
		 */
		ttm_flag_masked(&cur_flags, placement->busy_placement[i].flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
					     interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      bool interruptible,
			      bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;
	struct ttm_bo_device *bdev = bo->bdev;

	lockdep_assert_held(&bo->resv->lock.base);

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static int ttm_bo_mem_compat(struct ttm_placement *placement,
			     struct ttm_mem_reg *mem)
{
	int i;

	if (mem->mm_node && placement->lpfn != 0 &&
	    (mem->start < placement->fpfn ||
	     mem->start + mem->num_pages > placement->lpfn))
		return -1;

	for (i = 0; i < placement->num_placement; i++) {
		if ((placement->placement[i].flags & mem->placement &
		     TTM_PL_MASK_CACHING) &&
		    (placement->placement[i].flags & mem->placement &
		     TTM_PL_MASK_MEM))
			return i;
	}
	return -1;
}
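/*
 * Usage sketch for ttm_bo_validate() below (hypothetical driver code,
 * for illustration only; "my_placement" is a made-up name): validation
 * is called with the reservation held and only moves the buffer when
 * its current placement is incompatible:
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (ret == 0) {
 *		ret = ttm_bo_validate(bo, &my_placement, true, false);
 *		ttm_bo_unreserve(bo);
 *	}
 */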
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	lockdep_assert_held(&bo->resv->lock.base);
	/* Check that the range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
		    (placement->lpfn - placement->fpfn) < bo->num_pages)
			return -EINVAL;
	/*
	 * Check whether we need to move the buffer.
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
	if (ret < 0) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to update the active
		 * flags.
		 */
		ttm_flag_masked(&bo->mem.placement,
				placement->placement[ret].flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
			   struct ttm_placement *placement)
{
	BUG_ON((placement->fpfn || placement->lpfn) &&
	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));

	return 0;
}
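/*
 * Example for ttm_bo_init() below (a hypothetical sketch; "my_bo",
 * "my_placement" and "my_bo_destroy" are made-up names): drivers
 * typically size the accounting charge first, then initialize an
 * embedded ttm_buffer_object:
 *
 *	acc_size = ttm_bo_acc_size(bdev, size, sizeof(*my_bo));
 *	ret = ttm_bo_init(bdev, &my_bo->tbo, size, ttm_bo_type_device,
 *			  &my_placement, 0, true, NULL, acc_size,
 *			  NULL, my_bo_destroy);
 *
 * On error ttm_bo_init() has already dropped the last reference, so
 * the caller must not touch the object again.
 */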
int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct vm_object *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	lockinit(&bo->wu_mutex, "ttmbwm", 0, LK_CANRECURSE);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	bo->resv = &bo->ttm_resv;
	reservation_object_init(bo->resv);
	atomic_inc(&bo->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);

	ret = ttm_bo_check_placement(bo, placement);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (likely(!ret) &&
	    (bo->type == ttm_bo_type_device ||
	     bo->type == ttm_bo_type_sg))
		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);

	locked = ww_mutex_trylock(&bo->resv->lock);
	WARN_ON(!locked);

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, interruptible, false);

	ttm_bo_unreserve(bo);

	if (unlikely(ret))
		ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
		  unsigned long size,
		  enum ttm_bo_type type,
		  struct ttm_placement *placement,
		  uint32_t page_alignment,
		  bool interruptible,
		  struct vm_object *persistent_swap_storage,
		  struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);
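/*
 * Note (descriptive): ttm_bo_acc_size()/ttm_bo_dma_acc_size() above
 * estimate the kernel-memory footprint charged against the memory
 * accounting global for one buffer object: the driver structure, the
 * page pointer array and the (dma) ttm, each rounded up. ttm_bo_init()
 * charges this amount up front and ttm_bo_release_list() returns it.
 */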
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	while (!list_empty(&man->lru)) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				pr_err("Cleanup eviction failed\n");
			}
		}
		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	}
	lockmgr(&glob->lru_lock, LK_RELEASE);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	lockinit(&man->io_reserve_mutex, "ttmior", 0, 0);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
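/*
 * Usage sketch for ttm_bo_init_mm() above (hypothetical; the VRAM size
 * calculation is made up): a driver typically brings up its
 * aperture-backed memory types right after ttm_bo_device_init(), e.g.
 *
 *	ret = ttm_bo_init_mm(&dev->bdev, TTM_PL_VRAM,
 *			     vram_size >> PAGE_SHIFT);
 *
 * The driver's init_mem_type() hook then fills in the manager's flags,
 * caching masks and func table; TTM_PL_SYSTEM is set up internally by
 * ttm_bo_device_init() with a zero size.
 */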
static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	lockinit(&glob->device_list_mutex, "ttmdlm", 0, 0);
	lockinit(&glob->lru_lock, "ttmlru", 0, 0);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);


int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	lockmgr(&glob->lru_lock, LK_RELEASE);

	drm_vma_offset_manager_destroy(&bdev->vma_manager);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	/*
	 * XXX DRAGONFLY - dev_mapping NULL atm, find other XXX DRAGONFLY
	 * lines and fix when it no longer is in later API change.
	 */
	bdev->dev_mapping = mapping;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	lockinit(&bdev->fence_lock, "ttmfnc", 0, 0);
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

#ifdef __DragonFly__

/*
 * XXX DRAGONFLY - device_mapping not yet implemented so
 * file_mapping is basically always NULL. We have to properly
 * release the mmap, etc.
 */
void ttm_bo_release_mmap(struct ttm_buffer_object *bo);

/**
 * drm_vma_node_unmap() - Unmap offset node
 * @node: Offset node
 * @file_mapping: Address space to unmap @node from
 *
 * Unmap all userspace mappings for a given offset node. The mappings must be
 * associated with the @file_mapping address-space. If no offset exists or
 * the address-space is invalid, nothing is done.
 *
 * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
 * is not called on this node concurrently.
 */
static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
				      struct address_space *file_mapping)
{
	struct ttm_buffer_object *bo = container_of(node, struct ttm_buffer_object, vma_node);

	if (drm_vma_node_has_offset(node))
		unmap_mapping_range(file_mapping,
				    drm_vma_node_offset_addr(node),
				    drm_vma_node_size(node) << PAGE_SHIFT, 1);
	ttm_bo_release_mmap(bo);
}
#endif

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
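/*
 * Descriptive note on ttm_bo_wait() below (a summary of the code, not
 * added semantics): it must be entered with bdev->fence_lock held. The
 * lock is dropped around every sync_obj_wait()/sync_obj_unref() call
 * and re-taken afterwards, so bo->sync_obj is re-checked on each loop
 * iteration before the stale sync object reference is released.
 */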
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	struct ttm_bo_device *bdev = bo->bdev;
	void *sync_obj;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			lockmgr(&bdev->fence_lock, LK_RELEASE);
			driver->sync_obj_unref(&tmp_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		ret = driver->sync_obj_wait(sync_obj,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
			return ret;
		}
		lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		if (likely(bo->sync_obj == sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			lockmgr(&bdev->fence_lock, LK_RELEASE);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		} else {
			lockmgr(&bdev->fence_lock, LK_RELEASE);
			driver->sync_obj_unref(&sync_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
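/*
 * Usage sketch for the synccpu pair above (hypothetical caller code;
 * "ptr", "data" and "len" are made-up names): CPU writes are bracketed
 * so the cpu_writers count stays balanced:
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (ret == 0) {
 *		memcpy(ptr, data, len);
 *		ttm_bo_synccpu_write_release(bo);
 *	}
 */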
/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = __ttm_bo_reserve(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	lockmgr(&bo->bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, false, false);
	lockmgr(&bo->bdev->fence_lock, LK_RELEASE);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. The buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	__ttm_bo_unreserve(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);

/**
 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
 * unreserved
 *
 * @bo: Pointer to buffer
 */
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * In the absence of a wait_unlocked API,
	 * use the bo::wu_mutex to avoid triggering livelocks due to
	 * concurrent use of this function. Note that this use of
	 * bo::wu_mutex can go away if we change locking order to
	 * mmap_sem -> bo::reserve.
	 */
	ret = mutex_lock_interruptible(&bo->wu_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;
	if (!ww_mutex_is_locked(&bo->resv->lock))
		goto out_unlock;
	ret = __ttm_bo_reserve(bo, true, false, false, NULL);
	if (unlikely(ret != 0))
		goto out_unlock;
	__ttm_bo_unreserve(bo);

out_unlock:
	mutex_unlock(&bo->wu_mutex);
	return ret;
}