/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++) {
		if (place->flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
	pr_err("    size: %ju\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
					      &mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i].flags, mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->resv == &bo->ttm_resv)
		reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	lockdep_assert_held(&bo->resv->lock.base);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		list_add(&bo->lru, bdev->driver->lru_tail(bo));
		kref_get(&bo->list_kref);

		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
			list_add(&bo->swap, bdev->driver->swap_lru_tail(bo));
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int put_count = 0;

	if (bdev->driver->lru_removal)
		bdev->driver->lru_removal(bo);

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	return put_count;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	kref_sub(&bo->list_kref, count,
		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	int put_count;

	lockmgr(&bo->glob->lru_lock, LK_EXCLUSIVE);
	put_count = ttm_bo_del_from_lru(bo);
	lockmgr(&bo->glob->lru_lock, LK_RELEASE);
	ttm_bo_list_ref_sub(bo, put_count, true);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int put_count = 0;

	lockdep_assert_held(&bo->resv->lock.base);

	if (bdev->driver->lru_removal)
		bdev->driver->lru_removal(bo);

	put_count = ttm_bo_del_from_lru(bo);
	ttm_bo_list_ref_sub(bo, put_count, true);
	ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo)
{
	return bo->bdev->man[bo->mem.mem_type].lru.prev;
}
EXPORT_SYMBOL(ttm_bo_default_lru_tail);

struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo)
{
	return bo->glob->swap_lru.prev;
}
EXPORT_SYMBOL(ttm_bo_default_swap_lru_tail);
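
/*
 * A minimal sketch (not driver API, identifiers as used in this file) of
 * how the list_kref bookkeeping above is typically used: removal from the
 * LRU lists must happen under the global lru_lock, and the references
 * dropped by the removal are released afterwards with ttm_bo_list_ref_sub():
 *
 *	lockmgr(&bo->glob->lru_lock, LK_EXCLUSIVE);
 *	put_count = ttm_bo_del_from_lru(bo);
 *	lockmgr(&bo->glob->lru_lock, LK_RELEASE);
 *	ttm_bo_list_ref_sub(bo, put_count, true);
 *
 * This is exactly what ttm_bo_del_sub_from_lru() does; callers that already
 * hold the lock open-code the same pattern.
 */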

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through */
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Can not flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
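
/*
 * Illustrative sketch only: the move path above prefers, in order,
 * ttm_bo_move_ttm() for non-fixed to non-fixed moves, a driver supplied
 * bdev->driver->move() callback, and finally ttm_bo_move_memcpy(). A driver
 * that can use its blitter for VRAM moves would hook in roughly as below
 * (my_accel_copy() is a hypothetical driver helper):
 *
 *	static int my_driver_move(struct ttm_buffer_object *bo, bool evict,
 *				  bool interruptible, bool no_wait_gpu,
 *				  struct ttm_mem_reg *new_mem)
 *	{
 *		if (my_accel_copy(bo, new_mem) == 0)
 *			return 0;
 *		return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 *	}
 */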

/**
 * Call bo::reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	ww_mutex_unlock(&bo->resv->lock);
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i;

	fobj = reservation_object_get_list(bo->resv);
	fence = reservation_object_get_excl(bo->resv);
	if (fence && !fence->ops->signaled)
		fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
					reservation_object_held(bo->resv));

		if (!fence->ops->signaled)
			fence_enable_sw_signaling(fence);
	}
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ret = __ttm_bo_reserve(bo, false, true, NULL);

	if (!ret) {
		if (!ttm_bo_wait(bo, false, true)) {
			put_count = ttm_bo_del_from_lru(bo);

			lockmgr(&glob->lru_lock, LK_RELEASE);
			ttm_bo_cleanup_memtype_use(bo);

			ttm_bo_list_ref_sub(bo, put_count, true);

			return;
		} else
			ttm_bo_flush_all_fences(bo);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		__ttm_bo_unreserve(bo);
	}

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
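
/*
 * Note on the two-phase destroy implemented above and below: when the last
 * kref is dropped, ttm_bo_cleanup_refs_or_queue() frees the resource
 * immediately if the BO is idle; otherwise the BO is parked on
 * bdev->ddestroy and the delayed work retries via
 * ttm_bo_cleanup_refs_and_unlock() until all fences have signaled.
 */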

/**
 * function ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */
static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	ret = ttm_bo_wait(bo, false, true);

	if (ret && !no_wait_gpu) {
		long lret;
		ww_mutex_unlock(&bo->resv->lock);
		lockmgr(&glob->lru_lock, LK_RELEASE);

		lret = reservation_object_wait_timeout_rcu(bo->resv,
							   true,
							   interruptible,
							   30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		ret = __ttm_bo_reserve(bo, false, true, NULL);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			lockmgr(&glob->lru_lock, LK_RELEASE);
			return 0;
		}

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		ret = ttm_bo_wait(bo, false, true);
		WARN_ON(ret);
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		__ttm_bo_unreserve(bo);
		lockmgr(&glob->lru_lock, LK_RELEASE);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */
static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = __ttm_bo_reserve(entry, false, true, NULL);
		if (remove_all && ret) {
			lockmgr(&glob->lru_lock, LK_RELEASE);
			ret = __ttm_bo_reserve(entry, false, false, NULL);
			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			lockmgr(&glob->lru_lock, LK_RELEASE);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	lockmgr(&glob->lru_lock, LK_RELEASE);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	lockdep_assert_held(&bo->resv->lock.base);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
			       no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       uint32_t mem_type,
			       const struct ttm_place *place,
			       bool interruptible,
			       bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = __ttm_bo_reserve(bo, false, true, NULL);
		if (!ret) {
			if (place && (place->fpfn || place->lpfn)) {
				/* Don't evict this BO if it's outside of the
				 * requested placement range
				 */
				if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
				    (place->lpfn && place->lpfn <= bo->mem.start)) {
					__ttm_bo_unreserve(bo);
					ret = -EBUSY;
					continue;
				}
			}

			break;
		}
	}

	if (ret) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  uint32_t mem_type,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem,
				  bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type, place,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */
	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}
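
/*
 * Worked example (illustrative only) for ttm_bo_select_caching() above: if
 * the buffer currently has TTM_PL_FLAG_CACHED set, the proposed placement
 * allows TTM_PL_FLAG_CACHED | TTM_PL_FLAG_WC, and the manager defaults to
 * TTM_PL_FLAG_WC, the current cached attribute wins because it is part of
 * the allowed mask. Only when the current caching is not allowed does the
 * manager default (and then the CACHED -> WC -> UNCACHED preference order)
 * apply.
 */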

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible,
		     bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
					       &cur_flags);

		if (!type_ok)
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret))
			return ret;

		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
					     interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}

	if (!type_found) {
		printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
		return -EINVAL;
	}

	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      bool interruptible,
			      bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	lockdep_assert_held(&bo->resv->lock.base);

	/*
	 * Don't wait for the BO on initial allocation. This is important when
	 * the BO has an imported reservation object.
	 */
	if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm != NULL) {
		/*
		 * FIXME: It's possible to pipeline buffer moves.
		 * Have the driver move function wait for idle when necessary,
		 * instead of doing it here.
		 */
		ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
		if (ret)
			return ret;
	}
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

bool ttm_bo_mem_compat(struct ttm_placement *placement,
		       struct ttm_mem_reg *mem,
		       uint32_t *new_flags)
{
	int i;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *heap = &placement->placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	for (i = 0; i < placement->num_busy_placement; i++) {
		const struct ttm_place *heap = &placement->busy_placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	return false;
}
EXPORT_SYMBOL(ttm_bo_mem_compat);

int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    bool interruptible,
		    bool no_wait_gpu)
{
	int ret;
	uint32_t new_flags;

	lockdep_assert_held(&bo->resv->lock.base);
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
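
/*
 * A minimal usage sketch for ttm_bo_init() below, assuming an embedding
 * driver object (my_bo, my_bo_destroy and the VRAM placement are
 * hypothetical and not part of this file):
 *
 *	struct ttm_place place = {
 *		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
 *	};
 *	struct ttm_placement placement = {
 *		.num_placement = 1, .placement = &place,
 *		.num_busy_placement = 1, .busy_placement = &place,
 *	};
 *	size_t acc_size = ttm_bo_acc_size(bdev, size, sizeof(*my_bo));
 *
 *	ret = ttm_bo_init(bdev, &my_bo->base, size, ttm_bo_type_device,
 *			  &placement, 0, false, NULL, acc_size,
 *			  NULL, NULL, my_bo_destroy);
 *
 * On error ttm_bo_init() has already dropped the initial reference (and
 * thereby called the destroy callback), so the caller must not free the
 * object again.
 */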

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct vm_object *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		struct reservation_object *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	lockinit(&bo->wu_mutex, "ttmbwm", 0, LK_CANRECURSE);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	if (resv) {
		bo->resv = resv;
		lockdep_assert_held(&bo->resv->lock.base);
	} else {
		bo->resv = &bo->ttm_resv;
		reservation_object_init(&bo->ttm_resv);
	}
	atomic_inc(&bo->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = ww_mutex_trylock(&bo->resv->lock);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, interruptible, false);

	if (!resv) {
		ttm_bo_unreserve(bo);
	} else if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		lockmgr(&bo->glob->lru_lock, LK_EXCLUSIVE);
		ttm_bo_add_to_lru(bo);
		lockmgr(&bo->glob->lru_lock, LK_RELEASE);
	}

	if (unlikely(ret))
		ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
		  unsigned long size,
		  enum ttm_bo_type type,
		  struct ttm_placement *placement,
		  uint32_t page_alignment,
		  bool interruptible,
		  struct vm_object *persistent_swap_storage,
		  struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	while (!list_empty(&man->lru)) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
		ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				pr_err("Cleanup eviction failed\n");
			}
		}
		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	}
	lockmgr(&glob->lru_lock, LK_RELEASE);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	lockinit(&man->io_reserve_mutex, "ttmior", 0, 0);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	lockinit(&glob->device_list_mutex, "ttmdlm", 0, 0);
	lockinit(&glob->lru_lock, "ttmlru", 0, 0);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);


int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	lockmgr(&glob->lru_lock, LK_RELEASE);

	drm_vma_offset_manager_destroy(&bdev->vma_manager);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	/*
	 * XXX DRAGONFLY - dev_mapping NULL atm, find other XXX DRAGONFLY
	 * lines and fix when it no longer is in later API change.
	 */
	bdev->dev_mapping = mapping;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
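
/*
 * Typical bring-up order for the functions above, sketched for an imaginary
 * driver (my_dev, my_driver_funcs, MY_VRAM_PAGES and the file page offset
 * are driver-side, hypothetical names). The system memory type is set up by
 * ttm_bo_device_init() itself; additional types are registered afterwards:
 *
 *	ret = ttm_bo_device_init(&my_dev->bdev, glob, &my_driver_funcs,
 *				 mapping, DRM_FILE_PAGE_OFFSET, true);
 *	if (ret == 0)
 *		ret = ttm_bo_init_mm(&my_dev->bdev, TTM_PL_VRAM,
 *				     MY_VRAM_PAGES);
 *
 * Teardown is the reverse: ttm_bo_clean_mm() per initialized type, then
 * ttm_bo_device_release().
 */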

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

#ifdef __DragonFly__

/*
 * XXX DRAGONFLY - device_mapping not yet implemented so
 * file_mapping is basically always NULL. We have to properly
 * release the mmap, etc.
 */
void ttm_bo_release_mmap(struct ttm_buffer_object *bo);

/**
 * drm_vma_node_unmap() - Unmap offset node
 * @node: Offset node
 * @file_mapping: Address space to unmap @node from
 *
 * Unmap all userspace mappings for a given offset node. The mappings must be
 * associated with the @file_mapping address-space. If no offset exists or
 * the address-space is invalid, nothing is done.
 *
 * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
 * is not called on this node concurrently.
 */
static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
				      struct address_space *file_mapping)
{
	struct ttm_buffer_object *bo = container_of(node, struct ttm_buffer_object, vma_node);

	if (drm_vma_node_has_offset(node))
		unmap_mapping_range(file_mapping,
				    drm_vma_node_offset_addr(node),
				    drm_vma_node_size(node) << PAGE_SHIFT, 1);
	ttm_bo_release_mmap(bo);
}
#endif

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool interruptible, bool no_wait)
{
	struct reservation_object_list *fobj;
	struct reservation_object *resv;
	struct fence *excl;
	long timeout = 15 * HZ;
	int i;

	resv = bo->resv;
	fobj = reservation_object_get_list(resv);
	excl = reservation_object_get_excl(resv);
	if (excl) {
		if (!fence_is_signaled(excl)) {
			if (no_wait)
				return -EBUSY;

			timeout = fence_wait_timeout(excl,
						     interruptible, timeout);
		}
	}

	for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
		struct fence *fence;
		fence = rcu_dereference_protected(fobj->shared[i],
						  reservation_object_held(resv));

		if (!fence_is_signaled(fence)) {
			if (no_wait)
				return -EBUSY;

			timeout = fence_wait_timeout(fence,
						     interruptible, timeout);
		}
	}

	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	reservation_object_add_excl_fence(resv, NULL);
	clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
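
/*
 * Usage sketch for the synccpu helpers below (illustrative only): a driver
 * gating CPU writes on GPU idleness brackets the access with a grab/release
 * pair. The grab reserves the BO, waits for its fences and bumps
 * cpu_writers; the release only drops the counter.
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, no_wait);
 *	if (ret == 0) {
 *		... CPU writes to the buffer ...
 *		ttm_bo_synccpu_write_release(bo);
 *	}
 */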

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */
	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
	if (unlikely(ret != 0))
		return ret;
	ret = ttm_bo_wait(bo, true, no_wait);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = __ttm_bo_reserve(bo, false, true, NULL);
		if (!ret)
			break;
	}

	if (ret) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */
	ret = ttm_bo_wait(bo, false, false);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */
	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */
	__ttm_bo_unreserve(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);

/**
 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
 * unreserved
 *
 * @bo: Pointer to buffer
 */
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * In the absence of a wait_unlocked API,
	 * Use the bo::wu_mutex to avoid triggering livelocks due to
	 * concurrent use of this function. Note that this use of
	 * bo::wu_mutex can go away if we change locking order to
	 * mmap_sem -> bo::reserve.
	 */
	ret = mutex_lock_interruptible(&bo->wu_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;
	if (!ww_mutex_is_locked(&bo->resv->lock))
		goto out_unlock;
	ret = __ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0))
		goto out_unlock;
	__ttm_bo_unreserve(bo);

out_unlock:
	mutex_unlock(&bo->wu_mutex);
	return ret;
}