/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>

#define TTM_ASSERT_LOCKED(param)	do { } while (0)
#define TTM_DEBUG(fmt, arg...)		do { } while (0)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (place->flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
	pr_err("    size: %ju\n", (uintmax_t)man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
					      &mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i].flags, mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return ksnprintf(buffer, PAGE_SIZE, "%lu\n",
			 (unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->resv == &bo->ttm_resv)
		reservation_object_fini(&bo->ttm_resv);

	if (bo->destroy)
		bo->destroy(bo);
	else
		kfree(bo);
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
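/*
 * Note that a bo carries two reference counts: bo->kref tracks the
 * object itself and is dropped via ttm_bo_unref()/ttm_bo_release(),
 * while bo->list_kref counts LRU and delayed-destroy list membership.
 * Each list insertion in ttm_bo_add_to_lru() below takes one list_kref
 * reference, and removals return them through ttm_bo_list_ref_sub().
 * Both counts must reach zero before ttm_bo_release_list() above may
 * free the object.
 */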
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!ttm_bo_is_reserved(bo));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
			 bool interruptible,
			 bool no_wait, bool use_ticket,
			 struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;

		/* not valid any more, fix your locking! */
		if (WARN_ON(ticket))
			return -EBUSY;

		success = ww_mutex_trylock(&bo->resv->lock);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
	else
		ret = ww_mutex_lock(&bo->resv->lock, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	kref_sub(&bo->list_kref, count,
		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_ticket,
		   struct ww_acquire_ctx *ticket)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
				   ticket);
	if (likely(ret == 0)) {
		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		put_count = ttm_bo_del_from_lru(bo);
		lockmgr(&glob->lru_lock, LK_RELEASE);
		ttm_bo_list_ref_sub(bo, put_count, true);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve);

int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
			    bool interruptible, struct ww_acquire_ctx *ticket)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret = 0;

	if (interruptible)
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       ticket);
	else
		ww_mutex_lock_slow(&bo->resv->lock, ticket);

	if (likely(ret == 0)) {
		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		put_count = ttm_bo_del_from_lru(bo);
		lockmgr(&glob->lru_lock, LK_RELEASE);
		ttm_bo_list_ref_sub(bo, put_count, true);
	} else if (ret == -EINTR)
		ret = -ERESTARTSYS;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve_slowpath);

void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo,
				    struct ww_acquire_ctx *ticket)
{
	ttm_bo_add_to_lru(bo);
	ww_mutex_unlock(&bo->resv->lock);
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_bo_unreserve_ticket_locked(bo, NULL);
	lockmgr(&glob->lru_lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
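/*
 * Typical single-buffer usage of the reservation API above (a minimal
 * sketch for illustration; the driver-side helper shown is hypothetical
 * and not part of this file):
 *
 *	static int driver_touch_bo(struct ttm_buffer_object *bo)
 *	{
 *		int ret;
 *
 *		ret = ttm_bo_reserve(bo, true, false, false, NULL);
 *		if (ret)
 *			return ret;
 *		// ... validate, map or otherwise operate on bo ...
 *		ttm_bo_unreserve(bo);
 *		return 0;
 *	}
 *
 * Multi-buffer callers pass a struct ww_acquire_ctx ticket instead of
 * NULL and fall back to ttm_bo_reserve_slowpath() when a lock attempt
 * returns -EDEADLK.
 */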
void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
			     struct ww_acquire_ctx *ticket)
{
	struct ttm_bo_global *glob = bo->glob;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_bo_unreserve_ticket_locked(bo, ticket);
	lockmgr(&glob->lru_lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_bo_unreserve_ticket);

/*
 * Call with bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through */
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
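/*
 * Note on the switch above: the ttm_bo_type_device case deliberately
 * falls through to ttm_bo_type_kernel after optionally requesting
 * zeroed pages. User-mappable (device) buffers must be zero-filled so
 * stale kernel or GPU data never leaks to userspace; kernel-internal
 * buffers may skip the clearing for speed.
 */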
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			pr_err("Cannot flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
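/*
 * How ttm_bo_handle_move_mem() picks a copy path, in summary:
 *
 *	old type	new type	path
 *	--------	--------	-------------------------------
 *	non-FIXED	non-FIXED	ttm_bo_move_ttm() (rebind only)
 *	any		any		driver->move() when provided
 *	otherwise			ttm_bo_move_memcpy() fallback
 *
 * A move out of TTM_PL_SYSTEM into another non-FIXED type short-circuits
 * through the "moved" label, since binding the ttm already placed the
 * pages.
 */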
/*
 * Call with the bo reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the reservation lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	ww_mutex_unlock(&bo->resv->lock);
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	void *sync_obj = NULL;
	int put_count;
	int ret;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ret = ttm_bo_reserve_nolru(bo, false, true, false, NULL);

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	(void) ttm_bo_wait(bo, false, false, true);
	if (!ret && !bo->sync_obj) {
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		put_count = ttm_bo_del_from_lru(bo);

		lockmgr(&glob->lru_lock, LK_RELEASE);
		ttm_bo_cleanup_memtype_use(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);

		return;
	}
	if (bo->sync_obj)
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	lockmgr(&bdev->fence_lock, LK_RELEASE);

	if (!ret) {
		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		ww_mutex_unlock(&bo->resv->lock);
	}

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	if (sync_obj) {
		driver->sync_obj_flush(sync_obj);
		driver->sync_obj_unref(&sync_obj);
	}
	schedule_delayed_work(&bdev->wq,
			      ((hz / 100) < 1) ? 1 : hz / 100);
}
/**
 * ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held; this function
 * will drop both before returning.
 *
 * @interruptible: Any sleeps should occur interruptibly.
 * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, false, true);

	if (ret && !no_wait_gpu) {
		void *sync_obj;

		/*
		 * Take a reference to the fence and unreserve,
		 * at this point the buffer should be dead, so
		 * no new sync objects can be attached.
		 */
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		lockmgr(&bdev->fence_lock, LK_RELEASE);

		ww_mutex_unlock(&bo->resv->lock);
		lockmgr(&glob->lru_lock, LK_RELEASE);

		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
		driver->sync_obj_unref(&sync_obj);
		if (ret)
			return ret;

		/*
		 * Remove sync_obj with ttm_bo_wait; the wait should be
		 * finished, and no new wait object should have been added.
		 */
		lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		ret = ttm_bo_wait(bo, false, false, true);
		WARN_ON(ret);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (ret)
			return ret;

		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		ret = ttm_bo_reserve_nolru(bo, false, true, false, NULL);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			lockmgr(&glob->lru_lock, LK_RELEASE);
			return 0;
		}
	} else
		lockmgr(&bdev->fence_lock, LK_RELEASE);

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		ww_mutex_unlock(&bo->resv->lock);
		lockmgr(&glob->lru_lock, LK_RELEASE);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = ttm_bo_reserve_nolru(entry, false, true, false, NULL);
		if (remove_all && ret) {
			lockmgr(&glob->lru_lock, LK_RELEASE);
			ret = ttm_bo_reserve_nolru(entry, false, false,
						   false, NULL);
			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			lockmgr(&glob->lru_lock, LK_RELEASE);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	lockmgr(&glob->lru_lock, LK_RELEASE);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((hz / 100) < 1) ? 1 : hz / 100);
	}
}
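/*
 * The requeue interval above aims for roughly 10 ms between delayed
 * destroy passes: on a kernel configured with hz = 100 ticks/second,
 * for example, hz / 100 = 1 tick = 10 ms. The ((hz / 100) < 1) guard
 * keeps the delay at a minimum of one tick if hz were configured
 * below 100.
 */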
/*
 * NOTE: bdev->vm_lock is already held on call; this function releases it.
 */
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
	int release_active;

	if (atomic_read(&bo->kref.refcount) > 0) {
		lockmgr(&bdev->vm_lock, LK_RELEASE);
		return;
	}
	if (likely(bo->vm_node != NULL)) {
		RB_REMOVE(ttm_bo_device_buffer_objects,
			  &bdev->addr_space_rb, bo);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}

	/*
	 * Should we clean up our implied list_kref? Because ttm_bo_release()
	 * can be called reentrantly due to races (this may not be true any
	 * more with the lock management changes in the deref), it is possible
	 * to get here twice, but there's only one list_kref ref to drop and
	 * in the other path 'bo' can be kfree()d by another thread the
	 * instant we release our lock.
	 */
	release_active = test_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);
	if (release_active) {
		clear_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);
		lockmgr(&bdev->vm_lock, LK_RELEASE);
		ttm_mem_io_lock(man, false);
		ttm_mem_io_free_vm(bo);
		ttm_mem_io_unlock(man);
		ttm_bo_cleanup_refs_or_queue(bo);
		kref_put(&bo->list_kref, ttm_bo_release_list);
	} else {
		lockmgr(&bdev->vm_lock, LK_RELEASE);
	}
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;
	struct ttm_bo_device *bdev = bo->bdev;

	*p_bo = NULL;
	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	if (kref_put(&bo->kref, ttm_bo_release) == 0)
		lockmgr(&bdev->vm_lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
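/*
 * Drivers typically bracket GPU reset or suspend paths with
 * ttm_bo_lock_delayed_workqueue() / ttm_bo_unlock_delayed_workqueue()
 * so the delayed-destroy worker cannot run concurrently (a minimal
 * sketch; the surrounding reset context is hypothetical):
 *
 *	int resched = ttm_bo_lock_delayed_workqueue(bdev);
 *	// ... hardware is quiesced here ...
 *	ttm_bo_unlock_delayed_workqueue(bdev, resched);
 */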
void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((hz / 100) < 1) ? 1 : hz / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	lockmgr(&bdev->fence_lock, LK_RELEASE);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	BUG_ON(!ttm_bo_is_reserved(bo));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
			       no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       uint32_t mem_type,
			       bool interruptible,
			       bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, NULL);
		if (!ret)
			break;
	}

	if (ret) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);
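/*
 * Note the victim selection policy in ttm_mem_evict_first() above: the
 * LRU list is scanned from the head (least recently used) and the
 * first buffer whose reservation can be taken without blocking is
 * evicted. Buffers already reserved by other threads are skipped
 * rather than waited on, so eviction never deadlocks against
 * concurrent validation.
 */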
/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  uint32_t mem_type,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem,
				  bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/*
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}
/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible,
		     bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
					       &cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Carry the access and other non-mapping-related flag bits
		 * from the memory placement flags over to the current flags.
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = (*man->func->get_node)(man, bo, place, mem);
			if (unlikely(ret))
				return ret;
		}
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Carry the access and other non-mapping-related flag bits
		 * from the memory placement flags over to the current flags.
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
					     interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
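/*
 * The two placement arrays give callers a cheap/expensive split. A
 * driver wanting VRAM but willing to fall back to GTT under pressure
 * might fill a placement roughly like this (an illustrative sketch
 * only; the flag combinations are driver policy, not prescribed here):
 *
 *	static const struct ttm_place vram = {
 *		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC
 *	};
 *	static const struct ttm_place gtt = {
 *		.flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED
 *	};
 *	struct ttm_placement placement = {
 *		.num_placement = 1, .placement = &vram,
 *		.num_busy_placement = 1, .busy_placement = &gtt,
 *	};
 *
 * ttm_bo_mem_space() tries .placement without evicting anything; only
 * if that fails does it walk .busy_placement and evict via
 * ttm_bo_mem_force_space().
 */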
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      bool interruptible,
			      bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(!ttm_bo_is_reserved(bo));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static bool ttm_bo_mem_compat(struct ttm_placement *placement,
			      struct ttm_mem_reg *mem,
			      uint32_t *new_flags)
{
	int i;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *heap = &placement->placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	for (i = 0; i < placement->num_busy_placement; i++) {
		const struct ttm_place *heap = &placement->busy_placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	return false;
}
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    bool interruptible,
		    bool no_wait_gpu)
{
	int ret;
	uint32_t new_flags;

	BUG_ON(!ttm_bo_is_reserved(bo));
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Carry the access and other non-mapping-related flag bits
		 * from the compatible memory placement over to the active
		 * flags.
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
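/*
 * The canonical caller sequence for ttm_bo_validate() (a sketch for
 * illustration; "placement" would be built as in the example after
 * ttm_bo_mem_space() above):
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, NULL);
 *	if (ret == 0) {
 *		ret = ttm_bo_validate(bo, &placement, true, false);
 *		ttm_bo_unreserve(bo);
 *	}
 *
 * On success the buffer is backed by memory satisfying one of the
 * requested placements, and bo->mem.placement reflects where it landed.
 */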
int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct vm_object *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	bo->resv = &bo->ttm_resv;
	reservation_object_init(bo->resv);
	atomic_inc(&bo->glob->bo_count);

	/*
	 * Mirror ref from kref_init() for list_kref.
	 */
	set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = ttm_bo_setup_vm(bo);

	locked = ww_mutex_trylock(&bo->resv->lock);
	WARN_ON(!locked);

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, interruptible, false);

	ttm_bo_unreserve(bo);

	if (unlikely(ret))
		ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);
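/*
 * acc_size is the amount charged against the global memory accounting
 * for the object's housekeeping (the struct itself plus the page and
 * DMA address arrays), computed before allocation so a failed charge
 * can be rejected early. For a plain bo it is roughly
 *
 *	ttm_round_pot(struct_size)
 *	+ PAGE_ALIGN(num_pages * sizeof(void *))
 *	+ ttm_round_pot(sizeof(struct ttm_tt))
 *
 * as implemented by ttm_bo_acc_size() below.
 */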
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
		  unsigned long size,
		  enum ttm_bo_type type,
		  struct ttm_placement *placement,
		  uint32_t page_alignment,
		  bool interruptible,
		  struct vm_object *persistent_swap_storage,
		  struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	*p_bo = NULL;
	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	while (!list_empty(&man->lru)) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				pr_err("Cleanup eviction failed\n");
			}
		}
		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	}
	lockmgr(&glob->lru_lock, LK_RELEASE);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	lockinit(&man->io_reserve_mutex, "ttmman", 0, LK_CANRECURSE);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);
int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	lockinit(&glob->device_list_mutex, "ttmdlm", 0, LK_CANRECURSE);
	lockinit(&glob->lru_lock, "ttmlru", 0, LK_CANRECURSE);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	lockmgr(&glob->lru_lock, LK_RELEASE);

	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	drm_mm_takedown(&bdev->addr_space_mm);
	lockmgr(&bdev->vm_lock, LK_RELEASE);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	lockinit(&bdev->vm_lock, "ttmvml", 0, LK_CANRECURSE);
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	RB_INIT(&bdev->addr_space_rb);
	drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	lockinit(&bdev->fence_lock, "ttmfence", 0, LK_CANRECURSE);
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
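/*
 * A driver brings its bo device up roughly like this at load time (an
 * illustrative sketch; "my_driver", dev_priv and the vram_size value
 * are placeholders, not values this file prescribes):
 *
 *	ret = ttm_bo_device_init(&dev_priv->bdev, glob, &my_driver,
 *				 DRM_FILE_PAGE_OFFSET,
 *				 dev_priv->need_dma32);
 *	if (ret == 0)
 *		ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
 *				     vram_size >> PAGE_SHIFT);
 *
 * TTM_PL_SYSTEM is set up by ttm_bo_device_init() itself; every other
 * memory type is registered by the driver through ttm_bo_init_mm().
 */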
/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	ttm_bo_release_mmap(bo);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	/* The caller acquired bdev->vm_lock. */
	RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		lockmgr(&bdev->vm_lock, LK_RELEASE);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	lockmgr(&bdev->vm_lock, LK_RELEASE);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	lockmgr(&bdev->vm_lock, LK_RELEASE);
	return ret;
}
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	struct ttm_bo_device *bdev = bo->bdev;
	void *sync_obj;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			lockmgr(&bdev->fence_lock, LK_RELEASE);
			driver->sync_obj_unref(&tmp_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		ret = driver->sync_obj_wait(sync_obj,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
			return ret;
		}
		lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		if (likely(bo->sync_obj == sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			lockmgr(&bdev->fence_lock, LK_RELEASE);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		} else {
			lockmgr(&bdev->fence_lock, LK_RELEASE);
			driver->sync_obj_unref(&sync_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
	if (unlikely(ret != 0))
		return ret;
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
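/*
 * CPU access from an ioctl path pairs the two calls above around the
 * actual access (an illustrative sketch):
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (ret == 0) {
 *		// ... CPU writes to the buffer contents ...
 *		ttm_bo_synccpu_write_release(bo);
 *	}
 *
 * The grab waits for the GPU to finish with the buffer before raising
 * cpu_writers, and ttm_bo_release_list() asserts the count has dropped
 * back to zero before the object may be freed.
 */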
/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, NULL);
		if (!ret)
			break;
	}

	if (ret) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/*
	 * Wait for GPU, then move to system cached.
	 */

	lockmgr(&bo->bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, false, false);
	lockmgr(&bo->bdev->fence_lock, LK_RELEASE);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/*
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/*
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	ww_mutex_unlock(&bo->resv->lock);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);