/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>

#define TTM_ASSERT_LOCKED(param)	do { } while (0)
#define TTM_DEBUG(fmt, arg...)		do { } while (0)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (place->flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	pr_err(" has_type: %d\n", man->has_type);
	pr_err(" use_type: %d\n", man->use_type);
	pr_err(" flags: 0x%08X\n", man->flags);
	pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset);
	pr_err(" size: %ju\n", (uintmax_t)man->size);
	pr_err(" available_caching: 0x%08X\n", man->available_caching);
	pr_err(" default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
					      &mem_type);
		if (ret)
			return;
		pr_err(" placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i].flags, mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return ksnprintf(buffer, PAGE_SIZE, "%lu\n",
			 (unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
		container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
				  bool interruptible)
{
	if (interruptible) {
		return wait_event_interruptible(bo->event_queue,
						!ttm_bo_is_reserved(bo));
	} else {
		wait_event(bo->event_queue,
			   !ttm_bo_is_reserved(bo));
		return 0;
	}
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!ttm_bo_is_reserved(bo));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
			 bool interruptible,
			 bool no_wait, bool use_ticket,
			 struct ww_acquire_ctx *ticket)
{
	int ret;

	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
		/**
		 * Deadlock avoidance for multi-bo reserving.
		 */
		if (use_ticket && bo->seq_valid) {
			/**
			 * We've already reserved this one.
			 */
			if (unlikely(ticket->stamp == bo->val_seq))
				return -EDEADLK;
			/**
			 * Already reserved by a thread that will not back
			 * off for us. We need to back off.
			 */
			if (unlikely(ticket->stamp - bo->val_seq <= LONG_MAX))
				return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		ret = ttm_bo_wait_unreserved(bo, interruptible);

		if (unlikely(ret))
			return ret;
	}

	if (use_ticket) {
		bool wake_up = false;

		/**
		 * Wake up waiters that may need to recheck for deadlock,
		 * if we decreased the sequence number.
		 */
		if (unlikely((bo->val_seq - ticket->stamp <= LONG_MAX)
			     || !bo->seq_valid))
			wake_up = true;

		/*
		 * In the worst case with memory ordering these values can be
		 * seen in the wrong order. However since we call wake_up_all
		 * in that case, this will hopefully not pose a problem,
		 * and the worst case would only cause someone to accidentally
		 * hit -EAGAIN in ttm_bo_reserve when they see old value of
		 * val_seq. However this would only happen if seq_valid was
		 * written before val_seq was, and just means some slightly
		 * increased cpu usage.
		 */
		bo->val_seq = ticket->stamp;
		bo->seq_valid = true;
		if (wake_up)
			wake_up_all(&bo->event_queue);
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_reserve);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	kref_sub(&bo->list_kref, count,
		 (never_free) ?
		 ttm_bo_ref_bug : ttm_bo_release_list);
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_ticket,
		   struct ww_acquire_ctx *ticket)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
				   ticket);
	if (likely(ret == 0)) {
		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		put_count = ttm_bo_del_from_lru(bo);
		lockmgr(&glob->lru_lock, LK_RELEASE);
		ttm_bo_list_ref_sub(bo, put_count, true);
	}

	return ret;
}

int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
				  bool interruptible,
				  struct ww_acquire_ctx *ticket)
{
	bool wake_up = false;
	int ret;

	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
		WARN_ON(bo->seq_valid && ticket->stamp == bo->val_seq);

		ret = ttm_bo_wait_unreserved(bo, interruptible);

		if (unlikely(ret))
			return ret;
	}

	if (bo->val_seq - ticket->stamp < LONG_MAX || !bo->seq_valid)
		wake_up = true;

	/**
	 * Wake up waiters that may need to recheck for deadlock,
	 * if we decreased the sequence number.
	 */
	bo->val_seq = ticket->stamp;
	bo->seq_valid = true;
	if (wake_up)
		wake_up_all(&bo->event_queue);

	return 0;
}

int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
			    bool interruptible, struct ww_acquire_ctx *ticket)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count, ret;

	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, ticket);
	if (likely(!ret)) {
		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		put_count = ttm_bo_del_from_lru(bo);
		lockmgr(&glob->lru_lock, LK_RELEASE);
		ttm_bo_list_ref_sub(bo, put_count, true);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve_slowpath);

/*
 * Must interlock with event_queue to avoid race against
 * wait_event_common() which can cause wait_event_common()
 * to become stuck.
 */
static void
ttm_bo_unreserve_core(struct ttm_buffer_object *bo)
{
	lockmgr(&bo->event_queue.lock, LK_EXCLUSIVE);
	atomic_set(&bo->reserved, 0);
	lockmgr(&bo->event_queue.lock, LK_RELEASE);
	wake_up_all(&bo->event_queue);
}

void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
{
	ttm_bo_add_to_lru(bo);
	ttm_bo_unreserve_core(bo);
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_bo_unreserve_ticket_locked(bo, NULL);
	lockmgr(&glob->lru_lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_bo_unreserve);

void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
{
	struct ttm_bo_global *glob = bo->glob;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_bo_unreserve_ticket_locked(bo, ticket);
	lockmgr(&glob->lru_lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_bo_unreserve_ticket);

/*
 * Call bo->mutex locked.
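 *
 * Allocates the backing ttm_tt for this buffer via the driver's
 * ttm_tt_create() hook, based on bo->type and the page flags
 * (DMA32 / zero-alloc / SG) computed below.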
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
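	 * A ttm is only needed when the new memory type is page-backed
	 * (not TTM_MEMTYPE_FLAG_FIXED), and it is only bound when the
	 * target is not system memory.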
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			pr_err("Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Call bo::reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);
	ttm_bo_unreserve_core(bo);

	/*
	 * Since the final reference to this bo may not be dropped by
	 * the current task we have to put a memory barrier here to make
	 * sure the changes done in this function are always visible.
	 *
	 * This function only needs protection against the final kref_put.
	 */
	cpu_mfence();
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	void *sync_obj = NULL;
	int put_count;
	int ret;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	(void) ttm_bo_wait(bo, false, false, true);
	if (!ret && !bo->sync_obj) {
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		put_count = ttm_bo_del_from_lru(bo);

		lockmgr(&glob->lru_lock, LK_RELEASE);
		ttm_bo_cleanup_memtype_use(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);

		return;
	}
	if (bo->sync_obj)
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	lockmgr(&bdev->fence_lock, LK_RELEASE);

	if (!ret) {

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		ttm_bo_unreserve_core(bo);
	}

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	if (sync_obj) {
		driver->sync_obj_flush(sync_obj);
		driver->sync_obj_unref(&sync_obj);
	}
	schedule_delayed_work(&bdev->wq,
			      ((hz / 100) < 1) ? 1 : hz / 100);
}

/**
 * function ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible: Any sleeps should occur interruptibly.
 * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, false, true);

	if (ret && !no_wait_gpu) {
		void *sync_obj;

		/*
		 * Take a reference to the fence and unreserve,
		 * at this point the buffer should be dead, so
		 * no new sync objects can be attached.
		 */
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		lockmgr(&bdev->fence_lock, LK_RELEASE);

		ttm_bo_unreserve_core(bo);
		lockmgr(&glob->lru_lock, LK_RELEASE);

		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
		driver->sync_obj_unref(&sync_obj);
		if (ret)
			return ret;

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		ret = ttm_bo_wait(bo, false, false, true);
		WARN_ON(ret);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (ret)
			return ret;

		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			lockmgr(&glob->lru_lock, LK_RELEASE);
			return 0;
		}
	} else
		lockmgr(&bdev->fence_lock, LK_RELEASE);

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		ttm_bo_unreserve_core(bo);
		lockmgr(&glob->lru_lock, LK_RELEASE);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
				 struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
						  struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
		if (remove_all && ret) {
			lockmgr(&glob->lru_lock, LK_RELEASE);
			ret = ttm_bo_reserve_nolru(entry, false, false,
						   false, 0);
			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			lockmgr(&glob->lru_lock, LK_RELEASE);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	lockmgr(&glob->lru_lock, LK_RELEASE);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((hz / 100) < 1) ? 1 : hz / 100);
	}
}

/*
 * NOTE: bdev->vm_lock already held on call, this function releases it.
 */
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
	int release_active;

	if (atomic_read(&bo->kref.refcount) > 0) {
		lockmgr(&bdev->vm_lock, LK_RELEASE);
		return;
	}
	if (likely(bo->vm_node != NULL)) {
		RB_REMOVE(ttm_bo_device_buffer_objects,
			  &bdev->addr_space_rb, bo);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}

	/*
	 * Should we clean up our implied list_kref?
	 * Because ttm_bo_release() can be called reentrantly due to races
	 * (this may not be true any more with the lock management changes
	 * in the deref), it is possible to get here twice, but there's only
	 * one list_kref ref to drop and in the other path 'bo' can be
	 * kfree()d by another thread the instant we release our lock.
	 */
	release_active = test_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);
	if (release_active) {
		clear_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);
		lockmgr(&bdev->vm_lock, LK_RELEASE);
		ttm_mem_io_lock(man, false);
		ttm_mem_io_free_vm(bo);
		ttm_mem_io_unlock(man);
		ttm_bo_cleanup_refs_or_queue(bo);
		kref_put(&bo->list_kref, ttm_bo_release_list);
	} else {
		lockmgr(&bdev->vm_lock, LK_RELEASE);
	}
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;
	struct ttm_bo_device *bdev = bo->bdev;

	*p_bo = NULL;
	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	if (kref_put(&bo->kref, ttm_bo_release) == 0)
		lockmgr(&bdev->vm_lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((hz / 100) < 1) ? 1 : hz / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	lockmgr(&bdev->fence_lock, LK_RELEASE);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	BUG_ON(!ttm_bo_is_reserved(bo));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
			       no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       uint32_t mem_type,
			       bool interruptible,
			       bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
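		/* No buffer on this LRU list could be reserved for eviction. */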
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  uint32_t mem_type,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem,
				  bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
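 *
 * Returns 0 with @mem filled in on success, -EINVAL if no placement names
 * a usable memory type, -ERESTARTSYS if an eviction wait was interrupted,
 * or -ENOMEM if eviction could not make enough room.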
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible,
		     bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
					       &cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = (*man->func->get_node)(man, bo, place, mem);
			if (unlikely(ret))
				return ret;
		}
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
					     interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      bool interruptible,
			      bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(!ttm_bo_is_reserved(bo));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static bool ttm_bo_mem_compat(struct ttm_placement *placement,
			      struct ttm_mem_reg *mem,
			      uint32_t *new_flags)
{
	int i;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *heap = &placement->placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	for (i = 0; i < placement->num_busy_placement; i++) {
		const struct ttm_place *heap = &placement->busy_placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	return false;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    bool interruptible,
		    bool no_wait_gpu)
{
	int ret;
	uint32_t new_flags;

	BUG_ON(!ttm_bo_is_reserved(bo));
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
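	 * A buffer left in system memory without a ttm gets one allocated
	 * (zero-initialized) here.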
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct vm_object *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	/*bzero(&bo->vm_rb, sizeof(bo->vm_rb));*/
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	atomic_inc(&bo->glob->bo_count);

	/*
	 * Mirror ref from kref_init() for list_kref.
	 */
	set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
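	 * ttm_bo_type_sg buffers are mapped the same way; see the check
	 * below.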
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_bo_validate(bo, placement, interruptible, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
		  unsigned long size,
		  enum ttm_bo_type type,
		  struct ttm_placement *placement,
		  uint32_t page_alignment,
		  bool interruptible,
		  struct vm_object *persistent_swap_storage,
		  struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	*p_bo = NULL;
	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
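	 * Each pass drops lru_lock before calling ttm_mem_evict_first() and
	 * re-takes it afterwards, so always restart from the list head.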
	 */

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	while (!list_empty(&man->lru)) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				pr_err("Cleanup eviction failed\n");
			}
		}
		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	}
	lockmgr(&glob->lru_lock, LK_RELEASE);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	lockinit(&man->io_reserve_mutex, "ttmman", 0, LK_CANRECURSE);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	lockinit(&glob->device_list_mutex, "ttmdlm", 0, LK_CANRECURSE);
	lockinit(&glob->lru_lock, "ttmlru", 0, LK_CANRECURSE);
	glob->mem_glob = bo_ref->mem_glob;
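
	/*
	 * Single uncacheable page used as the dummy read page handed to
	 * the driver's ttm_tt_create() (see ttm_bo_add_ttm() above).
	 */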
	glob->dummy_read_page = (struct page *)vm_page_alloc_contig(
	    0, VM_MAX_ADDRESS, PAGE_SIZE, 0, 1*PAGE_SIZE, VM_MEMATTR_UNCACHEABLE);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);


int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	lockmgr(&glob->lru_lock, LK_RELEASE);

	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	drm_mm_takedown(&bdev->addr_space_mm);
	lockmgr(&bdev->vm_lock, LK_RELEASE);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	lockinit(&bdev->vm_lock, "ttmvml", 0, LK_CANRECURSE);
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	RB_INIT(&bdev->addr_space_rb);
	drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	lockinit(&bdev->fence_lock, "ttmfence", 0, LK_CANRECURSE);
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
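 *
 * These helpers manage the per-device mmap address space (vm_lock,
 * addr_space_rb and addr_space_mm) that buffer objects are inserted into.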
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{

	ttm_bo_release_mmap(bo);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}


EXPORT_SYMBOL(ttm_bo_unmap_virtual);

static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	/* The caller acquired bdev->vm_lock. */
	RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		lockmgr(&bdev->vm_lock, LK_RELEASE);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	lockmgr(&bdev->vm_lock, LK_RELEASE);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	lockmgr(&bdev->vm_lock, LK_RELEASE);
	return ret;
}

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	struct ttm_bo_device *bdev = bo->bdev;
	void *sync_obj;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			lockmgr(&bdev->fence_lock, LK_RELEASE);
			driver->sync_obj_unref(&tmp_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		ret = driver->sync_obj_wait(sync_obj,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
			return ret;
		}
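		/*
		 * Retake the fence lock and check that the fence we waited
		 * on is still the buffer's current sync_obj.
		 */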
		lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		if (likely(bo->sync_obj == sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			lockmgr(&bdev->fence_lock, LK_RELEASE);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		} else {
			lockmgr(&bdev->fence_lock, LK_RELEASE);
			driver->sync_obj_unref(&sync_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	lockmgr(&bo->bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, false, false);
	lockmgr(&bo->bdev->fence_lock, LK_RELEASE);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
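	 * The driver is notified via swap_notify() first, then the ttm's
	 * pages are handed to ttm_tt_swapout() below.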
1948 */ 1949 1950 if (bo->bdev->driver->swap_notify) 1951 bo->bdev->driver->swap_notify(bo); 1952 1953 ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage); 1954 out: 1955 1956 /** 1957 * 1958 * Unreserve without putting on LRU to avoid swapping out an 1959 * already swapped buffer. 1960 */ 1961 1962 ttm_bo_unreserve_core(bo); 1963 kref_put(&bo->list_kref, ttm_bo_release_list); 1964 return ret; 1965 } 1966 1967 void ttm_bo_swapout_all(struct ttm_bo_device *bdev) 1968 { 1969 while (ttm_bo_swapout(&bdev->glob->shrink) == 0) 1970 ; 1971 } 1972 EXPORT_SYMBOL(ttm_bo_swapout_all); 1973