1 /************************************************************************** 2 * 3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 **************************************************************************/ 27 /* 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 29 */ 30 31 #define pr_fmt(fmt) "[TTM] " fmt 32 33 #include <drm/ttm/ttm_module.h> 34 #include <drm/ttm/ttm_bo_driver.h> 35 #include <drm/ttm/ttm_placement.h> 36 #include <linux/atomic.h> 37 #include <linux/errno.h> 38 #include <linux/export.h> 39 #include <linux/wait.h> 40 41 #define TTM_ASSERT_LOCKED(param) 42 #define TTM_DEBUG(fmt, arg...) 
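/* TTM_ASSERT_LOCKED and TTM_DEBUG are defined empty above and compile out to no-ops. */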
43 #define TTM_BO_HASH_ORDER 13 44 45 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); 46 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); 47 static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob); 48 49 static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type) 50 { 51 int i; 52 53 for (i = 0; i <= TTM_PL_PRIV5; i++) 54 if (flags & (1 << i)) { 55 *mem_type = i; 56 return 0; 57 } 58 return -EINVAL; 59 } 60 61 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) 62 { 63 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 64 65 kprintf(" has_type: %d\n", man->has_type); 66 kprintf(" use_type: %d\n", man->use_type); 67 kprintf(" flags: 0x%08X\n", man->flags); 68 kprintf(" gpu_offset: 0x%08lX\n", man->gpu_offset); 69 kprintf(" size: %ju\n", (uintmax_t)man->size); 70 kprintf(" available_caching: 0x%08X\n", man->available_caching); 71 kprintf(" default_caching: 0x%08X\n", man->default_caching); 72 if (mem_type != TTM_PL_SYSTEM) 73 (*man->func->debug)(man, TTM_PFX); 74 } 75 76 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, 77 struct ttm_placement *placement) 78 { 79 int i, ret, mem_type; 80 81 kprintf("No space for %p (%lu pages, %luK, %luM)\n", 82 bo, bo->mem.num_pages, bo->mem.size >> 10, 83 bo->mem.size >> 20); 84 for (i = 0; i < placement->num_placement; i++) { 85 ret = ttm_mem_type_from_flags(placement->placement[i], 86 &mem_type); 87 if (ret) 88 return; 89 kprintf(" placement[%d]=0x%08X (%d)\n", 90 i, placement->placement[i], mem_type); 91 ttm_mem_type_debug(bo->bdev, mem_type); 92 } 93 } 94 95 #if 0 96 static ssize_t ttm_bo_global_show(struct ttm_bo_global *glob, 97 char *buffer) 98 { 99 100 return snprintf(buffer, PAGE_SIZE, "%lu\n", 101 (unsigned long) atomic_read(&glob->bo_count)); 102 } 103 #endif 104 105 static inline uint32_t ttm_bo_type_flags(unsigned type) 106 { 107 return 1 << (type); 108 } 109 110 static void ttm_bo_release_list(struct kref *list_kref) 111 { 112 struct ttm_buffer_object *bo = 113 container_of(list_kref, struct ttm_buffer_object, list_kref); 114 struct ttm_bo_device *bdev = bo->bdev; 115 size_t acc_size = bo->acc_size; 116 117 BUG_ON(atomic_read(&bo->list_kref.refcount)); 118 BUG_ON(atomic_read(&bo->kref.refcount)); 119 BUG_ON(atomic_read(&bo->cpu_writers)); 120 BUG_ON(bo->sync_obj != NULL); 121 BUG_ON(bo->mem.mm_node != NULL); 122 BUG_ON(!list_empty(&bo->lru)); 123 BUG_ON(!list_empty(&bo->ddestroy)); 124 125 if (bo->ttm) 126 ttm_tt_destroy(bo->ttm); 127 atomic_dec(&bo->glob->bo_count); 128 if (bo->destroy) 129 bo->destroy(bo); 130 else { 131 kfree(bo); 132 } 133 ttm_mem_global_free(bdev->glob->mem_glob, acc_size); 134 } 135 136 static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, 137 bool interruptible) 138 { 139 if (interruptible) { 140 return wait_event_interruptible(bo->event_queue, 141 !ttm_bo_is_reserved(bo)); 142 } else { 143 wait_event(bo->event_queue, !ttm_bo_is_reserved(bo)); 144 return 0; 145 } 146 } 147 148 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) 149 { 150 struct ttm_bo_device *bdev = bo->bdev; 151 struct ttm_mem_type_manager *man; 152 153 BUG_ON(!ttm_bo_is_reserved(bo)); 154 155 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { 156 157 BUG_ON(!list_empty(&bo->lru)); 158 159 man = &bdev->man[bo->mem.mem_type]; 160 list_add_tail(&bo->lru, &man->lru); 161 kref_get(&bo->list_kref); 162 163 if (bo->ttm != NULL) { 164 list_add_tail(&bo->swap, &bo->glob->swap_lru); 165 kref_get(&bo->list_kref); 166 } 167 } 168 } 169 170 int 
ttm_bo_del_from_lru(struct ttm_buffer_object *bo) 171 { 172 int put_count = 0; 173 174 if (!list_empty(&bo->swap)) { 175 list_del_init(&bo->swap); 176 ++put_count; 177 } 178 if (!list_empty(&bo->lru)) { 179 list_del_init(&bo->lru); 180 ++put_count; 181 } 182 183 /* 184 * TODO: Add a driver hook to delete from 185 * driver-specific LRU's here. 186 */ 187 188 return put_count; 189 } 190 191 int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo, 192 bool interruptible, 193 bool no_wait, bool use_sequence, uint32_t sequence) 194 { 195 int ret; 196 197 while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) { 198 /** 199 * Deadlock avoidance for multi-bo reserving. 200 */ 201 if (use_sequence && bo->seq_valid) { 202 /** 203 * We've already reserved this one. 204 */ 205 if (unlikely(sequence == bo->val_seq)) 206 return -EDEADLK; 207 /** 208 * Already reserved by a thread that will not back 209 * off for us. We need to back off. 210 */ 211 if (unlikely(sequence - bo->val_seq < (1U << 31))) 212 return -EAGAIN; 213 } 214 215 if (no_wait) 216 return -EBUSY; 217 218 ret = ttm_bo_wait_unreserved(bo, interruptible); 219 220 if (unlikely(ret)) 221 return ret; 222 } 223 224 if (use_sequence) { 225 bool wake_up = false; 226 /** 227 * Wake up waiters that may need to recheck for deadlock, 228 * if we decreased the sequence number. 229 */ 230 if (unlikely((bo->val_seq - sequence < (1U << 31)) 231 || !bo->seq_valid)) 232 wake_up = true; 233 234 /* 235 * In the worst case with memory ordering these values can be 236 * seen in the wrong order. However since we call wake_up_all 237 * in that case, this will hopefully not pose a problem, 238 * and the worst case would only cause someone to accidentally 239 * hit -EAGAIN in ttm_bo_reserve when they see old value of 240 * val_seq. However this would only happen if seq_valid was 241 * written before val_seq was, and just means some slightly 242 * increased cpu usage 243 */ 244 bo->val_seq = sequence; 245 bo->seq_valid = true; 246 if (wake_up) 247 wake_up_all(&bo->event_queue); 248 } else { 249 bo->seq_valid = false; 250 } 251 252 return 0; 253 } 254 EXPORT_SYMBOL(ttm_bo_reserve); 255 256 static void ttm_bo_ref_bug(struct kref *list_kref) 257 { 258 BUG(); 259 } 260 261 void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, 262 bool never_free) 263 { 264 kref_sub(&bo->list_kref, count, 265 (never_free) ? 
ttm_bo_ref_bug : ttm_bo_release_list); 266 } 267 268 int ttm_bo_reserve(struct ttm_buffer_object *bo, 269 bool interruptible, 270 bool no_wait, bool use_sequence, uint32_t sequence) 271 { 272 struct ttm_bo_global *glob = bo->glob; 273 int put_count = 0; 274 int ret; 275 276 ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence, 277 sequence); 278 if (likely(ret == 0)) { 279 lockmgr(&glob->lru_lock, LK_EXCLUSIVE); 280 put_count = ttm_bo_del_from_lru(bo); 281 lockmgr(&glob->lru_lock, LK_RELEASE); 282 ttm_bo_list_ref_sub(bo, put_count, true); 283 } 284 285 return ret; 286 } 287 288 int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo, 289 bool interruptible, uint32_t sequence) 290 { 291 bool wake_up = false; 292 int ret; 293 294 while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) { 295 WARN_ON(bo->seq_valid && sequence == bo->val_seq); 296 297 ret = ttm_bo_wait_unreserved(bo, interruptible); 298 299 if (unlikely(ret)) 300 return ret; 301 } 302 303 if ((bo->val_seq - sequence < (1U << 31)) || !bo->seq_valid) 304 wake_up = true; 305 306 /** 307 * Wake up waiters that may need to recheck for deadlock, 308 * if we decreased the sequence number. 309 */ 310 bo->val_seq = sequence; 311 bo->seq_valid = true; 312 if (wake_up) 313 wake_up_all(&bo->event_queue); 314 315 return 0; 316 } 317 318 int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, 319 bool interruptible, uint32_t sequence) 320 { 321 struct ttm_bo_global *glob = bo->glob; 322 int put_count, ret; 323 324 ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence); 325 if (likely(!ret)) { 326 lockmgr(&glob->lru_lock, LK_EXCLUSIVE); 327 put_count = ttm_bo_del_from_lru(bo); 328 lockmgr(&glob->lru_lock, LK_RELEASE); 329 ttm_bo_list_ref_sub(bo, put_count, true); 330 } 331 return ret; 332 } 333 EXPORT_SYMBOL(ttm_bo_reserve_slowpath); 334 335 /* 336 * Must interlock with event_queue to avoid race against 337 * wait_event_common() which can cause wait_event_common() 338 * to become stuck. 339 */ 340 static void 341 ttm_bo_unreserve_core(struct ttm_buffer_object *bo) 342 { 343 lockmgr(&bo->event_queue.lock, LK_EXCLUSIVE); 344 atomic_set(&bo->reserved, 0); 345 lockmgr(&bo->event_queue.lock, LK_RELEASE); 346 wake_up_all(&bo->event_queue); 347 } 348 349 void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo) 350 { 351 ttm_bo_add_to_lru(bo); 352 ttm_bo_unreserve_core(bo); 353 } 354 355 void ttm_bo_unreserve(struct ttm_buffer_object *bo) 356 { 357 struct ttm_bo_global *glob = bo->glob; 358 359 lockmgr(&glob->lru_lock, LK_EXCLUSIVE); 360 ttm_bo_unreserve_locked(bo); 361 lockmgr(&glob->lru_lock, LK_RELEASE); 362 } 363 EXPORT_SYMBOL(ttm_bo_unreserve); 364 365 /* 366 * Call bo->mutex locked. 
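 * ttm_bo_add_ttm() only allocates bo->ttm through the driver's ttm_tt_create
 * hook; binding to an actual memory region happens later, in
 * ttm_bo_handle_move_mem().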
367 */ 368 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) 369 { 370 struct ttm_bo_device *bdev = bo->bdev; 371 struct ttm_bo_global *glob = bo->glob; 372 int ret = 0; 373 uint32_t page_flags = 0; 374 375 TTM_ASSERT_LOCKED(&bo->mutex); 376 bo->ttm = NULL; 377 378 if (bdev->need_dma32) 379 page_flags |= TTM_PAGE_FLAG_DMA32; 380 381 switch (bo->type) { 382 case ttm_bo_type_device: 383 if (zero_alloc) 384 page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC; 385 case ttm_bo_type_kernel: 386 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, 387 page_flags, glob->dummy_read_page); 388 if (unlikely(bo->ttm == NULL)) 389 ret = -ENOMEM; 390 break; 391 case ttm_bo_type_sg: 392 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, 393 page_flags | TTM_PAGE_FLAG_SG, 394 glob->dummy_read_page); 395 if (unlikely(bo->ttm == NULL)) { 396 ret = -ENOMEM; 397 break; 398 } 399 bo->ttm->sg = bo->sg; 400 break; 401 default: 402 kprintf("[TTM] Illegal buffer object type\n"); 403 ret = -EINVAL; 404 break; 405 } 406 407 return ret; 408 } 409 410 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, 411 struct ttm_mem_reg *mem, 412 bool evict, bool interruptible, 413 bool no_wait_gpu) 414 { 415 struct ttm_bo_device *bdev = bo->bdev; 416 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); 417 bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem); 418 struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type]; 419 struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type]; 420 int ret = 0; 421 422 if (old_is_pci || new_is_pci || 423 ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) { 424 ret = ttm_mem_io_lock(old_man, true); 425 if (unlikely(ret != 0)) 426 goto out_err; 427 ttm_bo_unmap_virtual_locked(bo); 428 ttm_mem_io_unlock(old_man); 429 } 430 431 /* 432 * Create and bind a ttm if required. 
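 * Only memory types without TTM_MEMTYPE_FLAG_FIXED are backed by a ttm, and
 * a move into TTM_PL_SYSTEM does not need a ttm_tt_bind() call.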
433 */ 434 435 if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) { 436 if (bo->ttm == NULL) { 437 bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); 438 ret = ttm_bo_add_ttm(bo, zero); 439 if (ret) 440 goto out_err; 441 } 442 443 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); 444 if (ret) 445 goto out_err; 446 447 if (mem->mem_type != TTM_PL_SYSTEM) { 448 ret = ttm_tt_bind(bo->ttm, mem); 449 if (ret) 450 goto out_err; 451 } 452 453 if (bo->mem.mem_type == TTM_PL_SYSTEM) { 454 if (bdev->driver->move_notify) 455 bdev->driver->move_notify(bo, mem); 456 bo->mem = *mem; 457 mem->mm_node = NULL; 458 goto moved; 459 } 460 } 461 462 if (bdev->driver->move_notify) 463 bdev->driver->move_notify(bo, mem); 464 465 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && 466 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) 467 ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem); 468 else if (bdev->driver->move) 469 ret = bdev->driver->move(bo, evict, interruptible, 470 no_wait_gpu, mem); 471 else 472 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem); 473 474 if (ret) { 475 if (bdev->driver->move_notify) { 476 struct ttm_mem_reg tmp_mem = *mem; 477 *mem = bo->mem; 478 bo->mem = tmp_mem; 479 bdev->driver->move_notify(bo, mem); 480 bo->mem = *mem; 481 *mem = tmp_mem; 482 } 483 484 goto out_err; 485 } 486 487 moved: 488 if (bo->evicted) { 489 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); 490 if (ret) 491 kprintf("[TTM] Can not flush read caches\n"); 492 bo->evicted = false; 493 } 494 495 if (bo->mem.mm_node) { 496 bo->offset = (bo->mem.start << PAGE_SHIFT) + 497 bdev->man[bo->mem.mem_type].gpu_offset; 498 bo->cur_placement = bo->mem.placement; 499 } else 500 bo->offset = 0; 501 502 return 0; 503 504 out_err: 505 new_man = &bdev->man[bo->mem.mem_type]; 506 if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) { 507 ttm_tt_unbind(bo->ttm); 508 ttm_tt_destroy(bo->ttm); 509 bo->ttm = NULL; 510 } 511 512 return ret; 513 } 514 515 /** 516 * Call bo::reserved. 517 * Will release GPU memory type usage on destruction. 518 * This is the place to put in driver specific hooks to release 519 * driver private resources. 520 * Will release the bo::reserved lock. 521 */ 522 523 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) 524 { 525 if (bo->bdev->driver->move_notify) 526 bo->bdev->driver->move_notify(bo, NULL); 527 528 if (bo->ttm) { 529 ttm_tt_unbind(bo->ttm); 530 ttm_tt_destroy(bo->ttm); 531 bo->ttm = NULL; 532 } 533 ttm_bo_mem_put(bo, &bo->mem); 534 ttm_bo_unreserve_core(bo); 535 536 /* 537 * Since the final reference to this bo may not be dropped by 538 * the current task we have to put a memory barrier here to make 539 * sure the changes done in this function are always visible. 540 * 541 * This function only needs protection against the final kref_put. 
542 */ 543 cpu_mfence(); 544 } 545 546 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) 547 { 548 struct ttm_bo_device *bdev = bo->bdev; 549 struct ttm_bo_global *glob = bo->glob; 550 struct ttm_bo_driver *driver = bdev->driver; 551 void *sync_obj = NULL; 552 int put_count; 553 int ret; 554 555 lockmgr(&glob->lru_lock, LK_EXCLUSIVE); 556 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); 557 558 lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); 559 (void) ttm_bo_wait(bo, false, false, true); 560 if (!ret && !bo->sync_obj) { 561 lockmgr(&bdev->fence_lock, LK_RELEASE); 562 put_count = ttm_bo_del_from_lru(bo); 563 564 lockmgr(&glob->lru_lock, LK_RELEASE); 565 ttm_bo_cleanup_memtype_use(bo); 566 567 ttm_bo_list_ref_sub(bo, put_count, true); 568 569 return; 570 } 571 if (bo->sync_obj) 572 sync_obj = driver->sync_obj_ref(bo->sync_obj); 573 lockmgr(&bdev->fence_lock, LK_RELEASE); 574 575 if (!ret) { 576 ttm_bo_unreserve_core(bo); 577 } 578 579 kref_get(&bo->list_kref); 580 list_add_tail(&bo->ddestroy, &bdev->ddestroy); 581 lockmgr(&glob->lru_lock, LK_RELEASE); 582 583 if (sync_obj) { 584 driver->sync_obj_flush(sync_obj); 585 driver->sync_obj_unref(&sync_obj); 586 } 587 schedule_delayed_work(&bdev->wq, 588 ((hz / 100) < 1) ? 1 : hz / 100); 589 } 590 591 /** 592 * function ttm_bo_cleanup_refs_and_unlock 593 * If bo idle, remove from delayed- and lru lists, and unref. 594 * If not idle, do nothing. 595 * 596 * Must be called with lru_lock and reservation held, this function 597 * will drop both before returning. 598 * 599 * @interruptible Any sleeps should occur interruptibly. 600 * @no_wait_gpu Never wait for gpu. Return -EBUSY instead. 601 */ 602 603 static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, 604 bool interruptible, 605 bool no_wait_gpu) 606 { 607 struct ttm_bo_device *bdev = bo->bdev; 608 struct ttm_bo_driver *driver = bdev->driver; 609 struct ttm_bo_global *glob = bo->glob; 610 int put_count; 611 int ret; 612 613 lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); 614 ret = ttm_bo_wait(bo, false, false, true); 615 616 if (ret && !no_wait_gpu) { 617 void *sync_obj; 618 619 /* 620 * Take a reference to the fence and unreserve, 621 * at this point the buffer should be dead, so 622 * no new sync objects can be attached. 623 */ 624 sync_obj = driver->sync_obj_ref(bo->sync_obj); 625 lockmgr(&bdev->fence_lock, LK_RELEASE); 626 627 ttm_bo_unreserve_core(bo); 628 lockmgr(&glob->lru_lock, LK_RELEASE); 629 630 ret = driver->sync_obj_wait(sync_obj, false, interruptible); 631 driver->sync_obj_unref(&sync_obj); 632 if (ret) 633 return ret; 634 635 /* 636 * remove sync_obj with ttm_bo_wait, the wait should be 637 * finished, and no new wait object should have been added. 638 */ 639 lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); 640 ret = ttm_bo_wait(bo, false, false, true); 641 WARN_ON(ret); 642 lockmgr(&bdev->fence_lock, LK_RELEASE); 643 if (ret) 644 return ret; 645 646 lockmgr(&glob->lru_lock, LK_EXCLUSIVE); 647 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); 648 649 /* 650 * We raced, and lost, someone else holds the reservation now, 651 * and is probably busy in ttm_bo_cleanup_memtype_use. 652 * 653 * Even if it's not the case, because we finished waiting any 654 * delayed destruction would succeed, so just return success 655 * here. 
656 */ 657 if (ret) { 658 lockmgr(&glob->lru_lock, LK_RELEASE); 659 return 0; 660 } 661 } else 662 lockmgr(&bdev->fence_lock, LK_RELEASE); 663 664 if (ret || unlikely(list_empty(&bo->ddestroy))) { 665 ttm_bo_unreserve_core(bo); 666 lockmgr(&glob->lru_lock, LK_RELEASE); 667 return ret; 668 } 669 670 put_count = ttm_bo_del_from_lru(bo); 671 list_del_init(&bo->ddestroy); 672 ++put_count; 673 674 lockmgr(&glob->lru_lock, LK_RELEASE); 675 ttm_bo_cleanup_memtype_use(bo); 676 677 ttm_bo_list_ref_sub(bo, put_count, true); 678 679 return 0; 680 } 681 682 /** 683 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all 684 * encountered buffers. 685 */ 686 687 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) 688 { 689 struct ttm_bo_global *glob = bdev->glob; 690 struct ttm_buffer_object *entry = NULL; 691 int ret = 0; 692 693 lockmgr(&glob->lru_lock, LK_EXCLUSIVE); 694 if (list_empty(&bdev->ddestroy)) 695 goto out_unlock; 696 697 entry = list_first_entry(&bdev->ddestroy, 698 struct ttm_buffer_object, ddestroy); 699 kref_get(&entry->list_kref); 700 701 for (;;) { 702 struct ttm_buffer_object *nentry = NULL; 703 704 if (entry->ddestroy.next != &bdev->ddestroy) { 705 nentry = list_first_entry(&entry->ddestroy, 706 struct ttm_buffer_object, ddestroy); 707 kref_get(&nentry->list_kref); 708 } 709 710 ret = ttm_bo_reserve_nolru(entry, false, true, false, 0); 711 if (remove_all && ret) { 712 lockmgr(&glob->lru_lock, LK_RELEASE); 713 ret = ttm_bo_reserve_nolru(entry, false, false, 714 false, 0); 715 lockmgr(&glob->lru_lock, LK_EXCLUSIVE); 716 } 717 718 if (!ret) 719 ret = ttm_bo_cleanup_refs_and_unlock(entry, false, 720 !remove_all); 721 else 722 lockmgr(&glob->lru_lock, LK_RELEASE); 723 724 kref_put(&entry->list_kref, ttm_bo_release_list); 725 entry = nentry; 726 727 if (ret || !entry) 728 goto out; 729 730 lockmgr(&glob->lru_lock, LK_EXCLUSIVE); 731 if (list_empty(&entry->ddestroy)) 732 break; 733 } 734 735 out_unlock: 736 lockmgr(&glob->lru_lock, LK_RELEASE); 737 out: 738 if (entry) 739 kref_put(&entry->list_kref, ttm_bo_release_list); 740 return ret; 741 } 742 743 static void ttm_bo_delayed_workqueue(struct work_struct *work) 744 { 745 struct ttm_bo_device *bdev = 746 container_of(work, struct ttm_bo_device, wq.work); 747 748 if (ttm_bo_delayed_delete(bdev, false)) { 749 schedule_delayed_work(&bdev->wq, 750 ((hz / 100) < 1) ? 1 : hz / 100); 751 } 752 } 753 754 /* 755 * NOTE: bdev->vm_lock already held on call, this function release it. 756 */ 757 static void ttm_bo_release(struct kref *kref) 758 { 759 struct ttm_buffer_object *bo = 760 container_of(kref, struct ttm_buffer_object, kref); 761 struct ttm_bo_device *bdev = bo->bdev; 762 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; 763 int release_active; 764 765 if (atomic_read(&bo->kref.refcount) > 0) { 766 lockmgr(&bdev->vm_lock, LK_RELEASE); 767 return; 768 } 769 if (likely(bo->vm_node != NULL)) { 770 RB_REMOVE(ttm_bo_device_buffer_objects, 771 &bdev->addr_space_rb, bo); 772 drm_mm_put_block(bo->vm_node); 773 bo->vm_node = NULL; 774 } 775 776 /* 777 * Should we clean up our implied list_kref? Because ttm_bo_release() 778 * can be called reentrantly due to races (this may not be true any 779 * more with the lock management changes in the deref), it is possible 780 * to get here twice, but there's only one list_kref ref to drop and 781 * in the other path 'bo' can be kfree()d by another thread the 782 * instant we release our lock. 
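 * The TTM_BO_PRIV_FLAG_ACTIVE test below ensures that the cleanup path runs
 * only once.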
783 */ 784 release_active = test_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags); 785 if (release_active) { 786 clear_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags); 787 lockmgr(&bdev->vm_lock, LK_RELEASE); 788 ttm_mem_io_lock(man, false); 789 ttm_mem_io_free_vm(bo); 790 ttm_mem_io_unlock(man); 791 ttm_bo_cleanup_refs_or_queue(bo); 792 kref_put(&bo->list_kref, ttm_bo_release_list); 793 } else { 794 lockmgr(&bdev->vm_lock, LK_RELEASE); 795 } 796 } 797 798 void ttm_bo_unref(struct ttm_buffer_object **p_bo) 799 { 800 struct ttm_buffer_object *bo = *p_bo; 801 struct ttm_bo_device *bdev = bo->bdev; 802 803 *p_bo = NULL; 804 lockmgr(&bdev->vm_lock, LK_EXCLUSIVE); 805 if (kref_put(&bo->kref, ttm_bo_release) == 0) 806 lockmgr(&bdev->vm_lock, LK_RELEASE); 807 } 808 EXPORT_SYMBOL(ttm_bo_unref); 809 810 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev) 811 { 812 return cancel_delayed_work_sync(&bdev->wq); 813 } 814 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue); 815 816 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched) 817 { 818 if (resched) 819 schedule_delayed_work(&bdev->wq, 820 ((hz / 100) < 1) ? 1 : hz / 100); 821 } 822 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue); 823 824 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, 825 bool no_wait_gpu) 826 { 827 struct ttm_bo_device *bdev = bo->bdev; 828 struct ttm_mem_reg evict_mem; 829 struct ttm_placement placement; 830 int ret = 0; 831 832 lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); 833 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); 834 lockmgr(&bdev->fence_lock, LK_RELEASE); 835 836 if (unlikely(ret != 0)) { 837 if (ret != -ERESTARTSYS) { 838 pr_err("Failed to expire sync object before buffer eviction\n"); 839 } 840 goto out; 841 } 842 843 BUG_ON(!ttm_bo_is_reserved(bo)); 844 845 evict_mem = bo->mem; 846 evict_mem.mm_node = NULL; 847 evict_mem.bus.io_reserved_vm = false; 848 evict_mem.bus.io_reserved_count = 0; 849 850 placement.fpfn = 0; 851 placement.lpfn = 0; 852 placement.num_placement = 0; 853 placement.num_busy_placement = 0; 854 bdev->driver->evict_flags(bo, &placement); 855 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, 856 no_wait_gpu); 857 if (ret) { 858 if (ret != -ERESTARTSYS) { 859 pr_err("Failed to find memory space for buffer 0x%p eviction\n", 860 bo); 861 ttm_bo_mem_space_debug(bo, &placement); 862 } 863 goto out; 864 } 865 866 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, 867 no_wait_gpu); 868 if (ret) { 869 if (ret != -ERESTARTSYS) 870 pr_err("Buffer eviction failed\n"); 871 ttm_bo_mem_put(bo, &evict_mem); 872 goto out; 873 } 874 bo->evicted = true; 875 out: 876 return ret; 877 } 878 879 static int ttm_mem_evict_first(struct ttm_bo_device *bdev, 880 uint32_t mem_type, 881 bool interruptible, 882 bool no_wait_gpu) 883 { 884 struct ttm_bo_global *glob = bdev->glob; 885 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 886 struct ttm_buffer_object *bo; 887 int ret = -EBUSY, put_count; 888 889 lockmgr(&glob->lru_lock, LK_EXCLUSIVE); 890 list_for_each_entry(bo, &man->lru, lru) { 891 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); 892 if (!ret) 893 break; 894 } 895 896 if (ret) { 897 lockmgr(&glob->lru_lock, LK_RELEASE); 898 return ret; 899 } 900 901 kref_get(&bo->list_kref); 902 903 if (!list_empty(&bo->ddestroy)) { 904 ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible, 905 no_wait_gpu); 906 kref_put(&bo->list_kref, ttm_bo_release_list); 907 return ret; 908 } 909 910 put_count = ttm_bo_del_from_lru(bo); 911 
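	/* the list references removed above are dropped below via ttm_bo_list_ref_sub() */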
lockmgr(&glob->lru_lock, LK_RELEASE); 912 913 BUG_ON(ret != 0); 914 915 ttm_bo_list_ref_sub(bo, put_count, true); 916 917 ret = ttm_bo_evict(bo, interruptible, no_wait_gpu); 918 ttm_bo_unreserve(bo); 919 920 kref_put(&bo->list_kref, ttm_bo_release_list); 921 return ret; 922 } 923 924 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) 925 { 926 struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; 927 928 if (mem->mm_node) 929 (*man->func->put_node)(man, mem); 930 } 931 EXPORT_SYMBOL(ttm_bo_mem_put); 932 933 /** 934 * Repeatedly evict memory from the LRU for @mem_type until we create enough 935 * space, or we've evicted everything and there isn't enough space. 936 */ 937 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, 938 uint32_t mem_type, 939 struct ttm_placement *placement, 940 struct ttm_mem_reg *mem, 941 bool interruptible, 942 bool no_wait_gpu) 943 { 944 struct ttm_bo_device *bdev = bo->bdev; 945 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 946 int ret; 947 948 do { 949 ret = (*man->func->get_node)(man, bo, placement, mem); 950 if (unlikely(ret != 0)) 951 return ret; 952 if (mem->mm_node) 953 break; 954 ret = ttm_mem_evict_first(bdev, mem_type, 955 interruptible, no_wait_gpu); 956 if (unlikely(ret != 0)) 957 return ret; 958 } while (1); 959 if (mem->mm_node == NULL) 960 return -ENOMEM; 961 mem->mem_type = mem_type; 962 return 0; 963 } 964 965 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, 966 uint32_t cur_placement, 967 uint32_t proposed_placement) 968 { 969 uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING; 970 uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING; 971 972 /** 973 * Keep current caching if possible. 974 */ 975 976 if ((cur_placement & caching) != 0) 977 result |= (cur_placement & caching); 978 else if ((man->default_caching & caching) != 0) 979 result |= man->default_caching; 980 else if ((TTM_PL_FLAG_CACHED & caching) != 0) 981 result |= TTM_PL_FLAG_CACHED; 982 else if ((TTM_PL_FLAG_WC & caching) != 0) 983 result |= TTM_PL_FLAG_WC; 984 else if ((TTM_PL_FLAG_UNCACHED & caching) != 0) 985 result |= TTM_PL_FLAG_UNCACHED; 986 987 return result; 988 } 989 990 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, 991 uint32_t mem_type, 992 uint32_t proposed_placement, 993 uint32_t *masked_placement) 994 { 995 uint32_t cur_flags = ttm_bo_type_flags(mem_type); 996 997 if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0) 998 return false; 999 1000 if ((proposed_placement & man->available_caching) == 0) 1001 return false; 1002 1003 cur_flags |= (proposed_placement & man->available_caching); 1004 1005 *masked_placement = cur_flags; 1006 return true; 1007 } 1008 1009 /** 1010 * Creates space for memory region @mem according to its type. 1011 * 1012 * This function first searches for free space in compatible memory types in 1013 * the priority order defined by the driver. If free space isn't found, then 1014 * ttm_bo_mem_force_space is attempted in priority order to evict and find 1015 * space. 
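 * The eviction pass walks placement->busy_placement and frees space by
 * calling ttm_mem_evict_first().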
1016 */ 1017 int ttm_bo_mem_space(struct ttm_buffer_object *bo, 1018 struct ttm_placement *placement, 1019 struct ttm_mem_reg *mem, 1020 bool interruptible, 1021 bool no_wait_gpu) 1022 { 1023 struct ttm_bo_device *bdev = bo->bdev; 1024 struct ttm_mem_type_manager *man; 1025 uint32_t mem_type = TTM_PL_SYSTEM; 1026 uint32_t cur_flags = 0; 1027 bool type_found = false; 1028 bool type_ok = false; 1029 bool has_erestartsys = false; 1030 int i, ret; 1031 1032 mem->mm_node = NULL; 1033 for (i = 0; i < placement->num_placement; ++i) { 1034 ret = ttm_mem_type_from_flags(placement->placement[i], 1035 &mem_type); 1036 if (ret) 1037 return ret; 1038 man = &bdev->man[mem_type]; 1039 1040 type_ok = ttm_bo_mt_compatible(man, 1041 mem_type, 1042 placement->placement[i], 1043 &cur_flags); 1044 1045 if (!type_ok) 1046 continue; 1047 1048 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, 1049 cur_flags); 1050 /* 1051 * Use the access and other non-mapping-related flag bits from 1052 * the memory placement flags to the current flags 1053 */ 1054 ttm_flag_masked(&cur_flags, placement->placement[i], 1055 ~TTM_PL_MASK_MEMTYPE); 1056 1057 if (mem_type == TTM_PL_SYSTEM) 1058 break; 1059 1060 if (man->has_type && man->use_type) { 1061 type_found = true; 1062 ret = (*man->func->get_node)(man, bo, placement, mem); 1063 if (unlikely(ret)) 1064 return ret; 1065 } 1066 if (mem->mm_node) 1067 break; 1068 } 1069 1070 if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) { 1071 mem->mem_type = mem_type; 1072 mem->placement = cur_flags; 1073 return 0; 1074 } 1075 1076 if (!type_found) 1077 return -EINVAL; 1078 1079 for (i = 0; i < placement->num_busy_placement; ++i) { 1080 ret = ttm_mem_type_from_flags(placement->busy_placement[i], 1081 &mem_type); 1082 if (ret) 1083 return ret; 1084 man = &bdev->man[mem_type]; 1085 if (!man->has_type) 1086 continue; 1087 if (!ttm_bo_mt_compatible(man, 1088 mem_type, 1089 placement->busy_placement[i], 1090 &cur_flags)) 1091 continue; 1092 1093 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, 1094 cur_flags); 1095 /* 1096 * Use the access and other non-mapping-related flag bits from 1097 * the memory placement flags to the current flags 1098 */ 1099 ttm_flag_masked(&cur_flags, placement->busy_placement[i], 1100 ~TTM_PL_MASK_MEMTYPE); 1101 1102 1103 if (mem_type == TTM_PL_SYSTEM) { 1104 mem->mem_type = mem_type; 1105 mem->placement = cur_flags; 1106 mem->mm_node = NULL; 1107 return 0; 1108 } 1109 1110 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, 1111 interruptible, no_wait_gpu); 1112 if (ret == 0 && mem->mm_node) { 1113 mem->placement = cur_flags; 1114 return 0; 1115 } 1116 if (ret == -ERESTARTSYS) 1117 has_erestartsys = true; 1118 } 1119 ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM; 1120 return ret; 1121 } 1122 EXPORT_SYMBOL(ttm_bo_mem_space); 1123 1124 static 1125 int ttm_bo_move_buffer(struct ttm_buffer_object *bo, 1126 struct ttm_placement *placement, 1127 bool interruptible, 1128 bool no_wait_gpu) 1129 { 1130 int ret = 0; 1131 struct ttm_mem_reg mem; 1132 struct ttm_bo_device *bdev = bo->bdev; 1133 1134 BUG_ON(!ttm_bo_is_reserved(bo)); 1135 1136 /* 1137 * FIXME: It's possible to pipeline buffer moves. 1138 * Have the driver move function wait for idle when necessary, 1139 * instead of doing it here. 
1140 */ 1141 lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); 1142 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); 1143 lockmgr(&bdev->fence_lock, LK_RELEASE); 1144 if (ret) 1145 return ret; 1146 mem.num_pages = bo->num_pages; 1147 mem.size = mem.num_pages << PAGE_SHIFT; 1148 mem.page_alignment = bo->mem.page_alignment; 1149 mem.bus.io_reserved_vm = false; 1150 mem.bus.io_reserved_count = 0; 1151 /* 1152 * Determine where to move the buffer. 1153 */ 1154 ret = ttm_bo_mem_space(bo, placement, &mem, 1155 interruptible, no_wait_gpu); 1156 if (ret) 1157 goto out_unlock; 1158 ret = ttm_bo_handle_move_mem(bo, &mem, false, 1159 interruptible, no_wait_gpu); 1160 out_unlock: 1161 if (ret && mem.mm_node) 1162 ttm_bo_mem_put(bo, &mem); 1163 return ret; 1164 } 1165 1166 static int ttm_bo_mem_compat(struct ttm_placement *placement, 1167 struct ttm_mem_reg *mem) 1168 { 1169 int i; 1170 1171 if (mem->mm_node && placement->lpfn != 0 && 1172 (mem->start < placement->fpfn || 1173 mem->start + mem->num_pages > placement->lpfn)) 1174 return -1; 1175 1176 for (i = 0; i < placement->num_placement; i++) { 1177 if ((placement->placement[i] & mem->placement & 1178 TTM_PL_MASK_CACHING) && 1179 (placement->placement[i] & mem->placement & 1180 TTM_PL_MASK_MEM)) 1181 return i; 1182 } 1183 return -1; 1184 } 1185 1186 int ttm_bo_validate(struct ttm_buffer_object *bo, 1187 struct ttm_placement *placement, 1188 bool interruptible, 1189 bool no_wait_gpu) 1190 { 1191 int ret; 1192 1193 BUG_ON(!ttm_bo_is_reserved(bo)); 1194 /* Check that range is valid */ 1195 if (placement->lpfn || placement->fpfn) 1196 if (placement->fpfn > placement->lpfn || 1197 (placement->lpfn - placement->fpfn) < bo->num_pages) 1198 return -EINVAL; 1199 /* 1200 * Check whether we need to move buffer. 1201 */ 1202 ret = ttm_bo_mem_compat(placement, &bo->mem); 1203 if (ret < 0) { 1204 ret = ttm_bo_move_buffer(bo, placement, interruptible, 1205 no_wait_gpu); 1206 if (ret) 1207 return ret; 1208 } else { 1209 /* 1210 * Use the access and other non-mapping-related flag bits from 1211 * the compatible memory placement flags to the active flags 1212 */ 1213 ttm_flag_masked(&bo->mem.placement, placement->placement[ret], 1214 ~TTM_PL_MASK_MEMTYPE); 1215 } 1216 /* 1217 * We might need to add a TTM. 
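 * A buffer that ends up in TTM_PL_SYSTEM without a ttm attached gets one
 * allocated here.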
1218 */ 1219 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { 1220 ret = ttm_bo_add_ttm(bo, true); 1221 if (ret) 1222 return ret; 1223 } 1224 return 0; 1225 } 1226 EXPORT_SYMBOL(ttm_bo_validate); 1227 1228 int ttm_bo_check_placement(struct ttm_buffer_object *bo, 1229 struct ttm_placement *placement) 1230 { 1231 BUG_ON((placement->fpfn || placement->lpfn) && 1232 (bo->mem.num_pages > (placement->lpfn - placement->fpfn))); 1233 1234 return 0; 1235 } 1236 1237 int ttm_bo_init(struct ttm_bo_device *bdev, 1238 struct ttm_buffer_object *bo, 1239 unsigned long size, 1240 enum ttm_bo_type type, 1241 struct ttm_placement *placement, 1242 uint32_t page_alignment, 1243 bool interruptible, 1244 struct vm_object *persistent_swap_storage, 1245 size_t acc_size, 1246 struct sg_table *sg, 1247 void (*destroy) (struct ttm_buffer_object *)) 1248 { 1249 int ret = 0; 1250 unsigned long num_pages; 1251 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; 1252 1253 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); 1254 if (ret) { 1255 kprintf("[TTM] Out of kernel memory\n"); 1256 if (destroy) 1257 (*destroy)(bo); 1258 else 1259 kfree(bo); 1260 return -ENOMEM; 1261 } 1262 1263 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 1264 if (num_pages == 0) { 1265 kprintf("[TTM] Illegal buffer object size\n"); 1266 if (destroy) 1267 (*destroy)(bo); 1268 else 1269 kfree(bo); 1270 ttm_mem_global_free(mem_glob, acc_size); 1271 return -EINVAL; 1272 } 1273 bo->destroy = destroy; 1274 1275 kref_init(&bo->kref); 1276 kref_init(&bo->list_kref); 1277 atomic_set(&bo->cpu_writers, 0); 1278 atomic_set(&bo->reserved, 1); 1279 init_waitqueue_head(&bo->event_queue); 1280 INIT_LIST_HEAD(&bo->lru); 1281 INIT_LIST_HEAD(&bo->ddestroy); 1282 INIT_LIST_HEAD(&bo->swap); 1283 INIT_LIST_HEAD(&bo->io_reserve_lru); 1284 /*bzero(&bo->vm_rb, sizeof(bo->vm_rb));*/ 1285 bo->bdev = bdev; 1286 bo->glob = bdev->glob; 1287 bo->type = type; 1288 bo->num_pages = num_pages; 1289 bo->mem.size = num_pages << PAGE_SHIFT; 1290 bo->mem.mem_type = TTM_PL_SYSTEM; 1291 bo->mem.num_pages = bo->num_pages; 1292 bo->mem.mm_node = NULL; 1293 bo->mem.page_alignment = page_alignment; 1294 bo->mem.bus.io_reserved_vm = false; 1295 bo->mem.bus.io_reserved_count = 0; 1296 bo->priv_flags = 0; 1297 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); 1298 bo->seq_valid = false; 1299 bo->persistent_swap_storage = persistent_swap_storage; 1300 bo->acc_size = acc_size; 1301 bo->sg = sg; 1302 atomic_inc(&bo->glob->bo_count); 1303 1304 /* 1305 * Mirror ref from kref_init() for list_kref. 1306 */ 1307 set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags); 1308 1309 ret = ttm_bo_check_placement(bo, placement); 1310 if (unlikely(ret != 0)) 1311 goto out_err; 1312 1313 /* 1314 * For ttm_bo_type_device buffers, allocate 1315 * address space from the device. 
1316 */ 1317 if (bo->type == ttm_bo_type_device || 1318 bo->type == ttm_bo_type_sg) { 1319 ret = ttm_bo_setup_vm(bo); 1320 if (ret) 1321 goto out_err; 1322 } 1323 1324 ret = ttm_bo_validate(bo, placement, interruptible, false); 1325 if (ret) 1326 goto out_err; 1327 1328 ttm_bo_unreserve(bo); 1329 return 0; 1330 1331 out_err: 1332 ttm_bo_unreserve(bo); 1333 ttm_bo_unref(&bo); 1334 1335 return ret; 1336 } 1337 EXPORT_SYMBOL(ttm_bo_init); 1338 1339 size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, 1340 unsigned long bo_size, 1341 unsigned struct_size) 1342 { 1343 unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; 1344 size_t size = 0; 1345 1346 size += ttm_round_pot(struct_size); 1347 size += PAGE_ALIGN(npages * sizeof(void *)); 1348 size += ttm_round_pot(sizeof(struct ttm_tt)); 1349 return size; 1350 } 1351 EXPORT_SYMBOL(ttm_bo_acc_size); 1352 1353 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, 1354 unsigned long bo_size, 1355 unsigned struct_size) 1356 { 1357 unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; 1358 size_t size = 0; 1359 1360 size += ttm_round_pot(struct_size); 1361 size += PAGE_ALIGN(npages * sizeof(void *)); 1362 size += PAGE_ALIGN(npages * sizeof(dma_addr_t)); 1363 size += ttm_round_pot(sizeof(struct ttm_dma_tt)); 1364 return size; 1365 } 1366 EXPORT_SYMBOL(ttm_bo_dma_acc_size); 1367 1368 int ttm_bo_create(struct ttm_bo_device *bdev, 1369 unsigned long size, 1370 enum ttm_bo_type type, 1371 struct ttm_placement *placement, 1372 uint32_t page_alignment, 1373 bool interruptible, 1374 struct vm_object *persistent_swap_storage, 1375 struct ttm_buffer_object **p_bo) 1376 { 1377 struct ttm_buffer_object *bo; 1378 size_t acc_size; 1379 int ret; 1380 1381 *p_bo = NULL; 1382 bo = kmalloc(sizeof(*bo), M_DRM, M_WAITOK | M_ZERO); 1383 if (unlikely(bo == NULL)) 1384 return -ENOMEM; 1385 1386 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); 1387 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, 1388 interruptible, persistent_swap_storage, acc_size, 1389 NULL, NULL); 1390 if (likely(ret == 0)) 1391 *p_bo = bo; 1392 1393 return ret; 1394 } 1395 EXPORT_SYMBOL(ttm_bo_create); 1396 1397 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, 1398 unsigned mem_type, bool allow_errors) 1399 { 1400 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 1401 struct ttm_bo_global *glob = bdev->glob; 1402 int ret; 1403 1404 /* 1405 * Can't use standard list traversal since we're unlocking. 
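 * (lru_lock is dropped around every ttm_mem_evict_first() call below.)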
1406 */ 1407 1408 lockmgr(&glob->lru_lock, LK_EXCLUSIVE); 1409 while (!list_empty(&man->lru)) { 1410 lockmgr(&glob->lru_lock, LK_RELEASE); 1411 ret = ttm_mem_evict_first(bdev, mem_type, false, false); 1412 if (ret) { 1413 if (allow_errors) { 1414 return ret; 1415 } else { 1416 kprintf("[TTM] Cleanup eviction failed\n"); 1417 } 1418 } 1419 lockmgr(&glob->lru_lock, LK_EXCLUSIVE); 1420 } 1421 lockmgr(&glob->lru_lock, LK_RELEASE); 1422 return 0; 1423 } 1424 1425 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) 1426 { 1427 struct ttm_mem_type_manager *man; 1428 int ret = -EINVAL; 1429 1430 if (mem_type >= TTM_NUM_MEM_TYPES) { 1431 kprintf("[TTM] Illegal memory type %d\n", mem_type); 1432 return ret; 1433 } 1434 man = &bdev->man[mem_type]; 1435 1436 if (!man->has_type) { 1437 kprintf("[TTM] Trying to take down uninitialized memory manager type %u\n", 1438 mem_type); 1439 return ret; 1440 } 1441 1442 man->use_type = false; 1443 man->has_type = false; 1444 1445 ret = 0; 1446 if (mem_type > 0) { 1447 ttm_bo_force_list_clean(bdev, mem_type, false); 1448 1449 ret = (*man->func->takedown)(man); 1450 } 1451 1452 return ret; 1453 } 1454 EXPORT_SYMBOL(ttm_bo_clean_mm); 1455 1456 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) 1457 { 1458 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 1459 1460 if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { 1461 kprintf("[TTM] Illegal memory manager memory type %u\n", mem_type); 1462 return -EINVAL; 1463 } 1464 1465 if (!man->has_type) { 1466 kprintf("[TTM] Memory type %u has not been initialized\n", mem_type); 1467 return 0; 1468 } 1469 1470 return ttm_bo_force_list_clean(bdev, mem_type, true); 1471 } 1472 EXPORT_SYMBOL(ttm_bo_evict_mm); 1473 1474 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, 1475 unsigned long p_size) 1476 { 1477 int ret = -EINVAL; 1478 struct ttm_mem_type_manager *man; 1479 1480 BUG_ON(type >= TTM_NUM_MEM_TYPES); 1481 man = &bdev->man[type]; 1482 BUG_ON(man->has_type); 1483 man->io_reserve_fastpath = true; 1484 man->use_io_reserve_lru = false; 1485 lockinit(&man->io_reserve_mutex, "ttmman", 0, LK_CANRECURSE); 1486 INIT_LIST_HEAD(&man->io_reserve_lru); 1487 1488 ret = bdev->driver->init_mem_type(bdev, type, man); 1489 if (ret) 1490 return ret; 1491 man->bdev = bdev; 1492 1493 ret = 0; 1494 if (type != TTM_PL_SYSTEM) { 1495 ret = (*man->func->init)(man, p_size); 1496 if (ret) 1497 return ret; 1498 } 1499 man->has_type = true; 1500 man->use_type = true; 1501 man->size = p_size; 1502 1503 INIT_LIST_HEAD(&man->lru); 1504 1505 return 0; 1506 } 1507 EXPORT_SYMBOL(ttm_bo_init_mm); 1508 1509 static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob) 1510 { 1511 ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink); 1512 vm_page_free_contig(glob->dummy_read_page, PAGE_SIZE); 1513 glob->dummy_read_page = NULL; 1514 /* 1515 vm_page_free(glob->dummy_read_page); 1516 */ 1517 } 1518 1519 void ttm_bo_global_release(struct drm_global_reference *ref) 1520 { 1521 struct ttm_bo_global *glob = ref->object; 1522 1523 if (refcount_release(&glob->kobj_ref)) 1524 ttm_bo_global_kobj_release(glob); 1525 } 1526 EXPORT_SYMBOL(ttm_bo_global_release); 1527 1528 int ttm_bo_global_init(struct drm_global_reference *ref) 1529 { 1530 struct ttm_bo_global_ref *bo_ref = 1531 container_of(ref, struct ttm_bo_global_ref, ref); 1532 struct ttm_bo_global *glob = ref->object; 1533 int ret; 1534 1535 lockinit(&glob->device_list_mutex, "ttmdlm", 0, LK_CANRECURSE); 1536 lockinit(&glob->lru_lock, "ttmlru", 0, 
LK_CANRECURSE); 1537 glob->mem_glob = bo_ref->mem_glob; 1538 glob->dummy_read_page = vm_page_alloc_contig( 1539 0, VM_MAX_ADDRESS, PAGE_SIZE, 0, 1*PAGE_SIZE, VM_MEMATTR_UNCACHEABLE); 1540 1541 if (unlikely(glob->dummy_read_page == NULL)) { 1542 ret = -ENOMEM; 1543 goto out_no_drp; 1544 } 1545 1546 INIT_LIST_HEAD(&glob->swap_lru); 1547 INIT_LIST_HEAD(&glob->device_list); 1548 1549 ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout); 1550 ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink); 1551 if (unlikely(ret != 0)) { 1552 kprintf("[TTM] Could not register buffer object swapout\n"); 1553 goto out_no_shrink; 1554 } 1555 1556 atomic_set(&glob->bo_count, 0); 1557 1558 refcount_init(&glob->kobj_ref, 1); 1559 return (0); 1560 1561 out_no_shrink: 1562 vm_page_free_contig(glob->dummy_read_page, PAGE_SIZE); 1563 glob->dummy_read_page = NULL; 1564 /* 1565 vm_page_free(glob->dummy_read_page); 1566 */ 1567 out_no_drp: 1568 kfree(glob); 1569 return ret; 1570 } 1571 EXPORT_SYMBOL(ttm_bo_global_init); 1572 1573 1574 int ttm_bo_device_release(struct ttm_bo_device *bdev) 1575 { 1576 int ret = 0; 1577 unsigned i = TTM_NUM_MEM_TYPES; 1578 struct ttm_mem_type_manager *man; 1579 struct ttm_bo_global *glob = bdev->glob; 1580 1581 while (i--) { 1582 man = &bdev->man[i]; 1583 if (man->has_type) { 1584 man->use_type = false; 1585 if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) { 1586 ret = -EBUSY; 1587 kprintf("[TTM] DRM memory manager type %d is not clean\n", 1588 i); 1589 } 1590 man->has_type = false; 1591 } 1592 } 1593 1594 lockmgr(&glob->device_list_mutex, LK_EXCLUSIVE); 1595 list_del(&bdev->device_list); 1596 lockmgr(&glob->device_list_mutex, LK_RELEASE); 1597 1598 cancel_delayed_work_sync(&bdev->wq); 1599 1600 while (ttm_bo_delayed_delete(bdev, true)) 1601 ; 1602 1603 lockmgr(&glob->lru_lock, LK_EXCLUSIVE); 1604 if (list_empty(&bdev->ddestroy)) 1605 TTM_DEBUG("Delayed destroy list was clean\n"); 1606 1607 if (list_empty(&bdev->man[0].lru)) 1608 TTM_DEBUG("Swap list was clean\n"); 1609 lockmgr(&glob->lru_lock, LK_RELEASE); 1610 1611 BUG_ON(!drm_mm_clean(&bdev->addr_space_mm)); 1612 lockmgr(&bdev->vm_lock, LK_EXCLUSIVE); 1613 drm_mm_takedown(&bdev->addr_space_mm); 1614 lockmgr(&bdev->vm_lock, LK_RELEASE); 1615 1616 return ret; 1617 } 1618 EXPORT_SYMBOL(ttm_bo_device_release); 1619 1620 int ttm_bo_device_init(struct ttm_bo_device *bdev, 1621 struct ttm_bo_global *glob, 1622 struct ttm_bo_driver *driver, 1623 uint64_t file_page_offset, 1624 bool need_dma32) 1625 { 1626 int ret = -EINVAL; 1627 1628 lockinit(&bdev->vm_lock, "ttmvml", 0, LK_CANRECURSE); 1629 bdev->driver = driver; 1630 1631 memset(bdev->man, 0, sizeof(bdev->man)); 1632 1633 /* 1634 * Initialize the system memory buffer type. 1635 * Other types need to be driver / IOCTL initialized. 
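 * (TTM_PL_SYSTEM is mandatory and is initialized with a size of zero.)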
1636 */ 1637 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0); 1638 if (unlikely(ret != 0)) 1639 goto out_no_sys; 1640 1641 RB_INIT(&bdev->addr_space_rb); 1642 ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000); 1643 if (unlikely(ret != 0)) 1644 goto out_no_addr_mm; 1645 1646 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); 1647 INIT_LIST_HEAD(&bdev->ddestroy); 1648 bdev->dev_mapping = NULL; 1649 bdev->glob = glob; 1650 bdev->need_dma32 = need_dma32; 1651 bdev->val_seq = 0; 1652 lockinit(&bdev->fence_lock, "ttmfence", 0, LK_CANRECURSE); 1653 lockmgr(&glob->device_list_mutex, LK_EXCLUSIVE); 1654 list_add_tail(&bdev->device_list, &glob->device_list); 1655 lockmgr(&glob->device_list_mutex, LK_RELEASE); 1656 1657 return 0; 1658 out_no_addr_mm: 1659 ttm_bo_clean_mm(bdev, 0); 1660 out_no_sys: 1661 return ret; 1662 } 1663 EXPORT_SYMBOL(ttm_bo_device_init); 1664 1665 /* 1666 * buffer object vm functions. 1667 */ 1668 1669 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 1670 { 1671 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 1672 1673 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) { 1674 if (mem->mem_type == TTM_PL_SYSTEM) 1675 return false; 1676 1677 if (man->flags & TTM_MEMTYPE_FLAG_CMA) 1678 return false; 1679 1680 if (mem->placement & TTM_PL_FLAG_CACHED) 1681 return false; 1682 } 1683 return true; 1684 } 1685 1686 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) 1687 { 1688 1689 ttm_bo_release_mmap(bo); 1690 ttm_mem_io_free_vm(bo); 1691 } 1692 1693 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) 1694 { 1695 struct ttm_bo_device *bdev = bo->bdev; 1696 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; 1697 1698 ttm_mem_io_lock(man, false); 1699 ttm_bo_unmap_virtual_locked(bo); 1700 ttm_mem_io_unlock(man); 1701 } 1702 1703 1704 EXPORT_SYMBOL(ttm_bo_unmap_virtual); 1705 1706 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) 1707 { 1708 struct ttm_bo_device *bdev = bo->bdev; 1709 1710 /* The caller acquired bdev->vm_lock. */ 1711 RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo); 1712 } 1713 1714 /** 1715 * ttm_bo_setup_vm: 1716 * 1717 * @bo: the buffer to allocate address space for 1718 * 1719 * Allocate address space in the drm device so that applications 1720 * can mmap the buffer and access the contents. This only 1721 * applies to ttm_bo_type_device objects as others are not 1722 * placed in the drm device address space. 
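 * (ttm_bo_init() also calls this for ttm_bo_type_sg objects.)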
1723 */ 1724 1725 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo) 1726 { 1727 struct ttm_bo_device *bdev = bo->bdev; 1728 int ret; 1729 1730 retry_pre_get: 1731 ret = drm_mm_pre_get(&bdev->addr_space_mm); 1732 if (unlikely(ret != 0)) 1733 return ret; 1734 1735 lockmgr(&bdev->vm_lock, LK_EXCLUSIVE); 1736 bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm, 1737 bo->mem.num_pages, 0, 0); 1738 1739 if (unlikely(bo->vm_node == NULL)) { 1740 ret = -ENOMEM; 1741 goto out_unlock; 1742 } 1743 1744 bo->vm_node = drm_mm_get_block_atomic(bo->vm_node, 1745 bo->mem.num_pages, 0); 1746 1747 if (unlikely(bo->vm_node == NULL)) { 1748 lockmgr(&bdev->vm_lock, LK_RELEASE); 1749 goto retry_pre_get; 1750 } 1751 1752 ttm_bo_vm_insert_rb(bo); 1753 lockmgr(&bdev->vm_lock, LK_RELEASE); 1754 bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT; 1755 1756 return 0; 1757 out_unlock: 1758 lockmgr(&bdev->vm_lock, LK_RELEASE); 1759 return ret; 1760 } 1761 1762 int ttm_bo_wait(struct ttm_buffer_object *bo, 1763 bool lazy, bool interruptible, bool no_wait) 1764 { 1765 struct ttm_bo_driver *driver = bo->bdev->driver; 1766 struct ttm_bo_device *bdev = bo->bdev; 1767 void *sync_obj; 1768 int ret = 0; 1769 1770 if (likely(bo->sync_obj == NULL)) 1771 return 0; 1772 1773 while (bo->sync_obj) { 1774 1775 if (driver->sync_obj_signaled(bo->sync_obj)) { 1776 void *tmp_obj = bo->sync_obj; 1777 bo->sync_obj = NULL; 1778 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); 1779 lockmgr(&bdev->fence_lock, LK_RELEASE); 1780 driver->sync_obj_unref(&tmp_obj); 1781 lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); 1782 continue; 1783 } 1784 1785 if (no_wait) 1786 return -EBUSY; 1787 1788 sync_obj = driver->sync_obj_ref(bo->sync_obj); 1789 lockmgr(&bdev->fence_lock, LK_RELEASE); 1790 ret = driver->sync_obj_wait(sync_obj, 1791 lazy, interruptible); 1792 if (unlikely(ret != 0)) { 1793 driver->sync_obj_unref(&sync_obj); 1794 lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); 1795 return ret; 1796 } 1797 lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); 1798 if (likely(bo->sync_obj == sync_obj)) { 1799 void *tmp_obj = bo->sync_obj; 1800 bo->sync_obj = NULL; 1801 clear_bit(TTM_BO_PRIV_FLAG_MOVING, 1802 &bo->priv_flags); 1803 lockmgr(&bdev->fence_lock, LK_RELEASE); 1804 driver->sync_obj_unref(&sync_obj); 1805 driver->sync_obj_unref(&tmp_obj); 1806 lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); 1807 } else { 1808 lockmgr(&bdev->fence_lock, LK_RELEASE); 1809 driver->sync_obj_unref(&sync_obj); 1810 lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); 1811 } 1812 } 1813 return 0; 1814 } 1815 EXPORT_SYMBOL(ttm_bo_wait); 1816 1817 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) 1818 { 1819 struct ttm_bo_device *bdev = bo->bdev; 1820 int ret = 0; 1821 1822 /* 1823 * Using ttm_bo_reserve makes sure the lru lists are updated. 1824 */ 1825 1826 ret = ttm_bo_reserve(bo, true, no_wait, false, 0); 1827 if (unlikely(ret != 0)) 1828 return ret; 1829 lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); 1830 ret = ttm_bo_wait(bo, false, true, no_wait); 1831 lockmgr(&bdev->fence_lock, LK_RELEASE); 1832 if (likely(ret == 0)) 1833 atomic_inc(&bo->cpu_writers); 1834 ttm_bo_unreserve(bo); 1835 return ret; 1836 } 1837 EXPORT_SYMBOL(ttm_bo_synccpu_write_grab); 1838 1839 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) 1840 { 1841 atomic_dec(&bo->cpu_writers); 1842 } 1843 EXPORT_SYMBOL(ttm_bo_synccpu_write_release); 1844 1845 /** 1846 * A buffer object shrink method that tries to swap out the first 1847 * buffer object on the bo_global::swap_lru list. 
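 * The victim is waited on, moved to cached system memory if it is not there
 * already, unmapped and finally handed to ttm_tt_swapout().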
1848 */ 1849 1850 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) 1851 { 1852 struct ttm_bo_global *glob = 1853 container_of(shrink, struct ttm_bo_global, shrink); 1854 struct ttm_buffer_object *bo; 1855 int ret = -EBUSY; 1856 int put_count; 1857 uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM); 1858 1859 lockmgr(&glob->lru_lock, LK_EXCLUSIVE); 1860 list_for_each_entry(bo, &glob->swap_lru, swap) { 1861 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0); 1862 if (!ret) 1863 break; 1864 } 1865 1866 if (ret) { 1867 lockmgr(&glob->lru_lock, LK_RELEASE); 1868 return ret; 1869 } 1870 1871 kref_get(&bo->list_kref); 1872 1873 if (!list_empty(&bo->ddestroy)) { 1874 ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false); 1875 kref_put(&bo->list_kref, ttm_bo_release_list); 1876 return ret; 1877 } 1878 1879 put_count = ttm_bo_del_from_lru(bo); 1880 lockmgr(&glob->lru_lock, LK_RELEASE); 1881 1882 ttm_bo_list_ref_sub(bo, put_count, true); 1883 1884 /** 1885 * Wait for GPU, then move to system cached. 1886 */ 1887 1888 lockmgr(&bo->bdev->fence_lock, LK_EXCLUSIVE); 1889 ret = ttm_bo_wait(bo, false, false, false); 1890 lockmgr(&bo->bdev->fence_lock, LK_RELEASE); 1891 1892 if (unlikely(ret != 0)) 1893 goto out; 1894 1895 if ((bo->mem.placement & swap_placement) != swap_placement) { 1896 struct ttm_mem_reg evict_mem; 1897 1898 evict_mem = bo->mem; 1899 evict_mem.mm_node = NULL; 1900 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; 1901 evict_mem.mem_type = TTM_PL_SYSTEM; 1902 1903 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, 1904 false, false); 1905 if (unlikely(ret != 0)) 1906 goto out; 1907 } 1908 1909 ttm_bo_unmap_virtual(bo); 1910 1911 /** 1912 * Swap out. Buffer will be swapped in again as soon as 1913 * anyone tries to access a ttm page. 1914 */ 1915 1916 if (bo->bdev->driver->swap_notify) 1917 bo->bdev->driver->swap_notify(bo); 1918 1919 ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage); 1920 out: 1921 1922 /** 1923 * 1924 * Unreserve without putting on LRU to avoid swapping out an 1925 * already swapped buffer. 1926 */ 1927 1928 ttm_bo_unreserve_core(bo); 1929 kref_put(&bo->list_kref, ttm_bo_release_list); 1930 return ret; 1931 } 1932 1933 void ttm_bo_swapout_all(struct ttm_bo_device *bdev) 1934 { 1935 while (ttm_bo_swapout(&bdev->glob->shrink) == 0) 1936 ; 1937 } 1938 EXPORT_SYMBOL(ttm_bo_swapout_all); 1939
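/*
 * Illustrative driver-side usage of the API above. This is a sketch only:
 * the VRAM/write-combined placement and the surrounding error handling are
 * hypothetical and not taken from this file.
 *
 *	struct ttm_buffer_object *bo;
 *	struct ttm_placement placement;
 *	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC;
 *	int ret;
 *
 *	placement.fpfn = 0;
 *	placement.lpfn = 0;
 *	placement.num_placement = 1;
 *	placement.placement = &flags;
 *	placement.num_busy_placement = 1;
 *	placement.busy_placement = &flags;
 *
 *	ret = ttm_bo_create(bdev, size, ttm_bo_type_device, &placement,
 *			    0, false, NULL, &bo);
 *	if (ret)
 *		return ret;
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (ret == 0) {
 *		ret = ttm_bo_validate(bo, &placement, true, false);
 *		ttm_bo_unreserve(bo);
 *	}
 *	ttm_bo_unref(&bo);
 */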