/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * $FreeBSD: head/sys/dev/drm2/ttm/ttm_bo.c 248060 2013-03-08 18:11:02Z dumbbell $
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/rbtree.h>
#include <linux/wait.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob);

MALLOC_DEFINE(M_TTM_BO, "ttm_bo", "TTM Buffer Objects");

static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	kprintf("    has_type: %d\n", man->has_type);
	kprintf("    use_type: %d\n", man->use_type);
	kprintf("    flags: 0x%08X\n", man->flags);
	kprintf("    gpu_offset: 0x%08lX\n", man->gpu_offset);
	kprintf("    size: %ju\n", (uintmax_t)man->size);
	kprintf("    available_caching: 0x%08X\n", man->available_caching);
	kprintf("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	int i, ret;
	uint32_t mem_type;

	kprintf("No space for %p (%lu pages, %luK, %luM)\n",
		bo, bo->mem.num_pages, bo->mem.size >> 10,
		bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
					      &mem_type);
		if (ret)
			return;
		kprintf("  placement[%d]=0x%08X (%u)\n",
			i, placement->placement[i], mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

#if 0
static ssize_t ttm_bo_global_show(struct ttm_bo_global *glob,
				  char *buffer)
{

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}
#endif

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo, M_TTM_BO);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
				  bool interruptible)
{
	const char *wmsg;
	int flags, ret;

	ret = 0;
	if (interruptible) {
		flags = PCATCH;
		wmsg = "ttbowi";
	} else {
		flags = 0;
		wmsg = "ttbowu";
	}
	while (ttm_bo_is_reserved(bo)) {
		ret = -lksleep(bo, &bo->glob->lru_lock, flags, wmsg, 0);
		if (ret != 0)
			break;
	}
	return (ret);
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!ttm_bo_is_reserved(bo));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
			 bool interruptible,
			 bool no_wait, bool use_sequence, uint32_t sequence)
{
	int ret;

	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
		/**
		 * Deadlock avoidance for multi-bo reserving.
		 */
		if (use_sequence && bo->seq_valid) {
			/**
			 * We've already reserved this one.
			 */
			if (unlikely(sequence == bo->val_seq))
				return -EDEADLK;
			/**
			 * Already reserved by a thread that will not back
			 * off for us. We need to back off.
			 */
			if (unlikely(sequence - bo->val_seq < (1 << 31)))
				return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		ret = ttm_bo_wait_unreserved(bo, interruptible);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		bool wake_up = false;
		/**
		 * Wake up waiters that may need to recheck for deadlock,
		 * if we decreased the sequence number.
		 */
		if (unlikely((bo->val_seq - sequence < (1 << 31))
			     || !bo->seq_valid))
			wake_up = true;

		/*
		 * In the worst case with memory ordering these values can be
		 * seen in the wrong order. However, since we call wake_up_all
		 * in that case, this will hopefully not pose a problem,
		 * and the worst case would only cause someone to accidentally
		 * hit -EAGAIN in ttm_bo_reserve when they see an old value of
		 * val_seq. That can only happen if seq_valid was written
		 * before val_seq was, and just means some slightly increased
		 * cpu usage.
		 */
		bo->val_seq = sequence;
		bo->seq_valid = true;
		if (wake_up)
			wake_up_all(&bo->event_queue);
	} else {
		bo->seq_valid = false;
	}

	return 0;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	kref_sub(&bo->list_kref, count,
		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}
int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
				   sequence);
	if (likely(ret == 0)) {
		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		put_count = ttm_bo_del_from_lru(bo);
		lockmgr(&glob->lru_lock, LK_RELEASE);
		ttm_bo_list_ref_sub(bo, put_count, true);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve);

int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
				  bool interruptible, uint32_t sequence)
{
	bool wake_up = false;
	int ret;

	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
		WARN_ON(bo->seq_valid && sequence == bo->val_seq);

		ret = ttm_bo_wait_unreserved(bo, interruptible);

		if (unlikely(ret))
			return ret;
	}

	/**
	 * Wake up waiters that may need to recheck for deadlock,
	 * if we decreased the sequence number.
	 */
	if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
		wake_up = true;

	bo->val_seq = sequence;
	bo->seq_valid = true;
	if (wake_up)
		wake_up_all(&bo->event_queue);

	return 0;
}

int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
			    bool interruptible, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count, ret;

	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
	if (likely(!ret)) {
		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		put_count = ttm_bo_del_from_lru(bo);
		lockmgr(&glob->lru_lock, LK_RELEASE);
		ttm_bo_list_ref_sub(bo, put_count, true);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve_slowpath);

void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
{
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_bo_unreserve_locked(bo);
	lockmgr(&glob->lru_lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
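#if 0
/*
 * Usage sketch (not compiled): the reserve -> modify -> unreserve pattern
 * as a driver might use it. The "mydrv_" name is an illustrative
 * assumption, not part of this file.
 */
static int mydrv_touch_buffer(struct ttm_buffer_object *bo)
{
	int ret;

	/* Interruptible wait, no deadlock-avoidance sequence. */
	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		return ret;	/* e.g. -ERESTART, -EBUSY or -EDEADLK */

	/* ... bo is now off the LRU lists and safe to validate or move ... */

	ttm_bo_unreserve(bo);	/* puts bo back on its LRU lists */
	return 0;
}
#endif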
/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through */
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		kprintf("[TTM] Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */
	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			kprintf("[TTM] Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Call bo::reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);

	/*
	 * Since the final reference to this bo may not be dropped by
	 * the current task we have to put a memory barrier here to make
	 * sure the changes done in this function are always visible.
	 *
	 * This function only needs protection against the final kref_put.
	 */
	cpu_mfence();
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	void *sync_obj = NULL;
	int put_count;
	int ret;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	(void) ttm_bo_wait(bo, false, false, true);
	if (!ret && !bo->sync_obj) {
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		put_count = ttm_bo_del_from_lru(bo);

		lockmgr(&glob->lru_lock, LK_RELEASE);
		ttm_bo_cleanup_memtype_use(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);

		return;
	}
	if (bo->sync_obj)
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	lockmgr(&bdev->fence_lock, LK_RELEASE);

	if (!ret) {
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
	}

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	if (sync_obj) {
		driver->sync_obj_flush(sync_obj);
		driver->sync_obj_unref(&sync_obj);
	}
	taskqueue_enqueue_timeout(taskqueue_thread[mycpuid], &bdev->wq,
	    ((hz / 100) < 1) ? 1 : hz / 100);
}

/**
 * ttm_bo_cleanup_refs_and_unlock
 * If the bo is idle, remove it from the delayed and LRU lists and unref it.
 * If it is not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held; this function
 * will drop both before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, false, true);

	if (ret && !no_wait_gpu) {
		void *sync_obj;

		/*
		 * Take a reference to the fence and unreserve,
		 * at this point the buffer should be dead, so
		 * no new sync objects can be attached.
		 */
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		lockmgr(&bdev->fence_lock, LK_RELEASE);

		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
		lockmgr(&glob->lru_lock, LK_RELEASE);

		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
		driver->sync_obj_unref(&sync_obj);
		if (ret)
			return ret;

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		ret = ttm_bo_wait(bo, false, false, true);
		WARN_ON(ret);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (ret)
			return ret;

		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			lockmgr(&glob->lru_lock, LK_RELEASE);
			return 0;
		}
	} else
		lockmgr(&bdev->fence_lock, LK_RELEASE);

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
		lockmgr(&glob->lru_lock, LK_RELEASE);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
		if (remove_all && ret) {
			lockmgr(&glob->lru_lock, LK_RELEASE);
			ret = ttm_bo_reserve_nolru(entry, false, false,
						   false, 0);
			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			lockmgr(&glob->lru_lock, LK_RELEASE);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	lockmgr(&glob->lru_lock, LK_RELEASE);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(void *arg, int pending __unused)
{
	struct ttm_bo_device *bdev = arg;

	if (ttm_bo_delayed_delete(bdev, false)) {
		taskqueue_enqueue_timeout(taskqueue_thread[mycpuid], &bdev->wq,
		    ((hz / 100) < 1) ? 1 : hz / 100);
	}
}
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	lockmgr(&bdev->vm_lock, LK_RELEASE);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	int pending;

	taskqueue_cancel_timeout(taskqueue_thread[mycpuid], &bdev->wq, &pending);
	if (pending)
		taskqueue_drain_timeout(taskqueue_thread[mycpuid], &bdev->wq);
	return (pending);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched) {
		taskqueue_enqueue_timeout(taskqueue_thread[mycpuid], &bdev->wq,
		    ((hz / 100) < 1) ? 1 : hz / 100);
	}
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	lockmgr(&bdev->fence_lock, LK_RELEASE);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTART) {
			kprintf("[TTM] Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	BUG_ON(!ttm_bo_is_reserved(bo));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
			       no_wait_gpu);
	if (ret) {
		if (ret != -ERESTART) {
			kprintf("[TTM] Failed to find memory space for buffer 0x%p eviction\n",
				bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTART)
			kprintf("[TTM] Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       uint32_t mem_type,
			       bool interruptible,
			       bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  uint32_t mem_type,
				  struct ttm_placement *placement,
				  struct ttm_mem_reg *mem,
				  bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, placement, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((proposed_placement & man->available_caching) == 0)
		return false;

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible,
		     bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
					      &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
					       mem_type,
					       placement->placement[i],
					       &cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = (*man->func->get_node)(man, bo, placement, mem);
			if (unlikely(ret))
				return ret;
		}
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
					      &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man,
					  mem_type,
					  placement->busy_placement[i],
					  &cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
					     interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTART)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTART : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
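#if 0
/*
 * Usage sketch (not compiled): how a caller builds a ttm_placement and asks
 * ttm_bo_mem_space() for room. The flag choices and the "mydrv_" name are
 * illustrative assumptions.
 */
static int mydrv_find_space(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *mem)
{
	struct ttm_placement placement;
	uint32_t flags[1] = { TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC };
	uint32_t busy_flags[1] = { TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED };

	placement.fpfn = 0;			/* no page-range restriction */
	placement.lpfn = 0;
	placement.num_placement = 1;		/* preferred: write-combined VRAM */
	placement.placement = flags;
	placement.num_busy_placement = 1;	/* fallback: cached system memory */
	placement.busy_placement = busy_flags;

	/* Interruptible, and willing to wait for the GPU while evicting. */
	return ttm_bo_mem_space(bo, &placement, mem, true, false);
}
#endif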
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      bool interruptible,
			      bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(!ttm_bo_is_reserved(bo));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static int ttm_bo_mem_compat(struct ttm_placement *placement,
			     struct ttm_mem_reg *mem)
{
	int i;

	if (mem->mm_node && placement->lpfn != 0 &&
	    (mem->start < placement->fpfn ||
	     mem->start + mem->num_pages > placement->lpfn))
		return -1;

	for (i = 0; i < placement->num_placement; i++) {
		if ((placement->placement[i] & mem->placement &
		     TTM_PL_MASK_CACHING) &&
		    (placement->placement[i] & mem->placement &
		     TTM_PL_MASK_MEM))
			return i;
	}
	return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	BUG_ON(!ttm_bo_is_reserved(bo));
	/* Check that range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
		    (placement->lpfn - placement->fpfn) < bo->num_pages)
			return -EINVAL;
	/*
	 * Check whether we need to move buffer.
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
	if (ret < 0) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
			   struct ttm_placement *placement)
{
	BUG_ON((placement->fpfn || placement->lpfn) &&
	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));

	return 0;
}

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct vm_object *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		kprintf("[TTM] Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo, M_TTM_BO);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		kprintf("[TTM] Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo, M_TTM_BO);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, placement);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_bo_validate(bo, placement, interruptible, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
		  unsigned long size,
		  enum ttm_bo_type type,
		  struct ttm_placement *placement,
		  uint32_t page_alignment,
		  bool interruptible,
		  struct vm_object *persistent_swap_storage,
		  struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kmalloc(sizeof(*bo), M_TTM_BO, M_WAITOK | M_ZERO);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);
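#if 0
/*
 * Usage sketch (not compiled): creating a one-page kernel buffer object
 * with ttm_bo_create(). The "mydrv_" name is an illustrative assumption;
 * the placement would typically come from the caller, as in the earlier
 * mydrv_find_space() sketch.
 */
static int mydrv_alloc_bo(struct ttm_bo_device *bdev,
			  struct ttm_placement *placement,
			  struct ttm_buffer_object **p_bo)
{
	/* PAGE_SIZE object, default alignment, interruptible waits,
	 * no persistent swap backing. */
	return ttm_bo_create(bdev, PAGE_SIZE, ttm_bo_type_kernel,
			     placement, 0, true, NULL, p_bo);
}
#endif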
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	while (!list_empty(&man->lru)) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				kprintf("[TTM] Cleanup eviction failed\n");
			}
		}
		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	}
	lockmgr(&glob->lru_lock, LK_RELEASE);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		kprintf("[TTM] Illegal memory type %u\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		kprintf("[TTM] Trying to take down uninitialized memory manager type %u\n",
			mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		kprintf("[TTM] Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		kprintf("[TTM] Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	lockinit(&man->io_reserve_mutex, "ttmman", 0, LK_CANRECURSE);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob)
{

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	vm_page_free(glob->dummy_read_page);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	if (refcount_release(&glob->kobj_ref))
		ttm_bo_global_kobj_release(glob);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	lockinit(&glob->device_list_mutex, "ttmdlm", 0, LK_CANRECURSE);
	lockinit(&glob->lru_lock, "ttmlru", 0, LK_CANRECURSE);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = vm_page_alloc_contig(0,
	    VM_MAX_ADDRESS, PAGE_SIZE, 0, 1*PAGE_SIZE, VM_MEMATTR_UNCACHEABLE);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		kprintf("[TTM] Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	refcount_init(&glob->kobj_ref, 1);
	return (0);

out_no_shrink:
	vm_page_free(glob->dummy_read_page);
out_no_drp:
	kfree(glob, M_DRM_GLOBAL);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);


int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				kprintf("[TTM] DRM memory manager type %u is not clean\n",
					i);
			}
			man->has_type = false;
		}
	}

	lockmgr(&glob->device_list_mutex, LK_EXCLUSIVE);
	list_del(&bdev->device_list);
	lockmgr(&glob->device_list_mutex, LK_RELEASE);

	if (taskqueue_cancel_timeout(taskqueue_thread[mycpuid], &bdev->wq, NULL))
		taskqueue_drain_timeout(taskqueue_thread[mycpuid], &bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	lockmgr(&glob->lru_lock, LK_RELEASE);

	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	drm_mm_takedown(&bdev->addr_space_mm);
	lockmgr(&bdev->vm_lock, LK_RELEASE);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	lockinit(&bdev->vm_lock, "ttmvml", 0, LK_CANRECURSE);
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;

	TIMEOUT_TASK_INIT(taskqueue_thread[mycpuid], &bdev->wq, 0,
	    ttm_bo_delayed_workqueue, bdev);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	lockinit(&bdev->fence_lock, "ttmfence", 0, LK_CANRECURSE);
	lockmgr(&glob->device_list_mutex, LK_EXCLUSIVE);
	list_add_tail(&bdev->device_list, &glob->device_list);
	lockmgr(&glob->device_list_mutex, LK_RELEASE);

	return 0;
out_no_addr_mm:
	ttm_bo_clean_mm(bdev, 0);
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
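#if 0
/*
 * Usage sketch (not compiled): bringing up a ttm_bo_device and a VRAM
 * manager. "mydrv_ttm_init", DRM_FILE_PAGE_OFFSET and the 256 MB size are
 * illustrative assumptions; drivers define their own mmap offset and sizes.
 */
static int mydrv_ttm_init(struct ttm_bo_device *bdev,
			  struct ttm_bo_global *glob,
			  struct ttm_bo_driver *driver)
{
	int ret;

	/* Registers the device and initializes TTM_PL_SYSTEM internally. */
	ret = ttm_bo_device_init(bdev, glob, driver,
				 DRM_FILE_PAGE_OFFSET, false);
	if (ret)
		return ret;

	/* Additional memory types are set up by the driver afterwards;
	 * p_size is given in pages. */
	return ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			      (256 * 1024 * 1024) >> PAGE_SHIFT);
}
#endif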
/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{

	ttm_bo_release_mmap(bo);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct ttm_buffer_object *cur_bo;
	unsigned long offset = bo->vm_node->start;
	unsigned long cur_offset;

	while (*cur) {
		parent = *cur;
		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
		cur_offset = cur_bo->vm_node->start;
		if (offset < cur_offset)
			cur = &parent->rb_left;
		else if (offset > cur_offset)
			cur = &parent->rb_right;
		else
			BUG();
	}

	rb_link_node(&bo->vm_rb, parent, cur);
	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		lockmgr(&bdev->vm_lock, LK_RELEASE);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	lockmgr(&bdev->vm_lock, LK_RELEASE);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	lockmgr(&bdev->vm_lock, LK_RELEASE);
	return ret;
}

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	struct ttm_bo_device *bdev = bo->bdev;
	void *sync_obj;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			lockmgr(&bdev->fence_lock, LK_RELEASE);
			driver->sync_obj_unref(&tmp_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		ret = driver->sync_obj_wait(sync_obj,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
			return ret;
		}
		lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		if (likely(bo->sync_obj == sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			lockmgr(&bdev->fence_lock, LK_RELEASE);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		} else {
			lockmgr(&bdev->fence_lock, LK_RELEASE);
			driver->sync_obj_unref(&sync_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	lockmgr(&bo->bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, false, false);
	lockmgr(&bo->bdev->fence_lock, LK_RELEASE);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);