/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <sys/sfbuf.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/wait.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible) {
		if (lockmgr(&man->io_reserve_mutex,
			    LK_EXCLUSIVE | LK_SLEEPFAIL))
			return -EINTR;
		else
			return 0;
	}

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}
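/*
 * Illustrative sketch (not part of the original file): a driver's move()
 * callback would typically fall back to ttm_bo_move_ttm() for
 * system<->TT transitions it cannot accelerate. The callback name and
 * the surrounding driver structure here are hypothetical.
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 bool interruptible, bool no_wait_gpu,
 *				 struct ttm_mem_reg *new_mem)
 *	{
 *		// Try a hardware blit first (driver-specific, omitted),
 *		// then fall back to the CPU-visible move.
 *		return ttm_bo_move_ttm(bo, evict, no_wait_gpu, new_mem);
 *	}
 */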
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
		    mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}
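/*
 * Illustrative sketch (not part of the original file): outside the
 * fastpath, ttm_mem_io_reserve()/ttm_mem_io_free() keep a per-region
 * refcount, so every reserve must be balanced by exactly one free under
 * the manager's io-reserve lock, as ttm_mem_reg_ioremap() above and
 * ttm_mem_reg_iounmap() below do:
 *
 *	(void) ttm_mem_io_lock(man, false);
 *	ret = ttm_mem_io_reserve(bdev, mem);	// io_reserved_count 0 -> 1
 *	ttm_mem_io_unlock(man);
 *	...
 *	(void) ttm_mem_io_lock(man, false);
 *	ttm_mem_io_free(bdev, mem);		// io_reserved_count 1 -> 0
 *	ttm_mem_io_unlock(man);
 */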
static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		/* iowrite32(ioread32(srcP++), dstP++); */
		*dstP++ = *srcP++;
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}
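/*
 * Summary comment (added for clarity, not in the original file): the
 * three helpers above cover every pairing of backing stores that
 * ttm_bo_move_memcpy() below can encounter, one page at a time:
 *
 *	ttm_copy_io_page()	iomem     -> iomem
 *	ttm_copy_io_ttm_page()	iomem     -> ttm pages
 *	ttm_copy_ttm_io_page()	ttm pages -> iomem
 *
 * In the mixed cases the kernel mapping of the ttm page is created with
 * a protection derived from the placement flags of the ttm side, so the
 * CPU copy observes the caching attributes that placement requested.
 */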
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages * PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	cpu_mfence();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kmalloc(sizeof(*fbo), M_DRM, M_WAITOK | M_ZERO);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;

	/*
	 * Mirror ref from kref_init() for list_kref.
	 */
	set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &fbo->priv_flags);

	*new_obj = fbo;
	return 0;
}
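/*
 * Illustrative sketch (not part of the original file): the ghost object
 * produced above is consumed the way ttm_bo_move_accel_cleanup() does
 * further down in this file -- the old storage is handed to the ghost,
 * which is then dropped and destroys itself once idle:
 *
 *	struct ttm_buffer_object *ghost_obj;
 *
 *	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 *	if (ret)
 *		return ret;
 *	...
 *	ttm_bo_unreserve(ghost_obj);
 *	ttm_bo_unref(&ghost_obj);	// freed via ttm_transfered_destroy()
 */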
vm_memattr_t
ttm_io_prot(uint32_t caching_flags)
{
#if defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		return (VM_MEMATTR_WRITE_COMBINING);
	else
		/*
		 * We do not support i386, look at the linux source
		 * for the reason of the comment.
		 */
		return (VM_MEMATTR_UNCACHEABLE);
#else
#error Port me
#endif
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
		    bo->mem.bus.offset + offset, size,
		    (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		map->size = size;
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	vm_memattr_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int i, ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->sf = sf_buf_alloc((struct vm_page *)map->page);
		map->virtual = (void *)sf_buf_kva(map->sf);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->num_pages = num_pages;
		map->virtual =
		    (void *)kmem_alloc_nofault(&kernel_map,
					       num_pages * PAGE_SIZE,
					       VM_SUBSYS_DRM_TTM,
					       PAGE_SIZE);
		if (map->virtual != NULL) {
			for (i = 0; i < num_pages; i++) {
				/* XXXKIB hack */
				pmap_page_set_memattr((struct vm_page *)
				    ttm->pages[start_page + i], prot);
			}
			pmap_qenter((vm_offset_t)map->virtual,
			    (struct vm_page **)&ttm->pages[start_page],
			    num_pages);
		}
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
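/*
 * Summary comment (added for clarity, not in the original file):
 * ttm_bo_kmap_ttm() has two paths. A single cached page is mapped
 * through the lightweight sf_buf API (ttm_bo_map_kmap); anything else
 * gets a contiguous KVA range from kmem_alloc_nofault(), has its page
 * attributes forced to match the placement, and is entered with
 * pmap_qenter() (ttm_bo_map_vmap). ttm_bo_kunmap() below reverses
 * whichever path was taken, keyed on map->bo_kmap_type.
 */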
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		pmap_unmapdev((vm_offset_t)map->virtual, map->size);
		break;
	case ttm_bo_map_vmap:
		pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
		kmem_free(&kernel_map, (vm_offset_t)map->virtual,
			  map->num_pages * PAGE_SIZE);
		break;
	case ttm_bo_map_kmap:
		sf_buf_free(map->sf);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
	map->sf = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
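/*
 * Illustrative sketch (not part of the original file): a typical
 * kernel-side CPU access to a buffer object pairs ttm_bo_kmap() with
 * ttm_bo_kunmap(); the reserved bo and the clearing write here are
 * hypothetical.
 *
 *	struct ttm_bo_kmap_obj map;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret)
 *		return ret;
 *	memset(map.virtual, 0, bo->num_pages << PAGE_SHIFT);
 *	ttm_bo_kunmap(&map);
 */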
710 */ 711 712 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) 713 ghost_obj->ttm = NULL; 714 else 715 bo->ttm = NULL; 716 717 ttm_bo_unreserve(ghost_obj); 718 ttm_bo_unref(&ghost_obj); 719 } 720 721 *old_mem = *new_mem; 722 new_mem->mm_node = NULL; 723 724 return 0; 725 } 726 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); 727