/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <sys/sfbuf.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/wait.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible) {
		if (lockmgr(&man->io_reserve_mutex,
			    LK_EXCLUSIVE | LK_SLEEPFAIL))
			return (-EINTR);
		else
			return (0);
	}

	lockmgr(&man->io_reserve_mutex, LK_EXCLUSIVE);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	lockmgr(&man->io_reserve_mutex, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}
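
/*
 * ttm_mem_io_reserve / ttm_mem_io_free
 *
 * Reserve and release the driver's I/O resources (typically an aperture
 * range) backing @mem. On the slow path the reservation is reference
 * counted through mem->bus.io_reserved_count; when the driver's
 * io_mem_reserve() callback returns -EAGAIN, another buffer object's
 * reservation is evicted from the io_reserve LRU and the call is retried.
 */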
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}
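
/*
 * ttm_mem_reg_ioremap / ttm_mem_reg_iounmap
 *
 * Helpers for the memcpy move path: reserve the region's I/O space and,
 * when the region is iomem, map it into kernel virtual address space,
 * using the pre-mapped bus address if one exists, otherwise
 * pmap_mapdev_attr() with a write-combining or uncacheable attribute.
 */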
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
		    mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		/* iowrite32(ioread32(srcP++), dstP++); */
		*dstP++ = *srcP++;
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				vm_memattr_t prot)
{
	vm_page_t d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

	/* XXXKIB can't sleep ? */
	dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS(d), PAGE_SIZE, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	pmap_unmapdev((vm_offset_t)dst, PAGE_SIZE);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				vm_memattr_t prot)
{
	vm_page_t s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = pmap_mapdev_attr(VM_PAGE_TO_PHYS(s), PAGE_SIZE, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	pmap_unmapdev((vm_offset_t)src, PAGE_SIZE);

	return 0;
}
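
/*
 * ttm_bo_move_memcpy
 *
 * CPU fallback for moving a buffer object: both placements are mapped
 * (or the TTM pages are mapped page by page) and the contents copied,
 * clearing the destination when there is no source data to preserve.
 *
 * Illustrative caller (hypothetical driver ->move() callback, not part of
 * this file), falling back to the CPU copy when a hardware blit fails:
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 bool interruptible, bool no_wait_gpu,
 *				 struct ttm_mem_reg *new_mem)
 *	{
 *		int ret;
 *
 *		ret = mydrv_move_blit(bo, evict, no_wait_gpu, new_mem);
 *		if (ret)
 *			ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu,
 *						 new_mem);
 *		return ret;
 *	}
 */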
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			vm_memattr_t prot = ttm_io_prot(old_mem->placement);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			vm_memattr_t prot = ttm_io_prot(new_mem->placement);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	cpu_mfence();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kmalloc(sizeof(*fbo), M_DRM, M_WAITOK | M_ZERO);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;

	/*
	 * Mirror ref from kref_init() for list_kref.
	 */
	set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &fbo->priv_flags);

	*new_obj = fbo;
	return 0;
}
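
/*
 * ttm_io_prot
 *
 * Translate TTM placement caching flags into the machine memory attribute
 * used when mapping the pages: write-combining for TTM_PL_FLAG_WC,
 * uncacheable otherwise.
 */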
vm_memattr_t
ttm_io_prot(uint32_t caching_flags)
{
#if defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		return (VM_MEMATTR_WRITE_COMBINING);
	else
		/*
		 * We do not support i386, look at the linux source
		 * for the reason of the comment.
		 */
		return (VM_MEMATTR_UNCACHEABLE);
#else
#error Port me
#endif
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
		    bo->mem.bus.offset + offset, size,
		    (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		map->size = size;
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	vm_memattr_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int i, ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->sf = sf_buf_alloc(map->page);
		map->virtual = (void *)sf_buf_kva(map->sf);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			VM_MEMATTR_WRITE_COMBINING :
			ttm_io_prot(mem->placement);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->num_pages = num_pages;
		map->virtual =
			(void *)kmem_alloc_nofault(&kernel_map,
						   num_pages * PAGE_SIZE,
						   VM_SUBSYS_DRM_TTM,
						   PAGE_SIZE);
		if (map->virtual != NULL) {
			for (i = 0; i < num_pages; i++) {
				/* XXXKIB hack */
				pmap_page_set_memattr(ttm->pages[start_page +
				    i], prot);
			}
			pmap_qenter((vm_offset_t)map->virtual,
			    &ttm->pages[start_page], num_pages);
		}
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
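
/*
 * ttm_bo_kmap / ttm_bo_kunmap
 *
 * Map part of a buffer object into kernel address space, either through
 * its I/O aperture or through its TTM pages.
 *
 * Illustrative usage (hypothetical caller; assumes the bo is reserved and
 * idle, and uses the ttm_kmap_obj_virtual() helper from ttm_bo_api.h):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *ptr;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret == 0) {
 *		ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		(access ptr here; use io accessors when is_iomem is true)
 *		ttm_bo_kunmap(&map);
 *	}
 */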
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		pmap_unmapdev((vm_offset_t)map->virtual, map->size);
		break;
	case ttm_bo_map_vmap:
		pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
		kmem_free(&kernel_map, (vm_offset_t)map->virtual,
			  map->num_pages * PAGE_SIZE);
		break;
	case ttm_bo_map_kmap:
		sf_buf_free(map->sf);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
	map->sf = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
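
/*
 * ttm_bo_move_accel_cleanup
 *
 * Finish an accelerated move after the driver has scheduled the GPU copy
 * and fenced it with @sync_obj. For evictions the fence is waited upon and
 * the old node is freed immediately; otherwise the old placement (and, for
 * fixed memory types, the TTM) is handed to a ghost buffer object so it is
 * only released once the GPU operation has completed.
 */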
683 */ 684 685 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) 686 ghost_obj->ttm = NULL; 687 else 688 bo->ttm = NULL; 689 690 ttm_bo_unreserve(ghost_obj); 691 ttm_bo_unref(&ghost_obj); 692 } 693 694 *old_mem = *new_mem; 695 new_mem->mm_node = NULL; 696 697 return 0; 698 } 699 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); 700