/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <sys/sfbuf.h>
#include <linux/export.h>
#include <linux/wait.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
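/*
 * I/O space reservation helpers.
 *
 * The functions below serialize a driver's io_mem_reserve()/io_mem_free()
 * hooks.  Managers that set io_reserve_fastpath skip the mutex entirely;
 * otherwise io_reserve_mutex guards the reservation state, and an LRU of
 * buffer objects with live reservations (io_reserve_lru) lets
 * ttm_mem_io_evict() tear down the oldest mapping when a reservation
 * attempt reports -EAGAIN (e.g. a full aperture).
 */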
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible) {
		if (lockmgr(&man->io_reserve_mutex,
			    LK_EXCLUSIVE | LK_SLEEPFAIL))
			return (-EINTR);
		else
			return (0);
	}

	lockmgr(&man->io_reserve_mutex, LK_EXCLUSIVE);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	lockmgr(&man->io_reserve_mutex, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}
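/*
 * ttm_mem_io_reserve()/ttm_mem_io_free() reference-count the bus-space
 * reservation: the driver's io_mem_reserve() hook only runs when
 * io_reserved_count goes 0 -> 1, and the matching io_mem_free() only
 * when it drops back to 0, so nested reserve/free pairs are cheap.
 * On the fastpath the driver hook is called on every reservation
 * request instead, and the free side does nothing.
 */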
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
		    mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		/* iowrite32(ioread32(srcP++), dstP++); */
		*dstP++ = *srcP++;
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				vm_memattr_t prot)
{
	vm_page_t d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

	/* XXXKIB can't sleep ? */
	dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS(d), PAGE_SIZE, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	pmap_unmapdev((vm_offset_t)dst, PAGE_SIZE);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				vm_memattr_t prot)
{
	vm_page_t s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = pmap_mapdev_attr(VM_PAGE_TO_PHYS(s), PAGE_SIZE, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	pmap_unmapdev((vm_offset_t)src, PAGE_SIZE);

	return 0;
}
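/*
 * ttm_bo_move_memcpy() is the fallback CPU copy path.  It maps both the
 * old and the new placement, then copies page by page.  When source and
 * destination have the same memory type and the ranges may overlap
 * (new_mem->start < old_mem->start + old_mem->size), the copy runs
 * backwards (dir == -1, starting at the last page, add = num_pages - 1)
 * so pages are not clobbered before they are read, like memmove().
 */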
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			vm_memattr_t prot = ttm_io_prot(old_mem->placement);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			vm_memattr_t prot = ttm_io_prot(new_mem->placement);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	cpu_mfence();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kmalloc(sizeof(*fbo), M_DRM, M_WAITOK | M_ZERO);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;

	/*
	 * Mirror ref from kref_init() for list_kref.
	 */
	set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &fbo->priv_flags);

	*new_obj = fbo;
	return 0;
}
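/*
 * ttm_io_prot() translates TTM caching flags into a machine memory
 * attribute for device mappings set up with pmap_mapdev_attr() or
 * pmap_page_set_memattr(): TTM_PL_FLAG_WC yields a write-combining
 * mapping, and everything else falls back to uncacheable, which is
 * always a safe attribute for I/O memory.
 */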
vm_memattr_t
ttm_io_prot(uint32_t caching_flags)
{
#if defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		return (VM_MEMATTR_WRITE_COMBINING);
	else
		/*
		 * We do not support i386; see the Linux source for
		 * how the 32-bit case is handled there.
		 */
		return (VM_MEMATTR_UNCACHEABLE);
#else
#error Port me
#endif
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
		    bo->mem.bus.offset + offset, size,
		    (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		map->size = size;
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	vm_memattr_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int i, ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->sf = sf_buf_alloc(map->page);
		map->virtual = (void *)sf_buf_kva(map->sf);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 * A cached placement wants an ordinary write-back
		 * mapping, the equivalent of Linux's PAGE_KERNEL here.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK :
		    ttm_io_prot(mem->placement);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->num_pages = num_pages;
		map->virtual = (void *)kmem_alloc_nofault(&kernel_map,
		    num_pages * PAGE_SIZE, PAGE_SIZE);
		if (map->virtual != NULL) {
			for (i = 0; i < num_pages; i++) {
				/* XXXKIB hack */
				pmap_page_set_memattr(ttm->pages[start_page +
				    i], prot);
			}
			pmap_qenter((vm_offset_t)map->virtual,
			    &ttm->pages[start_page], num_pages);
		}
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
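/*
 * ttm_bo_kmap()/ttm_bo_kunmap() are the public kernel-mapping entry
 * points; they pick one of four mapping types (premapped, iomap, kmap
 * via sf_buf, or vmap) and record it in the map object so the unmap
 * path can undo exactly what was done.  A minimal usage sketch,
 * error handling trimmed and assuming the ttm_kmap_obj_virtual()
 * accessor from ttm_bo_api.h:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *ptr;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		... access the buffer through ptr ...
 *		ttm_bo_kunmap(&map);
 *	}
 */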
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		pmap_unmapdev((vm_offset_t)map->virtual, map->size);
		break;
	case ttm_bo_map_vmap:
		pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
		kmem_free(&kernel_map, (vm_offset_t)map->virtual,
		    map->num_pages * PAGE_SIZE);
		break;
	case ttm_bo_map_kmap:
		sf_buf_free(map->sf);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
	map->sf = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
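/*
 * ttm_bo_move_accel_cleanup() finishes a GPU-accelerated move that was
 * fenced with @sync_obj.  On eviction it waits for the copy via
 * ttm_bo_wait(), since the source may be reused immediately.  For
 * ordinary moves it instead hands the old placement to a "ghost"
 * buffer object (see ttm_buffer_object_transfer() above), so the old
 * memory is released asynchronously once the fence signals and the
 * move pipelines without stalling the CPU.
 */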
679 */ 680 681 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) 682 ghost_obj->ttm = NULL; 683 else 684 bo->ttm = NULL; 685 686 ttm_bo_unreserve(ghost_obj); 687 ttm_bo_unref(&ghost_obj); 688 } 689 690 *old_mem = *new_mem; 691 new_mem->mm_node = NULL; 692 693 return 0; 694 } 695 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); 696