/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <sys/sfbuf.h>
#include <linux/export.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible) {
		if (lockmgr(&man->io_reserve_mutex,
			    LK_EXCLUSIVE | LK_SLEEPFAIL))
			return (-EINTR);
		else
			return (0);
	}

	lockmgr(&man->io_reserve_mutex, LK_EXCLUSIVE);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	lockmgr(&man->io_reserve_mutex, LK_RELEASE);
}

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}
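/*
 * Every io_mem_reserve/io_mem_free call into the driver below is
 * serialized by ttm_mem_io_lock()/ttm_mem_io_unlock() unless the
 * manager's fast path is enabled.  A minimal sketch of the pattern,
 * mirroring ttm_mem_reg_ioremap() later in this file ("bdev" and
 * "mem" stand for any device/region pair):
 *
 *	(void) ttm_mem_io_lock(man, false);
 *	ret = ttm_mem_io_reserve(bdev, mem);
 *	ttm_mem_io_unlock(man);
 *
 * When the driver's io_mem_reserve() hook returns -EAGAIN, the
 * reserve path evicts the least-recently-used io-reserved BO from
 * the manager's LRU list and retries.
 */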
static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

static void ttm_mem_io_free(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}
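/*
 * ttm_mem_reg_ioremap()/ttm_mem_reg_iounmap() bundle the reserve and
 * map steps for ttm_bo_move_memcpy().  The memory attribute follows
 * the placement flags: write-combining when TTM_PL_FLAG_WC is set,
 * uncacheable otherwise.
 */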
static
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
		    mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		/* iowrite32(ioread32(srcP++), dstP++); */
		*dstP++ = *srcP++;
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				vm_memattr_t prot)
{
	vm_page_t d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

	/* XXXKIB can't sleep ? */
	dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS(d), PAGE_SIZE, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	pmap_unmapdev((vm_offset_t)dst, PAGE_SIZE);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				vm_memattr_t prot)
{
	vm_page_t s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = pmap_mapdev_attr(VM_PAGE_TO_PHYS(s), PAGE_SIZE, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	pmap_unmapdev((vm_offset_t)src, PAGE_SIZE);

	return 0;
}
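/*
 * Note on the copy loop in ttm_bo_move_memcpy() below: when the
 * source and destination ranges overlap within the same memory type,
 * the copy runs backwards (dir == -1, starting at the last page) so
 * that no page is clobbered before it has been copied.  For example,
 * a 4-page BO moved from offset 0 to offset 2 of the same aperture
 * is copied in page order 3, 2, 1, 0.
 */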
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	/* TTM can be NULL when moving between two io-mapped regions. */
	if (ttm != NULL && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret) {
			/* if we fail here don't nuke the mm node
			 * as the bo still owns it */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			vm_memattr_t prot = ttm_io_prot(old_mem->placement);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			vm_memattr_t prot = ttm_io_prot(new_mem->placement);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret) {
			/* failing here, means keep old copy as-is */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}
	cpu_mfence();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

MALLOC_DEFINE(M_TTM_TRANSF_OBJ, "ttm_transf_obj", "TTM Transfer Objects");

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo, M_TTM_TRANSF_OBJ);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kmalloc(sizeof(*fbo), M_TTM_TRANSF_OBJ, M_WAITOK);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;

	*new_obj = fbo;
	return 0;
}
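/*
 * ttm_buffer_object_transfer() is consumed by ttm_bo_move_accel_cleanup()
 * below: the ghost object inherits the old placement and a reference to
 * the sync object, so the old backing store is released only once the
 * GPU copy has fenced and the ghost's last reference is dropped.
 */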
vm_memattr_t
ttm_io_prot(uint32_t caching_flags)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		return (VM_MEMATTR_WRITE_COMBINING);
	else
		/*
		 * The i386 quirks are not supported here; see the
		 * Linux version of this function for the reasoning.
		 */
		return (VM_MEMATTR_UNCACHEABLE);
#else
#error Port me
#endif
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
		    bo->mem.bus.offset + offset, size,
		    (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		map->size = size;
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	vm_memattr_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int i, ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->sf = sf_buf_alloc(map->page);
		map->virtual = (void *)sf_buf_kva(map->sf);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			VM_MEMATTR_WRITE_COMBINING :
			ttm_io_prot(mem->placement);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->num_pages = num_pages;
		map->virtual = (void *)kmem_alloc_nofault(&kernel_map,
		    num_pages * PAGE_SIZE, PAGE_SIZE);
		if (map->virtual != NULL) {
			for (i = 0; i < num_pages; i++) {
				/* XXXKIB hack */
				pmap_page_set_memattr(ttm->pages[start_page +
				    i], prot);
			}
			pmap_qenter((vm_offset_t)map->virtual,
			    &ttm->pages[start_page], num_pages);
		}
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
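/*
 * A minimal usage sketch for the kmap interface below ("bo" is any
 * reserved buffer object; the memset is purely illustrative):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		memset(ttm_kmap_obj_virtual(&map, &is_iomem), 0,
 *		       bo->num_pages << PAGE_SHIFT);
 *		ttm_bo_kunmap(&map);
 *	}
 */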
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		pmap_unmapdev((vm_offset_t)map->virtual, map->size);
		break;
	case ttm_bo_map_vmap:
		pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
		kmem_free(&kernel_map, (vm_offset_t)map->virtual,
		    map->num_pages * PAGE_SIZE);
		break;
	case ttm_bo_map_kmap:
		sf_buf_free(map->sf);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
	map->sf = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
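/*
 * ttm_bo_move_accel_cleanup() is called by a driver after it has
 * queued an asynchronous copy on the GPU.  An illustrative call
 * sequence (my_driver_copy() is a hypothetical driver hook that
 * returns the fence/sync object for the copy):
 *
 *	sync_obj = my_driver_copy(bo, old_mem, new_mem);
 *	ret = ttm_bo_move_accel_cleanup(bo, sync_obj, evict,
 *					no_wait_gpu, new_mem);
 */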
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);