xref: /dragonfly/sys/dev/drm/ttm/ttm_bo_util.c (revision ef3ac1d1)
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <sys/sfbuf.h>
#include <linux/export.h>
#include <linux/wait.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

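/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: true if this is an eviction rather than a regular move.
 * @no_wait_gpu: Return immediately rather than wait for the GPU if busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move for a buffer backed by a TTM: unbind from the old
 * placement, release the old node, adjust the caching state of the
 * pages, and bind the TTM to @new_mem. On success @new_mem is taken
 * over by bo->mem and @new_mem->mm_node is cleared.
 */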
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

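/*
 * Serialize io_mem_reserve / io_mem_free against eviction of VM
 * mappings. Managers with io_reserve_fastpath set skip the lock
 * entirely.
 *
 * Note: LK_SLEEPFAIL fails the acquisition whenever the lock had to
 * sleep, not only on signal delivery, so the interruptible case can
 * return -EINTR on plain contention; a signal-catching acquire would
 * track Linux's mutex_lock_interruptible() more closely.
 */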
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible) {
		if (lockmgr(&man->io_reserve_mutex,
			    LK_EXCLUSIVE | LK_SLEEPFAIL))
			return (-EINTR);
		else
			return (0);
	}

	lockmgr(&man->io_reserve_mutex, LK_EXCLUSIVE);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	lockmgr(&man->io_reserve_mutex, LK_RELEASE);
}

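/*
 * Evict one buffer object from the io reserve LRU: drop its kernel
 * VM mappings so that its aperture space can be reused by the
 * caller's pending io_mem_reserve().
 */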
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

static void ttm_mem_io_free(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

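/*
 * Map a memory region for CPU access, taking the io reserve lock
 * around the reservation. iomem regions without a pre-mapped kernel
 * address get a device mapping with write-combining or uncacheable
 * attributes, depending on the placement flags.
 */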
static
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
		    mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

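/*
 * Per-page copy helpers for ttm_bo_move_memcpy(): iomem to iomem,
 * iomem to a TTM page, and a TTM page to iomem. TTM pages are
 * temporarily mapped with the requested memory attributes via
 * pmap_mapdev_attr().
 */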
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		/* iowrite32(ioread32(srcP++), dstP++); */
		*dstP++ = *srcP++;
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				vm_memattr_t prot)
{
	vm_page_t d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

	/* XXXKIB can't sleep ? */
	dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS(d), PAGE_SIZE, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	pmap_unmapdev((vm_offset_t)dst, PAGE_SIZE);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				vm_memattr_t prot)
{
	vm_page_t s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = pmap_mapdev_attr(VM_PAGE_TO_PHYS(s), PAGE_SIZE, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	pmap_unmapdev((vm_offset_t)src, PAGE_SIZE);

	return 0;
}

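/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: true if this is an eviction rather than a regular move.
 * @no_wait_gpu: Return immediately rather than wait for the GPU if busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for drivers without an accelerated copy:
 * maps both memory regions and copies the contents page by page with
 * the CPU, choosing the copy direction so that overlapping ranges
 * within the same memory type are handled correctly. On success
 * @new_mem becomes bo->mem and the old node is released.
 */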
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/* System memory to system memory move. NOP. */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/* Don't move nonexistent data. */
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	/* ttm may be NULL for a move between two fixed memory regions. */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret) {
			/*
			 * If we fail here, don't nuke the mm node
			 * as the bo still owns it.
			 */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			vm_memattr_t prot = ttm_io_prot(old_mem->placement);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			vm_memattr_t prot = ttm_io_prot(new_mem->placement);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret) {
			/* Failing here means keep the old copy as-is. */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}
	cpu_mfence();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo, M_DRM);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kmalloc(sizeof(*fbo), M_DRM, M_WAITOK | M_ZERO);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;

	/*
	 * Mirror ref from kref_init() for list_kref.
	 */
	set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &fbo->priv_flags);

	*new_obj = fbo;
	return 0;
}

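/**
 * ttm_io_prot
 *
 * @caching_flags: The caching flags of the map.
 *
 * Return the memory attribute to use when mapping buffer pages:
 * write-combining when requested by the placement, uncacheable
 * otherwise.
 */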
vm_memattr_t
ttm_io_prot(uint32_t caching_flags)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		return (VM_MEMATTR_WRITE_COMBINING);
	else
		/*
		 * Linux additionally special-cases pre-486 CPUs here
		 * and skips the uncached mapping on them; we do not
		 * support those, so unconditionally map uncacheable.
		 */
		return (VM_MEMATTR_UNCACHEABLE);
#else
#error Port me
#endif
}
EXPORT_SYMBOL(ttm_io_prot);


static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
		    bo->mem.bus.offset + offset, size,
		    (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		map->size = size;
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

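/*
 * Kernel-map a range of pages of a TTM-backed (system memory) buffer.
 * A single cached page is mapped through an sf_buf; anything else gets
 * a contiguous KVA range with the page attributes dictated by the
 * placement.
 */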
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	vm_memattr_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int i, ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->sf = sf_buf_alloc(map->page);
		map->virtual = (void *)sf_buf_kva(map->sf);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 * A cached placement keeps the normal write-back
		 * attribute (Linux uses PAGE_KERNEL here); everything
		 * else takes its attribute from the placement flags.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			VM_MEMATTR_WRITE_BACK :
			ttm_io_prot(mem->placement);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->num_pages = num_pages;
		map->virtual = (void *)kmem_alloc_nofault(&kernel_map,
		    num_pages * PAGE_SIZE, PAGE_SIZE);
		if (map->virtual != NULL) {
			for (i = 0; i < num_pages; i++) {
				/* XXXKIB hack */
				pmap_page_set_memattr(ttm->pages[start_page +
				    i], prot);
			}
			pmap_qenter((vm_offset_t)map->virtual,
			    &ttm->pages[start_page], num_pages);
		}
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

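/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: Pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Set up a kernel virtual mapping, using ioremap, vmap, an sf_buf or
 * the pre-mapped bus address, depending on where the buffer currently
 * resides. Unmap with ttm_bo_kunmap(). A minimal usage sketch,
 * assuming an already reserved and populated bo:
 *
 *	struct ttm_bo_kmap_obj map;
 *	int ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret == 0) {
 *		memset(map.virtual, 0, bo->num_pages << PAGE_SHIFT);
 *		ttm_bo_kunmap(&map);
 *	}
 *
 * Returns:
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */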
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

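/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Tear down a mapping obtained with ttm_bo_kmap() and release the
 * io reservation taken on its behalf.
 */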
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		pmap_unmapdev((vm_offset_t)map->virtual, map->size);
		break;
	case ttm_bo_map_vmap:
		pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
		kmem_free(&kernel_map, (vm_offset_t)map->virtual,
		    map->num_pages * PAGE_SIZE);
		break;
	case ttm_bo_map_kmap:
		sf_buf_free(map->sf);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
	map->sf = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

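/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when the move is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Cleanup function to be called after an accelerated move has been
 * scheduled. For a non-evicting move, a new temporary buffer object
 * representing the old placement is created, the sync object is hung
 * on it, and it is unreferenced so that it is destroyed only when the
 * GPU operation has completed. This helps pipeline buffer moves.
 */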
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
681