xref: /dragonfly/sys/dev/drm/ttm/ttm_bo_util.c (revision b187502f)
1 /**************************************************************************
2  *
3  * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29  */
30 
31 #include <drm/ttm/ttm_bo_driver.h>
32 #include <drm/ttm/ttm_placement.h>
33 #include <drm/drm_vma_manager.h>
34 #include <linux/io.h>
35 #include <linux/highmem.h>
36 #include <linux/wait.h>
37 #include <linux/slab.h>
38 #include <linux/vmalloc.h>
39 #include <linux/module.h>
40 
/* Release the mm node backing @bo's current placement. */
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}
45 
/**
 * ttm_bo_move_ttm - Move a buffer object by unbinding/rebinding its ttm.
 * @bo: The buffer object to move.
 * @evict: Unused here; part of the common move-function signature.
 * @no_wait_gpu: Unused here; part of the common move-function signature.
 * @new_mem: The destination memory region.
 *
 * Drops the old placement back to TTM_PL_SYSTEM, updates the ttm's
 * caching state, binds the ttm to @new_mem (unless it is system
 * memory) and installs @new_mem as the current placement.
 * Returns 0 on success or a negative error code.
 */
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		/* Tear down the old binding and fall back to system RAM. */
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	/* Commit: the caller's new_mem no longer owns the mm node. */
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
78 
79 int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
80 {
81 	if (likely(man->io_reserve_fastpath))
82 		return 0;
83 
84 	if (interruptible)
85 		return mutex_lock_interruptible(&man->io_reserve_mutex);
86 
87 	mutex_lock(&man->io_reserve_mutex);
88 	return 0;
89 }
90 EXPORT_SYMBOL(ttm_mem_io_lock);
91 
92 void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
93 {
94 	if (likely(man->io_reserve_fastpath))
95 		return;
96 
97 	mutex_unlock(&man->io_reserve_mutex);
98 }
99 EXPORT_SYMBOL(ttm_mem_io_unlock);
100 
101 static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
102 {
103 	struct ttm_buffer_object *bo;
104 
105 	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
106 		return -EAGAIN;
107 
108 	bo = list_first_entry(&man->io_reserve_lru,
109 			      struct ttm_buffer_object,
110 			      io_reserve_lru);
111 	list_del_init(&bo->io_reserve_lru);
112 	ttm_bo_unmap_virtual_locked(bo);
113 
114 	return 0;
115 }
116 
117 
118 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
119 		       struct ttm_mem_reg *mem)
120 {
121 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
122 	int ret = 0;
123 
124 	if (!bdev->driver->io_mem_reserve)
125 		return 0;
126 	if (likely(man->io_reserve_fastpath))
127 		return bdev->driver->io_mem_reserve(bdev, mem);
128 
129 	if (bdev->driver->io_mem_reserve &&
130 	    mem->bus.io_reserved_count++ == 0) {
131 retry:
132 		ret = bdev->driver->io_mem_reserve(bdev, mem);
133 		if (ret == -EAGAIN) {
134 			ret = ttm_mem_io_evict(man);
135 			if (ret == 0)
136 				goto retry;
137 		}
138 	}
139 	return ret;
140 }
141 EXPORT_SYMBOL(ttm_mem_io_reserve);
142 
143 void ttm_mem_io_free(struct ttm_bo_device *bdev,
144 		     struct ttm_mem_reg *mem)
145 {
146 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
147 
148 	if (likely(man->io_reserve_fastpath))
149 		return;
150 
151 	if (bdev->driver->io_mem_reserve &&
152 	    --mem->bus.io_reserved_count == 0 &&
153 	    bdev->driver->io_mem_free)
154 		bdev->driver->io_mem_free(bdev, mem);
155 
156 }
157 EXPORT_SYMBOL(ttm_mem_io_free);
158 
159 int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
160 {
161 	struct ttm_mem_reg *mem = &bo->mem;
162 	int ret;
163 
164 	if (!mem->bus.io_reserved_vm) {
165 		struct ttm_mem_type_manager *man =
166 			&bo->bdev->man[mem->mem_type];
167 
168 		ret = ttm_mem_io_reserve(bo->bdev, mem);
169 		if (unlikely(ret != 0))
170 			return ret;
171 		mem->bus.io_reserved_vm = true;
172 		if (man->use_io_reserve_lru)
173 			list_add_tail(&bo->io_reserve_lru,
174 				      &man->io_reserve_lru);
175 	}
176 	return 0;
177 }
178 
179 void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
180 {
181 	struct ttm_mem_reg *mem = &bo->mem;
182 
183 	if (mem->bus.io_reserved_vm) {
184 		mem->bus.io_reserved_vm = false;
185 		list_del_init(&bo->io_reserve_lru);
186 		ttm_mem_io_free(bo->bdev, mem);
187 	}
188 }
189 
/*
 * Map the io space backing @mem into kernel virtual address space,
 * returning the mapping in @*virtual (left NULL for system memory).
 *
 * Takes an io reservation on @mem; a successful mapping must later be
 * balanced with ttm_mem_reg_iounmap(). Returns 0 on success or a
 * negative error code.
 */
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	/* Not iomem (plain system RAM): *virtual stays NULL, success. */
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		/* Driver already provided a kernel mapping; reuse it. */
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			/* Undo the reservation taken above. */
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}
221 
222 static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
223 			 void *virtual)
224 {
225 	struct ttm_mem_type_manager *man;
226 
227 	man = &bdev->man[mem->mem_type];
228 
229 	if (virtual && mem->bus.addr == NULL)
230 		iounmap(virtual);
231 	(void) ttm_mem_io_lock(man, false);
232 	ttm_mem_io_free(bdev, mem);
233 	ttm_mem_io_unlock(man);
234 }
235 
236 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
237 {
238 	uint32_t *dstP =
239 	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
240 	uint32_t *srcP =
241 	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
242 
243 	int i;
244 	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
245 		iowrite32(ioread32(srcP++), dstP++);
246 	return 0;
247 }
248 
/*
 * Copy one page from a kernel-mapped io region @src into the ttm's
 * page array (index @page), mapping the destination page with
 * protection @prot. Returns -ENOMEM if the page is missing or
 * cannot be mapped.
 */
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	/* x86 can kmap a highmem page with non-default protection. */
	dst = kmap_atomic_prot(d, prot);
#else
	/* Elsewhere, vmap is needed for non-PAGE_KERNEL protection. */
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	/* Unmap with the counterpart of whichever mapping was used. */
#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}
285 
/*
 * Copy one page from the ttm's page array (index @page) into a
 * kernel-mapped io region @dst, mapping the source page with
 * protection @prot. Returns -ENOMEM if the page is missing or
 * cannot be mapped.
 */
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	/* x86 can kmap a highmem page with non-default protection. */
	src = kmap_atomic_prot(s, prot);
#else
	/* Elsewhere, vmap is needed for non-PAGE_KERNEL protection. */
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	/* Unmap with the counterpart of whichever mapping was used. */
#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}
321 
/**
 * ttm_bo_move_memcpy - Move a buffer object's backing store with the CPU.
 * @bo: The buffer object to move.
 * @evict: Unused here; part of the common move-function signature.
 * @no_wait_gpu: Unused here; part of the common move-function signature.
 * @new_mem: The destination memory region.
 *
 * Maps old and new regions, copies page by page, then installs
 * @new_mem as the bo's current placement. On success the old mm node
 * is released; on error it is kept so the bo remains valid.
 */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	/* Snapshot of the old placement, used by the cleanup path below. */
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	/*
	 * Source and destination may overlap within the same memory
	 * type: copy backwards so pages aren't clobbered before read.
	 */
	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			/* system -> iomem */
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			/* iomem -> system */
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			/* iomem -> iomem */
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	/* Make sure all writes have landed before committing the move. */
	mb();
out2:
	/* Commit: bo->mem now describes the new placement. */
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		/* Fixed memory doesn't use a ttm; tear the old one down. */
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	/* After out2, old_mem aliases the new placement for this unmap. */
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
422 
/* Destroy callback for ghost objects made by ttm_buffer_object_transfer(). */
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}
427 
428 /**
429  * ttm_buffer_object_transfer
430  *
431  * @bo: A pointer to a struct ttm_buffer_object.
432  * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
433  * holding the data of @bo with the old placement.
434  *
435  * This is a utility function that may be called after an accelerated move
436  * has been scheduled. A new buffer object is created as a placeholder for
437  * the old data while it's being copied. When that buffer object is idle,
438  * it can be destroyed, releasing the space of the old placement.
439  * Returns:
440  * !0: Failure.
441  */
442 
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	/* DragonFly-style kmalloc; M_WAITOK sleeps rather than failing. */
	fbo = kmalloc(sizeof(*fbo), M_DRM, M_WAITOK);
	if (!fbo)
		return -ENOMEM;

	/* Start from a shallow copy of the entire object... */
	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	/* The ghost takes its own reference on the bo's fence, if any. */
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	/* Freshly initialized reservation object: trylock cannot contend. */
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	/*
	 * NOTE(review): this sets a flag, not a reference; the original
	 * comment here ("Mirror ref from kref_init() for list_kref")
	 * looks misplaced — verify against the DragonFly-specific
	 * TTM_BO_PRIV_FLAG_ACTIVE bookkeeping.
	 */
	set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &fbo->priv_flags);

	*new_obj = fbo;
	return 0;
}
492 
/*
 * Derive page-protection bits for mapping ttm pages with the given
 * caching placement flags, starting from @tmp (typically PAGE_KERNEL).
 * Architectures without special handling return @tmp unchanged.
 */
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
521 
522 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
523 			  unsigned long offset,
524 			  unsigned long size,
525 			  struct ttm_bo_kmap_obj *map)
526 {
527 	struct ttm_mem_reg *mem = &bo->mem;
528 
529 	if (bo->mem.bus.addr) {
530 		map->bo_kmap_type = ttm_bo_map_premapped;
531 		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
532 	} else {
533 		map->bo_kmap_type = ttm_bo_map_iomap;
534 		if (mem->placement & TTM_PL_FLAG_WC)
535 			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
536 						  size);
537 		else
538 			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
539 						       size);
540 	}
541 	return (!map->virtual) ? -ENOMEM : 0;
542 }
543 
/*
 * Kmap the ttm-backed (system memory) pages of @bo. A single cached
 * page uses kmap(); anything else is made virtually contiguous with
 * vmap() using protection bits matching the placement.
 * Returns -ENOMEM on mapping failure, else 0.
 */
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	/* Ensure backing pages exist before mapping them. */
	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
584 
/**
 * ttm_bo_kmap - Map part of a buffer object for CPU access.
 * @bo: The buffer object.
 * @start_page: First page of the region to map.
 * @num_pages: Number of pages to map.
 * @map: Kmap object to fill in; release with ttm_bo_kunmap().
 *
 * Dispatches to an ioremap- or kmap/vmap-based mapping depending on
 * whether the bo currently lives in io memory. Returns 0 on success,
 * -EINVAL for out-of-range requests, or a mapping error code.
 */
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	/* Hold an io reservation for the lifetime of the mapping. */
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
619 
620 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
621 {
622 	struct ttm_buffer_object *bo = map->bo;
623 	struct ttm_mem_type_manager *man =
624 		&bo->bdev->man[bo->mem.mem_type];
625 
626 	if (!map->virtual)
627 		return;
628 	switch (map->bo_kmap_type) {
629 	case ttm_bo_map_iomap:
630 		iounmap(map->virtual);
631 		break;
632 	case ttm_bo_map_vmap:
633 		vunmap(map->virtual);
634 		break;
635 	case ttm_bo_map_kmap:
636 		kunmap(map->page);
637 		break;
638 	case ttm_bo_map_premapped:
639 		break;
640 	default:
641 		BUG();
642 	}
643 	(void) ttm_mem_io_lock(man, false);
644 	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
645 	ttm_mem_io_unlock(man);
646 	map->virtual = NULL;
647 	map->page = NULL;
648 }
649 EXPORT_SYMBOL(ttm_bo_kunmap);
650 
/**
 * ttm_bo_move_accel_cleanup - Finish an accelerated (GPU) move.
 * @bo: The buffer object being moved.
 * @sync_obj: Fence signalling completion of the GPU copy.
 * @evict: True for evictions: wait for the GPU and free the old
 * placement immediately.
 * @no_wait_gpu: Unused here; part of the common move-function signature.
 * @new_mem: The destination memory region.
 *
 * For evictions this waits for idle and frees the old node. For
 * ordinary moves the old placement is handed to a throwaway "ghost"
 * buffer object so it is released only once the fence signals.
 * Returns 0 on success or a negative error code.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	/* Swap in the new fence; the old one is unreferenced below. */
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		/* Fixed memory doesn't use a ttm; tear the old one down. */
		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	/* Commit: the caller's new_mem no longer owns the mm node. */
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
725