xref: /openbsd/sys/dev/pci/drm/ttm/ttm_bo_util.c (revision 73471bf0)
1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /**************************************************************************
3  *
4  * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 /*
29  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
30  */
31 
32 #include <drm/ttm/ttm_bo_driver.h>
33 #include <drm/ttm/ttm_placement.h>
34 #include <drm/drm_vma_manager.h>
35 #include <linux/io.h>
36 #include <linux/highmem.h>
37 #include <linux/wait.h>
38 #include <linux/slab.h>
39 #include <linux/vmalloc.h>
40 #include <linux/module.h>
41 #include <linux/dma-resv.h>
42 
43 struct ttm_transfer_obj {
44 	struct ttm_buffer_object base;
45 	struct ttm_buffer_object *bo;
46 };
47 
48 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
49 {
50 	ttm_resource_free(bo, &bo->mem);
51 }
52 
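/*
 * ttm_bo_move_ttm - move a TTM-backed buffer object to a new placement.
 * @bo: the buffer object to move.
 * @ctx: operation context (interruptible / no_wait_gpu).
 * @new_mem: the destination placement.
 *
 * Waits for the buffer to become idle and unbinds the TTM when leaving a
 * non-system placement, adjusts the caching state of the backing pages to
 * match @new_mem, and populates and rebinds the TTM when the destination is
 * not system memory. On success @new_mem is assigned to @bo.
 *
 * Returns 0 on success, a negative error code on failure.
 */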
53 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
54 		    struct ttm_operation_ctx *ctx,
55 		    struct ttm_resource *new_mem)
56 {
57 	struct ttm_tt *ttm = bo->ttm;
58 	struct ttm_resource *old_mem = &bo->mem;
59 	int ret;
60 
61 	if (old_mem->mem_type != TTM_PL_SYSTEM) {
62 		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
63 
64 		if (unlikely(ret != 0)) {
65 			if (ret != -ERESTARTSYS)
66 				pr_err("Failed to expire sync object before unbinding TTM\n");
67 			return ret;
68 		}
69 
70 		ttm_bo_tt_unbind(bo);
71 		ttm_bo_free_old_node(bo);
72 		old_mem->mem_type = TTM_PL_SYSTEM;
73 	}
74 
75 	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
76 	if (unlikely(ret != 0))
77 		return ret;
78 
79 	if (new_mem->mem_type != TTM_PL_SYSTEM) {
81 		ret = ttm_tt_populate(bo->bdev, ttm, ctx);
82 		if (unlikely(ret != 0))
83 			return ret;
84 
85 		ret = ttm_bo_tt_bind(bo, new_mem);
86 		if (unlikely(ret != 0))
87 			return ret;
88 	}
89 
90 	ttm_bo_assign_mem(bo, new_mem);
91 	return 0;
92 }
93 EXPORT_SYMBOL(ttm_bo_move_ttm);
94 
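/*
 * ttm_mem_io_reserve()/ttm_mem_io_free() acquire and release the bus address
 * information needed to map @mem for CPU access, using the driver's optional
 * io_mem_reserve/io_mem_free hooks. Reservation is a no-op when the resource
 * already carries a bus offset or kernel address, or when the driver provides
 * no hook.
 */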
95 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
96 		       struct ttm_resource *mem)
97 {
98 	if (mem->bus.offset || mem->bus.addr)
99 		return 0;
100 
101 	mem->bus.is_iomem = false;
102 	if (!bdev->driver->io_mem_reserve)
103 		return 0;
104 
105 	return bdev->driver->io_mem_reserve(bdev, mem);
106 }
107 
108 void ttm_mem_io_free(struct ttm_bo_device *bdev,
109 		     struct ttm_resource *mem)
110 {
111 	if (!mem->bus.offset && !mem->bus.addr)
112 		return;
113 
114 	if (bdev->driver->io_mem_free)
115 		bdev->driver->io_mem_free(bdev, mem);
116 
117 	mem->bus.offset = 0;
118 	mem->bus.addr = NULL;
119 }
120 
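/*
 * ttm_resource_ioremap()/ttm_resource_iounmap() wrap the reserve/free helpers
 * above. On this port, io resources are mapped through bus_space_map(), with
 * a prefetchable (write-combining) mapping for TTM_PL_FLAG_WC placements; for
 * resources that are not iomem, *virtual is left NULL.
 */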
121 static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
122 			       struct ttm_resource *mem,
123 			       void **virtual)
124 {
125 	int ret;
126 	void *addr;
127 	int flags;
128 
129 	*virtual = NULL;
130 	ret = ttm_mem_io_reserve(bdev, mem);
131 	if (ret || !mem->bus.is_iomem)
132 		return ret;
133 
134 	if (mem->bus.addr) {
135 		addr = mem->bus.addr;
136 	} else {
137 		size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
138 
139 		if (mem->placement & TTM_PL_FLAG_WC)
140 			flags = BUS_SPACE_MAP_PREFETCHABLE;
141 		else
142 			flags = 0;
143 
144 		if (bus_space_map(bdev->memt, mem->bus.offset,
145 		    bus_size, BUS_SPACE_MAP_LINEAR | flags,
146 		    &mem->bus.bsh)) {
147 			printf("%s bus_space_map failed\n", __func__);
148 			return -ENOMEM;
149 		}
150 
151 		addr = bus_space_vaddr(bdev->memt, mem->bus.bsh);
152 
153 		if (!addr) {
154 			ttm_mem_io_free(bdev, mem);
155 			return -ENOMEM;
156 		}
157 	}
158 	*virtual = addr;
159 	return 0;
160 }
161 
162 static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
163 				struct ttm_resource *mem,
164 				void *virtual)
165 {
166 	if (virtual && mem->bus.addr == NULL)
167 		bus_space_unmap(bdev->memt, mem->bus.bsh,
168 		    (size_t)mem->num_pages << PAGE_SHIFT);
169 	ttm_mem_io_free(bdev, mem);
170 }
171 
172 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
173 {
174 	uint32_t *dstP =
175 	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
176 	uint32_t *srcP =
177 	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
178 
179 	int i;
180 	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
181 		iowrite32(ioread32(srcP++), dstP++);
182 	return 0;
183 }
184 
185 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
186 				unsigned long page,
187 				pgprot_t prot)
188 {
189 	struct vm_page *d = ttm->pages[page];
190 	void *dst;
191 
192 	if (!d)
193 		return -ENOMEM;
194 
195 	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
196 	dst = kmap_atomic_prot(d, prot);
197 	if (!dst)
198 		return -ENOMEM;
199 
200 	memcpy_fromio(dst, src, PAGE_SIZE);
201 
202 	kunmap_atomic(dst);
203 
204 	return 0;
205 }
206 
207 static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
208 				unsigned long page,
209 				pgprot_t prot)
210 {
211 	struct vm_page *s = ttm->pages[page];
212 	void *src;
213 
214 	if (!s)
215 		return -ENOMEM;
216 
217 	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
218 	src = kmap_atomic_prot(s, prot);
219 	if (!src)
220 		return -ENOMEM;
221 
222 	memcpy_toio(dst, src, PAGE_SIZE);
223 
224 	kunmap_atomic(src);
225 
226 	return 0;
227 }
228 
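/*
 * ttm_bo_move_memcpy - CPU copy fallback for buffer moves.
 *
 * Waits for @bo to become idle, maps the old and new resources for CPU
 * access, and copies the contents page by page, clearing the destination
 * instead when there is no source data to preserve. The copy direction is
 * chosen so that overlapping ranges within the same memory type are handled
 * correctly. When the destination manager does not use TT, the TTM is
 * destroyed after the move.
 *
 * Returns 0 on success, a negative error code on failure.
 */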
229 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
230 		       struct ttm_operation_ctx *ctx,
231 		       struct ttm_resource *new_mem)
232 {
233 	struct ttm_bo_device *bdev = bo->bdev;
234 	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
235 	struct ttm_tt *ttm = bo->ttm;
236 	struct ttm_resource *old_mem = &bo->mem;
237 	struct ttm_resource old_copy = *old_mem;
238 	void *old_iomap;
239 	void *new_iomap;
240 	int ret;
241 	unsigned long i;
242 	unsigned long page;
243 	unsigned long add = 0;
244 	int dir;
245 
246 	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
247 	if (ret)
248 		return ret;
249 
250 	ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
251 	if (ret)
252 		return ret;
253 	ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
254 	if (ret)
255 		goto out;
256 
257 	/*
258 	 * Single TTM move. NOP.
259 	 */
260 	if (old_iomap == NULL && new_iomap == NULL)
261 		goto out2;
262 
263 	/*
264 	 * Don't move nonexistent data. Clear destination instead.
265 	 */
266 	if (old_iomap == NULL &&
267 	    (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
268 			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
269 		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
270 		goto out2;
271 	}
272 
273 	/*
274 	 * TTM might be null for moves within the same region.
275 	 */
276 	if (ttm) {
277 		ret = ttm_tt_populate(bdev, ttm, ctx);
278 		if (ret)
279 			goto out1;
280 	}
281 
282 	add = 0;
283 	dir = 1;
284 
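	/*
	 * If both placements live in the same memory type and the destination
	 * range starts below the end of the source range, the ranges may
	 * overlap. Copy backwards (last page first) in that case so no source
	 * page is overwritten before it has been copied.
	 */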
285 	if ((old_mem->mem_type == new_mem->mem_type) &&
286 	    (new_mem->start < old_mem->start + old_mem->size)) {
287 		dir = -1;
288 		add = new_mem->num_pages - 1;
289 	}
290 
291 	for (i = 0; i < new_mem->num_pages; ++i) {
292 		page = i * dir + add;
293 		if (old_iomap == NULL) {
294 			pgprot_t prot = ttm_io_prot(old_mem->placement,
295 						    PAGE_KERNEL);
296 			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
297 						   prot);
298 		} else if (new_iomap == NULL) {
299 			pgprot_t prot = ttm_io_prot(new_mem->placement,
300 						    PAGE_KERNEL);
301 			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
302 						   prot);
303 		} else {
304 			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
305 		}
306 		if (ret)
307 			goto out1;
308 	}
309 	mb();
310 out2:
311 	old_copy = *old_mem;
312 
313 	ttm_bo_assign_mem(bo, new_mem);
314 
315 	if (!man->use_tt)
316 		ttm_bo_tt_destroy(bo);
317 
318 out1:
319 	ttm_resource_iounmap(bdev, old_mem, new_iomap);
320 out:
321 	ttm_resource_iounmap(bdev, &old_copy, old_iomap);
322 
323 	/*
324 	 * On error, keep the mm node!
325 	 */
326 	if (!ret)
327 		ttm_resource_free(bo, &old_copy);
328 	return ret;
329 }
330 EXPORT_SYMBOL(ttm_bo_move_memcpy);
331 
332 static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
333 {
334 	struct ttm_transfer_obj *fbo;
335 
336 	fbo = container_of(bo, struct ttm_transfer_obj, base);
337 	ttm_bo_put(fbo->bo);
338 	kfree(fbo);
339 }
340 
341 /**
342  * ttm_buffer_object_transfer
343  *
344  * @bo: A pointer to a struct ttm_buffer_object.
345  * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
346  * holding the data of @bo with the old placement.
347  *
348  * This is a utility function that may be called after an accelerated move
349  * has been scheduled. A new buffer object is created as a placeholder for
350  * the old data while it's being copied. When that buffer object is idle,
351  * it can be destroyed, releasing the space of the old placement.
352  * Returns:
353  * 0 on success, a negative error code on failure.
354  */
355 
356 static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
357 				      struct ttm_buffer_object **new_obj)
358 {
359 	struct ttm_transfer_obj *fbo;
360 	int ret;
361 
362 	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
363 	if (!fbo)
364 		return -ENOMEM;
365 
366 	fbo->base = *bo;
367 	fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
368 
369 	ttm_bo_get(bo);
370 	fbo->bo = bo;
371 
372 	/**
373 	 * Fix up members that we shouldn't copy directly:
374 	 * TODO: Explicit member copy would probably be better here.
375 	 */
376 
377 	atomic_inc(&ttm_bo_glob.bo_count);
378 	INIT_LIST_HEAD(&fbo->base.ddestroy);
379 	INIT_LIST_HEAD(&fbo->base.lru);
380 	INIT_LIST_HEAD(&fbo->base.swap);
381 	fbo->base.moving = NULL;
382 	drm_vma_node_reset(&fbo->base.base.vma_node);
383 
384 	kref_init(&fbo->base.kref);
385 	fbo->base.destroy = &ttm_transfered_destroy;
386 	fbo->base.acc_size = 0;
387 	if (bo->type != ttm_bo_type_sg)
388 		fbo->base.base.resv = &fbo->base.base._resv;
389 
390 	dma_resv_init(&fbo->base.base._resv);
391 	fbo->base.base.dev = NULL;
392 	ret = dma_resv_trylock(&fbo->base.base._resv);
393 	WARN_ON(!ret);
394 
395 	*new_obj = &fbo->base;
396 	return 0;
397 }
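
/*
 * Sketch of how the ghost object created above is typically consumed (see
 * ttm_bo_move_to_ghost() and ttm_bo_pipeline_gutting() below for the real
 * users). Illustrative only; fence stands for the fence of the operation
 * that still uses the old memory:
 *
 *	struct ttm_buffer_object *ghost;
 *	int ret;
 *
 *	ret = ttm_buffer_object_transfer(bo, &ghost);
 *	if (ret == 0) {
 *		dma_resv_add_excl_fence(&ghost->base._resv, fence);
 *		dma_resv_unlock(&ghost->base._resv);
 *		ttm_bo_put(ghost);
 *	}
 *
 * Dropping the last reference does not free the old placement immediately;
 * the delayed-destroy path releases it once the fence has signaled.
 */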
398 
399 #ifdef __linux__
400 pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
401 {
402 	/* Cached mappings need no adjustment */
403 	if (caching_flags & TTM_PL_FLAG_CACHED)
404 		return tmp;
405 
406 #if defined(__i386__) || defined(__x86_64__)
407 	if (caching_flags & TTM_PL_FLAG_WC)
408 		tmp = pgprot_writecombine(tmp);
409 	else if (boot_cpu_data.x86 > 3)
410 		tmp = pgprot_noncached(tmp);
411 #endif
412 #if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
413     defined(__powerpc__) || defined(__mips__)
414 	if (caching_flags & TTM_PL_FLAG_WC)
415 		tmp = pgprot_writecombine(tmp);
416 	else
417 		tmp = pgprot_noncached(tmp);
418 #endif
419 #if defined(__sparc__)
420 	tmp = pgprot_noncached(tmp);
421 #endif
422 	return tmp;
423 }
424 EXPORT_SYMBOL(ttm_io_prot);
425 #endif
426 
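/*
 * OpenBSD variant of ttm_io_prot(): translate TTM caching flags into a page
 * protection value. Cached placements keep the protection unchanged,
 * write-combined placements use pgprot_writecombine(), everything else is
 * mapped uncached. (The Linux version above is retained under #ifdef __linux__
 * for reference.)
 */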
427 pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
428 {
429 	/* Cached mappings need no adjustment */
430 	if (caching_flags & TTM_PL_FLAG_CACHED)
431 		return tmp;
432 
433 	if (caching_flags & TTM_PL_FLAG_WC)
434 		tmp = pgprot_writecombine(tmp);
435 	else
436 		tmp = pgprot_noncached(tmp);
437 
438 	return tmp;
439 }
440 
441 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
442 			  unsigned long offset,
443 			  unsigned long size,
444 			  struct ttm_bo_kmap_obj *map)
445 {
446 	int flags;
447 	struct ttm_resource *mem = &bo->mem;
448 
449 	if (bo->mem.bus.addr) {
450 		map->bo_kmap_type = ttm_bo_map_premapped;
451 		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
452 	} else {
453 		map->bo_kmap_type = ttm_bo_map_iomap;
454 		if (mem->placement & TTM_PL_FLAG_WC)
455 			flags = BUS_SPACE_MAP_PREFETCHABLE;
456 		else
457 			flags = 0;
458 		if (bus_space_map(bo->bdev->memt,
459 		    bo->mem.bus.offset + offset,
460 		    size, BUS_SPACE_MAP_LINEAR | flags,
461 		    &bo->mem.bus.bsh)) {
462 			printf("%s bus_space_map failed\n", __func__);
463 			map->virtual = NULL;
464 		} else
465 			map->virtual = bus_space_vaddr(bo->bdev->memt,
466 			    bo->mem.bus.bsh);
467 	}
468 	return (!map->virtual) ? -ENOMEM : 0;
469 }
470 
471 static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
472 			   unsigned long start_page,
473 			   unsigned long num_pages,
474 			   struct ttm_bo_kmap_obj *map)
475 {
476 	struct ttm_resource *mem = &bo->mem;
477 	struct ttm_operation_ctx ctx = {
478 		.interruptible = false,
479 		.no_wait_gpu = false
480 	};
481 	struct ttm_tt *ttm = bo->ttm;
482 	pgprot_t prot;
483 	int ret;
484 
485 	BUG_ON(!ttm);
486 
487 	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
488 	if (ret)
489 		return ret;
490 
491 	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
492 		/*
493 		 * We're mapping a single page, and the desired
494 		 * page protection is consistent with the bo.
495 		 */
496 
497 		map->bo_kmap_type = ttm_bo_map_kmap;
498 		map->page = ttm->pages[start_page];
499 		map->virtual = kmap(map->page);
500 	} else {
501 		/*
502 		 * We need to use vmap to get the desired page protection
503 		 * or to make the buffer object look contiguous.
504 		 */
505 		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
506 		map->bo_kmap_type = ttm_bo_map_vmap;
507 		map->virtual = vmap(ttm->pages + start_page, num_pages,
508 				    0, prot);
509 	}
510 	return (!map->virtual) ? -ENOMEM : 0;
511 }
512 
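/*
 * ttm_bo_kmap - map part of a buffer object into kernel address space.
 * @bo: the buffer object.
 * @start_page: first page of the range to map.
 * @num_pages: number of pages to map.
 * @map: state needed later to tear the mapping down again.
 *
 * Io resources are mapped through bus_space (or reused when premapped);
 * system resources use kmap() for a single cached page and vmap() otherwise.
 * Undo the mapping with ttm_bo_kunmap().
 *
 * Returns 0 on success, a negative error code on failure.
 */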
513 int ttm_bo_kmap(struct ttm_buffer_object *bo,
514 		unsigned long start_page, unsigned long num_pages,
515 		struct ttm_bo_kmap_obj *map)
516 {
517 	unsigned long offset, size;
518 	int ret;
519 
520 	map->virtual = NULL;
521 	map->bo = bo;
522 	if (num_pages > bo->num_pages)
523 		return -EINVAL;
524 	if (start_page > bo->num_pages)
525 		return -EINVAL;
526 
527 	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
528 	if (ret)
529 		return ret;
530 	if (!bo->mem.bus.is_iomem) {
531 		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
532 	} else {
533 		offset = start_page << PAGE_SHIFT;
534 		size = num_pages << PAGE_SHIFT;
535 		return ttm_bo_ioremap(bo, offset, size, map);
536 	}
537 }
538 EXPORT_SYMBOL(ttm_bo_kmap);
539 
540 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
541 {
542 	if (!map->virtual)
543 		return;
544 	switch (map->bo_kmap_type) {
545 	case ttm_bo_map_iomap:
546 		bus_space_unmap(map->bo->bdev->memt, map->bo->mem.bus.bsh,
547 		    (size_t)map->bo->mem.num_pages << PAGE_SHIFT);
548 		break;
549 	case ttm_bo_map_vmap:
550 		vunmap(map->virtual,
551 		    (size_t)map->bo->mem.num_pages << PAGE_SHIFT);
552 		break;
553 	case ttm_bo_map_kmap:
554 		kunmap_va(map->virtual);
555 		break;
556 	case ttm_bo_map_premapped:
557 		break;
558 	default:
559 		BUG();
560 	}
561 	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
562 	map->virtual = NULL;
563 	map->page = NULL;
564 }
565 EXPORT_SYMBOL(ttm_bo_kunmap);
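
/*
 * Typical kmap/kunmap usage, as an illustrative sketch only. It assumes the
 * ttm_kmap_obj_virtual() helper from ttm_bo_api.h and a reserved, idle bo:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *ptr;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret)
 *		return ret;
 *	ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	... access ptr, using memcpy_toio()/memcpy_fromio() when is_iomem ...
 *	ttm_bo_kunmap(&map);
 */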
566 
567 static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
568 				 bool dst_use_tt)
569 {
570 	int ret;
571 	ret = ttm_bo_wait(bo, false, false);
572 	if (ret)
573 		return ret;
574 
575 	if (!dst_use_tt)
576 		ttm_bo_tt_destroy(bo);
577 	ttm_bo_free_old_node(bo);
578 	return 0;
579 }
580 
581 static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
582 				struct dma_fence *fence,
583 				bool dst_use_tt)
584 {
585 	struct ttm_buffer_object *ghost_obj;
586 	int ret;
587 
588 	/**
589 	 * This should help pipeline ordinary buffer moves.
590 	 *
591 	 * Hang old buffer memory on a new buffer object,
592 	 * and leave it to be released when the GPU
593 	 * operation has completed.
594 	 */
595 
596 	dma_fence_put(bo->moving);
597 	bo->moving = dma_fence_get(fence);
598 
599 	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
600 	if (ret)
601 		return ret;
602 
603 	dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
604 
605 	/**
606 	 * If we're not moving to fixed memory, the TTM object
607 	 * needs to stay alive. Otherwise hang it on the ghost
608 	 * bo to be unbound and destroyed.
609 	 */
610 
611 	if (dst_use_tt)
612 		ghost_obj->ttm = NULL;
613 	else
614 		bo->ttm = NULL;
615 
616 	dma_resv_unlock(&ghost_obj->base._resv);
617 	ttm_bo_put(ghost_obj);
618 	return 0;
619 }
620 
621 static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
622 				       struct dma_fence *fence)
623 {
624 	struct ttm_bo_device *bdev = bo->bdev;
625 	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
626 
627 	/**
628 	 * BO doesn't have a TTM we need to bind/unbind. Just remember
629 	 * this eviction and free up the allocation
630 	 */
631 	spin_lock(&from->move_lock);
632 	if (!from->move || dma_fence_is_later(fence, from->move)) {
633 		dma_fence_put(from->move);
634 		from->move = dma_fence_get(fence);
635 	}
636 	spin_unlock(&from->move_lock);
637 
638 	ttm_bo_free_old_node(bo);
639 
640 	dma_fence_put(bo->moving);
641 	bo->moving = dma_fence_get(fence);
642 }
643 
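/*
 * ttm_bo_move_accel_cleanup - finish an accelerated (GPU copy) move.
 * @bo: the buffer object being moved.
 * @fence: fence signaled when the copy is complete.
 * @evict: true if this move is an eviction.
 * @pipeline: whether pipelined (non-blocking) eviction is allowed.
 * @new_mem: the destination placement.
 *
 * Attaches @fence as the exclusive fence of @bo and retires the old placement
 * without blocking where possible: ordinary moves hand the old memory to a
 * ghost object that is released once the fence signals, pipelined evictions
 * from fixed memory record the fence on the source manager, and all other
 * cases wait for idle before freeing the old node. On success @new_mem is
 * assigned to @bo.
 */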
644 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
645 			      struct dma_fence *fence,
646 			      bool evict,
647 			      bool pipeline,
648 			      struct ttm_resource *new_mem)
649 {
650 	struct ttm_bo_device *bdev = bo->bdev;
651 	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
652 	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
653 	int ret = 0;
654 
655 	dma_resv_add_excl_fence(bo->base.resv, fence);
656 	if (!evict)
657 		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
658 	else if (!from->use_tt && pipeline)
659 		ttm_bo_move_pipeline_evict(bo, fence);
660 	else
661 		ret = ttm_bo_wait_free_node(bo, man->use_tt);
662 
663 	if (ret)
664 		return ret;
665 
666 	ttm_bo_assign_mem(bo, new_mem);
667 
668 	return 0;
669 }
670 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
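
/*
 * A driver's move callback typically ends with this cleanup once its copy has
 * been queued. Illustrative sketch only; mydrv_copy_buffer() stands in for a
 * hypothetical driver function returning the fence for the copy:
 *
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	fence = mydrv_copy_buffer(bo, old_mem, new_mem);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 *	dma_fence_put(fence);
 *	return ret;
 */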
671 
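/*
 * ttm_bo_pipeline_gutting - discard a buffer's backing store without
 * stalling. The current content, TTM and fences are handed to a ghost object
 * (falling back to a synchronous wait if the fences cannot be copied), after
 * which @bo is left with an empty system-domain placement and no TTM. The
 * ghost is destroyed once it becomes idle.
 */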
672 int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
673 {
674 	struct ttm_buffer_object *ghost;
675 	int ret;
676 
677 	ret = ttm_buffer_object_transfer(bo, &ghost);
678 	if (ret)
679 		return ret;
680 
681 	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
682 	/* Last resort, wait for the BO to be idle when we are OOM */
683 	if (ret)
684 		ttm_bo_wait(bo, false, false);
685 
686 	memset(&bo->mem, 0, sizeof(bo->mem));
687 	bo->mem.mem_type = TTM_PL_SYSTEM;
688 	bo->ttm = NULL;
689 
690 	dma_resv_unlock(&ghost->base._resv);
691 	ttm_bo_put(ghost);
692 
693 	return 0;
694 }
695