/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/vmalloc.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_cache.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem)
{
	if (mem->bus.offset || mem->bus.addr)
		return 0;

	mem->bus.is_iomem = false;
	if (!bdev->funcs->io_mem_reserve)
		return 0;

	return bdev->funcs->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem)
{
	if (!mem)
		return;

	if (!mem->bus.offset && !mem->bus.addr)
		return;

	if (bdev->funcs->io_mem_free)
		bdev->funcs->io_mem_free(bdev, mem);

	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}

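/*
 * Usage sketch (not part of this file): ttm_mem_io_reserve() only forwards
 * to the driver's io_mem_reserve() hook, which is expected to describe the
 * aperture in mem->bus.  A hypothetical driver callback could look like
 * this; the mydrv_* names and the VRAM base are placeholders:
 *
 *	static int mydrv_io_mem_reserve(struct ttm_device *bdev,
 *					struct ttm_resource *mem)
 *	{
 *		struct mydrv_device *mdev = mydrv_device(bdev);
 *
 *		switch (mem->mem_type) {
 *		case TTM_PL_SYSTEM:
 *			return 0;	// nothing to map
 *		case TTM_PL_VRAM:
 *			mem->bus.offset = (mem->start << PAGE_SHIFT) +
 *			    mdev->vram_base;
 *			mem->bus.is_iomem = true;
 *			mem->bus.caching = ttm_write_combined;
 *			return 0;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 *
 * ttm_mem_io_free() below is the matching teardown: it calls the optional
 * io_mem_free() hook and then clears bus.offset and bus.addr.
 */
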
/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @clear: Whether to clear rather than copy.
 * @num_pages: Number of pages of the operation.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 * @memt: Bus space tag used by the iterators' map_local()/unmap_local() hooks.
 *
 * This function is intended to be usable for moving out asynchronously
 * under a dma-fence, if desired.
 */
void ttm_move_memcpy(bool clear,
		     u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter,
		     bus_space_tag_t memt)
{
	const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
	const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
	struct iosys_map src_map, dst_map;
	pgoff_t i;

	/* Single TTM move. NOP */
	if (dst_ops->maps_tt && src_ops->maps_tt)
		return;

	/* Don't move nonexistent data. Clear destination instead. */
	if (clear) {
		for (i = 0; i < num_pages; ++i) {
			dst_ops->map_local(dst_iter, &dst_map, i, memt);
			if (dst_map.is_iomem)
				memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
			else
				memset(dst_map.vaddr, 0, PAGE_SIZE);
			if (dst_ops->unmap_local)
				dst_ops->unmap_local(dst_iter, &dst_map, memt);
		}
		return;
	}

	for (i = 0; i < num_pages; ++i) {
		dst_ops->map_local(dst_iter, &dst_map, i, memt);
		src_ops->map_local(src_iter, &src_map, i, memt);

		drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);

		if (src_ops->unmap_local)
			src_ops->unmap_local(src_iter, &src_map, memt);
		if (dst_ops->unmap_local)
			dst_ops->unmap_local(dst_iter, &dst_map, memt);
	}
}
EXPORT_SYMBOL(ttm_move_memcpy);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context
 * @dst_mem: struct ttm_resource indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * If successful, the function frees any old aperture space, sets
 * (@dst_mem)->mm_node to NULL and updates (@bo)->resource. If unsuccessful,
 * the old data remains untouched, and it is up to the caller to free the
 * memory space indicated by @dst_mem.
 * Returns:
 * !0: Failure.
 */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *dst_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource *src_mem = bo->resource;
	struct ttm_resource_manager *src_man;
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_linear_io io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	bool clear;
	int ret = 0;

	if (WARN_ON(!src_mem))
		return -EINVAL;

	src_man = ttm_manager_type(bdev, src_mem->mem_type);
	if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
		    dst_man->use_tt)) {
		ret = ttm_tt_populate(bdev, ttm, ctx);
		if (ret)
			return ret;
	}

	dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
	if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
		dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
	if (IS_ERR(dst_iter))
		return PTR_ERR(dst_iter);

	src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
	if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
		src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
	if (IS_ERR(src_iter)) {
		ret = PTR_ERR(src_iter);
		goto out_src_iter;
	}

	clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
	if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
		ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter,
		    bdev->memt);

	if (!src_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
	ttm_bo_move_sync_cleanup(bo, dst_mem);

out_src_iter:
	if (!dst_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

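/*
 * Usage sketch (not part of this file): a driver's ttm_device_funcs.move()
 * hook typically tries a hardware copy first and falls back to
 * ttm_bo_move_memcpy().  The mydrv_* names below are hypothetical.
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 struct ttm_operation_ctx *ctx,
 *				 struct ttm_resource *new_mem,
 *				 struct ttm_place *hop)
 *	{
 *		int ret;
 *
 *		ret = mydrv_copy_with_engine(bo, ctx, new_mem);
 *		if (ret == 0)
 *			return 0;
 *
 *		// Fallback: CPU copy; also assigns new_mem to the bo.
 *		return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *	}
 */
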
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	dma_resv_fini(&fbo->base.base._resv);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_glob.bo_count);
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.pin_count = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	if (fbo->base.resource) {
		ttm_resource_set_bo(fbo->base.resource, &fbo->base);
		bo->resource = NULL;
		ttm_bo_set_bulk_move(&fbo->base, NULL);
	} else {
		fbo->base.bulk_move = NULL;
	}

	ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
	if (ret) {
		kfree(fbo);
		return ret;
	}

	ttm_bo_get(bo);
	fbo->bo = bo;

	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

	*new_obj = &fbo->base;
	return 0;
}

/**
 * ttm_io_prot
 *
 * @bo: ttm buffer object
 * @res: ttm resource object
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @res, derived
 * from the base protection @tmp.
 */
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	struct ttm_resource_manager *man;
	enum ttm_caching caching;

	man = ttm_manager_type(bo->bdev, res->mem_type);
	if (man->use_tt) {
		caching = bo->ttm->caching;
		if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)
			tmp = pgprot_decrypted(tmp);
	} else {
		caching = res->bus.caching;
	}

	return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);

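/*
 * Usage sketch (not part of this file): the typical caller is a CPU fault
 * path that needs the right caching attributes before inserting PTEs,
 * roughly along the lines of the generic TTM fault handler:
 *
 *	pgprot_t prot = vma->vm_page_prot;
 *
 *	prot = ttm_io_prot(bo, bo->resource, prot);
 *	// ... insert PFNs/pages into the VMA using prot ...
 */
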
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	int flags;
	struct ttm_resource *mem = bo->resource;

	if (bo->resource->bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			flags = BUS_SPACE_MAP_PREFETCHABLE;
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			flags = BUS_SPACE_MAP_CACHEABLE;
#endif
		else
			flags = 0;
		if (bus_space_map(bo->bdev->memt,
		    bo->resource->bus.offset + offset,
		    size, BUS_SPACE_MAP_LINEAR | flags,
		    &bo->resource->bus.bsh)) {
			printf("%s bus_space_map failed\n", __func__);
			map->virtual = 0;
		} else {
			map->virtual = bus_space_vaddr(bo->bdev->memt,
			    bo->resource->bus.bsh);
		}
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource_manager *man =
			ttm_manager_type(bo->bdev, bo->resource->mem_type);
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached &&
	    !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
 * used to obtain a virtual address to the data.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > PFN_UP(bo->resource->size))
		return -EINVAL;
	if ((start_page + num_pages) > PFN_UP(bo->resource->size))
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
	if (ret)
		return ret;
	if (!bo->resource->bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

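/*
 * Usage sketch (not part of this file): map the first page of a reserved
 * bo, touch it from the CPU, then drop the mapping.  Error handling is
 * trimmed and the byte value is a placeholder.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	u8 *p;
 *
 *	if (ttm_bo_kmap(bo, 0, 1, &map))
 *		return;
 *	p = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	if (is_iomem)
 *		writeb(0xff, (u8 __iomem *)p);
 *	else
 *		p[0] = 0xff;
 *	ttm_bo_kunmap(&map);
 */
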
/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		bus_space_unmap(map->bo->bdev->memt, map->bo->resource->bus.bsh,
		    map->bo->resource->size);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual,
		    map->bo->resource->size);
		break;
	case ttm_bo_map_kmap:
		kunmap_va(map->virtual);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

/**
 * ttm_bo_vmap
 *
 * @bo: The buffer object.
 * @map: pointer to a struct iosys_map representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap or vmap to the
 * data in the buffer object. The parameter @map returns the virtual
 * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	int flags;
	struct ttm_resource *mem = bo->resource;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	if (mem->bus.is_iomem) {
		void __iomem *vaddr_iomem;

		if (mem->bus.addr)
			vaddr_iomem = (void __iomem *)mem->bus.addr;
		else {
			if (mem->bus.caching == ttm_write_combined)
				flags = BUS_SPACE_MAP_PREFETCHABLE;
#ifdef CONFIG_X86
			else if (mem->bus.caching == ttm_cached)
				flags = BUS_SPACE_MAP_CACHEABLE;
#endif
			else
				flags = 0;
			if (bus_space_map(bo->bdev->memt, mem->bus.offset,
			    bo->base.size, BUS_SPACE_MAP_LINEAR | flags,
			    &mem->bus.bsh)) {
				printf("%s bus_space_map failed\n", __func__);
				return -ENOMEM;
			}
			vaddr_iomem = bus_space_vaddr(bo->bdev->memt,
			    mem->bus.bsh);
		}

		if (!vaddr_iomem)
			return -ENOMEM;

		iosys_map_set_vaddr_iomem(map, vaddr_iomem);

	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};
		struct ttm_tt *ttm = bo->ttm;
		pgprot_t prot;
		void *vaddr;

		ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
		if (ret)
			return ret;

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
		if (!vaddr)
			return -ENOMEM;

		iosys_map_set_vaddr(map, vaddr);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);

/**
 * ttm_bo_vunmap
 *
 * @bo: The buffer object.
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_vmap().
 */
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;

	dma_resv_assert_held(bo->base.resv);

	if (iosys_map_is_null(map))
		return;

	if (!map->is_iomem)
		vunmap(map->vaddr,
		    bo->base.size);
	else if (!mem->bus.addr)
		bus_space_unmap(bo->bdev->memt, mem->bus.bsh,
		    bo->base.size);
	iosys_map_clear(map);

	ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);

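/*
 * Usage sketch (not part of this file): map a whole, reserved bo through a
 * struct iosys_map and clear it, regardless of whether it lives in iomem or
 * in system pages.  Error handling is trimmed.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ret = ttm_bo_vmap(bo, &map);
 *	if (!ret) {
 *		iosys_map_memset(&map, 0, 0, bo->base.size);
 *		ttm_bo_vunmap(bo, &map);
 *	}
 *	dma_resv_unlock(bo->base.resv);
 */
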
static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	long ret;

	ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				    false, 15 * HZ);
	if (ret == 0)
		return -EBUSY;
	if (ret < 0)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
	return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/**
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */

	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_fence(&ghost_obj->base._resv, fence,
			   DMA_RESV_USAGE_KERNEL);

	/**
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */

	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	ttm_bo_put(ghost_obj);
	return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from;

	from = ttm_manager_type(bdev, bo->resource->mem_type);

	/**
	 * The BO doesn't have a TTM that we need to bind/unbind. Just
	 * remember this eviction and free up the allocation.
	 */
	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		dma_fence_put(from->move);
		from->move = dma_fence_get(fence);
	}
	spin_unlock(&from->move_lock);

	ttm_resource_free(bo, &bo->resource);
}

/**
 * ttm_bo_move_accel_cleanup - cleanup helper for hw copies
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @pipeline: evictions are to be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

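/*
 * Usage sketch (not part of this file): after a driver has queued a blit
 * from the old to the new placement, it hands the resulting fence to
 * ttm_bo_move_accel_cleanup() instead of blocking.  mydrv_copy_buffer() is
 * a hypothetical helper returning the fence of the scheduled copy.
 *
 *	static int mydrv_move_vram(struct ttm_buffer_object *bo, bool evict,
 *				   struct ttm_operation_ctx *ctx,
 *				   struct ttm_resource *new_mem)
 *	{
 *		struct dma_fence *fence;
 *		int ret;
 *
 *		fence = mydrv_copy_buffer(bo, bo->resource, new_mem);
 *		if (IS_ERR(fence))
 *			return PTR_ERR(fence);
 *
 *		ret = ttm_bo_move_accel_cleanup(bo, fence, evict,
 *						true, new_mem);
 *		dma_fence_put(fence);
 *		return ret;
 *	}
 */
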
/**
 * ttm_bo_move_sync_cleanup - cleanup by waiting for the move to finish
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
 * by the caller to be idle. Typically used after memcpy buffer moves.
 */
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret;

	ret = ttm_bo_wait_free_node(bo, man->use_tt);
	if (WARN_ON(ret))
		return;

	ttm_bo_assign_mem(bo, new_mem);
}
EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);

/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptibly
 * for idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	struct ttm_tt *ttm;
	int ret;

	/* If already idle, no need for ghost object dance. */
	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {
		if (!bo->ttm) {
			/* See comment below about clearing. */
			ret = ttm_tt_create(bo, true);
			if (ret)
				return ret;
		} else {
			ttm_tt_unpopulate(bo->bdev, bo->ttm);
			if (bo->type == ttm_bo_type_device)
				ttm_tt_mark_for_clear(bo->ttm);
		}
		ttm_resource_free(bo, &bo->resource);
		return 0;
	}

	/*
	 * We need an unpopulated ttm_tt after giving our current one,
	 * if any, to the ghost object. And we can't afford to fail
	 * creating one *after* the operation. If the bo subsequently gets
	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
	 * to avoid leaking sensitive information to user-space.
	 */

	ttm = bo->ttm;
	bo->ttm = NULL;
	ret = ttm_tt_create(bo, true);
	swap(bo->ttm, ttm);
	if (ret)
		return ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		goto error_destroy_tt;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret) {
		dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				      false, MAX_SCHEDULE_TIMEOUT);
	}

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);
	bo->ttm = ttm;
	return 0;

error_destroy_tt:
	ttm_tt_destroy(bo->bdev, ttm);
	return ret;
}