// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_tt.h>

#include "i915_deps.h"
#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"

#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_migrate.h"

/**
 * DOC: Selftest failure modes for failsafe migration
 *
 * For fail_gpu_migration, the scheduled gpu blit is always a clear blit
 * rather than a copy blit, and then we force the failure paths as if
 * the blit fence returned an error.
 *
 * For fail_work_allocation we fail the kmalloc of the async worker and
 * instead sync the gpu blit. If the blit then fails, or fail_gpu_migration
 * is set to true, a memcpy operation is performed synchronously.
 */
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
static bool fail_gpu_migration;
static bool fail_work_allocation;
static bool ban_memcpy;

void i915_ttm_migrate_set_failure_modes(bool gpu_migration,
					bool work_allocation)
{
	fail_gpu_migration = gpu_migration;
	fail_work_allocation = work_allocation;
}

void i915_ttm_migrate_set_ban_memcpy(bool ban)
{
	ban_memcpy = ban;
}
#endif

static enum i915_cache_level
i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res,
		     struct ttm_tt *ttm)
{
	return ((HAS_LLC(i915) || HAS_SNOOP(i915)) &&
		!i915_ttm_gtt_binds_lmem(res) &&
		ttm->caching == ttm_cached) ? I915_CACHE_LLC :
		I915_CACHE_NONE;
}

static struct intel_memory_region *
i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
{
	struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);

	/* There's some room for optimization here... */
	GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
		   ttm_mem_type < I915_PL_LMEM0);
	if (ttm_mem_type == I915_PL_SYSTEM)
		return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
						  0);

	return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
					  ttm_mem_type - I915_PL_LMEM0);
}

/**
 * i915_ttm_adjust_domains_after_move - Adjust the GEM domains after a
 * TTM move
 * @obj: The gem object
 */
void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

	if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
		obj->write_domain = I915_GEM_DOMAIN_WC;
		obj->read_domains = I915_GEM_DOMAIN_WC;
	} else {
		obj->write_domain = I915_GEM_DOMAIN_CPU;
		obj->read_domains = I915_GEM_DOMAIN_CPU;
	}
}

/**
 * i915_ttm_adjust_gem_after_move - Adjust the GEM state after a TTM move
 * @obj: The gem object
 *
 * Adjusts the GEM object's region, mem_flags and cache coherency after a
 * TTM move.
 */
void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	unsigned int cache_level;
	unsigned int mem_flags;
	unsigned int i;
	int mem_type;

	/*
	 * We might have been purged (or swapped out) if the resource is NULL,
	 * in which case the SYSTEM placement is the closest match to describe
	 * the current domain. If the object is ever used in this state then we
	 * will require moving it again.
	 */
	if (!bo->resource) {
		mem_flags = I915_BO_FLAG_STRUCT_PAGE;
		mem_type = I915_PL_SYSTEM;
		cache_level = I915_CACHE_NONE;
	} else {
		mem_flags = i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
			I915_BO_FLAG_STRUCT_PAGE;
		mem_type = bo->resource->mem_type;
		cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
						   bo->ttm);
	}

	/*
	 * If the object was moved to an allowable region, update the object
	 * region to consider it migrated. Note that if it's currently not
	 * in an allowable region, it's evicted and we don't update the
	 * object region.
	 */
	if (intel_region_to_ttm_type(obj->mm.region) != mem_type) {
		for (i = 0; i < obj->mm.n_placements; ++i) {
			struct intel_memory_region *mr = obj->mm.placements[i];

			if (intel_region_to_ttm_type(mr) == mem_type &&
			    mr != obj->mm.region) {
				i915_gem_object_release_memory_region(obj);
				i915_gem_object_init_memory_region(obj, mr);
				break;
			}
		}
	}

	obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
	obj->mem_flags |= mem_flags;

	i915_gem_object_set_cache_coherency(obj, cache_level);
}

/**
 * i915_ttm_move_notify - Prepare an object for move
 * @bo: The ttm buffer object.
 *
 * This function prepares an object for move by removing all GPU bindings,
 * removing all CPU mappings and finally releasing the pages' sg-table.
 *
 * Return: 0 if successful, negative error code on error.
 */
int i915_ttm_move_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret;

	/*
	 * Note: The async unbinding here will actually transform the
	 * blocking wait for unbind into a wait before finally submitting
	 * evict / migration blit and thus stall the migration timeline
	 * which may not be good for overall throughput. We should make
	 * sure we await the unbind fences *after* the migration blit
	 * instead of *before* as we currently do.
	 */
	ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE |
				     I915_GEM_OBJECT_UNBIND_ASYNC);
	if (ret)
		return ret;

	ret = __i915_gem_object_put_pages(obj);
	if (ret)
		return ret;

	return 0;
}

static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
					     bool clear,
					     struct ttm_resource *dst_mem,
					     struct ttm_tt *dst_ttm,
					     struct sg_table *dst_st,
					     const struct i915_deps *deps)
{
	struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
						     bdev);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct i915_request *rq;
	struct ttm_tt *src_ttm = bo->ttm;
	enum i915_cache_level src_level, dst_level;
	int ret;

	if (!to_gt(i915)->migrate.context || intel_gt_is_wedged(to_gt(i915)))
		return ERR_PTR(-EINVAL);

	/* With fail_gpu_migration, we always perform a GPU clear. */
	if (I915_SELFTEST_ONLY(fail_gpu_migration))
		clear = true;

	dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm);
	if (clear) {
		if (bo->type == ttm_bo_type_kernel &&
		    !I915_SELFTEST_ONLY(fail_gpu_migration))
			return ERR_PTR(-EINVAL);

		intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
		ret = intel_context_migrate_clear(to_gt(i915)->migrate.context, deps,
						  dst_st->sgl,
						  i915_gem_get_pat_index(i915, dst_level),
						  i915_ttm_gtt_binds_lmem(dst_mem),
						  0, &rq);
	} else {
		struct i915_refct_sgt *src_rsgt =
			i915_ttm_resource_get_st(obj, bo->resource);

		if (IS_ERR(src_rsgt))
			return ERR_CAST(src_rsgt);

		src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
		intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
		ret = intel_context_migrate_copy(to_gt(i915)->migrate.context,
						 deps, src_rsgt->table.sgl,
						 i915_gem_get_pat_index(i915, src_level),
						 i915_ttm_gtt_binds_lmem(bo->resource),
						 dst_st->sgl,
						 i915_gem_get_pat_index(i915, dst_level),
						 i915_ttm_gtt_binds_lmem(dst_mem),
						 &rq);

		i915_refct_sgt_put(src_rsgt);
	}

	intel_engine_pm_put(to_gt(i915)->migrate.context->engine);

	if (ret && rq) {
		i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
		i915_request_put(rq);
	}

	return ret ? ERR_PTR(ret) : &rq->fence;
}

/**
 * struct i915_ttm_memcpy_arg - argument for the bo memcpy functionality.
 * @_dst_iter: Storage space for the destination kmap iterator.
 * @_src_iter: Storage space for the source kmap iterator.
 * @dst_iter: Pointer to the destination kmap iterator.
 * @src_iter: Pointer to the source kmap iterator.
 * @num_pages: Number of pages to copy or clear.
 * @clear: Whether to clear instead of copy.
 * @src_rsgt: Refcounted scatter-gather list of source memory.
 * @dst_rsgt: Refcounted scatter-gather list of destination memory.
 */
struct i915_ttm_memcpy_arg {
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_iomap io;
	} _dst_iter,
	_src_iter;
	struct ttm_kmap_iter *dst_iter;
	struct ttm_kmap_iter *src_iter;
	unsigned long num_pages;
	bool clear;
	struct i915_refct_sgt *src_rsgt;
	struct i915_refct_sgt *dst_rsgt;
};

/**
 * struct i915_ttm_memcpy_work - Async memcpy worker under a dma-fence.
 * @fence: The dma-fence.
 * @work: The work struct used for the memcpy work.
 * @lock: The fence lock. Not used to protect anything else ATM.
 * @irq_work: Low latency worker to signal the fence since it can't be done
 * from the callback for lockdep reasons.
 * @cb: Callback for the accelerated migration fence.
 * @arg: The argument for the memcpy functionality.
 * @i915: The i915 pointer.
 * @obj: The GEM object.
 * @memcpy_allowed: If false, instead of processing the @arg and falling back
 * to memcpy or memset, we wedge the device and set the @obj unknown_state to
 * prevent further CPU or GPU access to the object. On some devices we might
 * only be permitted to use the blitter engine for such operations.
 */
struct i915_ttm_memcpy_work {
	struct dma_fence fence;
	struct work_struct work;
	spinlock_t lock;
	struct irq_work irq_work;
	struct dma_fence_cb cb;
	struct i915_ttm_memcpy_arg arg;
	struct drm_i915_private *i915;
	struct drm_i915_gem_object *obj;
	bool memcpy_allowed;
};

static void i915_ttm_move_memcpy(struct i915_ttm_memcpy_arg *arg)
{
	ttm_move_memcpy(arg->clear, arg->num_pages,
			arg->dst_iter, arg->src_iter);
}

static void i915_ttm_memcpy_init(struct i915_ttm_memcpy_arg *arg,
				 struct ttm_buffer_object *bo, bool clear,
				 struct ttm_resource *dst_mem,
				 struct ttm_tt *dst_ttm,
				 struct i915_refct_sgt *dst_rsgt)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct intel_memory_region *dst_reg, *src_reg;

	dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
	src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
	GEM_BUG_ON(!dst_reg || !src_reg);

	arg->dst_iter = !i915_ttm_cpu_maps_iomem(dst_mem) ?
		ttm_kmap_iter_tt_init(&arg->_dst_iter.tt, dst_ttm) :
		ttm_kmap_iter_iomap_init(&arg->_dst_iter.io, &dst_reg->iomap,
					 &dst_rsgt->table, dst_reg->region.start);

	arg->src_iter = !i915_ttm_cpu_maps_iomem(bo->resource) ?
		ttm_kmap_iter_tt_init(&arg->_src_iter.tt, bo->ttm) :
		ttm_kmap_iter_iomap_init(&arg->_src_iter.io, &src_reg->iomap,
					 &obj->ttm.cached_io_rsgt->table,
					 src_reg->region.start);
	arg->clear = clear;
	arg->num_pages = bo->base.size >> PAGE_SHIFT;

	arg->dst_rsgt = i915_refct_sgt_get(dst_rsgt);
	arg->src_rsgt = clear ? NULL :
		i915_ttm_resource_get_st(obj, bo->resource);
}

static void i915_ttm_memcpy_release(struct i915_ttm_memcpy_arg *arg)
{
	i915_refct_sgt_put(arg->src_rsgt);
	i915_refct_sgt_put(arg->dst_rsgt);
}

static void __memcpy_work(struct work_struct *work)
{
	struct i915_ttm_memcpy_work *copy_work =
		container_of(work, typeof(*copy_work), work);
	struct i915_ttm_memcpy_arg *arg = &copy_work->arg;
	bool cookie;

	/*
	 * FIXME: We need to take a closer look here. We should be able to plonk
	 * this into the fence critical section.
	 */
	if (!copy_work->memcpy_allowed) {
		struct intel_gt *gt;
		unsigned int id;

		for_each_gt(gt, copy_work->i915, id)
			intel_gt_set_wedged(gt);
	}

	cookie = dma_fence_begin_signalling();

	if (copy_work->memcpy_allowed) {
		i915_ttm_move_memcpy(arg);
	} else {
		/*
		 * Prevent further use of the object. Any future GTT binding or
		 * CPU access is not allowed once we signal the fence. Outside
		 * of the fence critical section, we also wedge the gpu to
		 * indicate the device is not functional.
		 *
		 * The below dma_fence_signal() is our write-memory-barrier.
		 */
		copy_work->obj->mm.unknown_state = true;
	}

	dma_fence_end_signalling(cookie);

	dma_fence_signal(&copy_work->fence);

	i915_ttm_memcpy_release(arg);
	i915_gem_object_put(copy_work->obj);
	dma_fence_put(&copy_work->fence);
}

static void __memcpy_irq_work(struct irq_work *irq_work)
{
	struct i915_ttm_memcpy_work *copy_work =
		container_of(irq_work, typeof(*copy_work), irq_work);
	struct i915_ttm_memcpy_arg *arg = &copy_work->arg;

	dma_fence_signal(&copy_work->fence);
	i915_ttm_memcpy_release(arg);
	i915_gem_object_put(copy_work->obj);
	dma_fence_put(&copy_work->fence);
}

static void __memcpy_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_ttm_memcpy_work *copy_work =
		container_of(cb, typeof(*copy_work), cb);

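	/*
	 * If the accelerated move failed (or a selftest forces a failure),
	 * fall back to the sync memcpy worker on the unbound workqueue.
	 * Otherwise the blit did the job and we only need to signal our
	 * fence, which is done from irq_work for lockdep reasons.
	 */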
	if (unlikely(fence->error || I915_SELFTEST_ONLY(fail_gpu_migration))) {
		INIT_WORK(&copy_work->work, __memcpy_work);
		queue_work(system_unbound_wq, &copy_work->work);
	} else {
		init_irq_work(&copy_work->irq_work, __memcpy_irq_work);
		irq_work_queue(&copy_work->irq_work);
	}
}

static const char *get_driver_name(struct dma_fence *fence)
{
	return "i915_ttm_memcpy_work";
}

static const char *get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static const struct dma_fence_ops dma_fence_memcpy_ops = {
	.get_driver_name = get_driver_name,
	.get_timeline_name = get_timeline_name,
};

static struct dma_fence *
i915_ttm_memcpy_work_arm(struct i915_ttm_memcpy_work *work,
			 struct dma_fence *dep)
{
	int ret;

	spin_lock_init(&work->lock);
	dma_fence_init(&work->fence, &dma_fence_memcpy_ops, &work->lock, 0, 0);
	dma_fence_get(&work->fence);
	ret = dma_fence_add_callback(dep, &work->cb, __memcpy_cb);
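	/*
	 * The error intercept could not be armed: either dep has already
	 * signaled (-ENOENT) or adding the callback failed. In the latter
	 * case, wait for dep before propagating its error.
	 */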
	if (ret) {
		if (ret != -ENOENT)
			dma_fence_wait(dep, false);

		return ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ? -EINVAL :
			       dep->error);
	}

	return &work->fence;
}

static bool i915_ttm_memcpy_allowed(struct ttm_buffer_object *bo,
				    struct ttm_resource *dst_mem)
{
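	/*
	 * Objects carrying CCS (compression) metadata can't be migrated with
	 * a CPU copy, and memcpy also requires both the source and the
	 * destination to be CPU-mappable.
	 */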
	if (i915_gem_object_needs_ccs_pages(i915_ttm_to_gem(bo)))
		return false;

	if (!(i915_ttm_resource_mappable(bo->resource) &&
	      i915_ttm_resource_mappable(dst_mem)))
		return false;

	return I915_SELFTEST_ONLY(ban_memcpy) ? false : true;
}

static struct dma_fence *
__i915_ttm_move(struct ttm_buffer_object *bo,
		const struct ttm_operation_ctx *ctx, bool clear,
		struct ttm_resource *dst_mem, struct ttm_tt *dst_ttm,
		struct i915_refct_sgt *dst_rsgt, bool allow_accel,
		const struct i915_deps *move_deps)
{
	const bool memcpy_allowed = i915_ttm_memcpy_allowed(bo, dst_mem);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct drm_i915_private *i915 = to_i915(bo->base.dev);
	struct i915_ttm_memcpy_work *copy_work = NULL;
	struct i915_ttm_memcpy_arg _arg, *arg = &_arg;
	struct dma_fence *fence = ERR_PTR(-EINVAL);

	if (allow_accel) {
		fence = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm,
					    &dst_rsgt->table, move_deps);

		/*
		 * We only need to intercept the error when moving to lmem.
		 * When moving to system, TTM or shmem will provide us with
		 * cleared pages.
		 */
		if (!IS_ERR(fence) && !i915_ttm_gtt_binds_lmem(dst_mem) &&
		    !I915_SELFTEST_ONLY(fail_gpu_migration ||
					fail_work_allocation))
			goto out;
	}

	/* If we've scheduled gpu migration, try to arm the error intercept. */
	if (!IS_ERR(fence)) {
		struct dma_fence *dep = fence;

		if (!I915_SELFTEST_ONLY(fail_work_allocation))
			copy_work = kzalloc(sizeof(*copy_work), GFP_KERNEL);

		if (copy_work) {
			copy_work->i915 = i915;
			copy_work->memcpy_allowed = memcpy_allowed;
			copy_work->obj = i915_gem_object_get(obj);
			arg = &copy_work->arg;
			if (memcpy_allowed)
				i915_ttm_memcpy_init(arg, bo, clear, dst_mem,
						     dst_ttm, dst_rsgt);

			fence = i915_ttm_memcpy_work_arm(copy_work, dep);
		} else {
			dma_fence_wait(dep, false);
			fence = ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ?
					-EINVAL : fence->error);
		}
		dma_fence_put(dep);

		if (!IS_ERR(fence))
			goto out;
	} else {
		int err = PTR_ERR(fence);

		if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN)
			return fence;

		if (move_deps) {
			err = i915_deps_sync(move_deps, ctx);
			if (err)
				return ERR_PTR(err);
		}
	}

	/* Error intercept failed or no accelerated migration to start with */

	if (memcpy_allowed) {
		if (!copy_work)
			i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm,
					     dst_rsgt);
		i915_ttm_move_memcpy(arg);
		i915_ttm_memcpy_release(arg);
	}
	if (copy_work)
		i915_gem_object_put(copy_work->obj);
	kfree(copy_work);

	return memcpy_allowed ? NULL : ERR_PTR(-EIO);
out:
	if (!fence && copy_work) {
		i915_ttm_memcpy_release(arg);
		i915_gem_object_put(copy_work->obj);
		kfree(copy_work);
	}

	return fence;
}

/**
 * i915_ttm_move - The TTM move callback used by i915.
 * @bo: The buffer object.
 * @evict: Whether this is an eviction.
 * @ctx: Pointer to a struct ttm_operation_ctx indicating how the waits
 *       should be performed if waiting is needed.
 * @dst_mem: The destination ttm resource.
 * @hop: If we need multihop, what temporary memory type to move to.
 *
 * Return: 0 if successful, negative error code otherwise.
 */
int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
		  struct ttm_operation_ctx *ctx,
		  struct ttm_resource *dst_mem,
		  struct ttm_place *hop)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct dma_fence *migration_fence = NULL;
	struct ttm_tt *ttm = bo->ttm;
	struct i915_refct_sgt *dst_rsgt;
	bool clear, prealloc_bo;
	int ret;

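	/*
	 * A ghost object (TTM's placeholder for delayed destruction) has no
	 * GEM object behind it, so there is no i915 state to migrate; just
	 * perform a null move.
	 */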
	if (GEM_WARN_ON(i915_ttm_is_ghost_object(bo))) {
		ttm_bo_move_null(bo, dst_mem);
		return 0;
	}

	if (!bo->resource) {
		if (dst_mem->mem_type != TTM_PL_SYSTEM) {
			hop->mem_type = TTM_PL_SYSTEM;
			hop->flags = TTM_PL_FLAG_TEMPORARY;
			return -EMULTIHOP;
		}

		/*
		 * This is only reached when first creating the object, or if
		 * the object was purged or swapped out (pipeline-gutting). For
		 * the former we can safely skip all of the below since we are
		 * only using a dummy SYSTEM placement here. And with the latter
		 * we will always re-enter here with bo->resource set correctly
		 * (as per the above), since this is part of a multi-hop
		 * sequence, where at the end we can do the move for real.
		 *
		 * The special case here is when the dst_mem is TTM_PL_SYSTEM,
		 * which doesn't require any kind of move, so it should be safe
		 * to skip all the below and call ttm_bo_move_null() here, where
		 * the caller in __i915_ttm_get_pages() will take care of the
		 * rest, since we should have a valid ttm_tt.
		 */
		ttm_bo_move_null(bo, dst_mem);
		return 0;
	}

	ret = i915_ttm_move_notify(bo);
	if (ret)
		return ret;

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		i915_ttm_purge(obj);
		ttm_resource_free(bo, &dst_mem);
		return 0;
	}

	/* Populate ttm with pages if needed. Typically system memory. */
	if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
		ret = ttm_tt_populate(bo->bdev, ttm, ctx);
		if (ret)
			return ret;
	}

	dst_rsgt = i915_ttm_resource_get_st(obj, dst_mem);
	if (IS_ERR(dst_rsgt))
		return PTR_ERR(dst_rsgt);

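	/*
	 * If the source isn't CPU-visible iomem and has no populated ttm
	 * pages, there is nothing to copy from: at most the destination
	 * needs a clear.
	 */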
	clear = !i915_ttm_cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm));
	prealloc_bo = obj->flags & I915_BO_PREALLOC;
	if (!(clear && ttm && !((ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) && !prealloc_bo))) {
		struct i915_deps deps;

		i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
		ret = i915_deps_add_resv(&deps, bo->base.resv, ctx);
		if (ret) {
			i915_refct_sgt_put(dst_rsgt);
			return ret;
		}

		migration_fence = __i915_ttm_move(bo, ctx, clear, dst_mem, ttm,
						  dst_rsgt, true, &deps);
		i915_deps_fini(&deps);
	}

	/* We can possibly get an -ERESTARTSYS here */
	if (IS_ERR(migration_fence)) {
		i915_refct_sgt_put(dst_rsgt);
		return PTR_ERR(migration_fence);
	}

	if (migration_fence) {
		if (I915_SELFTEST_ONLY(evict && fail_gpu_migration))
			ret = -EIO; /* never feed non-migrate fences into ttm */
		else
			ret = ttm_bo_move_accel_cleanup(bo, migration_fence, evict,
							true, dst_mem);
		if (ret) {
			dma_fence_wait(migration_fence, false);
			ttm_bo_move_sync_cleanup(bo, dst_mem);
		}
		dma_fence_put(migration_fence);
	} else {
		ttm_bo_move_sync_cleanup(bo, dst_mem);
	}

	i915_ttm_adjust_domains_after_move(obj);
	i915_ttm_free_cached_io_rsgt(obj);

	if (i915_ttm_gtt_binds_lmem(dst_mem) || i915_ttm_cpu_maps_iomem(dst_mem)) {
		obj->ttm.cached_io_rsgt = dst_rsgt;
		obj->ttm.get_io_page.sg_pos = dst_rsgt->table.sgl;
		obj->ttm.get_io_page.sg_idx = 0;
	} else {
		i915_refct_sgt_put(dst_rsgt);
	}

	i915_ttm_adjust_lru(obj);
	i915_ttm_adjust_gem_after_move(obj);
	return 0;
}

/**
 * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to
 * another
 * @dst: The destination object
 * @src: The source object
 * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used.
 * @intr: Whether to perform the waits interruptibly.
 *
 * Note: The caller is responsible for ensuring that the underlying
 * TTM objects are populated if needed and locked.
 *
 * Return: Zero on success. Negative error code on error. If @intr == true,
 * then it may return -ERESTARTSYS or -EINTR.
 */
int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
			  struct drm_i915_gem_object *src,
			  bool allow_accel, bool intr)
{
	struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst);
	struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src);
	struct ttm_operation_ctx ctx = {
		.interruptible = intr,
	};
	struct i915_refct_sgt *dst_rsgt;
	struct dma_fence *copy_fence;
	struct i915_deps deps;
	int ret;

	assert_object_held(dst);
	assert_object_held(src);

	if (GEM_WARN_ON(!src_bo->resource || !dst_bo->resource))
		return -EINVAL;

	i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);

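	/*
	 * Reserve a fence slot on each object and collect their existing
	 * fences as dependencies for the copy.
	 */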
	ret = dma_resv_reserve_fences(src_bo->base.resv, 1);
	if (ret)
		return ret;

	ret = dma_resv_reserve_fences(dst_bo->base.resv, 1);
	if (ret)
		return ret;

	ret = i915_deps_add_resv(&deps, dst_bo->base.resv, &ctx);
	if (ret)
		return ret;

	ret = i915_deps_add_resv(&deps, src_bo->base.resv, &ctx);
	if (ret)
		return ret;

	dst_rsgt = i915_ttm_resource_get_st(dst, dst_bo->resource);
	copy_fence = __i915_ttm_move(src_bo, &ctx, false, dst_bo->resource,
				     dst_bo->ttm, dst_rsgt, allow_accel,
				     &deps);

	i915_deps_fini(&deps);
	i915_refct_sgt_put(dst_rsgt);
	if (IS_ERR_OR_NULL(copy_fence))
		return PTR_ERR_OR_ZERO(copy_fence);

	dma_resv_add_fence(dst_bo->base.resv, copy_fence, DMA_RESV_USAGE_WRITE);
	dma_resv_add_fence(src_bo->base.resv, copy_fence, DMA_RESV_USAGE_READ);
	dma_fence_put(copy_fence);

	return 0;
}