/* xref: /openbsd/sys/dev/pci/drm/i915/i915_vma.c (revision 7f10cbd3) */
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <linux/dma-fence-array.h>
#include <drm/drm_gem.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_tiling.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_tlb.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"
#include "i915_vma_resource.h"

static inline void assert_vma_held_evict(const struct i915_vma *vma)
{
	/*
	 * We may be forced to unbind when the vm is dead, to clean it up.
	 * This is the only exception to the requirement of the object lock
	 * being held.
	 */
	if (kref_read(&vma->vm->ref))
		assert_object_held_shared(vma->obj);
}

static struct pool slab_vmas;

static struct i915_vma *i915_vma_alloc(void)
{
#ifdef __linux__
	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
#else
	return pool_get(&slab_vmas, PR_WAITOK | PR_ZERO);
#endif
}

static void i915_vma_free(struct i915_vma *vma)
{
#ifdef __linux__
	return kmem_cache_free(slab_vmas, vma);
#else
	pool_put(&slab_vmas, vma);
#endif
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	char buf[512];

	if (!vma->node.stack) {
		drm_dbg(vma->obj->base.dev,
			"vma.node [%08llx + %08llx] %s: unknown owner\n",
			vma->node.start, vma->node.size, reason);
		return;
	}

	stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
	drm_dbg(vma->obj->base.dev,
		"vma.node [%08llx + %08llx] %s: inserted at %s\n",
		vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	struct i915_vma *vma = active_to_vma(ref);

	if (!i915_vma_tryget(vma))
		return -ENOENT;

	/*
	 * Exclude global GTT VMA from holding a GT wakeref
	 * while active, otherwise GPU never goes idle.
	 */
	if (!i915_vma_is_ggtt(vma))
		intel_gt_pm_get(vma->vm->gt);

	return 0;
}

static void __i915_vma_retire(struct i915_active *ref)
{
	struct i915_vma *vma = active_to_vma(ref);

	if (!i915_vma_is_ggtt(vma)) {
		/*
		 * Since we can be called from atomic contexts,
		 * use an async variant of intel_gt_pm_put().
		 */
		intel_gt_pm_put_async(vma->vm->gt);
	}

	i915_vma_put(vma);
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_gtt_view *view)
{
	struct i915_vma *pos = ERR_PTR(-E2BIG);
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int err;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);

#ifdef notyet
	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}
#endif

	INIT_LIST_HEAD(&vma->closed_link);
	INIT_LIST_HEAD(&vma->obj_link);
	RB_CLEAR_NODE(&vma->obj_node);

	if (view && view->type != I915_GTT_VIEW_NORMAL) {
		vma->gtt_view = *view;
		if (view->type == I915_GTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	err = mutex_lock_interruptible(&vm->mutex);
	if (err) {
		pos = ERR_PTR(err);
		goto err_vma;
	}

	vma->vm = vm;
	list_add_tail(&vma->vm_link, &vm->unbound_list);

	spin_lock(&obj->vma.lock);
	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp < 0)
			p = &rb->rb_right;
		else if (cmp > 0)
			p = &rb->rb_left;
		else
			goto err_unlock;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);
	mutex_unlock(&vm->mutex);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
	list_del_init(&vma->vm_link);
	mutex_unlock(&vm->mutex);
err_vma:
	i915_vma_free(vma);
	return pos;
}

static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_gtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_gtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
	GEM_BUG_ON(!kref_read(&vm->ref));

	spin_lock(&obj->vma.lock);
	vma = i915_vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}
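
/*
 * Usage sketch (illustrative, not part of this file): look up or create
 * the default GGTT view of an object. "obj" and "ggtt" stand in for a
 * caller's object and global GTT; further error handling is elided.
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */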

struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_address_space *vm;
	struct i915_vm_pt_stash stash;
	struct i915_vma_resource *vma_res;
	struct drm_i915_gem_object *obj;
	struct i915_sw_dma_fence_cb cb;
	unsigned int pat_index;
	unsigned int flags;
};

static void __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma_resource *vma_res = vw->vma_res;

	/*
	 * We are about to bind the object, which must mean we have already
	 * signaled the work to potentially clear/move the pages underneath. If
	 * something went wrong at that stage then the object should have
	 * unknown_state set, in which case we need to skip the bind.
	 */
	if (i915_gem_object_has_unknown_state(vw->obj))
		return;

	vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
			       vma_res, vw->pat_index, vw->flags);
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->obj)
		i915_gem_object_put(vw->obj);

	i915_vm_free_pt_stash(vw->vm, &vw->stash);
	if (vw->vma_res)
		i915_vma_resource_put(vw->vma_res);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}

int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, true);
			dma_fence_put(fence);
		}
	}

	return err;
}
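
/*
 * Usage sketch (illustrative, not part of this file): a pin may queue the
 * actual PTE write asynchronously, so a caller that needs the mapping to
 * be complete (e.g. before CPU access through the aperture) can block on
 * the exclusive bind fence afterwards:
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *	err = i915_vma_wait_for_bind(vma);
 */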

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
static int i915_vma_verify_bind_complete(struct i915_vma *vma)
{
	struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
	int err;

	if (!fence)
		return 0;

	if (dma_fence_is_signaled(fence))
		err = fence->error;
	else
		err = -EBUSY;

	dma_fence_put(fence);

	return err;
}
#else
#define i915_vma_verify_bind_complete(_vma) 0
#endif

I915_SELFTEST_EXPORT void
i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
				struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
			       obj->mm.rsgt, i915_gem_object_is_readonly(obj),
			       i915_gem_object_is_lmem(obj), obj->mm.region,
			       vma->ops, vma->private, __i915_vma_offset(vma),
			       __i915_vma_size(vma), vma->size, vma->guard);
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @pat_index: PAT index to set in PTE
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 * @vma_res: pointer to a preallocated vma resource. The resource is either
 * consumed or freed.
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  unsigned int pat_index,
		  u32 flags,
		  struct i915_vma_work *work,
		  struct i915_vma_resource *vma_res)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	lockdep_assert_held(&vma->vm->mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > i915_vma_size(vma));

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total))) {
		i915_vma_resource_free(vma_res);
		return -ENODEV;
	}

	if (GEM_DEBUG_WARN_ON(!flags)) {
		i915_vma_resource_free(vma_res);
		return -EINVAL;
	}

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	bind_flags &= ~vma_flags;
	if (bind_flags == 0) {
		i915_vma_resource_free(vma_res);
		return 0;
	}

	GEM_BUG_ON(!atomic_read(&vma->pages_count));

	/* Wait for or await async unbinds touching our range */
	if (work && bind_flags & vma->vm->bind_async_flags)
		ret = i915_vma_resource_bind_dep_await(vma->vm,
						       &work->base.chain,
						       vma->node.start,
						       vma->node.size,
						       true,
						       GFP_NOWAIT |
						       __GFP_RETRY_MAYFAIL |
						       __GFP_NOWARN);
	else
		ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
						      vma->node.size, true);
	if (ret) {
		i915_vma_resource_free(vma_res);
		return ret;
	}

	if (vma->resource || !vma_res) {
		/* Rebinding with an additional I915_VMA_*_BIND */
		GEM_WARN_ON(!vma_flags);
		i915_vma_resource_free(vma_res);
	} else {
		i915_vma_resource_init_from_vma(vma_res, vma);
		vma->resource = vma_res;
	}
	trace_i915_vma_bind(vma, bind_flags);
	if (work && bind_flags & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma_res = i915_vma_resource_get(vma->resource);
		work->pat_index = pat_index;
		work->flags = bind_flags;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */
		work->obj = i915_gem_object_get(vma->obj);
	} else {
		ret = i915_gem_object_wait_moving_fence(vma->obj, true);
		if (ret) {
			i915_vma_resource_free(vma->resource);
			vma->resource = NULL;

			return ret;
		}
		vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index,
				   bind_flags);
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}
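
/*
 * Usage sketch (illustrative, not part of this file): a synchronous GGTT
 * bind, assuming the node has already been allocated by i915_vma_insert()
 * and that obj->pat_index is the desired caching mode. The preallocated
 * vma resource is consumed or freed by i915_vma_bind() itself.
 *
 *	struct i915_vma_resource *vma_res = i915_vma_resource_alloc();
 *
 *	if (IS_ERR(vma_res))
 *		return PTR_ERR(vma_res);
 *	mutex_lock(&vma->vm->mutex);
 *	err = i915_vma_bind(vma, vma->obj->pat_index,
 *			    I915_VMA_GLOBAL_BIND, NULL, vma_res);
 *	mutex_unlock(&vma->vm->mutex);
 */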

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
		return IOMEM_ERR_PTR(-EINVAL);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
	GEM_BUG_ON(i915_vma_verify_bind_complete(vma));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		/*
		 * TODO: consider just using i915_gem_object_pin_map() for lmem
		 * instead, which already supports mapping non-contiguous chunks
		 * of pages, that way we can also drop the
		 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
		 */
		if (i915_gem_object_is_lmem(vma->obj)) {
			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
							  vma->obj->base.size);
		} else if (i915_vma_is_map_and_fenceable(vma)) {
			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
						i915_vma_offset(vma),
						i915_vma_size(vma));
		} else {
			ptr = (void __iomem *)
				i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
			if (IS_ERR(ptr)) {
				err = PTR_ERR(ptr);
				goto err;
			}
			ptr = page_pack_bits(ptr, 1);
		}

		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			if (page_unmask_bits(ptr))
				__i915_gem_object_release_map(vma->obj);
			else
				io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return page_mask_bits(ptr);

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IOMEM_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	/* XXX We keep the mapping until __i915_vma_unbind()/evict() */

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}
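
/*
 * Usage sketch (illustrative, not part of this file): pairing the iomap
 * helpers for a CPU write through the GGTT aperture. The vma must already
 * be bound in the GGTT; "offset" and "value" are hypothetical.
 *
 *	void __iomem *ptr;
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	writel(value, ptr + offset);
 *	i915_vma_unpin_iomap(vma);
 */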

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (i915_vma_size(vma) < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(i915_vma_offset(vma), alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    i915_vma_offset(vma) < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    i915_vma_offset(vma) != (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_GUARD &&
	    vma->guard < (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}
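
/*
 * Usage sketch (illustrative, not part of this file): the check-then-rebind
 * pattern used by pinning paths. If the current placement violates the
 * requested constraints, unbind so that a later pin can find a fresh slot.
 *
 *	if (i915_vma_misplaced(vma, size, alignment, flags)) {
 *		err = i915_vma_unbind(vma);
 *		if (err)
 *			return err;
 *	}
 */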

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (i915_vma_size(vma) >= vma->fence_size &&
		     IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment));

	mappable = i915_ggtt_offset(vma) + vma->fence_size <=
		   i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @ww: An optional struct i915_gem_ww_ctx
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		u64 size, u64 alignment, u64 flags)
{
	unsigned long color, guard;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(hweight64(flags & (PIN_OFFSET_GUARD | PIN_OFFSET_FIXED | PIN_OFFSET_BIAS)) > 1);

	size = max(size, vma->size);
	alignment = max_t(typeof(alignment), alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	guard = vma->guard; /* retain guard across rebinds */
	if (flags & PIN_OFFSET_GUARD) {
		GEM_BUG_ON(overflows_type(flags & PIN_OFFSET_MASK, u32));
		guard = max_t(u32, guard, flags & PIN_OFFSET_MASK);
	}
	/*
	 * As we align the node upon insertion, but the hardware gets
	 * node.start + guard, the easiest way to make that work is
	 * to make the guard a multiple of the alignment size.
	 */
	guard = ALIGN(guard, alignment);

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));

	/*
	 * If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end - 2 * guard) {
		drm_dbg(vma->obj->base.dev,
			"Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			size, flags & PIN_MAPPABLE ? "mappable" : "total", end);
		return -ENOSPC;
	}

	color = 0;

	if (i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->pat_index;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;
		/*
		 * The caller knows not of the guard added by others and
		 * requests for the offset of the start of its buffer
		 * to be fixed, which may not be the same as the position
		 * of the vma->node due to the guard pages.
		 */
		if (offset < guard || offset + size > end - guard)
			return -ENOSPC;

		ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
					   size + 2 * guard,
					   offset - guard,
					   color, flags);
		if (ret)
			return ret;
	} else {
		size += 2 * guard;
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that the GGTT is limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE &&
		    !HAS_64K_PAGES(vma->vm->i915)) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
	vma->guard = guard;

	return 0;
}

static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
}

static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;

	bound = atomic_read(&vma->flags);

	if (flags & PIN_VALIDATE) {
		flags &= I915_VMA_BIND_MASK;

		return (flags & bound) == flags;
	}

	/* with the lock mandatory for unbind, we don't race here */
	flags &= I915_VMA_BIND_MASK;
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;
}

static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int src_stride, unsigned int dst_stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	pgoff_t src_idx;

	for (column = 0; column < width; column++) {
		unsigned int left;

		src_idx = src_stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
			st->nents++;
			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
			sg_dma_address(sg) =
				i915_gem_object_get_dma_address(obj, src_idx);
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= src_stride;
		}

		left = (dst_stride - height) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}

static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_rotation_info_size(rot_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		sg = rotate_pages(obj, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].src_stride,
				  rot_info->plane[i].dst_stride,
				  st, sg);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rot_info->plane[0].width,
		rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static struct scatterlist *
add_padding_pages(unsigned int count,
		  struct sg_table *st, struct scatterlist *sg)
{
	st->nents++;

	/*
	 * The DE ignores the PTEs for the padding tiles, the sg entry
	 * here is just a convenience to indicate how many padding PTEs
	 * to insert at this spot.
	 */
	sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
	sg_dma_address(sg) = 0;
	sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
	sg = sg_next(sg);

	return sg;
}

static struct scatterlist *
remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
			      unsigned long offset, unsigned int alignment_pad,
			      unsigned int width, unsigned int height,
			      unsigned int src_stride, unsigned int dst_stride,
			      struct sg_table *st, struct scatterlist *sg,
			      unsigned int *gtt_offset)
{
	unsigned int row;

	if (!width || !height)
		return sg;

	if (alignment_pad)
		sg = add_padding_pages(alignment_pad, st, sg);

	for (row = 0; row < height; row++) {
		unsigned int left = width * I915_GTT_PAGE_SIZE;

		while (left) {
			dma_addr_t addr;
			unsigned int length;

			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */

			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);

			length = min(left, length);

			st->nents++;

			sg_set_page(sg, NULL, length, 0);
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = length;
			sg = sg_next(sg);

			offset += length / I915_GTT_PAGE_SIZE;
			left -= length;
		}

		offset += src_stride - width;

		left = (dst_stride - width) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
	}

	*gtt_offset += alignment_pad + dst_stride * height;

	return sg;
}

static struct scatterlist *
remap_contiguous_pages(struct drm_i915_gem_object *obj,
		       pgoff_t obj_offset,
		       unsigned int count,
		       struct sg_table *st, struct scatterlist *sg)
{
	struct scatterlist *iter;
	unsigned int offset;

	iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
	GEM_BUG_ON(!iter);

	do {
		unsigned int len;

		len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0)
			return sg;

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);
}

static struct scatterlist *
remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
			       pgoff_t obj_offset, unsigned int alignment_pad,
			       unsigned int size,
			       struct sg_table *st, struct scatterlist *sg,
			       unsigned int *gtt_offset)
{
	if (!size)
		return sg;

	if (alignment_pad)
		sg = add_padding_pages(alignment_pad, st, sg);

	sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);
	sg = sg_next(sg);

	*gtt_offset += alignment_pad + size;

	return sg;
}

static struct scatterlist *
remap_color_plane_pages(const struct intel_remapped_info *rem_info,
			struct drm_i915_gem_object *obj,
			int color_plane,
			struct sg_table *st, struct scatterlist *sg,
			unsigned int *gtt_offset)
{
	unsigned int alignment_pad = 0;

	if (rem_info->plane_alignment)
		alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;

	if (rem_info->plane[color_plane].linear)
		sg = remap_linear_color_plane_pages(obj,
						    rem_info->plane[color_plane].offset,
						    alignment_pad,
						    rem_info->plane[color_plane].size,
						    st, sg,
						    gtt_offset);

	else
		sg = remap_tiled_color_plane_pages(obj,
						   rem_info->plane[color_plane].offset,
						   alignment_pad,
						   rem_info->plane[color_plane].width,
						   rem_info->plane[color_plane].height,
						   rem_info->plane[color_plane].src_stride,
						   rem_info->plane[color_plane].dst_stride,
						   st, sg,
						   gtt_offset);

	return sg;
}

static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info *rem_info,
		  struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_remapped_info_size(rem_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int gtt_offset = 0;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
		sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset);

	i915_sg_trim(st);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rem_info->plane[0].width,
		rem_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static noinline struct sg_table *
intel_partial_pages(const struct i915_gtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int count = view->partial.size;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, count, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;

	sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);

	sg_mark_end(sg);
	i915_sg_trim(st); /* Drop any unused tail entries. */

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}
12991bb76ff1Sjsg 
13001bb76ff1Sjsg static int
13011bb76ff1Sjsg __i915_vma_get_pages(struct i915_vma *vma)
13021bb76ff1Sjsg {
13031bb76ff1Sjsg 	struct sg_table *pages;
13041bb76ff1Sjsg 
13051bb76ff1Sjsg 	/*
13061bb76ff1Sjsg 	 * The vma->pages are only valid within the lifespan of the borrowed
13071bb76ff1Sjsg 	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
13081bb76ff1Sjsg 	 * must be the vma->pages. A simple rule is that vma->pages must only
13091bb76ff1Sjsg 	 * be accessed when the obj->mm.pages are pinned.
13101bb76ff1Sjsg 	 */
13111bb76ff1Sjsg 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
13121bb76ff1Sjsg 
13131bb76ff1Sjsg 	switch (vma->gtt_view.type) {
13141bb76ff1Sjsg 	default:
13151bb76ff1Sjsg 		GEM_BUG_ON(vma->gtt_view.type);
13161bb76ff1Sjsg 		fallthrough;
13171bb76ff1Sjsg 	case I915_GTT_VIEW_NORMAL:
13181bb76ff1Sjsg 		pages = vma->obj->mm.pages;
13191bb76ff1Sjsg 		break;
13201bb76ff1Sjsg 
13211bb76ff1Sjsg 	case I915_GTT_VIEW_ROTATED:
13221bb76ff1Sjsg 		pages =
13231bb76ff1Sjsg 			intel_rotate_pages(&vma->gtt_view.rotated, vma->obj);
13241bb76ff1Sjsg 		break;
13251bb76ff1Sjsg 
13261bb76ff1Sjsg 	case I915_GTT_VIEW_REMAPPED:
13271bb76ff1Sjsg 		pages =
13281bb76ff1Sjsg 			intel_remap_pages(&vma->gtt_view.remapped, vma->obj);
13291bb76ff1Sjsg 		break;
13301bb76ff1Sjsg 
13311bb76ff1Sjsg 	case I915_GTT_VIEW_PARTIAL:
13321bb76ff1Sjsg 		pages = intel_partial_pages(&vma->gtt_view, vma->obj);
13331bb76ff1Sjsg 		break;
13341bb76ff1Sjsg 	}
13351bb76ff1Sjsg 
13361bb76ff1Sjsg 	if (IS_ERR(pages)) {
13371bb76ff1Sjsg 		drm_err(&vma->vm->i915->drm,
13381bb76ff1Sjsg 			"Failed to get pages for VMA view type %u (%ld)!\n",
13391bb76ff1Sjsg 			vma->gtt_view.type, PTR_ERR(pages));
13401bb76ff1Sjsg 		return PTR_ERR(pages);
13411bb76ff1Sjsg 	}
13421bb76ff1Sjsg 
13431bb76ff1Sjsg 	vma->pages = pages;
13441bb76ff1Sjsg 
13451bb76ff1Sjsg 	return 0;
13461bb76ff1Sjsg }
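/*
 * Illustrative sketch (not part of the original file): the pairing
 * expected when borrowing vma->pages, per the rule documented in
 * __i915_vma_get_pages() above.
 */
#if 0
static int example_walk_vma_pages(struct i915_vma *vma)
{
	int err;

	err = i915_vma_get_pages(vma);	/* also pins obj->mm.pages */
	if (err)
		return err;

	/* ... vma->pages may be walked safely here ... */

	i915_vma_put_pages(vma);	/* may free a remapped/partial table */
	return 0;
}
#endif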
13471bb76ff1Sjsg 
13481bb76ff1Sjsg I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
13491bb76ff1Sjsg {
13501bb76ff1Sjsg 	int err;
13517f4dd379Sjsg 
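	/* Fast path: take an extra reference only if the pages are already held. */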
1352c349dbc7Sjsg 	if (atomic_add_unless(&vma->pages_count, 1, 0))
13537f4dd379Sjsg 		return 0;
13547f4dd379Sjsg 
1355c349dbc7Sjsg 	err = i915_gem_object_pin_pages(vma->obj);
1356c349dbc7Sjsg 	if (err)
13575ca02815Sjsg 		return err;
1358c349dbc7Sjsg 
13591bb76ff1Sjsg 	err = __i915_vma_get_pages(vma);
13605ca02815Sjsg 	if (err)
13611bb76ff1Sjsg 		goto err_unpin;
13621bb76ff1Sjsg 
13631bb76ff1Sjsg 	vma->page_sizes = vma->obj->mm.page_sizes;
1364c349dbc7Sjsg 	atomic_inc(&vma->pages_count);
1365c349dbc7Sjsg 
13661bb76ff1Sjsg 	return 0;
13671bb76ff1Sjsg 
13681bb76ff1Sjsg err_unpin:
13695ca02815Sjsg 	__i915_gem_object_unpin_pages(vma->obj);
1370c349dbc7Sjsg 
1371c349dbc7Sjsg 	return err;
1372c349dbc7Sjsg }
1373c349dbc7Sjsg 
13741bb76ff1Sjsg void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
13751bb76ff1Sjsg {
1376f005ef32Sjsg 	struct intel_gt *gt;
1377f005ef32Sjsg 	int id;
1378f005ef32Sjsg 
1379f005ef32Sjsg 	if (!tlb)
1380f005ef32Sjsg 		return;
1381f005ef32Sjsg 
13821bb76ff1Sjsg 	/*
13831bb76ff1Sjsg 	 * Before we release the pages that were bound by this vma, we
13841bb76ff1Sjsg 	 * must invalidate all the TLBs that may still have a reference
13851bb76ff1Sjsg 	 * back to our physical address. It only needs to be done once,
13861bb76ff1Sjsg 	 * so after updating the PTE to point away from the pages, record
13871bb76ff1Sjsg 	 * the most recent TLB invalidation seqno, and if we have not yet
13881bb76ff1Sjsg 	 * flushed the TLBs upon release, perform a full invalidation.
13891bb76ff1Sjsg 	 */
1390f005ef32Sjsg 	for_each_gt(gt, vm->i915, id)
1391f005ef32Sjsg 		WRITE_ONCE(tlb[id],
1392f005ef32Sjsg 			   intel_gt_next_invalidate_tlb_full(gt));
13931bb76ff1Sjsg }
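/*
 * Sketch of the consuming side (assumed from gt/intel_tlb.h, not shown
 * in this file): when the backing pages are finally released, each
 * recorded seqno is handed back to its gt, which performs a full TLB
 * invalidation only if none has happened since:
 *
 *	for_each_gt(gt, i915, id)
 *		intel_gt_invalidate_tlb_full(gt, tlb[id]);
 */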
13941bb76ff1Sjsg 
1395c349dbc7Sjsg static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
1396c349dbc7Sjsg {
1397c349dbc7Sjsg 	/* We allocate under i915_vma_get_pages(), so beware the shrinker */
1398c349dbc7Sjsg 	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
13991bb76ff1Sjsg 
1400c349dbc7Sjsg 	if (atomic_sub_return(count, &vma->pages_count) == 0) {
14011bb76ff1Sjsg 		if (vma->pages != vma->obj->mm.pages) {
14021bb76ff1Sjsg 			sg_free_table(vma->pages);
14031bb76ff1Sjsg 			kfree(vma->pages);
14041bb76ff1Sjsg 		}
14051bb76ff1Sjsg 		vma->pages = NULL;
14061bb76ff1Sjsg 
1407c349dbc7Sjsg 		i915_gem_object_unpin_pages(vma->obj);
1408c349dbc7Sjsg 	}
1409c349dbc7Sjsg }
1410c349dbc7Sjsg 
14111bb76ff1Sjsg I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
1412c349dbc7Sjsg {
1413c349dbc7Sjsg 	if (atomic_add_unless(&vma->pages_count, -1, 1))
1414c349dbc7Sjsg 		return;
1415c349dbc7Sjsg 
1416c349dbc7Sjsg 	__vma_put_pages(vma, 1);
1417c349dbc7Sjsg }
1418c349dbc7Sjsg 
1419c349dbc7Sjsg static void vma_unbind_pages(struct i915_vma *vma)
1420c349dbc7Sjsg {
1421c349dbc7Sjsg 	unsigned int count;
1422c349dbc7Sjsg 
1423c349dbc7Sjsg 	lockdep_assert_held(&vma->vm->mutex);
1424c349dbc7Sjsg 
1425c349dbc7Sjsg 	/* The upper portion of pages_count is the number of bindings */
1426c349dbc7Sjsg 	count = atomic_read(&vma->pages_count);
1427c349dbc7Sjsg 	count >>= I915_VMA_PAGES_BIAS;
1428c349dbc7Sjsg 	GEM_BUG_ON(!count);
1429c349dbc7Sjsg 
1430c349dbc7Sjsg 	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
1431c349dbc7Sjsg }
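/*
 * Sketch of the pages_count packing used above (constants assumed from
 * i915_vma.h): one atomic holds two counters, page references in the
 * low bits and the number of bindings above I915_VMA_PAGES_BIAS:
 *
 *	bind:	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
 *		(one binding plus one page reference in a single add)
 *	unbind:	count = pages_count >> I915_VMA_PAGES_BIAS;
 *		__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
 */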
1432c349dbc7Sjsg 
1433ad8b1aafSjsg int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1434ad8b1aafSjsg 		    u64 size, u64 alignment, u64 flags)
1435c349dbc7Sjsg {
1436c349dbc7Sjsg 	struct i915_vma_work *work = NULL;
14371bb76ff1Sjsg 	struct dma_fence *moving = NULL;
14381bb76ff1Sjsg 	struct i915_vma_resource *vma_res = NULL;
1439*7f10cbd3Sjsg 	intel_wakeref_t wakeref;
1440c349dbc7Sjsg 	unsigned int bound;
1441c349dbc7Sjsg 	int err;
1442c349dbc7Sjsg 
14435ca02815Sjsg 	assert_vma_held(vma);
14441bb76ff1Sjsg 	GEM_BUG_ON(!ww);
1445ad8b1aafSjsg 
1446c349dbc7Sjsg 	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
1447c349dbc7Sjsg 	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
1448c349dbc7Sjsg 
1449c349dbc7Sjsg 	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
1450c349dbc7Sjsg 
1451c349dbc7Sjsg 	/* First try and grab the pin without rebinding the vma */
14521bb76ff1Sjsg 	if (try_qad_pin(vma, flags))
1453c349dbc7Sjsg 		return 0;
1454c349dbc7Sjsg 
14551bb76ff1Sjsg 	err = i915_vma_get_pages(vma);
1456c349dbc7Sjsg 	if (err)
1457c349dbc7Sjsg 		return err;
1458c349dbc7Sjsg 
1459*7f10cbd3Sjsg 	/*
1460*7f10cbd3Sjsg 	 * In case of a global GTT, we must hold a runtime-pm wakeref
1461*7f10cbd3Sjsg 	 * while global PTEs are updated.  In other cases, we hold
1462*7f10cbd3Sjsg 	 * the rpm reference while the VMA is active.  Since runtime
1463*7f10cbd3Sjsg 	 * resume may require allocations, which are forbidden inside
1464*7f10cbd3Sjsg 	 * vm->mutex, get the first rpm wakeref outside of the mutex.
1465*7f10cbd3Sjsg 	 */
1466ad8b1aafSjsg 	wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
1467ad8b1aafSjsg 
1468c349dbc7Sjsg 	if (flags & vma->vm->bind_async_flags) {
14695ca02815Sjsg 		/* lock VM */
14705ca02815Sjsg 		err = i915_vm_lock_objects(vma->vm, ww);
14715ca02815Sjsg 		if (err)
14725ca02815Sjsg 			goto err_rpm;
14735ca02815Sjsg 
1474c349dbc7Sjsg 		work = i915_vma_work();
1475c349dbc7Sjsg 		if (!work) {
1476c349dbc7Sjsg 			err = -ENOMEM;
1477ad8b1aafSjsg 			goto err_rpm;
1478c349dbc7Sjsg 		}
1479c349dbc7Sjsg 
14801bb76ff1Sjsg 		work->vm = vma->vm;
14811bb76ff1Sjsg 
14821bb76ff1Sjsg 		err = i915_gem_object_get_moving_fence(vma->obj, &moving);
14831bb76ff1Sjsg 		if (err)
14841bb76ff1Sjsg 			goto err_rpm;
14851bb76ff1Sjsg 
14861bb76ff1Sjsg 		dma_fence_work_chain(&work->base, moving);
1487c349dbc7Sjsg 
1488ad8b1aafSjsg 		/* Allocate enough page directories to cover the PTEs used */
1489ad8b1aafSjsg 		if (vma->vm->allocate_va_range) {
1490ad8b1aafSjsg 			err = i915_vm_alloc_pt_stash(vma->vm,
1491ad8b1aafSjsg 						     &work->stash,
1492ad8b1aafSjsg 						     vma->size);
1493c349dbc7Sjsg 			if (err)
1494c349dbc7Sjsg 				goto err_fence;
1495c349dbc7Sjsg 
14965ca02815Sjsg 			err = i915_vm_map_pt_stash(vma->vm, &work->stash);
1497ad8b1aafSjsg 			if (err)
1498ad8b1aafSjsg 				goto err_fence;
1499ad8b1aafSjsg 		}
1500ad8b1aafSjsg 	}
1501ad8b1aafSjsg 
15021bb76ff1Sjsg 	vma_res = i915_vma_resource_alloc();
15031bb76ff1Sjsg 	if (IS_ERR(vma_res)) {
15041bb76ff1Sjsg 		err = PTR_ERR(vma_res);
15051bb76ff1Sjsg 		goto err_fence;
15061bb76ff1Sjsg 	}
15071bb76ff1Sjsg 
1508ad8b1aafSjsg 	/*
1509ad8b1aafSjsg 	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
1510ad8b1aafSjsg 	 *
1511ad8b1aafSjsg 	 * We conflate the Global GTT with the user's vma when using the
1512ad8b1aafSjsg 	 * aliasing-ppgtt, but it is still vitally important to try and
1513ad8b1aafSjsg 	 * keep the use cases distinct. For example, userptr objects are
1514ad8b1aafSjsg 	 * not allowed inside the Global GTT as that will cause lock
1515ad8b1aafSjsg 	 * inversions when we have to evict them via the mmu_notifier callbacks -
1516ad8b1aafSjsg 	 * but they are allowed to be part of the user ppGTT which can never
1517ad8b1aafSjsg 	 * be mapped. As such we try to give the distinct users of the same
1518ad8b1aafSjsg 	 * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
1519ad8b1aafSjsg 	 * and i915_ppgtt separate].
1520ad8b1aafSjsg 	 *
1521ad8b1aafSjsg 	 * NB this may cause us to mask real lock inversions -- while the
1522ad8b1aafSjsg 	 * code is safe today, lockdep may not be able to spot future
1523ad8b1aafSjsg 	 * transgressions.
1524ad8b1aafSjsg 	 */
1525ad8b1aafSjsg 	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
1526ad8b1aafSjsg 					      !(flags & PIN_GLOBAL));
1527ad8b1aafSjsg 	if (err)
15281bb76ff1Sjsg 		goto err_vma_res;
1529ad8b1aafSjsg 
1530ad8b1aafSjsg 	/* No more allocations allowed now we hold vm->mutex */
1531ad8b1aafSjsg 
1532c349dbc7Sjsg 	if (unlikely(i915_vma_is_closed(vma))) {
1533c349dbc7Sjsg 		err = -ENOENT;
1534c349dbc7Sjsg 		goto err_unlock;
1535c349dbc7Sjsg 	}
1536c349dbc7Sjsg 
1537c349dbc7Sjsg 	bound = atomic_read(&vma->flags);
1538c349dbc7Sjsg 	if (unlikely(bound & I915_VMA_ERROR)) {
1539c349dbc7Sjsg 		err = -ENOMEM;
1540c349dbc7Sjsg 		goto err_unlock;
1541c349dbc7Sjsg 	}
1542c349dbc7Sjsg 
1543c349dbc7Sjsg 	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
1544c349dbc7Sjsg 		err = -EAGAIN; /* pins are meant to be fairly temporary */
1545c349dbc7Sjsg 		goto err_unlock;
1546c349dbc7Sjsg 	}
1547c349dbc7Sjsg 
1548c349dbc7Sjsg 	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
15491bb76ff1Sjsg 		if (!(flags & PIN_VALIDATE))
1550c349dbc7Sjsg 			__i915_vma_pin(vma);
1551c349dbc7Sjsg 		goto err_unlock;
1552c349dbc7Sjsg 	}
1553c349dbc7Sjsg 
1554c349dbc7Sjsg 	err = i915_active_acquire(&vma->active);
1555c349dbc7Sjsg 	if (err)
1556c349dbc7Sjsg 		goto err_unlock;
1557c349dbc7Sjsg 
1558c349dbc7Sjsg 	if (!(bound & I915_VMA_BIND_MASK)) {
15591bb76ff1Sjsg 		err = i915_vma_insert(vma, ww, size, alignment, flags);
1560c349dbc7Sjsg 		if (err)
1561c349dbc7Sjsg 			goto err_active;
1562c349dbc7Sjsg 
1563c349dbc7Sjsg 		if (i915_is_ggtt(vma->vm))
1564c349dbc7Sjsg 			__i915_vma_set_map_and_fenceable(vma);
1565c349dbc7Sjsg 	}
1566c349dbc7Sjsg 
1567c349dbc7Sjsg 	GEM_BUG_ON(!vma->pages);
1568c349dbc7Sjsg 	err = i915_vma_bind(vma,
1569f005ef32Sjsg 			    vma->obj->pat_index,
15701bb76ff1Sjsg 			    flags, work, vma_res);
15711bb76ff1Sjsg 	vma_res = NULL;
1572c349dbc7Sjsg 	if (err)
1573c349dbc7Sjsg 		goto err_remove;
1574c349dbc7Sjsg 
1575c349dbc7Sjsg 	/* There should only be at most 2 active bindings (user, global) */
1576c349dbc7Sjsg 	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
1577c349dbc7Sjsg 	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
1578c349dbc7Sjsg 	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1579c349dbc7Sjsg 
15801bb76ff1Sjsg 	if (!(flags & PIN_VALIDATE)) {
1581c349dbc7Sjsg 		__i915_vma_pin(vma);
1582c349dbc7Sjsg 		GEM_BUG_ON(!i915_vma_is_pinned(vma));
15831bb76ff1Sjsg 	}
1584c349dbc7Sjsg 	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
1585c349dbc7Sjsg 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
1586c349dbc7Sjsg 
1587c349dbc7Sjsg err_remove:
1588c349dbc7Sjsg 	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
1589c349dbc7Sjsg 		i915_vma_detach(vma);
1590c349dbc7Sjsg 		drm_mm_remove_node(&vma->node);
1591c349dbc7Sjsg 	}
1592c349dbc7Sjsg err_active:
1593c349dbc7Sjsg 	i915_active_release(&vma->active);
1594c349dbc7Sjsg err_unlock:
1595c349dbc7Sjsg 	mutex_unlock(&vma->vm->mutex);
15961bb76ff1Sjsg err_vma_res:
15971bb76ff1Sjsg 	i915_vma_resource_free(vma_res);
1598c349dbc7Sjsg err_fence:
1599c349dbc7Sjsg 	if (work)
1600ad8b1aafSjsg 		dma_fence_work_commit_imm(&work->base);
1601ad8b1aafSjsg err_rpm:
1602c349dbc7Sjsg 	intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
16031bb76ff1Sjsg 
16041bb76ff1Sjsg 	if (moving)
16051bb76ff1Sjsg 		dma_fence_put(moving);
16061bb76ff1Sjsg 
16071bb76ff1Sjsg 	i915_vma_put_pages(vma);
1608c349dbc7Sjsg 	return err;
1609c349dbc7Sjsg }
1610c349dbc7Sjsg 
1611c349dbc7Sjsg static void flush_idle_contexts(struct intel_gt *gt)
1612c349dbc7Sjsg {
1613c349dbc7Sjsg 	struct intel_engine_cs *engine;
1614c349dbc7Sjsg 	enum intel_engine_id id;
1615c349dbc7Sjsg 
1616c349dbc7Sjsg 	for_each_engine(engine, gt, id)
1617c349dbc7Sjsg 		intel_engine_flush_barriers(engine);
1618c349dbc7Sjsg 
1619c349dbc7Sjsg 	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
1620c349dbc7Sjsg }
1621c349dbc7Sjsg 
16221bb76ff1Sjsg static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1623ad8b1aafSjsg 			   u32 align, unsigned int flags)
1624c349dbc7Sjsg {
1625c349dbc7Sjsg 	struct i915_address_space *vm = vma->vm;
1626f005ef32Sjsg 	struct intel_gt *gt;
1627f005ef32Sjsg 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
1628c349dbc7Sjsg 	int err;
1629c349dbc7Sjsg 
1630c349dbc7Sjsg 	do {
1631ad8b1aafSjsg 		err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
16321bb76ff1Sjsg 
1633c349dbc7Sjsg 		if (err != -ENOSPC) {
1634c349dbc7Sjsg 			if (!err) {
1635c349dbc7Sjsg 				err = i915_vma_wait_for_bind(vma);
1636c349dbc7Sjsg 				if (err)
1637c349dbc7Sjsg 					i915_vma_unpin(vma);
1638c349dbc7Sjsg 			}
1639c349dbc7Sjsg 			return err;
1640c349dbc7Sjsg 		}
1641c349dbc7Sjsg 
1642c349dbc7Sjsg 		/* Unlike i915_vma_pin, we don't take no for an answer! */
1643f005ef32Sjsg 		list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
1644f005ef32Sjsg 			flush_idle_contexts(gt);
1645c349dbc7Sjsg 		if (mutex_lock_interruptible(&vm->mutex) == 0) {
16461bb76ff1Sjsg 			/*
16471bb76ff1Sjsg 			 * We pass NULL ww here, as we don't want to unbind
16481bb76ff1Sjsg 			 * locked objects when called from execbuf when pinning
16491bb76ff1Sjsg 			 * is removed. This would probably regress badly.
16501bb76ff1Sjsg 			 */
16516823e11cSjsg 			i915_gem_evict_vm(vm, NULL, NULL);
1652c349dbc7Sjsg 			mutex_unlock(&vm->mutex);
1653c349dbc7Sjsg 		}
1654c349dbc7Sjsg 	} while (1);
16557f4dd379Sjsg }
16567f4dd379Sjsg 
16571bb76ff1Sjsg int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
16581bb76ff1Sjsg 		  u32 align, unsigned int flags)
16591bb76ff1Sjsg {
16601bb76ff1Sjsg 	struct i915_gem_ww_ctx _ww;
16611bb76ff1Sjsg 	int err;
16621bb76ff1Sjsg 
16631bb76ff1Sjsg 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
16641bb76ff1Sjsg 
16651bb76ff1Sjsg 	if (ww)
16661bb76ff1Sjsg 		return __i915_ggtt_pin(vma, ww, align, flags);
16671bb76ff1Sjsg 
16681bb76ff1Sjsg 	lockdep_assert_not_held(&vma->obj->base.resv->lock.base);
16691bb76ff1Sjsg 
16701bb76ff1Sjsg 	for_i915_gem_ww(&_ww, err, true) {
16711bb76ff1Sjsg 		err = i915_gem_object_lock(vma->obj, &_ww);
16721bb76ff1Sjsg 		if (!err)
16731bb76ff1Sjsg 			err = __i915_ggtt_pin(vma, &_ww, align, flags);
16741bb76ff1Sjsg 	}
16751bb76ff1Sjsg 
16761bb76ff1Sjsg 	return err;
16771bb76ff1Sjsg }
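/*
 * Illustrative usage (PIN_HIGH assumed from i915_gem_gtt.h): a caller
 * without an outer ww context passes NULL and lets the helper run its
 * own transaction:
 *
 *	err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
 */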
16781bb76ff1Sjsg 
1679f005ef32Sjsg /**
1680f005ef32Sjsg  * i915_ggtt_clear_scanout - Clear the scanout flag for all of an object's ggtt vmas
1681f005ef32Sjsg  * @obj: i915 GEM object
1682f005ef32Sjsg  * This function clears the scanout flags for an object's ggtt vmas. These
1683f005ef32Sjsg  * flags are set when the object is pinned for display use; this function,
1684f005ef32Sjsg  * which clears them all, is intended to be called by the frontbuffer tracking
1685f005ef32Sjsg  * code when the frontbuffer is about to be released.
1686f005ef32Sjsg  */
1687f005ef32Sjsg void i915_ggtt_clear_scanout(struct drm_i915_gem_object *obj)
1688f005ef32Sjsg {
1689f005ef32Sjsg 	struct i915_vma *vma;
1690f005ef32Sjsg 
1691f005ef32Sjsg 	spin_lock(&obj->vma.lock);
1692f005ef32Sjsg 	for_each_ggtt_vma(vma, obj) {
1693f005ef32Sjsg 		i915_vma_clear_scanout(vma);
1694f005ef32Sjsg 		vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
1695f005ef32Sjsg 	}
1696f005ef32Sjsg 	spin_unlock(&obj->vma.lock);
1697f005ef32Sjsg }
1698f005ef32Sjsg 
1699ad8b1aafSjsg static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
17007f4dd379Sjsg {
17017f4dd379Sjsg 	/*
17027f4dd379Sjsg 	 * We defer actually closing, unbinding and destroying the VMA until
17037f4dd379Sjsg 	 * the next idle point, or if the object is freed in the meantime. By
17047f4dd379Sjsg 	 * postponing the unbind, we allow for it to be resurrected by the
17057f4dd379Sjsg 	 * client, avoiding the work required to rebind the VMA. This is
17067f4dd379Sjsg 	 * advantageous for DRI, where the client/server pass objects
17077f4dd379Sjsg 	 * between themselves, temporarily opening a local VMA to the
17087f4dd379Sjsg 	 * object, and then closing it again. The same object is then reused
17097f4dd379Sjsg 	 * on the next frame (or two, depending on the depth of the swap queue)
17107f4dd379Sjsg 	 * causing us to rebind the VMA once more. This ends up being a lot
17117f4dd379Sjsg 	 * of wasted work for the steady state.
17127f4dd379Sjsg 	 */
1713ad8b1aafSjsg 	GEM_BUG_ON(i915_vma_is_closed(vma));
1714c349dbc7Sjsg 	list_add(&vma->closed_link, &gt->closed_vma);
1715ad8b1aafSjsg }
1716ad8b1aafSjsg 
1717ad8b1aafSjsg void i915_vma_close(struct i915_vma *vma)
1718ad8b1aafSjsg {
1719ad8b1aafSjsg 	struct intel_gt *gt = vma->vm->gt;
1720ad8b1aafSjsg 	unsigned long flags;
1721ad8b1aafSjsg 
1722ad8b1aafSjsg 	if (i915_vma_is_ggtt(vma))
1723ad8b1aafSjsg 		return;
1724ad8b1aafSjsg 
1725ad8b1aafSjsg 	GEM_BUG_ON(!atomic_read(&vma->open_count));
1726ad8b1aafSjsg 	if (atomic_dec_and_lock_irqsave(&vma->open_count,
1727ad8b1aafSjsg 					&gt->closed_lock,
1728ad8b1aafSjsg 					flags)) {
1729ad8b1aafSjsg 		__vma_close(vma, gt);
1730c349dbc7Sjsg 		spin_unlock_irqrestore(&gt->closed_lock, flags);
1731c349dbc7Sjsg 	}
1732ad8b1aafSjsg }
1733c349dbc7Sjsg 
1734c349dbc7Sjsg static void __i915_vma_remove_closed(struct i915_vma *vma)
1735c349dbc7Sjsg {
1736c349dbc7Sjsg 	list_del_init(&vma->closed_link);
17377f4dd379Sjsg }
17387f4dd379Sjsg 
17397f4dd379Sjsg void i915_vma_reopen(struct i915_vma *vma)
17407f4dd379Sjsg {
17411bb76ff1Sjsg 	struct intel_gt *gt = vma->vm->gt;
17421bb76ff1Sjsg 
17431bb76ff1Sjsg 	spin_lock_irq(&gt->closed_lock);
17447f4dd379Sjsg 	if (i915_vma_is_closed(vma))
1745c349dbc7Sjsg 		__i915_vma_remove_closed(vma);
17461bb76ff1Sjsg 	spin_unlock_irq(&gt->closed_lock);
17477f4dd379Sjsg }
17487f4dd379Sjsg 
17491bb76ff1Sjsg static void force_unbind(struct i915_vma *vma)
1750c349dbc7Sjsg {
17511bb76ff1Sjsg 	if (!drm_mm_node_allocated(&vma->node))
17521bb76ff1Sjsg 		return;
1753c349dbc7Sjsg 
1754c349dbc7Sjsg 	atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1755c349dbc7Sjsg 	WARN_ON(__i915_vma_unbind(vma));
1756c349dbc7Sjsg 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1757c349dbc7Sjsg }
1758c349dbc7Sjsg 
17591bb76ff1Sjsg static void release_references(struct i915_vma *vma, struct intel_gt *gt,
17601bb76ff1Sjsg 			       bool vm_ddestroy)
17611bb76ff1Sjsg {
1762c349dbc7Sjsg 	struct drm_i915_gem_object *obj = vma->obj;
1763c349dbc7Sjsg 
17641bb76ff1Sjsg 	GEM_BUG_ON(i915_vma_is_active(vma));
17651bb76ff1Sjsg 
1766c349dbc7Sjsg 	spin_lock(&obj->vma.lock);
1767c349dbc7Sjsg 	list_del(&vma->obj_link);
1768ad8b1aafSjsg 	if (!RB_EMPTY_NODE(&vma->obj_node))
1769c349dbc7Sjsg 		rb_erase(&vma->obj_node, &obj->vma.tree);
1770c349dbc7Sjsg 
17711bb76ff1Sjsg 	spin_unlock(&obj->vma.lock);
17721bb76ff1Sjsg 
17731bb76ff1Sjsg 	spin_lock_irq(&gt->closed_lock);
1774c349dbc7Sjsg 	__i915_vma_remove_closed(vma);
17751bb76ff1Sjsg 	spin_unlock_irq(&gt->closed_lock);
17761bb76ff1Sjsg 
17771bb76ff1Sjsg 	if (vm_ddestroy)
17781bb76ff1Sjsg 		i915_vm_resv_put(vma->vm);
1779c349dbc7Sjsg 
1780f005ef32Sjsg 	/* Wait for async active retire */
1781f005ef32Sjsg 	i915_active_wait(&vma->active);
1782c349dbc7Sjsg 	i915_active_fini(&vma->active);
17831bb76ff1Sjsg 	GEM_WARN_ON(vma->resource);
1784c349dbc7Sjsg 	i915_vma_free(vma);
1785c349dbc7Sjsg }
1786c349dbc7Sjsg 
1787f005ef32Sjsg /*
17881bb76ff1Sjsg  * i915_vma_destroy_locked - Remove all weak references to the vma and put
17891bb76ff1Sjsg  * the initial reference.
17901bb76ff1Sjsg  *
17911bb76ff1Sjsg  * This function should be called when it's decided the vma isn't needed
17921bb76ff1Sjsg  * anymore. The caller must ensure that it doesn't race with another lookup
17931bb76ff1Sjsg  * plus destroy, typically by taking an appropriate reference.
17941bb76ff1Sjsg  *
17951bb76ff1Sjsg  * Current callsites are
17961bb76ff1Sjsg  * - __i915_gem_object_pages_fini()
17971bb76ff1Sjsg  * - __i915_vm_close() - Blocks the above function by taking a reference on
17981bb76ff1Sjsg  * the object.
17991bb76ff1Sjsg  * - __i915_vma_parked() - Blocks the above functions by taking a reference
18001bb76ff1Sjsg  * on the vm and a reference on the object. Also takes the object lock so
18011bb76ff1Sjsg  * destruction from __i915_vma_parked() can be blocked by holding the
18021bb76ff1Sjsg  * object lock. Since the object lock is only allowed from within i915 with
18031bb76ff1Sjsg  * an object refcount, holding the object lock also implicitly blocks the
18041bb76ff1Sjsg  * vma freeing from __i915_gem_object_pages_fini().
18051bb76ff1Sjsg  *
18061bb76ff1Sjsg  * Because of locks taken during destruction, a vma is also guaranteed to
18071bb76ff1Sjsg  * stay alive while the following locks are held if it was looked up while
18081bb76ff1Sjsg  * holding one of the locks:
18091bb76ff1Sjsg  * - vm->mutex
18101bb76ff1Sjsg  * - obj->vma.lock
18111bb76ff1Sjsg  * - gt->closed_lock
18121bb76ff1Sjsg  */
18131bb76ff1Sjsg void i915_vma_destroy_locked(struct i915_vma *vma)
18141bb76ff1Sjsg {
18151bb76ff1Sjsg 	lockdep_assert_held(&vma->vm->mutex);
18161bb76ff1Sjsg 
18171bb76ff1Sjsg 	force_unbind(vma);
18181bb76ff1Sjsg 	list_del_init(&vma->vm_link);
18191bb76ff1Sjsg 	release_references(vma, vma->vm->gt, false);
18201bb76ff1Sjsg }
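/*
 * Illustrative sketch (lookup_vma() is a hypothetical helper): per the
 * guarantees above, a vma found while holding vm->mutex stays alive
 * long enough to be destroyed under that same lock:
 *
 *	mutex_lock(&vm->mutex);
 *	vma = lookup_vma(vm);
 *	if (vma)
 *		i915_vma_destroy_locked(vma);
 *	mutex_unlock(&vm->mutex);
 */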
18211bb76ff1Sjsg 
18221bb76ff1Sjsg void i915_vma_destroy(struct i915_vma *vma)
18231bb76ff1Sjsg {
18241bb76ff1Sjsg 	struct intel_gt *gt;
18251bb76ff1Sjsg 	bool vm_ddestroy;
18261bb76ff1Sjsg 
18271bb76ff1Sjsg 	mutex_lock(&vma->vm->mutex);
18281bb76ff1Sjsg 	force_unbind(vma);
18291bb76ff1Sjsg 	list_del_init(&vma->vm_link);
18301bb76ff1Sjsg 	vm_ddestroy = vma->vm_ddestroy;
18311bb76ff1Sjsg 	vma->vm_ddestroy = false;
18321bb76ff1Sjsg 
18331bb76ff1Sjsg 	/* vma->vm may be freed when releasing vma->vm->mutex. */
18341bb76ff1Sjsg 	gt = vma->vm->gt;
18351bb76ff1Sjsg 	mutex_unlock(&vma->vm->mutex);
18361bb76ff1Sjsg 	release_references(vma, gt, vm_ddestroy);
18371bb76ff1Sjsg }
18381bb76ff1Sjsg 
1839c349dbc7Sjsg void i915_vma_parked(struct intel_gt *gt)
18407f4dd379Sjsg {
18417f4dd379Sjsg 	struct i915_vma *vma, *next;
1842c349dbc7Sjsg 	DRM_LIST_HEAD(closed);
18437f4dd379Sjsg 
1844c349dbc7Sjsg 	spin_lock_irq(&gt->closed_lock);
1845c349dbc7Sjsg 	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
1846c349dbc7Sjsg 		struct drm_i915_gem_object *obj = vma->obj;
1847c349dbc7Sjsg 		struct i915_address_space *vm = vma->vm;
1848c349dbc7Sjsg 
1849c349dbc7Sjsg 		/* XXX All to avoid keeping a reference on i915_vma itself */
1850c349dbc7Sjsg 
1851c349dbc7Sjsg 		if (!kref_get_unless_zero(&obj->base.refcount))
1852c349dbc7Sjsg 			continue;
1853c349dbc7Sjsg 
18541bb76ff1Sjsg 		if (!i915_vm_tryget(vm)) {
1855c349dbc7Sjsg 			i915_gem_object_put(obj);
1856c349dbc7Sjsg 			continue;
18577f4dd379Sjsg 		}
18587f4dd379Sjsg 
1859c349dbc7Sjsg 		list_move(&vma->closed_link, &closed);
1860c349dbc7Sjsg 	}
1861c349dbc7Sjsg 	spin_unlock_irq(&gt->closed_lock);
1862c349dbc7Sjsg 
1863c349dbc7Sjsg 	/* As the GT is held idle, no vma can be reopened as we destroy them */
1864c349dbc7Sjsg 	list_for_each_entry_safe(vma, next, &closed, closed_link) {
1865c349dbc7Sjsg 		struct drm_i915_gem_object *obj = vma->obj;
1866c349dbc7Sjsg 		struct i915_address_space *vm = vma->vm;
1867c349dbc7Sjsg 
18681bb76ff1Sjsg 		if (i915_gem_object_trylock(obj, NULL)) {
1869c349dbc7Sjsg 			INIT_LIST_HEAD(&vma->closed_link);
18701bb76ff1Sjsg 			i915_vma_destroy(vma);
18711bb76ff1Sjsg 			i915_gem_object_unlock(obj);
18721bb76ff1Sjsg 		} else {
18731bb76ff1Sjsg 			/* back you go.. */
18741bb76ff1Sjsg 			spin_lock_irq(&gt->closed_lock);
18751bb76ff1Sjsg 			list_add(&vma->closed_link, &gt->closed_vma);
18761bb76ff1Sjsg 			spin_unlock_irq(&gt->closed_lock);
18771bb76ff1Sjsg 		}
1878c349dbc7Sjsg 
1879c349dbc7Sjsg 		i915_gem_object_put(obj);
18801bb76ff1Sjsg 		i915_vm_put(vm);
1881c349dbc7Sjsg 	}
18827f4dd379Sjsg }
18837f4dd379Sjsg 
18847f4dd379Sjsg static void __i915_vma_iounmap(struct i915_vma *vma)
18857f4dd379Sjsg {
18867f4dd379Sjsg 	GEM_BUG_ON(i915_vma_is_pinned(vma));
18877f4dd379Sjsg 
18887f4dd379Sjsg 	if (vma->iomap == NULL)
18897f4dd379Sjsg 		return;
18907f4dd379Sjsg 
18911bb76ff1Sjsg 	if (page_unmask_bits(vma->iomap))
18921bb76ff1Sjsg 		__i915_gem_object_release_map(vma->obj);
1893e64fda40Sjsg 	else
18947f4dd379Sjsg 		io_mapping_unmap(vma->iomap);
18957f4dd379Sjsg 	vma->iomap = NULL;
18967f4dd379Sjsg }
18977f4dd379Sjsg 
18987f4dd379Sjsg void i915_vma_revoke_mmap(struct i915_vma *vma)
18997f4dd379Sjsg {
1900c349dbc7Sjsg 	struct drm_vma_offset_node *node;
19017f4dd379Sjsg 	u64 vma_offset;
19027f4dd379Sjsg 
19037f4dd379Sjsg 	if (!i915_vma_has_userfault(vma))
19047f4dd379Sjsg 		return;
19057f4dd379Sjsg 
19067f4dd379Sjsg 	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
19077f4dd379Sjsg 	GEM_BUG_ON(!vma->obj->userfault_count);
19087f4dd379Sjsg 
1909c349dbc7Sjsg 	node = &vma->mmo->vma_node;
19101bb76ff1Sjsg 	vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
19117f4dd379Sjsg #ifdef __linux__
19127f4dd379Sjsg 	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
19137f4dd379Sjsg 			    drm_vma_node_offset_addr(node) + vma_offset,
19147f4dd379Sjsg 			    vma->size,
19157f4dd379Sjsg 			    1);
19167f4dd379Sjsg #else
19177f4dd379Sjsg 	struct drm_i915_private *dev_priv = vma->obj->base.dev->dev_private;
19187f4dd379Sjsg 	struct vm_page *pg;
19197f4dd379Sjsg 
19207f4dd379Sjsg 	for (pg = &dev_priv->pgs[atop(vma->node.start)];
19217f4dd379Sjsg 	    pg != &dev_priv->pgs[atop(vma->node.start + vma->size)];
19227f4dd379Sjsg 	    pg++)
19237f4dd379Sjsg 		pmap_page_protect(pg, PROT_NONE);
19247f4dd379Sjsg #endif
19257f4dd379Sjsg 
19267f4dd379Sjsg 	i915_vma_unset_userfault(vma);
19277f4dd379Sjsg 	if (!--vma->obj->userfault_count)
19287f4dd379Sjsg 		list_del(&vma->obj->userfault_link);
19297f4dd379Sjsg }
19307f4dd379Sjsg 
1931ad8b1aafSjsg static int
1932ad8b1aafSjsg __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
1933ad8b1aafSjsg {
1934ad8b1aafSjsg 	return __i915_request_await_exclusive(rq, &vma->active);
1935ad8b1aafSjsg }
1936ad8b1aafSjsg 
19371bb76ff1Sjsg static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
19387f4dd379Sjsg {
1939c349dbc7Sjsg 	int err;
19407f4dd379Sjsg 
1941c349dbc7Sjsg 	/* Wait for the vma to be bound before we start! */
1942ad8b1aafSjsg 	err = __i915_request_await_bind(rq, vma);
1943c349dbc7Sjsg 	if (err)
1944c349dbc7Sjsg 		return err;
19457f4dd379Sjsg 
1946c349dbc7Sjsg 	return i915_active_add_request(&vma->active, rq);
19477f4dd379Sjsg }
19487f4dd379Sjsg 
19491bb76ff1Sjsg int _i915_vma_move_to_active(struct i915_vma *vma,
19507f4dd379Sjsg 			     struct i915_request *rq,
19511bb76ff1Sjsg 			     struct dma_fence *fence,
19527f4dd379Sjsg 			     unsigned int flags)
19537f4dd379Sjsg {
19547f4dd379Sjsg 	struct drm_i915_gem_object *obj = vma->obj;
1955c349dbc7Sjsg 	int err;
19567f4dd379Sjsg 
1957c349dbc7Sjsg 	assert_object_held(obj);
19587f4dd379Sjsg 
19591bb76ff1Sjsg 	GEM_BUG_ON(!vma->pages);
19601bb76ff1Sjsg 
1961f005ef32Sjsg 	if (!(flags & __EXEC_OBJECT_NO_REQUEST_AWAIT)) {
1962f005ef32Sjsg 		err = i915_request_await_object(rq, vma->obj, flags & EXEC_OBJECT_WRITE);
1963f005ef32Sjsg 		if (unlikely(err))
1964f005ef32Sjsg 			return err;
1965f005ef32Sjsg 	}
1966c349dbc7Sjsg 	err = __i915_vma_move_to_active(vma, rq);
1967c349dbc7Sjsg 	if (unlikely(err))
1968c349dbc7Sjsg 		return err;
19697f4dd379Sjsg 
19701bb76ff1Sjsg 	/*
19711bb76ff1Sjsg 	 * Reserve fences slot early to prevent an allocation after preparing
19721bb76ff1Sjsg 	 * the workload and associating fences with dma_resv.
19731bb76ff1Sjsg 	 */
19741bb76ff1Sjsg 	if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
19751bb76ff1Sjsg 		struct dma_fence *curr;
19761bb76ff1Sjsg 		int idx;
19771bb76ff1Sjsg 
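		/*
		 * Empty-bodied walk: iterate only to count the fences in the
		 * (possible) array, leaving idx as the number to reserve.
		 */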
19781bb76ff1Sjsg 		dma_fence_array_for_each(curr, idx, fence)
19791bb76ff1Sjsg 			;
19801bb76ff1Sjsg 		err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
19811bb76ff1Sjsg 		if (unlikely(err))
19821bb76ff1Sjsg 			return err;
19831bb76ff1Sjsg 	}
19841bb76ff1Sjsg 
19857f4dd379Sjsg 	if (flags & EXEC_OBJECT_WRITE) {
1986c349dbc7Sjsg 		struct intel_frontbuffer *front;
1987c349dbc7Sjsg 
1988f005ef32Sjsg 		front = i915_gem_object_get_frontbuffer(obj);
1989c349dbc7Sjsg 		if (unlikely(front)) {
1990c349dbc7Sjsg 			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
1991c349dbc7Sjsg 				i915_active_add_request(&front->write, rq);
1992c349dbc7Sjsg 			intel_frontbuffer_put(front);
1993c349dbc7Sjsg 		}
19941bb76ff1Sjsg 	}
1995c349dbc7Sjsg 
19961bb76ff1Sjsg 	if (fence) {
19971bb76ff1Sjsg 		struct dma_fence *curr;
19981bb76ff1Sjsg 		enum dma_resv_usage usage;
19991bb76ff1Sjsg 		int idx;
20001bb76ff1Sjsg 
20011bb76ff1Sjsg 		if (flags & EXEC_OBJECT_WRITE) {
20021bb76ff1Sjsg 			usage = DMA_RESV_USAGE_WRITE;
20037f4dd379Sjsg 			obj->write_domain = I915_GEM_DOMAIN_RENDER;
20047f4dd379Sjsg 			obj->read_domains = 0;
2005c349dbc7Sjsg 		} else {
20061bb76ff1Sjsg 			usage = DMA_RESV_USAGE_READ;
20071bb76ff1Sjsg 			obj->write_domain = 0;
20085ca02815Sjsg 		}
2009c349dbc7Sjsg 
20101bb76ff1Sjsg 		dma_fence_array_for_each(curr, idx, fence)
20111bb76ff1Sjsg 			dma_resv_add_fence(vma->obj->base.resv, curr, usage);
20127f4dd379Sjsg 	}
2013ad8b1aafSjsg 
2014ad8b1aafSjsg 	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
2015ad8b1aafSjsg 		i915_active_add_request(&vma->fence->active, rq);
2016ad8b1aafSjsg 
20177f4dd379Sjsg 	obj->read_domains |= I915_GEM_GPU_DOMAINS;
2018c349dbc7Sjsg 	obj->mm.dirty = true;
20197f4dd379Sjsg 
2020c349dbc7Sjsg 	GEM_BUG_ON(!i915_vma_is_active(vma));
20217f4dd379Sjsg 	return 0;
20227f4dd379Sjsg }
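/*
 * Illustrative sketch (assumed from the i915_vma_move_to_active()
 * wrapper in i915_vma.h): most callers pass the request's own fence,
 * with the object lock already held:
 *
 *	err = _i915_vma_move_to_active(vma, rq, &rq->fence, flags);
 */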
20237f4dd379Sjsg 
20241bb76ff1Sjsg struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
20257f4dd379Sjsg {
20261bb76ff1Sjsg 	struct i915_vma_resource *vma_res = vma->resource;
20271bb76ff1Sjsg 	struct dma_fence *unbind_fence;
20281bb76ff1Sjsg 
2029c349dbc7Sjsg 	GEM_BUG_ON(i915_vma_is_pinned(vma));
20301bb76ff1Sjsg 	assert_vma_held_evict(vma);
2031c349dbc7Sjsg 
20327f4dd379Sjsg 	if (i915_vma_is_map_and_fenceable(vma)) {
2033ad8b1aafSjsg 		/* Force a pagefault for domain tracking on next user access */
2034ad8b1aafSjsg 		i915_vma_revoke_mmap(vma);
2035ad8b1aafSjsg 
20367f4dd379Sjsg 		/*
20377f4dd379Sjsg 		 * Check that we have flushed all writes through the GGTT
20387f4dd379Sjsg 		 * before the unbind; otherwise, due to the non-strict nature of
20397f4dd379Sjsg 		 * those indirect writes, they may end up referencing the GGTT PTE
20407f4dd379Sjsg 		 * after the unbind.
2041c349dbc7Sjsg 		 *
2042c349dbc7Sjsg 		 * Note that we may be concurrently poking at the GGTT_WRITE
2043c349dbc7Sjsg 		 * bit from set-domain, as we mark all GGTT vma associated
2044c349dbc7Sjsg 		 * with an object. We know this is for another vma, as we
2045c349dbc7Sjsg 		 * are currently unbinding this one -- so if this vma will be
2046c349dbc7Sjsg 		 * reused, it will be refaulted and have its dirty bit set
2047c349dbc7Sjsg 		 * before the next write.
20487f4dd379Sjsg 		 */
20497f4dd379Sjsg 		i915_vma_flush_writes(vma);
20507f4dd379Sjsg 
20517f4dd379Sjsg 		/* release the fence reg _after_ flushing */
2052ad8b1aafSjsg 		i915_vma_revoke_fence(vma);
20537f4dd379Sjsg 
2054c349dbc7Sjsg 		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
20557f4dd379Sjsg 	}
20561bb76ff1Sjsg 
20571bb76ff1Sjsg 	__i915_vma_iounmap(vma);
20581bb76ff1Sjsg 
20597f4dd379Sjsg 	GEM_BUG_ON(vma->fence);
20607f4dd379Sjsg 	GEM_BUG_ON(i915_vma_has_userfault(vma));
20617f4dd379Sjsg 
20621bb76ff1Sjsg 	/* Object backend must be async capable. */
20631bb76ff1Sjsg 	GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);
20641bb76ff1Sjsg 
20651bb76ff1Sjsg 	/* If vm is not open, unbind is a nop. */
20661bb76ff1Sjsg 	vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
20671bb76ff1Sjsg 		kref_read(&vma->vm->ref);
20681bb76ff1Sjsg 	vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
20691bb76ff1Sjsg 		vma->vm->skip_pte_rewrite;
20707f4dd379Sjsg 	trace_i915_vma_unbind(vma);
20711bb76ff1Sjsg 
20721bb76ff1Sjsg 	if (async)
20731bb76ff1Sjsg 		unbind_fence = i915_vma_resource_unbind(vma_res,
2074f005ef32Sjsg 							vma->obj->mm.tlb);
20751bb76ff1Sjsg 	else
20761bb76ff1Sjsg 		unbind_fence = i915_vma_resource_unbind(vma_res, NULL);
20771bb76ff1Sjsg 
20781bb76ff1Sjsg 	vma->resource = NULL;
20791bb76ff1Sjsg 
2080c349dbc7Sjsg 	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
2081c349dbc7Sjsg 		   &vma->flags);
20827f4dd379Sjsg 
2083c349dbc7Sjsg 	i915_vma_detach(vma);
20841bb76ff1Sjsg 
20851bb76ff1Sjsg 	if (!async) {
20861bb76ff1Sjsg 		if (unbind_fence) {
20871bb76ff1Sjsg 			dma_fence_wait(unbind_fence, false);
20881bb76ff1Sjsg 			dma_fence_put(unbind_fence);
20891bb76ff1Sjsg 			unbind_fence = NULL;
20901bb76ff1Sjsg 		}
2091f005ef32Sjsg 		vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
20921bb76ff1Sjsg 	}
20931bb76ff1Sjsg 
20941bb76ff1Sjsg 	/*
20951bb76ff1Sjsg 	 * Binding itself may not have completed until the unbind fence signals,
20961bb76ff1Sjsg 	 * so don't drop the pages until that happens, unless the resource is
20971bb76ff1Sjsg 	 * async_capable.
20981bb76ff1Sjsg 	 */
20991bb76ff1Sjsg 
2100c349dbc7Sjsg 	vma_unbind_pages(vma);
21011bb76ff1Sjsg 	return unbind_fence;
2102ad8b1aafSjsg }
2103ad8b1aafSjsg 
2104ad8b1aafSjsg int __i915_vma_unbind(struct i915_vma *vma)
2105ad8b1aafSjsg {
2106ad8b1aafSjsg 	int ret;
2107ad8b1aafSjsg 
2108ad8b1aafSjsg 	lockdep_assert_held(&vma->vm->mutex);
21091bb76ff1Sjsg 	assert_vma_held_evict(vma);
2110ad8b1aafSjsg 
2111ad8b1aafSjsg 	if (!drm_mm_node_allocated(&vma->node))
2112ad8b1aafSjsg 		return 0;
2113ad8b1aafSjsg 
2114ad8b1aafSjsg 	if (i915_vma_is_pinned(vma)) {
2115ad8b1aafSjsg 		vma_print_allocator(vma, "is pinned");
2116ad8b1aafSjsg 		return -EAGAIN;
2117ad8b1aafSjsg 	}
2118ad8b1aafSjsg 
2119ad8b1aafSjsg 	/*
2120ad8b1aafSjsg 	 * After confirming that no one else is pinning this vma, wait for
2121ad8b1aafSjsg 	 * any laggards who may have crept in during the wait (through
2122ad8b1aafSjsg 	 * a residual pin skipping the vm->mutex) to complete.
2123ad8b1aafSjsg 	 */
2124ad8b1aafSjsg 	ret = i915_vma_sync(vma);
2125ad8b1aafSjsg 	if (ret)
2126ad8b1aafSjsg 		return ret;
2127ad8b1aafSjsg 
2128ad8b1aafSjsg 	GEM_BUG_ON(i915_vma_is_active(vma));
21291bb76ff1Sjsg 	__i915_vma_evict(vma, false);
21307f4dd379Sjsg 
2131c349dbc7Sjsg 	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
21327f4dd379Sjsg 	return 0;
21337f4dd379Sjsg }
21347f4dd379Sjsg 
21351bb76ff1Sjsg static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
21361bb76ff1Sjsg {
21371bb76ff1Sjsg 	struct dma_fence *fence;
21381bb76ff1Sjsg 
21391bb76ff1Sjsg 	lockdep_assert_held(&vma->vm->mutex);
21401bb76ff1Sjsg 
21411bb76ff1Sjsg 	if (!drm_mm_node_allocated(&vma->node))
21421bb76ff1Sjsg 		return NULL;
21431bb76ff1Sjsg 
21441bb76ff1Sjsg 	if (i915_vma_is_pinned(vma) ||
21451bb76ff1Sjsg 	    &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
21461bb76ff1Sjsg 		return ERR_PTR(-EAGAIN);
21471bb76ff1Sjsg 
21481bb76ff1Sjsg 	/*
21491bb76ff1Sjsg 	 * We probably need to replace this with awaiting the fences of the
21501bb76ff1Sjsg 	 * object's dma_resv when the vma active goes away. When doing that
21511bb76ff1Sjsg 	 * we need to be careful to not add the vma_resource unbind fence
21521bb76ff1Sjsg 	 * immediately to the object's dma_resv, because then unbinding
21531bb76ff1Sjsg 	 * the next vma from the object, in case there are many, will
21541bb76ff1Sjsg 	 * actually await the unbinding of the previous vmas, which is
21551bb76ff1Sjsg 	 * undesirable.
21561bb76ff1Sjsg 	 */
21571bb76ff1Sjsg 	if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
21581bb76ff1Sjsg 				       I915_ACTIVE_AWAIT_EXCL |
21591bb76ff1Sjsg 				       I915_ACTIVE_AWAIT_ACTIVE) < 0) {
21601bb76ff1Sjsg 		return ERR_PTR(-EBUSY);
21611bb76ff1Sjsg 	}
21621bb76ff1Sjsg 
21631bb76ff1Sjsg 	fence = __i915_vma_evict(vma, true);
21641bb76ff1Sjsg 
21651bb76ff1Sjsg 	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
21661bb76ff1Sjsg 
21671bb76ff1Sjsg 	return fence;
21681bb76ff1Sjsg }
21691bb76ff1Sjsg 
2170c349dbc7Sjsg int i915_vma_unbind(struct i915_vma *vma)
2171c349dbc7Sjsg {
2172c349dbc7Sjsg 	struct i915_address_space *vm = vma->vm;
2173c349dbc7Sjsg 	intel_wakeref_t wakeref = 0;
2174c349dbc7Sjsg 	int err;
2175c349dbc7Sjsg 
21761bb76ff1Sjsg 	assert_object_held_shared(vma->obj);
21771bb76ff1Sjsg 
2178c349dbc7Sjsg 	/* Optimistic wait before taking the mutex */
2179c349dbc7Sjsg 	err = i915_vma_sync(vma);
2180c349dbc7Sjsg 	if (err)
2181ad8b1aafSjsg 		return err;
2182ad8b1aafSjsg 
2183ad8b1aafSjsg 	if (!drm_mm_node_allocated(&vma->node))
2184ad8b1aafSjsg 		return 0;
2185c349dbc7Sjsg 
2186c349dbc7Sjsg 	if (i915_vma_is_pinned(vma)) {
2187c349dbc7Sjsg 		vma_print_allocator(vma, "is pinned");
2188c349dbc7Sjsg 		return -EAGAIN;
2189c349dbc7Sjsg 	}
2190c349dbc7Sjsg 
2191c349dbc7Sjsg 	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
2192c349dbc7Sjsg 		/* XXX not always required: nop_clear_range */
2193c349dbc7Sjsg 		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
2194c349dbc7Sjsg 
2195ad8b1aafSjsg 	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
2196c349dbc7Sjsg 	if (err)
2197c349dbc7Sjsg 		goto out_rpm;
2198c349dbc7Sjsg 
2199c349dbc7Sjsg 	err = __i915_vma_unbind(vma);
2200c349dbc7Sjsg 	mutex_unlock(&vm->mutex);
2201c349dbc7Sjsg 
2202c349dbc7Sjsg out_rpm:
2203c349dbc7Sjsg 	if (wakeref)
2204c349dbc7Sjsg 		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
2205c349dbc7Sjsg 	return err;
2206c349dbc7Sjsg }
2207c349dbc7Sjsg 
22081bb76ff1Sjsg int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
22091bb76ff1Sjsg {
22101bb76ff1Sjsg 	struct drm_i915_gem_object *obj = vma->obj;
22111bb76ff1Sjsg 	struct i915_address_space *vm = vma->vm;
22121bb76ff1Sjsg 	intel_wakeref_t wakeref = 0;
22131bb76ff1Sjsg 	struct dma_fence *fence;
22141bb76ff1Sjsg 	int err;
22151bb76ff1Sjsg 
22161bb76ff1Sjsg 	/*
22171bb76ff1Sjsg 	 * We need the dma-resv lock since we add the
22181bb76ff1Sjsg 	 * unbind fence to the dma-resv object.
22191bb76ff1Sjsg 	 */
22201bb76ff1Sjsg 	assert_object_held(obj);
22211bb76ff1Sjsg 
22221bb76ff1Sjsg 	if (!drm_mm_node_allocated(&vma->node))
22231bb76ff1Sjsg 		return 0;
22241bb76ff1Sjsg 
22251bb76ff1Sjsg 	if (i915_vma_is_pinned(vma)) {
22261bb76ff1Sjsg 		vma_print_allocator(vma, "is pinned");
22271bb76ff1Sjsg 		return -EAGAIN;
22281bb76ff1Sjsg 	}
22291bb76ff1Sjsg 
22301bb76ff1Sjsg 	if (!obj->mm.rsgt)
22311bb76ff1Sjsg 		return -EBUSY;
22321bb76ff1Sjsg 
223341cecf2fSjsg 	err = dma_resv_reserve_fences(obj->base.resv, 2);
22341bb76ff1Sjsg 	if (err)
22351bb76ff1Sjsg 		return -EBUSY;
22361bb76ff1Sjsg 
22371bb76ff1Sjsg 	/*
22381bb76ff1Sjsg 	 * It would be great if we could grab this wakeref from the
22391bb76ff1Sjsg 	 * async unbind work if needed, but we can't because it uses
22401bb76ff1Sjsg 	 * kmalloc and it's in the dma-fence signalling critical path.
22411bb76ff1Sjsg 	 */
22421bb76ff1Sjsg 	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
22431bb76ff1Sjsg 		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
22441bb76ff1Sjsg 
22451bb76ff1Sjsg 	if (trylock_vm && !mutex_trylock(&vm->mutex)) {
22461bb76ff1Sjsg 		err = -EBUSY;
22471bb76ff1Sjsg 		goto out_rpm;
22481bb76ff1Sjsg 	} else if (!trylock_vm) {
22491bb76ff1Sjsg 		err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
22501bb76ff1Sjsg 		if (err)
22511bb76ff1Sjsg 			goto out_rpm;
22521bb76ff1Sjsg 	}
22531bb76ff1Sjsg 
22541bb76ff1Sjsg 	fence = __i915_vma_unbind_async(vma);
22551bb76ff1Sjsg 	mutex_unlock(&vm->mutex);
22561bb76ff1Sjsg 	if (IS_ERR_OR_NULL(fence)) {
22571bb76ff1Sjsg 		err = PTR_ERR_OR_ZERO(fence);
22581bb76ff1Sjsg 		goto out_rpm;
22591bb76ff1Sjsg 	}
22601bb76ff1Sjsg 
22611bb76ff1Sjsg 	dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ);
22621bb76ff1Sjsg 	dma_fence_put(fence);
22631bb76ff1Sjsg 
22641bb76ff1Sjsg out_rpm:
22651bb76ff1Sjsg 	if (wakeref)
22661bb76ff1Sjsg 		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
22671bb76ff1Sjsg 	return err;
22681bb76ff1Sjsg }
22691bb76ff1Sjsg 
22701bb76ff1Sjsg int i915_vma_unbind_unlocked(struct i915_vma *vma)
22711bb76ff1Sjsg {
22721bb76ff1Sjsg 	int err;
22731bb76ff1Sjsg 
22741bb76ff1Sjsg 	i915_gem_object_lock(vma->obj, NULL);
22751bb76ff1Sjsg 	err = i915_vma_unbind(vma);
22761bb76ff1Sjsg 	i915_gem_object_unlock(vma->obj);
22771bb76ff1Sjsg 
22781bb76ff1Sjsg 	return err;
22791bb76ff1Sjsg }
22801bb76ff1Sjsg 
2281c349dbc7Sjsg struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
2282c349dbc7Sjsg {
2283c349dbc7Sjsg 	i915_gem_object_make_unshrinkable(vma->obj);
2284c349dbc7Sjsg 	return vma;
2285c349dbc7Sjsg }
2286c349dbc7Sjsg 
2287c349dbc7Sjsg void i915_vma_make_shrinkable(struct i915_vma *vma)
2288c349dbc7Sjsg {
2289c349dbc7Sjsg 	i915_gem_object_make_shrinkable(vma->obj);
2290c349dbc7Sjsg }
2291c349dbc7Sjsg 
2292c349dbc7Sjsg void i915_vma_make_purgeable(struct i915_vma *vma)
2293c349dbc7Sjsg {
2294c349dbc7Sjsg 	i915_gem_object_make_purgeable(vma->obj);
2295c349dbc7Sjsg }
2296c349dbc7Sjsg 
22977f4dd379Sjsg #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
22987f4dd379Sjsg #include "selftests/i915_vma.c"
22997f4dd379Sjsg #endif
2300c349dbc7Sjsg 
23015ca02815Sjsg void i915_vma_module_exit(void)
2302c349dbc7Sjsg {
2303c349dbc7Sjsg #ifdef __linux__
23045ca02815Sjsg 	kmem_cache_destroy(slab_vmas);
2305c349dbc7Sjsg #else
23065ca02815Sjsg 	pool_destroy(&slab_vmas);
2307c349dbc7Sjsg #endif
2308c349dbc7Sjsg }
2309c349dbc7Sjsg 
23105ca02815Sjsg int __init i915_vma_module_init(void)
2311c349dbc7Sjsg {
2312c349dbc7Sjsg #ifdef __linux__
23135ca02815Sjsg 	slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
23145ca02815Sjsg 	if (!slab_vmas)
2315c349dbc7Sjsg 		return -ENOMEM;
2316c349dbc7Sjsg #else
23175ca02815Sjsg 	pool_init(&slab_vmas, sizeof(struct i915_vma),
23180f557061Sjsg 	    CACHELINESIZE, IPL_NONE, 0, "drmvma", NULL);
2319c349dbc7Sjsg #endif
2320c349dbc7Sjsg 
2321c349dbc7Sjsg 	return 0;
2322c349dbc7Sjsg }
2323