// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_bo *gbo = res->guest_memory_bo;
	struct rb_node **new = &gbo->res_tree.rb_node, *parent = NULL;

	dma_resv_assert_held(gbo->tbo.base.resv);
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;

	while (*new) {
		struct vmw_resource *this =
			container_of(*new, struct vmw_resource, mob_node);

		parent = *new;
		new = (res->guest_memory_offset < this->guest_memory_offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&res->mob_node, parent, new);
	rb_insert_color(&res->mob_node, &gbo->res_tree);

	vmw_bo_prio_add(gbo, res->used_prio);
}

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_bo *gbo = res->guest_memory_bo;

	dma_resv_assert_held(gbo->tbo.base.resv);
	if (vmw_resource_mob_attached(res)) {
		rb_erase(&res->mob_node, &gbo->res_tree);
		RB_CLEAR_NODE(&res->mob_node);
		vmw_bo_prio_del(gbo, res->used_prio);
	}
}

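/*
 * Usage sketch (illustrative, not part of the driver): attached
 * resources live in the bo's rb-tree, ordered by guest_memory_offset,
 * so both helpers must be called with the backing bo reserved:
 *
 *	ret = ttm_bo_reserve(&gbo->tbo, true, false, NULL);
 *	if (ret == 0) {
 *		vmw_resource_mob_attach(res);
 *		...
 *		vmw_resource_mob_detach(res);
 *		ttm_bo_unreserve(&gbo->tbo);
 *	}
 */
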
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

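/*
 * Usage sketch (illustrative): the "unless doomed" variant is meant for
 * lookups that can race with the final unreference:
 *
 *	struct vmw_resource *tmp =
 *		vmw_resource_reference_unless_doomed(res);
 *
 *	if (tmp) {
 *		... the refcount was non-zero, so a reference is held ...
 *		vmw_resource_unreference(&tmp);
 *	}
 */
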
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
	if (res->guest_memory_bo) {
		struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;

		ret = ttm_bo_reserve(bo, false, false, NULL);
		BUG_ON(ret);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->guest_memory_dirty = false;
		vmw_resource_mob_detach(res);
		if (res->dirty)
			res->func->dirty_free(res);
		if (res->coherent)
			vmw_bo_dirty_release(res->guest_memory_bo);
		ttm_bo_unreserve(bo);
		vmw_user_bo_unref(&res->guest_memory_bo);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

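/*
 * Editorial note on the pattern above: idr_preload() plus
 * idr_alloc(..., GFP_NOWAIT) preallocates memory in sleeping context so
 * the allocation itself can run under the resource_lock spinlock. A
 * hypothetical caller pairs the two id helpers like this:
 *
 *	ret = vmw_resource_alloc_id(res);
 *	if (ret)
 *		return ret;
 *	...
 *	vmw_resource_release_id(res);
 */
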
/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	RB_CLEAR_NODE(&res->mob_node);
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->guest_memory_bo = NULL;
	res->guest_memory_offset = 0;
	res->guest_memory_dirty = false;
	res->res_dirty = false;
	res->coherent = false;
	res->used_prio = 3;
	res->dirty = NULL;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

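/*
 * Embedding sketch (illustrative; the type and helper names below are
 * hypothetical): resource subtypes embed struct vmw_resource and hand
 * their destructor and function table to vmw_resource_init(), usually
 * deferring device id allocation until the first validation:
 *
 *	struct vmw_foo_resource {
 *		struct vmw_resource res;
 *		...
 *	};
 *
 *	ret = vmw_resource_init(dev_priv, &foo->res, true,
 *				vmw_foo_free, &vmw_foo_func);
 */
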
/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(!base))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/*
 * Helper function that looks up either a surface or a bo.
 *
 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct drm_file *filp,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_bo **out_buf)
{
	struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(filp, handle, out_buf);
	return ret;
}

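/*
 * Caller sketch (illustrative): both out-pointers must be NULL on entry
 * and exactly one of them is set on success:
 *
 *	struct vmw_surface *surf = NULL;
 *	struct vmw_bo *bo = NULL;
 *
 *	ret = vmw_user_lookup_handle(dev_priv, filp, handle, &surf, &bo);
 *	if (ret == 0 && surf)
 *		... the handle named a surface ...
 *	else if (ret == 0)
 *		... the handle named a buffer object ...
 */
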
/**
 * vmw_resource_buf_alloc - Allocate a guest memory buffer for a resource.
 *
 * @res:            The resource for which to allocate a guest memory buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size = PFN_ALIGN(res->guest_memory_size);
	struct vmw_bo *gbo;
	struct vmw_bo_params bo_params = {
		.domain = res->func->domain,
		.busy_domain = res->func->busy_domain,
		.bo_type = ttm_bo_type_device,
		.size = res->guest_memory_size,
		.pin = false
	};
	int ret;

	if (likely(res->guest_memory_bo)) {
		BUG_ON(res->guest_memory_bo->tbo.base.size < size);
		return 0;
	}

	ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->guest_memory_bo = gbo;

out_no_bo:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 * @dirtying:       Transfer dirty regions.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf,
				    bool dirtying)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_guest_memory && !vmw_resource_mob_attached(res) &&
	      val_buf->bo) ||
	     (!func->needs_guest_memory && val_buf->bo))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_guest_memory)
			vmw_resource_mob_attach(res);
	}

	/*
	 * Handle the case where the backup mob is marked coherent but
	 * the resource isn't.
	 */
	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
	    !res->coherent) {
		if (res->guest_memory_bo->dirty && !res->dirty) {
			ret = func->dirty_alloc(res);
			if (ret)
				return ret;
		} else if (!res->guest_memory_bo->dirty && res->dirty) {
			func->dirty_free(res);
		}
	}

	/*
	 * Transfer the dirty regions to the resource and update
	 * the resource.
	 */
	if (res->dirty) {
		if (dirtying && !res->res_dirty) {
			pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT;
			pgoff_t end = __KERNEL_DIV_ROUND_UP
				(res->guest_memory_offset + res->guest_memory_size,
				 PAGE_SIZE);

			vmw_bo_dirty_unmap(res->guest_memory_bo, start, end);
		}

		vmw_bo_dirty_transfer_to_res(res);
		return func->dirty_sync(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

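/*
 * Editorial summary of the state machine above, driven by the
 * vmw_res_func table:
 *
 *	if (res->id == -1)		func->create(res);
 *	if (a bind is needed)		func->bind(res, val_buf);
 *	if (coherency mismatch)		func->dirty_alloc()/dirty_free();
 *	if (res->dirty)			transfer regions, func->dirty_sync();
 */
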
/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_guest_memory: Guest memory buffer has been switched.
 * @new_guest_memory_bo: Pointer to the new guest memory buffer if command
 *                     submission switched buffers. May be NULL.
 * @new_guest_memory_offset: New guest memory buffer offset if
 *                     @switch_guest_memory is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_guest_memory,
			    struct vmw_bo *new_guest_memory_bo,
			    unsigned long new_guest_memory_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_guest_memory && new_guest_memory_bo != res->guest_memory_bo) {
		if (res->guest_memory_bo) {
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->guest_memory_bo);
			vmw_user_bo_unref(&res->guest_memory_bo);
		}

		if (new_guest_memory_bo) {
			res->guest_memory_bo = vmw_user_bo_ref(new_guest_memory_bo);

			/*
			 * The validation code should already have added a
			 * dirty tracker here.
			 */
			WARN_ON(res->coherent && !new_guest_memory_bo->dirty);

			vmw_resource_mob_attach(res);
		} else {
			res->guest_memory_bo = NULL;
		}
	} else if (switch_guest_memory && res->coherent) {
		vmw_bo_dirty_release(res->guest_memory_bo);
	}

	if (switch_guest_memory)
		res->guest_memory_offset = new_guest_memory_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}

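/*
 * Pairing sketch (simplified; vmw_resource_pin() below shows the full
 * sequence including buffer reservation): reserve takes the resource
 * off the LRU list, unreserve puts it back:
 *
 *	ret = vmw_resource_reserve(res, interruptible, false);
 *	if (ret == 0) {
 *		ret = vmw_resource_validate(res, interruptible, true);
 *		vmw_resource_unreserve(res, false, false, false, NULL, 0);
 *	}
 */
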
/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool guest_memory_dirty = false;
	int ret;

	if (unlikely(!res->guest_memory_bo)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->guest_memory_bo->tbo);
	val_buf->bo = &res->guest_memory_bo->tbo;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_guest_memory && !vmw_resource_mob_attached(res))
		return 0;

	guest_memory_dirty = res->guest_memory_dirty;
	vmw_bo_placement_set(res->guest_memory_bo, res->func->domain,
			     res->func->busy_domain);
	ret = ttm_bo_validate(&res->guest_memory_bo->tbo,
			      &res->guest_memory_bo->placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (guest_memory_dirty)
		vmw_user_bo_unref(&res->guest_memory_bo);

	return ret;
}

/*
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether any sleeps should be performed interruptible.
 * @no_guest_memory: Do not allocate a guest memory buffer if missing.
 *
 * This function takes the resource off the LRU list and makes sure
 * a guest memory buffer is present for guest-backed resources.
 * However, the buffer may not be bound to the resource at this
 * point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_guest_memory)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_guest_memory && !res->guest_memory_bo &&
	    !no_guest_memory) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a guest memory buffer "
				  "of size %lu bytes\n",
				  (unsigned long) res->guest_memory_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    guest memory buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Guest memory buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_guest_memory || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
	ret = func->destroy(res);
	res->guest_memory_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 * @dirtying: Pending GPU operation will dirty the resource
 *
 * On successful return, any guest memory buffer pointed to by
 * @res->guest_memory_bo will be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr,
			  bool dirtying)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->guest_memory_bo)
		val_buf.bo = &res->guest_memory_bo->tbo;
	do {
		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_user_bo_unref(&res->guest_memory_bo);
	}

	return 0;

out_no_validate:
	return ret;
}

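/*
 * Caller sketch (illustrative; vmw_resource_pin() below is the in-file
 * example): the guest memory bo, if present, is expected to be reserved
 * across validation:
 *
 *	ret = ttm_bo_reserve(&res->guest_memory_bo->tbo, intr, false, NULL);
 *	if (ret == 0) {
 *		ret = vmw_resource_validate(res, intr, true);
 *		ttm_bo_unreserve(&res->guest_memory_bo->tbo);
 *	}
 */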

/**
 * vmw_resource_unbind_list - Unbind all resources attached to a mob
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_bo *vbo)
{
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->tbo,
		.num_shared = 0
	};

	dma_resv_assert_held(vbo->tbo.base.resv);
	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
		struct rb_node *node = vbo->res_tree.rb_node;
		struct vmw_resource *res =
			container_of(node, struct vmw_resource, mob_node);

		if (!WARN_ON_ONCE(!res->func->unbind))
			(void) res->func->unbind(res, res->res_dirty, &val_buf);

		res->guest_memory_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->tbo, false, false);
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_bo *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The memory region @bo is moving from.
 * @new_mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_resource *old_mem,
			   struct ttm_resource *new_mem)
{
	struct vmw_bo *dx_query_mob;
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);

	mutex_lock(&dev_priv->binding_mutex);

	/* If BO is being moved from MOB to system memory */
	if (old_mem &&
	    new_mem->mem_type == TTM_PL_SYSTEM &&
	    old_mem->mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		dx_query_mob = to_vmw_bo(&bo->base);
		if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
			mutex_unlock(&dev_priv->binding_mutex);
			return;
		}

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_guest_memory;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Wait lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

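/*
 * Usage note (editorial): per the comment above, this is meant for the
 * hibernation sequence; once it returns, no guest-backed resources are
 * registered with the device, and each one is recreated lazily by
 * vmw_resource_validate() on its next use.
 */
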
947ba608091SLee Jones /*
948ed93394cSThomas Hellstrom  * vmw_resource_pin - Add a pin reference on a resource
949ed93394cSThomas Hellstrom  *
950ed93394cSThomas Hellstrom  * @res: The resource to add a pin reference on
951ed93394cSThomas Hellstrom  *
952ed93394cSThomas Hellstrom  * This function adds a pin reference, and if needed validates the resource.
953ed93394cSThomas Hellstrom  * Having a pin reference means that the resource can never be evicted, and
954ed93394cSThomas Hellstrom  * its id will never change as long as there is a pin reference.
955ed93394cSThomas Hellstrom  * This function returns 0 on success and a negative error code on failure.
956ed93394cSThomas Hellstrom  */
vmw_resource_pin(struct vmw_resource * res,bool interruptible)9571a4b172aSThomas Hellstrom int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
958ed93394cSThomas Hellstrom {
95919be5570SChristian König 	struct ttm_operation_ctx ctx = { interruptible, false };
960ed93394cSThomas Hellstrom 	struct vmw_private *dev_priv = res->dev_priv;
961ed93394cSThomas Hellstrom 	int ret;
962ed93394cSThomas Hellstrom 
963ed93394cSThomas Hellstrom 	mutex_lock(&dev_priv->cmdbuf_mutex);
9641a4b172aSThomas Hellstrom 	ret = vmw_resource_reserve(res, interruptible, false);
965ed93394cSThomas Hellstrom 	if (ret)
966ed93394cSThomas Hellstrom 		goto out_no_reserve;
967ed93394cSThomas Hellstrom 
968ed93394cSThomas Hellstrom 	if (res->pin_count == 0) {
96909881d29SZack Rusin 		struct vmw_bo *vbo = NULL;
970ed93394cSThomas Hellstrom 
971668b2066SZack Rusin 		if (res->guest_memory_bo) {
972668b2066SZack Rusin 			vbo = res->guest_memory_bo;
973ed93394cSThomas Hellstrom 
974668b2066SZack Rusin 			ret = ttm_bo_reserve(&vbo->tbo, interruptible, false, NULL);
97574231041SZack Rusin 			if (ret)
97674231041SZack Rusin 				goto out_no_validate;
			if (!vbo->tbo.pin_count) {
				vmw_bo_placement_set(vbo,
						     res->func->domain,
						     res->func->busy_domain);
				ret = ttm_bo_validate(&vbo->tbo,
						      &vbo->placement,
						      &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->tbo);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible, true);
		if (vbo)
			ttm_bo_unreserve(&vbo->tbo);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->guest_memory_bo) {
		struct vmw_bo *vbo = res->guest_memory_bo;

		(void) ttm_bo_reserve(&vbo->tbo, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->tbo);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
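
/*
 * Illustrative sketch (not part of this file): pin references are taken
 * around operations that need a stable resource id and must be balanced
 * with vmw_resource_unpin(). The helper name below is hypothetical.
 *
 *	static int example_use_pinned(struct vmw_resource *res)
 *	{
 *		int ret = vmw_resource_pin(res, true);
 *
 *		if (ret)
 *			return ret;
 *
 *		(use res->id here; it stays stable and res cannot be evicted)
 *
 *		vmw_resource_unpin(res);
 *		return 0;
 *	}
 */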

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}

/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end)
{
	if (res->dirty)
		res->func->dirty_range_add(res, start << PAGE_SHIFT,
					   end << PAGE_SHIFT);
}
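
/*
 * Illustrative sketch (not part of this file): the interface above works in
 * whole pages, so a byte range has to be rounded outwards before it is
 * handed to vmw_resource_dirty_update(). The helper below is hypothetical.
 *
 *	static void example_mark_bytes_dirty(struct vmw_resource *res,
 *					     unsigned long offset,
 *					     unsigned long bytes)
 *	{
 *		pgoff_t first = offset >> PAGE_SHIFT;
 *		pgoff_t last = (offset + bytes - 1) >> PAGE_SHIFT;
 *
 *		vmw_resource_dirty_update(res, first, last + 1);
 *	}
 */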

/**
 * vmw_resource_clean - Clean a dirty resource
 * @res: The resource to clean.
 *
 * If the resource is dirty, call its clean callback and, on success, clear
 * the res_dirty flag.
 *
 * Return: 0 on success, -EINVAL if the resource has no clean callback, or
 * the error code returned by the clean callback.
 */
int vmw_resource_clean(struct vmw_resource *res)
{
	int ret = 0;

	if (res->res_dirty) {
		if (!res->func->clean)
			return -EINVAL;

		ret = res->func->clean(res);
		if (ret)
			return ret;
		res->res_dirty = false;
	}
	return ret;
}

/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range (exclusive)
 * @num_prefault: Returns how many pages, including the first, have been
 * cleaned and are ok to prefault
 *
 * Return: 0 on success, or the error code returned by cleaning a resource.
 */
int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault)
{
	struct rb_node *cur = vbo->res_tree.rb_node;
	struct vmw_resource *found = NULL;
	unsigned long res_start = start << PAGE_SHIFT;
	unsigned long res_end = end << PAGE_SHIFT;
	unsigned long last_cleaned = 0;
	int ret;

	/*
	 * Find the resource with the lowest guest_memory_offset that
	 * intersects the range.
	 */
	while (cur) {
		struct vmw_resource *cur_res =
			container_of(cur, struct vmw_resource, mob_node);

		if (cur_res->guest_memory_offset >= res_end) {
			cur = cur->rb_left;
		} else if (cur_res->guest_memory_offset + cur_res->guest_memory_size <=
			   res_start) {
			cur = cur->rb_right;
		} else {
			found = cur_res;
			cur = cur->rb_left;
			/* Continue to look for resources with lower offsets */
		}
	}

	/*
	 * In order of increasing guest_memory_offset, clean dirty resources
	 * intersecting the range.
	 */
	while (found) {
		ret = vmw_resource_clean(found);
		if (ret)
			return ret;
		last_cleaned = found->guest_memory_offset + found->guest_memory_size;
		cur = rb_next(&found->mob_node);
		if (!cur)
			break;

		found = container_of(cur, struct vmw_resource, mob_node);
		if (found->guest_memory_offset >= res_end)
			break;
	}

	/*
	 * Set the number of pages allowed to be prefaulted and fence the
	 * buffer object.
	 */
	*num_prefault = 1;
	if (last_cleaned > res_start) {
		struct ttm_buffer_object *bo = &vbo->tbo;

		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
						      PAGE_SIZE);
		vmw_bo_fence_single(bo, NULL);
	}

	return 0;
}
1153