/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}
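
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a caller that may race with the final unreference uses the
 * _unless_doomed variant above and must handle a NULL return:
 *
 *	struct vmw_resource *tmp =
 *		vmw_resource_reference_unless_doomed(res);
 *
 *	if (tmp) {
 *		... use tmp ...
 *		vmw_resource_unreference(&tmp);
 *	}
 */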

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.shared = false;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	write_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and
 * set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
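
/*
 * Editor's note: idr_preload()/GFP_NOWAIT is used above because the
 * allocation proper happens under the resource_lock rwlock, where a
 * sleeping GFP_KERNEL allocation is not allowed. The preload fills the
 * per-cpu idr cache beforehand so the locked idr_alloc() can succeed
 * without sleeping.
 */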

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate - make a resource visible to lookups
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
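
/*
 * Example (editor's sketch): how a resource type implementation would
 * typically combine vmw_resource_init() and vmw_resource_activate().
 * "vmw_foo_free", "vmw_foo_hw_destroy" and "vmw_foo_func" are
 * hypothetical names, not symbols from this driver:
 *
 *	ret = vmw_resource_init(dev_priv, res, true, vmw_foo_free,
 *				&vmw_foo_func);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	vmw_resource_activate(res, vmw_foo_hw_destroy);
 */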

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
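
/*
 * Example (editor's sketch): looking up a refcounted resource from a
 * user-space handle and dropping the reference when done, using the
 * same converter this file passes in vmw_user_lookup_handle() below:
 *
 *	struct vmw_resource *res;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter, &res);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... use res ...
 *	vmw_resource_unreference(&res);
 */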

/**
 * vmw_user_lookup_handle - helper that looks up either a surface or a
 * dma buffer.
 *
 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 * On success, exactly one of them is set to a refcounted object.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
	return ret;
}

/**
 * Buffer management.
 */

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
				  bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}
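
/*
 * Worked example (editor's note, values assumed for illustration): for a
 * 64 KiB request on a 4 KiB-page, 64-bit system, num_pages = 16 and
 * page_array_size = ttm_round_pot(16 * sizeof(void *)) = 128 bytes. With
 * the vmw_dma_alloc_coherent map mode, another ttm_round_pot(16 *
 * sizeof(dma_addr_t)) = 128 bytes is added for the DMA address array.
 */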

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_dma_buffer_unmap(vmw_bo);
	kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	vmw_dma_buffer_unmap(&vmw_user_bo->dma);
	ttm_prime_object_kfree(vmw_user_bo, prime);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_dmabuf_destroy);

	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible, acc_size,
			  NULL, NULL, bo_free);
	return ret;
}
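
/*
 * Example (editor's sketch): allocating a kernel-internal dma buffer with
 * the default destructor; on init failure the embedded TTM bo is expected
 * to be destroyed through @bo_free, so no explicit kfree() is needed:
 *
 *	struct vmw_dma_buffer *vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
 *
 *	if (unlikely(!vbo))
 *		return -ENOMEM;
 *	ret = vmw_dmabuf_init(dev_priv, vbo, size, &vmw_vram_sys_placement,
 *			      true, &vmw_dmabuf_bo_free);
 */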

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
					    enum ttm_ref_type ref_type)
{
	struct vmw_user_dma_buffer *user_bo;

	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->dma.base);
		break;
	default:
		BUG();
	}
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 * @p_base: Pointer to where a refcounted struct ttm_base_object pointer
 * should be assigned, or NULL if the caller doesn't need it.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf,
			  struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(!user_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      (dev_priv->has_mob) ?
			      &vmw_sys_placement :
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_dmabuf_release,
				    &vmw_user_dmabuf_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.hash.key;

out_no_base_object:
	return ret;
}
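
/*
 * Example (editor's sketch): creating a user-visible, non-shareable
 * buffer and keeping only the handle, as the alloc and dumb-buffer
 * ioctls below do:
 *
 *	uint32_t handle;
 *	struct vmw_dma_buffer *dma_buf;
 *
 *	ret = vmw_user_dmabuf_alloc(dev_priv, tfile, size, false,
 *				    &handle, &dma_buf, NULL);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	vmw_dmabuf_unreference(&dma_buf);
 */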

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
					struct ttm_object_file *tfile,
					uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->dma.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
		long lret;

		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
							   nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed, false);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->dma.base);

	return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
					   struct ttm_object_file *tfile,
					   uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}
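
/*
 * Editor's note: the grab/release pair above is driven from user space
 * through the synccpu ioctl below. A grab with drm_vmw_synccpu_allow_cs
 * only idles the buffer, while a blocking grab also installs a
 * TTM_REF_SYNCCPU_WRITE ref that is dropped either by an explicit
 * release or automatically when the file is closed.
 */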

/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
					     &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
				       dma);
		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_dmabuf_unreference(&dma_buf);
		ttm_base_object_unref(&buffer_base);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
						      arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf,
				    NULL);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out,
			   struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		pr_err("Invalid buffer object handle 0x%08lx\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		pr_err("Invalid buffer object handle 0x%08lx\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
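
/*
 * Example (editor's sketch): callers passing a non-NULL @p_base must drop
 * both references when done, as the synccpu ioctl above does:
 *
 *	struct vmw_dma_buffer *dma_buf;
 *	struct ttm_base_object *base;
 *
 *	ret = vmw_user_dmabuf_lookup(tfile, handle, &dma_buf, &base);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... use dma_buf ...
 *	vmw_dmabuf_unreference(&dma_buf);
 *	ttm_base_object_unref(&base);
 */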

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf,
			      uint32_t *handle)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

	*handle = user_bo->prime.base.hash.key;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL, false);
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_dma_buffer *dma_buf;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    args->size, false, &args->handle,
				    &dma_buf, NULL);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}
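
/*
 * Example (editor's sketch of the user-space side): the two driver
 * callbacks above are reached through the generic DRM dumb-buffer
 * ioctls, roughly as follows:
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, map.offset);
 */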

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}
856c0951b79SThomas Hellstrom 
857c0951b79SThomas Hellstrom /**
858c0951b79SThomas Hellstrom  * vmw_resource_do_validate - Make a resource up-to-date and visible
859c0951b79SThomas Hellstrom  *                            to the device.
860c0951b79SThomas Hellstrom  *
861c0951b79SThomas Hellstrom  * @res:            The resource to make visible to the device.
862c0951b79SThomas Hellstrom  * @val_buf:        Information about a buffer possibly
863c0951b79SThomas Hellstrom  *                  containing backup data if a bind operation is needed.
864c0951b79SThomas Hellstrom  *
865c0951b79SThomas Hellstrom  * On hardware resource shortage, this function returns -EBUSY and
866c0951b79SThomas Hellstrom  * should be retried once resources have been freed up.
867c0951b79SThomas Hellstrom  */
868c0951b79SThomas Hellstrom static int vmw_resource_do_validate(struct vmw_resource *res,
869c0951b79SThomas Hellstrom 				    struct ttm_validate_buffer *val_buf)
870c0951b79SThomas Hellstrom {
871c0951b79SThomas Hellstrom 	int ret = 0;
872c0951b79SThomas Hellstrom 	const struct vmw_res_func *func = res->func;
873c0951b79SThomas Hellstrom 
874c0951b79SThomas Hellstrom 	if (unlikely(res->id == -1)) {
875c0951b79SThomas Hellstrom 		ret = func->create(res);
876c0951b79SThomas Hellstrom 		if (unlikely(ret != 0))
877c0951b79SThomas Hellstrom 			return ret;
878c0951b79SThomas Hellstrom 	}
879c0951b79SThomas Hellstrom 
880c0951b79SThomas Hellstrom 	if (func->bind &&
881c0951b79SThomas Hellstrom 	    ((func->needs_backup && list_empty(&res->mob_head) &&
882c0951b79SThomas Hellstrom 	      val_buf->bo != NULL) ||
883c0951b79SThomas Hellstrom 	     (!func->needs_backup && val_buf->bo != NULL))) {
884c0951b79SThomas Hellstrom 		ret = func->bind(res, val_buf);
885c0951b79SThomas Hellstrom 		if (unlikely(ret != 0))
886c0951b79SThomas Hellstrom 			goto out_bind_failed;
887c0951b79SThomas Hellstrom 		if (func->needs_backup)
888c0951b79SThomas Hellstrom 			list_add_tail(&res->mob_head, &res->backup->res_list);
889c0951b79SThomas Hellstrom 	}
890c0951b79SThomas Hellstrom 
891c0951b79SThomas Hellstrom 	/*
892c0951b79SThomas Hellstrom 	 * Only do this on write operations, and move to
893c0951b79SThomas Hellstrom 	 * vmw_resource_unreserve if it can be called after
894c0951b79SThomas Hellstrom 	 * backup buffers have been unreserved. Otherwise
895c0951b79SThomas Hellstrom 	 * sort out locking.
896c0951b79SThomas Hellstrom 	 */
897c0951b79SThomas Hellstrom 	res->res_dirty = true;
898c0951b79SThomas Hellstrom 
899c0951b79SThomas Hellstrom 	return 0;
900c0951b79SThomas Hellstrom 
901c0951b79SThomas Hellstrom out_bind_failed:
902c0951b79SThomas Hellstrom 	func->destroy(res);
903c0951b79SThomas Hellstrom 
904c0951b79SThomas Hellstrom 	return ret;
905c0951b79SThomas Hellstrom }
906c0951b79SThomas Hellstrom 
907c0951b79SThomas Hellstrom /**
908c0951b79SThomas Hellstrom  * vmw_resource_unreserve - Unreserve a resource previously reserved for
909c0951b79SThomas Hellstrom  * command submission.
910c0951b79SThomas Hellstrom  *
911c0951b79SThomas Hellstrom  * @res:               Pointer to the struct vmw_resource to unreserve.
912d80efd5cSThomas Hellstrom  * @switch_backup:     Backup buffer has been switched.
913c0951b79SThomas Hellstrom  * @new_backup:        Pointer to new backup buffer if command submission
914d80efd5cSThomas Hellstrom  *                     switched. May be NULL.
915d80efd5cSThomas Hellstrom  * @new_backup_offset: New backup offset if @switch_backup is true.
916c0951b79SThomas Hellstrom  *
917c0951b79SThomas Hellstrom  * Currently unreserving a resource means putting it back on the device's
918c0951b79SThomas Hellstrom  * resource lru list, so that it can be evicted if necessary.
919c0951b79SThomas Hellstrom  */
920c0951b79SThomas Hellstrom void vmw_resource_unreserve(struct vmw_resource *res,
921d80efd5cSThomas Hellstrom 			    bool switch_backup,
922c0951b79SThomas Hellstrom 			    struct vmw_dma_buffer *new_backup,
923c0951b79SThomas Hellstrom 			    unsigned long new_backup_offset)
924c0951b79SThomas Hellstrom {
925c0951b79SThomas Hellstrom 	struct vmw_private *dev_priv = res->dev_priv;
926c0951b79SThomas Hellstrom 
927c0951b79SThomas Hellstrom 	if (!list_empty(&res->lru_head))
928c0951b79SThomas Hellstrom 		return;
929c0951b79SThomas Hellstrom 
930d80efd5cSThomas Hellstrom 	if (switch_backup && new_backup != res->backup) {
931c0951b79SThomas Hellstrom 		if (res->backup) {
9328bd4ce56SMaarten Lankhorst 			lockdep_assert_held(&res->backup->base.resv->lock.base);
933c0951b79SThomas Hellstrom 			list_del_init(&res->mob_head);
934c0951b79SThomas Hellstrom 			vmw_dmabuf_unreference(&res->backup);
935c0951b79SThomas Hellstrom 		}
936c0951b79SThomas Hellstrom 
937d80efd5cSThomas Hellstrom 		if (new_backup) {
938c0951b79SThomas Hellstrom 			res->backup = vmw_dmabuf_reference(new_backup);
9398bd4ce56SMaarten Lankhorst 			lockdep_assert_held(&new_backup->base.resv->lock.base);
940c0951b79SThomas Hellstrom 			list_add_tail(&res->mob_head, &new_backup->res_list);
941d80efd5cSThomas Hellstrom 		} else {
942d80efd5cSThomas Hellstrom 			res->backup = NULL;
943c0951b79SThomas Hellstrom 		}
944d80efd5cSThomas Hellstrom 	}
945d80efd5cSThomas Hellstrom 	if (switch_backup)
946c0951b79SThomas Hellstrom 		res->backup_offset = new_backup_offset;
947c0951b79SThomas Hellstrom 
948ed93394cSThomas Hellstrom 	if (!res->func->may_evict || res->id == -1 || res->pin_count)
949c0951b79SThomas Hellstrom 		return;
950c0951b79SThomas Hellstrom 
951c0951b79SThomas Hellstrom 	write_lock(&dev_priv->resource_lock);
952c0951b79SThomas Hellstrom 	list_add_tail(&res->lru_head,
953c0951b79SThomas Hellstrom 		      &res->dev_priv->res_lru[res->func->res_type]);
954c0951b79SThomas Hellstrom 	write_unlock(&dev_priv->resource_lock);
955c0951b79SThomas Hellstrom }
956c0951b79SThomas Hellstrom 
957c0951b79SThomas Hellstrom /**
958c0951b79SThomas Hellstrom  * vmw_resource_check_buffer - Check whether a backup buffer is needed
959c0951b79SThomas Hellstrom  *                             for a resource and in that case, allocate
960c0951b79SThomas Hellstrom  *                             one, reserve and validate it.
961c0951b79SThomas Hellstrom  *
962c0951b79SThomas Hellstrom  * @res:            The resource for which to allocate a backup buffer.
963c0951b79SThomas Hellstrom  * @interruptible:  Whether any sleeps during allocation should be
964c0951b79SThomas Hellstrom  *                  performed while interruptible.
965c0951b79SThomas Hellstrom  * @val_buf:        On successful return contains data about the
966c0951b79SThomas Hellstrom  *                  reserved and validated backup buffer.
967c0951b79SThomas Hellstrom  */
968ecff665fSMaarten Lankhorst static int
969ecff665fSMaarten Lankhorst vmw_resource_check_buffer(struct vmw_resource *res,
970c0951b79SThomas Hellstrom 			  bool interruptible,
971c0951b79SThomas Hellstrom 			  struct ttm_validate_buffer *val_buf)
972c0951b79SThomas Hellstrom {
97319be5570SChristian König 	struct ttm_operation_ctx ctx = { true, false };
974c0951b79SThomas Hellstrom 	struct list_head val_list;
975c0951b79SThomas Hellstrom 	bool backup_dirty = false;
976c0951b79SThomas Hellstrom 	int ret;
977c0951b79SThomas Hellstrom 
978c0951b79SThomas Hellstrom 	if (unlikely(res->backup == NULL)) {
979c0951b79SThomas Hellstrom 		ret = vmw_resource_buf_alloc(res, interruptible);
980c0951b79SThomas Hellstrom 		if (unlikely(ret != 0))
981c0951b79SThomas Hellstrom 			return ret;
982c0951b79SThomas Hellstrom 	}
983c0951b79SThomas Hellstrom 
984c0951b79SThomas Hellstrom 	INIT_LIST_HEAD(&val_list);
985c0951b79SThomas Hellstrom 	val_buf->bo = ttm_bo_reference(&res->backup->base);
986ae9c0af2SChristian König 	val_buf->shared = false;
987c0951b79SThomas Hellstrom 	list_add_tail(&val_buf->head, &val_list);
988aa35071cSChristian König 	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
989c0951b79SThomas Hellstrom 	if (unlikely(ret != 0))
990c0951b79SThomas Hellstrom 		goto out_no_reserve;
991c0951b79SThomas Hellstrom 
992c0951b79SThomas Hellstrom 	if (res->func->needs_backup && list_empty(&res->mob_head))
993c0951b79SThomas Hellstrom 		return 0;
994c0951b79SThomas Hellstrom 
995c0951b79SThomas Hellstrom 	backup_dirty = res->backup_dirty;
996c0951b79SThomas Hellstrom 	ret = ttm_bo_validate(&res->backup->base,
997c0951b79SThomas Hellstrom 			      res->func->backup_placement,
99819be5570SChristian König 			      &ctx);
999c0951b79SThomas Hellstrom 
1000c0951b79SThomas Hellstrom 	if (unlikely(ret != 0))
1001c0951b79SThomas Hellstrom 		goto out_no_validate;
1002c0951b79SThomas Hellstrom 
1003c0951b79SThomas Hellstrom 	return 0;
1004c0951b79SThomas Hellstrom 
1005c0951b79SThomas Hellstrom out_no_validate:
1006ac49251bSThomas Hellstrom 	ttm_eu_backoff_reservation(NULL, &val_list);
1007c0951b79SThomas Hellstrom out_no_reserve:
1008c0951b79SThomas Hellstrom 	ttm_bo_unref(&val_buf->bo);
1009c0951b79SThomas Hellstrom 	if (backup_dirty)
1010c0951b79SThomas Hellstrom 		vmw_dmabuf_unreference(&res->backup);
1011c0951b79SThomas Hellstrom 
1012c0951b79SThomas Hellstrom 	return ret;
1013c0951b79SThomas Hellstrom }
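
/*
 * Usage sketch (hypothetical caller; mirrors vmw_resource_do_evict()
 * below). On success the backup buffer is left reserved and validated,
 * so the caller must release it with vmw_resource_backoff_reservation():
 *
 *	struct ttm_validate_buffer val_buf;
 *	int ret;
 *
 *	val_buf.bo = NULL;
 *	val_buf.shared = false;
 *	ret = vmw_resource_check_buffer(res, true, &val_buf);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... transfer resource contents to or from res->backup ...
 *	vmw_resource_backoff_reservation(&val_buf);
 */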
1014c0951b79SThomas Hellstrom 
1015c0951b79SThomas Hellstrom /**
1016c0951b79SThomas Hellstrom  * vmw_resource_reserve - Reserve a resource for command submission
1017c0951b79SThomas Hellstrom  *
1018c0951b79SThomas Hellstrom  * @res:            The resource to reserve.
 * @interruptible:  Whether any sleeps during backup buffer allocation
 *                  should be performed while interruptible.
 * @no_backup:      If true, do not allocate a backup buffer even for
 *                  resources that otherwise need one.
1019c0951b79SThomas Hellstrom  *
1020c0951b79SThomas Hellstrom  * This function takes the resource off the LRU list and makes sure
1021c0951b79SThomas Hellstrom  * a backup buffer is present for guest-backed resources. However,
1022c0951b79SThomas Hellstrom  * the buffer may not be bound to the resource at this point.
1023c0951b79SThomas Hellstrom  *
1024c0951b79SThomas Hellstrom  */
10251a4b172aSThomas Hellstrom int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
10261a4b172aSThomas Hellstrom 			 bool no_backup)
1027c0951b79SThomas Hellstrom {
1028c0951b79SThomas Hellstrom 	struct vmw_private *dev_priv = res->dev_priv;
1029c0951b79SThomas Hellstrom 	int ret;
1030c0951b79SThomas Hellstrom 
1031c0951b79SThomas Hellstrom 	write_lock(&dev_priv->resource_lock);
1032c0951b79SThomas Hellstrom 	list_del_init(&res->lru_head);
1033c0951b79SThomas Hellstrom 	write_unlock(&dev_priv->resource_lock);
1034c0951b79SThomas Hellstrom 
1035c0951b79SThomas Hellstrom 	if (res->func->needs_backup && res->backup == NULL &&
1036c0951b79SThomas Hellstrom 	    !no_backup) {
10371a4b172aSThomas Hellstrom 		ret = vmw_resource_buf_alloc(res, interruptible);
1038d80efd5cSThomas Hellstrom 		if (unlikely(ret != 0)) {
1039d80efd5cSThomas Hellstrom 			DRM_ERROR("Failed to allocate a backup buffer "
1040d80efd5cSThomas Hellstrom 				  "of size %lu bytes\n",
1041d80efd5cSThomas Hellstrom 				  (unsigned long) res->backup_size);
1042c0951b79SThomas Hellstrom 			return ret;
1043c0951b79SThomas Hellstrom 		}
1044d80efd5cSThomas Hellstrom 	}
1045c0951b79SThomas Hellstrom 
1046c0951b79SThomas Hellstrom 	return 0;
1047c0951b79SThomas Hellstrom }
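
/*
 * A minimal sketch of the reserve/validate/unreserve cycle, assuming the
 * caller holds cmdbuf_mutex the way vmw_resource_pin() below does:
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = vmw_resource_validate(res);
 *	vmw_resource_unreserve(res, false, NULL, 0UL);
 */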
1048c0951b79SThomas Hellstrom 
1049c0951b79SThomas Hellstrom /**
1050c0951b79SThomas Hellstrom  * vmw_resource_backoff_reservation - Unreserve and unreference a
1051c0951b79SThomas Hellstrom  *                                    backup buffer
1052c0951b79SThomas Hellstrom  *
1053c0951b79SThomas Hellstrom  * @val_buf:        Backup buffer information.
1054c0951b79SThomas Hellstrom  */
1055ecff665fSMaarten Lankhorst static void
1056ac49251bSThomas Hellstrom vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
1057c0951b79SThomas Hellstrom {
1058c0951b79SThomas Hellstrom 	struct list_head val_list;
1059c0951b79SThomas Hellstrom 
1060c0951b79SThomas Hellstrom 	if (likely(val_buf->bo == NULL))
1061c0951b79SThomas Hellstrom 		return;
1062c0951b79SThomas Hellstrom 
1063c0951b79SThomas Hellstrom 	INIT_LIST_HEAD(&val_list);
1064c0951b79SThomas Hellstrom 	list_add_tail(&val_buf->head, &val_list);
1065ac49251bSThomas Hellstrom 	ttm_eu_backoff_reservation(NULL, &val_list);
1066c0951b79SThomas Hellstrom 	ttm_bo_unref(&val_buf->bo);
1067c0951b79SThomas Hellstrom }
1068c0951b79SThomas Hellstrom 
1069c0951b79SThomas Hellstrom /**
1070c0951b79SThomas Hellstrom  * vmw_resource_do_evict - Evict a resource, and transfer its data
1071c0951b79SThomas Hellstrom  *                         to a backup buffer.
1072c0951b79SThomas Hellstrom  *
1073c0951b79SThomas Hellstrom  * @res:            The resource to evict.
1074ea029c28SThomas Hellstrom  * @interruptible:  Whether to wait interruptibly.
1075c0951b79SThomas Hellstrom  */
1076b9eb1a61SThomas Hellstrom static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
1077c0951b79SThomas Hellstrom {
1078c0951b79SThomas Hellstrom 	struct ttm_validate_buffer val_buf;
1079c0951b79SThomas Hellstrom 	const struct vmw_res_func *func = res->func;
1080c0951b79SThomas Hellstrom 	int ret;
1081c0951b79SThomas Hellstrom 
1082c0951b79SThomas Hellstrom 	BUG_ON(!func->may_evict);
1083c0951b79SThomas Hellstrom 
1084c0951b79SThomas Hellstrom 	val_buf.bo = NULL;
1085ae9c0af2SChristian König 	val_buf.shared = false;
1086ac49251bSThomas Hellstrom 	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
1087c0951b79SThomas Hellstrom 	if (unlikely(ret != 0))
1088c0951b79SThomas Hellstrom 		return ret;
1089c0951b79SThomas Hellstrom 
1090c0951b79SThomas Hellstrom 	if (unlikely(func->unbind != NULL &&
1091c0951b79SThomas Hellstrom 		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
1092c0951b79SThomas Hellstrom 		ret = func->unbind(res, res->res_dirty, &val_buf);
1093c0951b79SThomas Hellstrom 		if (unlikely(ret != 0))
1094c0951b79SThomas Hellstrom 			goto out_no_unbind;
1095c0951b79SThomas Hellstrom 		list_del_init(&res->mob_head);
1096c0951b79SThomas Hellstrom 	}
1097c0951b79SThomas Hellstrom 	ret = func->destroy(res);
1098c0951b79SThomas Hellstrom 	res->backup_dirty = true;
1099c0951b79SThomas Hellstrom 	res->res_dirty = false;
1100c0951b79SThomas Hellstrom out_no_unbind:
1101ac49251bSThomas Hellstrom 	vmw_resource_backoff_reservation(&val_buf);
1102c0951b79SThomas Hellstrom 
1103c0951b79SThomas Hellstrom 	return ret;
1104c0951b79SThomas Hellstrom }
1105c0951b79SThomas Hellstrom 
1106c0951b79SThomas Hellstrom 
1107c0951b79SThomas Hellstrom /**
1108c0951b79SThomas Hellstrom  * vmw_resource_validate - Make a resource up-to-date and visible
1109c0951b79SThomas Hellstrom  *                         to the device.
1110c0951b79SThomas Hellstrom  *
1111c0951b79SThomas Hellstrom  * @res:            The resource to make visible to the device.
1112c0951b79SThomas Hellstrom  *
1113c0951b79SThomas Hellstrom  * On successful return, any backup DMA buffer pointed to by @res->backup will
1114c0951b79SThomas Hellstrom  * be reserved and validated.
1115c0951b79SThomas Hellstrom  * On hardware resource shortage, this function will repeatedly evict
1116c0951b79SThomas Hellstrom  * resources of the same type until the validation succeeds.
1117c0951b79SThomas Hellstrom  */
1118c0951b79SThomas Hellstrom int vmw_resource_validate(struct vmw_resource *res)
1119c0951b79SThomas Hellstrom {
1120c0951b79SThomas Hellstrom 	int ret;
1121c0951b79SThomas Hellstrom 	struct vmw_resource *evict_res;
1122c0951b79SThomas Hellstrom 	struct vmw_private *dev_priv = res->dev_priv;
1123c0951b79SThomas Hellstrom 	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
1124c0951b79SThomas Hellstrom 	struct ttm_validate_buffer val_buf;
1125ea029c28SThomas Hellstrom 	unsigned err_count = 0;
1126c0951b79SThomas Hellstrom 
1127d80efd5cSThomas Hellstrom 	if (!res->func->create)
1128c0951b79SThomas Hellstrom 		return 0;
1129c0951b79SThomas Hellstrom 
1130c0951b79SThomas Hellstrom 	val_buf.bo = NULL;
1131ae9c0af2SChristian König 	val_buf.shared = false;
1132c0951b79SThomas Hellstrom 	if (res->backup)
1133c0951b79SThomas Hellstrom 		val_buf.bo = &res->backup->base;
1134c0951b79SThomas Hellstrom 	do {
1135c0951b79SThomas Hellstrom 		ret = vmw_resource_do_validate(res, &val_buf);
1136c0951b79SThomas Hellstrom 		if (likely(ret != -EBUSY))
1137c0951b79SThomas Hellstrom 			break;
1138c0951b79SThomas Hellstrom 
1139c0951b79SThomas Hellstrom 		write_lock(&dev_priv->resource_lock);
1140c0951b79SThomas Hellstrom 		if (list_empty(lru_list) || !res->func->may_evict) {
1141ea029c28SThomas Hellstrom 			DRM_ERROR("Out of device resources "
1142c0951b79SThomas Hellstrom 				  "for %s.\n", res->func->type_name);
1143c0951b79SThomas Hellstrom 			ret = -EBUSY;
1144c0951b79SThomas Hellstrom 			write_unlock(&dev_priv->resource_lock);
1145c0951b79SThomas Hellstrom 			break;
1146c0951b79SThomas Hellstrom 		}
1147c0951b79SThomas Hellstrom 
1148c0951b79SThomas Hellstrom 		evict_res = vmw_resource_reference
1149c0951b79SThomas Hellstrom 			(list_first_entry(lru_list, struct vmw_resource,
1150c0951b79SThomas Hellstrom 					  lru_head));
1151c0951b79SThomas Hellstrom 		list_del_init(&evict_res->lru_head);
1152c0951b79SThomas Hellstrom 
1153c0951b79SThomas Hellstrom 		write_unlock(&dev_priv->resource_lock);
1154ea029c28SThomas Hellstrom 
1155ea029c28SThomas Hellstrom 		ret = vmw_resource_do_evict(evict_res, true);
1156ea029c28SThomas Hellstrom 		if (unlikely(ret != 0)) {
1157ea029c28SThomas Hellstrom 			write_lock(&dev_priv->resource_lock);
1158ea029c28SThomas Hellstrom 			list_add_tail(&evict_res->lru_head, lru_list);
1159ea029c28SThomas Hellstrom 			write_unlock(&dev_priv->resource_lock);
1160ea029c28SThomas Hellstrom 			if (ret == -ERESTARTSYS ||
1161ea029c28SThomas Hellstrom 			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
1162ea029c28SThomas Hellstrom 				vmw_resource_unreference(&evict_res);
1163ea029c28SThomas Hellstrom 				goto out_no_validate;
1164ea029c28SThomas Hellstrom 			}
1165ea029c28SThomas Hellstrom 		}
1166ea029c28SThomas Hellstrom 
1167c0951b79SThomas Hellstrom 		vmw_resource_unreference(&evict_res);
1168c0951b79SThomas Hellstrom 	} while (1);
1169c0951b79SThomas Hellstrom 
1170c0951b79SThomas Hellstrom 	if (unlikely(ret != 0))
1171c0951b79SThomas Hellstrom 		goto out_no_validate;
1172c0951b79SThomas Hellstrom 	else if (!res->func->needs_backup && res->backup) {
1173c0951b79SThomas Hellstrom 		list_del_init(&res->mob_head);
1174c0951b79SThomas Hellstrom 		vmw_dmabuf_unreference(&res->backup);
1175c0951b79SThomas Hellstrom 	}
1176c0951b79SThomas Hellstrom 
1177c0951b79SThomas Hellstrom 	return 0;
1178c0951b79SThomas Hellstrom 
1179c0951b79SThomas Hellstrom out_no_validate:
1180c0951b79SThomas Hellstrom 	return ret;
1181c0951b79SThomas Hellstrom }
1182c0951b79SThomas Hellstrom 
1183c0951b79SThomas Hellstrom /**
1184c0951b79SThomas Hellstrom  * vmw_fence_single_bo - Utility function to fence a single TTM buffer
1185c0951b79SThomas Hellstrom  *                       object without unreserving it.
1186c0951b79SThomas Hellstrom  *
1187c0951b79SThomas Hellstrom  * @bo:             Pointer to the struct ttm_buffer_object to fence.
1188c0951b79SThomas Hellstrom  * @fence:          Pointer to the fence. If NULL, this function will
1189c0951b79SThomas Hellstrom  *                  insert a fence into the command stream.
1190c0951b79SThomas Hellstrom  *
1191c0951b79SThomas Hellstrom  * Contrary to the ttm_eu version of this function, it takes only
1192c0951b79SThomas Hellstrom  * a single buffer object instead of a list, and it also doesn't
1193c0951b79SThomas Hellstrom  * unreserve the buffer object, which needs to be done separately.
1194c0951b79SThomas Hellstrom  */
1195c0951b79SThomas Hellstrom void vmw_fence_single_bo(struct ttm_buffer_object *bo,
1196c0951b79SThomas Hellstrom 			 struct vmw_fence_obj *fence)
1197c0951b79SThomas Hellstrom {
1198c0951b79SThomas Hellstrom 	struct ttm_bo_device *bdev = bo->bdev;
1200c0951b79SThomas Hellstrom 	struct vmw_private *dev_priv =
1201c0951b79SThomas Hellstrom 		container_of(bdev, struct vmw_private, bdev);
1202c0951b79SThomas Hellstrom 
12032298e804SMaarten Lankhorst 	if (fence == NULL) {
1204c0951b79SThomas Hellstrom 		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
12052298e804SMaarten Lankhorst 		reservation_object_add_excl_fence(bo->resv, &fence->base);
1206f54d1867SChris Wilson 		dma_fence_put(&fence->base);
1207f2c24b83SMaarten Lankhorst 	} else {
1208f2c24b83SMaarten Lankhorst 		reservation_object_add_excl_fence(bo->resv, &fence->base);
	}
1209c0951b79SThomas Hellstrom }
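
/*
 * Caller sketch (hypothetical; vmw_query_move_notify() below follows the
 * same pattern): create a fence, attach the reserved buffer object to it
 * and drop the local fence reference. Unreserving @bo remains the
 * caller's responsibility:
 *
 *	struct vmw_fence_obj *fence;
 *
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *	vmw_fence_single_bo(bo, fence);
 *	if (fence != NULL)
 *		vmw_fence_obj_unreference(&fence);
 */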
1210c0951b79SThomas Hellstrom 
1211c0951b79SThomas Hellstrom /**
1212c0951b79SThomas Hellstrom  * vmw_resource_move_notify - TTM move_notify_callback
1213c0951b79SThomas Hellstrom  *
1214c0951b79SThomas Hellstrom  * @bo: The TTM buffer object about to move.
1215fd11a3c0SSinclair Yeh  * @mem: The struct ttm_mem_reg indicating the memory
1216c0951b79SThomas Hellstrom  *       region to which the move is taking place.
1217c0951b79SThomas Hellstrom  *
1218f468911fSThomas Hellstrom  * Evicts the Guest Backed hardware resource if the backup
1219f468911fSThomas Hellstrom  * buffer is being moved out of MOB memory.
1220f468911fSThomas Hellstrom  * Note that this function should not race with the resource
1221f468911fSThomas Hellstrom  * validation code as long as it accesses only members of struct
1222f468911fSThomas Hellstrom  * resource that remain static while bo::res is !NULL and
1223f468911fSThomas Hellstrom  * while we have @bo reserved. struct resource::backup is *not* a
1224f468911fSThomas Hellstrom  * static member. The resource validation code will take care
1225f468911fSThomas Hellstrom  * to set @bo::res to NULL, while having @bo reserved when the
1226f468911fSThomas Hellstrom  * buffer is no longer bound to the resource, so @bo::res can be
1227f468911fSThomas Hellstrom  * used to determine whether there is a need to unbind and whether
1228f468911fSThomas Hellstrom  * it is safe to unbind.
1229c0951b79SThomas Hellstrom  */
1230c0951b79SThomas Hellstrom void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1231c0951b79SThomas Hellstrom 			      struct ttm_mem_reg *mem)
1232c0951b79SThomas Hellstrom {
1233f468911fSThomas Hellstrom 	struct vmw_dma_buffer *dma_buf;
1234f468911fSThomas Hellstrom 
1235f468911fSThomas Hellstrom 	if (mem == NULL)
1236f468911fSThomas Hellstrom 		return;
1237f468911fSThomas Hellstrom 
1238f468911fSThomas Hellstrom 	if (bo->destroy != vmw_dmabuf_bo_free &&
1239f468911fSThomas Hellstrom 	    bo->destroy != vmw_user_dmabuf_destroy)
1240f468911fSThomas Hellstrom 		return;
1241f468911fSThomas Hellstrom 
1242f468911fSThomas Hellstrom 	dma_buf = container_of(bo, struct vmw_dma_buffer, base);
1243f468911fSThomas Hellstrom 
1244*bf833fd3SThomas Hellstrom 	/*
1245*bf833fd3SThomas Hellstrom 	 * Kill any cached kernel maps before move. An optimization could
1246*bf833fd3SThomas Hellstrom 	 * be to do this iff source or destination memory type is VRAM.
1247*bf833fd3SThomas Hellstrom 	 */
1248*bf833fd3SThomas Hellstrom 	vmw_dma_buffer_unmap(dma_buf);
1249*bf833fd3SThomas Hellstrom 
1250f468911fSThomas Hellstrom 	if (mem->mem_type != VMW_PL_MOB) {
1251f468911fSThomas Hellstrom 		struct vmw_resource *res, *n;
1252f468911fSThomas Hellstrom 		struct ttm_validate_buffer val_buf;
1253f468911fSThomas Hellstrom 
1254f468911fSThomas Hellstrom 		val_buf.bo = bo;
1255ae9c0af2SChristian König 		val_buf.shared = false;
1256f468911fSThomas Hellstrom 
1257f468911fSThomas Hellstrom 		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
1258f468911fSThomas Hellstrom 
1259f468911fSThomas Hellstrom 			if (unlikely(res->func->unbind == NULL))
1260f468911fSThomas Hellstrom 				continue;
1261f468911fSThomas Hellstrom 
1262f468911fSThomas Hellstrom 			(void) res->func->unbind(res, true, &val_buf);
1263f468911fSThomas Hellstrom 			res->backup_dirty = true;
1264f468911fSThomas Hellstrom 			res->res_dirty = false;
1265f468911fSThomas Hellstrom 			list_del_init(&res->mob_head);
1266f468911fSThomas Hellstrom 		}
1267f468911fSThomas Hellstrom 
12688aa6d4fcSChristian König 		(void) ttm_bo_wait(bo, false, false);
1269f468911fSThomas Hellstrom 	}
1270c0951b79SThomas Hellstrom }
1271c0951b79SThomas Hellstrom 
1273*bf833fd3SThomas Hellstrom /**
1274*bf833fd3SThomas Hellstrom  * vmw_resource_swap_notify - swapout notify callback.
1275*bf833fd3SThomas Hellstrom  *
1276*bf833fd3SThomas Hellstrom  * @bo: The buffer object to be swapped out.
1277*bf833fd3SThomas Hellstrom  */
1278*bf833fd3SThomas Hellstrom void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
1279*bf833fd3SThomas Hellstrom {
1280*bf833fd3SThomas Hellstrom 	if (bo->destroy != vmw_dmabuf_bo_free &&
1281*bf833fd3SThomas Hellstrom 	    bo->destroy != vmw_user_dmabuf_destroy)
1282*bf833fd3SThomas Hellstrom 		return;
1283*bf833fd3SThomas Hellstrom 
1284*bf833fd3SThomas Hellstrom 	/* Kill any cached kernel maps before swapout */
1285*bf833fd3SThomas Hellstrom 	vmw_dma_buffer_unmap(vmw_dma_buffer(bo));
1286*bf833fd3SThomas Hellstrom }
1287*bf833fd3SThomas Hellstrom 
1289fd11a3c0SSinclair Yeh /**
1290fd11a3c0SSinclair Yeh  * vmw_query_readback_all - Read back cached query states
1291fd11a3c0SSinclair Yeh  *
1292fd11a3c0SSinclair Yeh  * @dx_query_mob: Buffer containing the DX query MOB
1293fd11a3c0SSinclair Yeh  *
1294fd11a3c0SSinclair Yeh  * Read back cached states from the device if they exist.  This function
1295fd11a3c0SSinclair Yeh  * assumes that binding_mutex is held.
1296fd11a3c0SSinclair Yeh  */
1297fd11a3c0SSinclair Yeh int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
1298fd11a3c0SSinclair Yeh {
1299fd11a3c0SSinclair Yeh 	struct vmw_resource *dx_query_ctx;
1300fd11a3c0SSinclair Yeh 	struct vmw_private *dev_priv;
1301fd11a3c0SSinclair Yeh 	struct {
1302fd11a3c0SSinclair Yeh 		SVGA3dCmdHeader header;
1303fd11a3c0SSinclair Yeh 		SVGA3dCmdDXReadbackAllQuery body;
1304fd11a3c0SSinclair Yeh 	} *cmd;
1305fd11a3c0SSinclair Yeh 
1307fd11a3c0SSinclair Yeh 	/* No query bound, so do nothing */
1308fd11a3c0SSinclair Yeh 	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
1309fd11a3c0SSinclair Yeh 		return 0;
1310fd11a3c0SSinclair Yeh 
1311fd11a3c0SSinclair Yeh 	dx_query_ctx = dx_query_mob->dx_query_ctx;
1312fd11a3c0SSinclair Yeh 	dev_priv     = dx_query_ctx->dev_priv;
1313fd11a3c0SSinclair Yeh 
1314fd11a3c0SSinclair Yeh 	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
1315fd11a3c0SSinclair Yeh 	if (unlikely(cmd == NULL)) {
1316fd11a3c0SSinclair Yeh 		DRM_ERROR("Failed reserving FIFO space for "
1317fd11a3c0SSinclair Yeh 			  "query MOB read back.\n");
1318fd11a3c0SSinclair Yeh 		return -ENOMEM;
1319fd11a3c0SSinclair Yeh 	}
1320fd11a3c0SSinclair Yeh 
1321fd11a3c0SSinclair Yeh 	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
1322fd11a3c0SSinclair Yeh 	cmd->header.size = sizeof(cmd->body);
1323fd11a3c0SSinclair Yeh 	cmd->body.cid    = dx_query_ctx->id;
1324fd11a3c0SSinclair Yeh 
1325fd11a3c0SSinclair Yeh 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
1326fd11a3c0SSinclair Yeh 
1327fd11a3c0SSinclair Yeh 	/* Triggers a rebind the next time the affected context is bound */
1328fd11a3c0SSinclair Yeh 	dx_query_mob->dx_query_ctx = NULL;
1329fd11a3c0SSinclair Yeh 
1330fd11a3c0SSinclair Yeh 	return 0;
1331fd11a3c0SSinclair Yeh }
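
/*
 * Sketch of the expected calling convention, assuming the caller takes
 * binding_mutex as vmw_query_move_notify() below does:
 *
 *	mutex_lock(&dev_priv->binding_mutex);
 *	(void) vmw_query_readback_all(dx_query_mob);
 *	mutex_unlock(&dev_priv->binding_mutex);
 */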
1332fd11a3c0SSinclair Yeh 
1335fd11a3c0SSinclair Yeh /**
1336fd11a3c0SSinclair Yeh  * vmw_query_move_notify - Read back cached query states
1337fd11a3c0SSinclair Yeh  *
1338fd11a3c0SSinclair Yeh  * @bo: The TTM buffer object about to move.
1339fd11a3c0SSinclair Yeh  * @mem: The memory region @bo is moving to.
1340fd11a3c0SSinclair Yeh  *
1341fd11a3c0SSinclair Yeh  * Called before the query MOB is swapped out to read back cached query
1342fd11a3c0SSinclair Yeh  * states from the device.
1343fd11a3c0SSinclair Yeh  */
1344fd11a3c0SSinclair Yeh void vmw_query_move_notify(struct ttm_buffer_object *bo,
1345fd11a3c0SSinclair Yeh 			   struct ttm_mem_reg *mem)
1346fd11a3c0SSinclair Yeh {
1347fd11a3c0SSinclair Yeh 	struct vmw_dma_buffer *dx_query_mob;
1348fd11a3c0SSinclair Yeh 	struct ttm_bo_device *bdev = bo->bdev;
1349fd11a3c0SSinclair Yeh 	struct vmw_private *dev_priv;
1350fd11a3c0SSinclair Yeh 
1352fd11a3c0SSinclair Yeh 	dev_priv = container_of(bdev, struct vmw_private, bdev);
1353fd11a3c0SSinclair Yeh 
1354fd11a3c0SSinclair Yeh 	mutex_lock(&dev_priv->binding_mutex);
1355fd11a3c0SSinclair Yeh 
1356fd11a3c0SSinclair Yeh 	dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
1357fd11a3c0SSinclair Yeh 	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
1358fd11a3c0SSinclair Yeh 		mutex_unlock(&dev_priv->binding_mutex);
1359fd11a3c0SSinclair Yeh 		return;
1360fd11a3c0SSinclair Yeh 	}
1361fd11a3c0SSinclair Yeh 
1362fd11a3c0SSinclair Yeh 	/* If BO is being moved from MOB to system memory */
1363fd11a3c0SSinclair Yeh 	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
1364fd11a3c0SSinclair Yeh 		struct vmw_fence_obj *fence;
1365fd11a3c0SSinclair Yeh 
1366fd11a3c0SSinclair Yeh 		(void) vmw_query_readback_all(dx_query_mob);
1367fd11a3c0SSinclair Yeh 		mutex_unlock(&dev_priv->binding_mutex);
1368fd11a3c0SSinclair Yeh 
1369fd11a3c0SSinclair Yeh 		/* Create a fence and attach the BO to it */
1370fd11a3c0SSinclair Yeh 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1371fd11a3c0SSinclair Yeh 		vmw_fence_single_bo(bo, fence);
1372fd11a3c0SSinclair Yeh 
1373fd11a3c0SSinclair Yeh 		if (fence != NULL)
1374fd11a3c0SSinclair Yeh 			vmw_fence_obj_unreference(&fence);
1375fd11a3c0SSinclair Yeh 
13768aa6d4fcSChristian König 		(void) ttm_bo_wait(bo, false, false);
1377fd11a3c0SSinclair Yeh 	} else {
1378fd11a3c0SSinclair Yeh 		mutex_unlock(&dev_priv->binding_mutex);
	}
1380fd11a3c0SSinclair Yeh }
1381fd11a3c0SSinclair Yeh 
1382c0951b79SThomas Hellstrom /**
1383c0951b79SThomas Hellstrom  * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
1384c0951b79SThomas Hellstrom  *
1385c0951b79SThomas Hellstrom  * @res:            The resource being queried.
1386c0951b79SThomas Hellstrom  */
1387c0951b79SThomas Hellstrom bool vmw_resource_needs_backup(const struct vmw_resource *res)
1388c0951b79SThomas Hellstrom {
1389c0951b79SThomas Hellstrom 	return res->func->needs_backup;
1390c0951b79SThomas Hellstrom }
1391c0951b79SThomas Hellstrom 
1392c0951b79SThomas Hellstrom /**
1393c0951b79SThomas Hellstrom  * vmw_resource_evict_type - Evict all resources of a specific type
1394c0951b79SThomas Hellstrom  *
1395c0951b79SThomas Hellstrom  * @dev_priv:       Pointer to a device private struct
1396c0951b79SThomas Hellstrom  * @type:           The resource type to evict
1397c0951b79SThomas Hellstrom  *
1398c0951b79SThomas Hellstrom  * To avoid thrashing or starvation, or as part of the hibernation
1399ea029c28SThomas Hellstrom  * sequence, try to evict all evictable resources of a specific type.
1400c0951b79SThomas Hellstrom  */
1401c0951b79SThomas Hellstrom static void vmw_resource_evict_type(struct vmw_private *dev_priv,
1402c0951b79SThomas Hellstrom 				    enum vmw_res_type type)
1403c0951b79SThomas Hellstrom {
1404c0951b79SThomas Hellstrom 	struct list_head *lru_list = &dev_priv->res_lru[type];
1405c0951b79SThomas Hellstrom 	struct vmw_resource *evict_res;
1406ea029c28SThomas Hellstrom 	unsigned err_count = 0;
1407ea029c28SThomas Hellstrom 	int ret;
1408c0951b79SThomas Hellstrom 
1409c0951b79SThomas Hellstrom 	do {
1410c0951b79SThomas Hellstrom 		write_lock(&dev_priv->resource_lock);
1411c0951b79SThomas Hellstrom 
1412c0951b79SThomas Hellstrom 		if (list_empty(lru_list))
1413c0951b79SThomas Hellstrom 			goto out_unlock;
1414c0951b79SThomas Hellstrom 
1415c0951b79SThomas Hellstrom 		evict_res = vmw_resource_reference(
1416c0951b79SThomas Hellstrom 			list_first_entry(lru_list, struct vmw_resource,
1417c0951b79SThomas Hellstrom 					 lru_head));
1418c0951b79SThomas Hellstrom 		list_del_init(&evict_res->lru_head);
1419c0951b79SThomas Hellstrom 		write_unlock(&dev_priv->resource_lock);
1420ea029c28SThomas Hellstrom 
1421ea029c28SThomas Hellstrom 		ret = vmw_resource_do_evict(evict_res, false);
1422ea029c28SThomas Hellstrom 		if (unlikely(ret != 0)) {
1423ea029c28SThomas Hellstrom 			write_lock(&dev_priv->resource_lock);
1424ea029c28SThomas Hellstrom 			list_add_tail(&evict_res->lru_head, lru_list);
1425ea029c28SThomas Hellstrom 			write_unlock(&dev_priv->resource_lock);
1426ea029c28SThomas Hellstrom 			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
1427ea029c28SThomas Hellstrom 				vmw_resource_unreference(&evict_res);
1428ea029c28SThomas Hellstrom 				return;
1429ea029c28SThomas Hellstrom 			}
1430ea029c28SThomas Hellstrom 		}
1431ea029c28SThomas Hellstrom 
1432c0951b79SThomas Hellstrom 		vmw_resource_unreference(&evict_res);
1433c0951b79SThomas Hellstrom 	} while (1);
1434c0951b79SThomas Hellstrom 
1435c0951b79SThomas Hellstrom out_unlock:
1436c0951b79SThomas Hellstrom 	write_unlock(&dev_priv->resource_lock);
1437c0951b79SThomas Hellstrom }
1438c0951b79SThomas Hellstrom 
1439c0951b79SThomas Hellstrom /**
1440c0951b79SThomas Hellstrom  * vmw_resource_evict_all - Evict all evictable resources
1441c0951b79SThomas Hellstrom  *
1442c0951b79SThomas Hellstrom  * @dev_priv:       Pointer to a device private struct
1443c0951b79SThomas Hellstrom  *
1444c0951b79SThomas Hellstrom  * To avoid thrashing or starvation, or as part of the hibernation
1445c0951b79SThomas Hellstrom  * sequence, evict all evictable resources. In particular this means that all
1446c0951b79SThomas Hellstrom  * guest-backed resources that are registered with the device are
1447c0951b79SThomas Hellstrom  * evicted and the OTable becomes clean.
1448c0951b79SThomas Hellstrom  */
1449c0951b79SThomas Hellstrom void vmw_resource_evict_all(struct vmw_private *dev_priv)
1450c0951b79SThomas Hellstrom {
1451c0951b79SThomas Hellstrom 	enum vmw_res_type type;
1452c0951b79SThomas Hellstrom 
1453c0951b79SThomas Hellstrom 	mutex_lock(&dev_priv->cmdbuf_mutex);
1454c0951b79SThomas Hellstrom 
1455c0951b79SThomas Hellstrom 	for (type = 0; type < vmw_res_max; ++type)
1456c0951b79SThomas Hellstrom 		vmw_resource_evict_type(dev_priv, type);
1457c0951b79SThomas Hellstrom 
1458c0951b79SThomas Hellstrom 	mutex_unlock(&dev_priv->cmdbuf_mutex);
1459c0951b79SThomas Hellstrom }
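
/*
 * Hypothetical hibernation-path sketch (the actual call site lives
 * outside this file): after the call, all guest-backed resource
 * contents reside in their backup buffers and the OTables are clean.
 *
 *	vmw_resource_evict_all(dev_priv);
 */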
1460ed93394cSThomas Hellstrom 
1461ed93394cSThomas Hellstrom /**
1462ed93394cSThomas Hellstrom  * vmw_resource_pin - Add a pin reference on a resource
1463ed93394cSThomas Hellstrom  *
1464ed93394cSThomas Hellstrom  * @res: The resource to add a pin reference on
 * @interruptible: Whether to sleep interruptibly while pinning
1465ed93394cSThomas Hellstrom  *
1466ed93394cSThomas Hellstrom  * This function adds a pin reference, and if needed validates the resource.
1467ed93394cSThomas Hellstrom  * Having a pin reference means that the resource can never be evicted, and
1468ed93394cSThomas Hellstrom  * its id will never change as long as there is a pin reference.
1469ed93394cSThomas Hellstrom  * This function returns 0 on success and a negative error code on failure.
1470ed93394cSThomas Hellstrom  */
14711a4b172aSThomas Hellstrom int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
1472ed93394cSThomas Hellstrom {
147319be5570SChristian König 	struct ttm_operation_ctx ctx = { interruptible, false };
1474ed93394cSThomas Hellstrom 	struct vmw_private *dev_priv = res->dev_priv;
1475ed93394cSThomas Hellstrom 	int ret;
1476ed93394cSThomas Hellstrom 
14771a4b172aSThomas Hellstrom 	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
1478ed93394cSThomas Hellstrom 	mutex_lock(&dev_priv->cmdbuf_mutex);
14791a4b172aSThomas Hellstrom 	ret = vmw_resource_reserve(res, interruptible, false);
1480ed93394cSThomas Hellstrom 	if (ret)
1481ed93394cSThomas Hellstrom 		goto out_no_reserve;
1482ed93394cSThomas Hellstrom 
1483ed93394cSThomas Hellstrom 	if (res->pin_count == 0) {
1484459d0fa7SThomas Hellstrom 		struct vmw_dma_buffer *vbo = NULL;
1485ed93394cSThomas Hellstrom 
1486ed93394cSThomas Hellstrom 		if (res->backup) {
1487459d0fa7SThomas Hellstrom 			vbo = res->backup;
1488ed93394cSThomas Hellstrom 
1489dfd5e50eSChristian König 			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
1490459d0fa7SThomas Hellstrom 			if (!vbo->pin_count) {
1491459d0fa7SThomas Hellstrom 				ret = ttm_bo_validate
1492459d0fa7SThomas Hellstrom 					(&vbo->base,
1493459d0fa7SThomas Hellstrom 					 res->func->backup_placement,
149419be5570SChristian König 					 &ctx);
1495ed93394cSThomas Hellstrom 				if (ret) {
1496459d0fa7SThomas Hellstrom 					ttm_bo_unreserve(&vbo->base);
1497ed93394cSThomas Hellstrom 					goto out_no_validate;
1498ed93394cSThomas Hellstrom 				}
1499459d0fa7SThomas Hellstrom 			}
1500ed93394cSThomas Hellstrom 
1501ed93394cSThomas Hellstrom 			/* Do we really need to pin the MOB as well? */
1502459d0fa7SThomas Hellstrom 			vmw_bo_pin_reserved(vbo, true);
1503ed93394cSThomas Hellstrom 		}
1504ed93394cSThomas Hellstrom 		ret = vmw_resource_validate(res);
1505459d0fa7SThomas Hellstrom 		if (vbo)
1506459d0fa7SThomas Hellstrom 			ttm_bo_unreserve(&vbo->base);
1507ed93394cSThomas Hellstrom 		if (ret)
1508ed93394cSThomas Hellstrom 			goto out_no_validate;
1509ed93394cSThomas Hellstrom 	}
1510ed93394cSThomas Hellstrom 	res->pin_count++;
1511ed93394cSThomas Hellstrom 
1512ed93394cSThomas Hellstrom out_no_validate:
1513d80efd5cSThomas Hellstrom 	vmw_resource_unreserve(res, false, NULL, 0UL);
1514ed93394cSThomas Hellstrom out_no_reserve:
1515ed93394cSThomas Hellstrom 	mutex_unlock(&dev_priv->cmdbuf_mutex);
1516ed93394cSThomas Hellstrom 	ttm_write_unlock(&dev_priv->reservation_sem);
1517ed93394cSThomas Hellstrom 
1518ed93394cSThomas Hellstrom 	return ret;
1519ed93394cSThomas Hellstrom }
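
/*
 * Usage sketch (assumed, not an in-tree caller): pin a resource around
 * an operation that needs a stable resource id, then drop the pin with
 * vmw_resource_unpin() below. The device-programming step is
 * illustrative only:
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... program the device using res->id ...
 *	vmw_resource_unpin(res);
 */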
1520ed93394cSThomas Hellstrom 
1521ed93394cSThomas Hellstrom /**
1522ed93394cSThomas Hellstrom  * vmw_resource_unpin - Remove a pin reference from a resource
1523ed93394cSThomas Hellstrom  *
1524ed93394cSThomas Hellstrom  * @res: The resource to remove a pin reference from
1525ed93394cSThomas Hellstrom  *
1526ed93394cSThomas Hellstrom  * Having a pin reference means that the resource can never be evicted, and
1527ed93394cSThomas Hellstrom  * its id will never change as long as there is a pin reference.
1528ed93394cSThomas Hellstrom  */
1529ed93394cSThomas Hellstrom void vmw_resource_unpin(struct vmw_resource *res)
1530ed93394cSThomas Hellstrom {
1531ed93394cSThomas Hellstrom 	struct vmw_private *dev_priv = res->dev_priv;
1532ed93394cSThomas Hellstrom 	int ret;
1533ed93394cSThomas Hellstrom 
1534f08c86c3SThomas Hellstrom 	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
1535ed93394cSThomas Hellstrom 	mutex_lock(&dev_priv->cmdbuf_mutex);
1536ed93394cSThomas Hellstrom 
15371a4b172aSThomas Hellstrom 	ret = vmw_resource_reserve(res, false, true);
1538ed93394cSThomas Hellstrom 	WARN_ON(ret);
1539ed93394cSThomas Hellstrom 
1540ed93394cSThomas Hellstrom 	WARN_ON(res->pin_count == 0);
1541ed93394cSThomas Hellstrom 	if (--res->pin_count == 0 && res->backup) {
1542459d0fa7SThomas Hellstrom 		struct vmw_dma_buffer *vbo = res->backup;
1543ed93394cSThomas Hellstrom 
1544f08c86c3SThomas Hellstrom 		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
1545459d0fa7SThomas Hellstrom 		vmw_bo_pin_reserved(vbo, false);
1546459d0fa7SThomas Hellstrom 		ttm_bo_unreserve(&vbo->base);
1547ed93394cSThomas Hellstrom 	}
1548ed93394cSThomas Hellstrom 
1549d80efd5cSThomas Hellstrom 	vmw_resource_unreserve(res, false, NULL, 0UL);
1550ed93394cSThomas Hellstrom 
1551ed93394cSThomas Hellstrom 	mutex_unlock(&dev_priv->cmdbuf_mutex);
1552ed93394cSThomas Hellstrom 	ttm_read_unlock(&dev_priv->reservation_sem);
1553ed93394cSThomas Hellstrom }
1554d80efd5cSThomas Hellstrom 
1555d80efd5cSThomas Hellstrom /**
1556d80efd5cSThomas Hellstrom  * vmw_res_type - Return the resource type
1557d80efd5cSThomas Hellstrom  *
1558d80efd5cSThomas Hellstrom  * @res: Pointer to the resource
1559d80efd5cSThomas Hellstrom  */
1560d80efd5cSThomas Hellstrom enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1561d80efd5cSThomas Hellstrom {
1562d80efd5cSThomas Hellstrom 	return res->func->res_type;
1563d80efd5cSThomas Hellstrom }
1564