1 /*	$NetBSD: vmwgfx_execbuf.c,v 1.5 2022/10/25 23:35:57 riastradh Exp $	*/
2 
3 // SPDX-License-Identifier: GPL-2.0 OR MIT
4 /**************************************************************************
5  *
6  * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sub license, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the
17  * next paragraph) shall be included in all copies or substantial portions
18  * of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
23  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
24  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
25  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
26  * USE OR OTHER DEALINGS IN THE SOFTWARE.
27  *
28  **************************************************************************/
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: vmwgfx_execbuf.c,v 1.5 2022/10/25 23:35:57 riastradh Exp $");
31 
32 #include <linux/sync_file.h>
33 
34 #ifdef __NetBSD__
35 #include <sys/filedesc.h>
36 #endif
37 
38 #include "vmwgfx_drv.h"
39 #include "vmwgfx_reg.h"
40 #include <drm/ttm/ttm_bo_api.h>
41 #include <drm/ttm/ttm_placement.h>
42 #include "vmwgfx_so.h"
43 #include "vmwgfx_binding.h"
44 
45 #define VMW_RES_HT_ORDER 12
46 
47 /*
48  * Helper macro to get dx_ctx_node if available; otherwise print an error
49  * message. This is for use in command verifier functions where the command
50  * is invalid if dx_ctx_node is not set.
51  */
52 #define VMW_GET_CTX_NODE(__sw_context)                                        \
53 ({                                                                            \
54 	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({            \
55 		VMW_DEBUG_USER("SM context is not set at %s\n", __func__);    \
56 		__sw_context->dx_ctx_node;                                    \
57 	});                                                                   \
58 })
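
/*
 * Typical use in a command verifier, mirroring the callers later in this
 * file:
 *
 *	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
 *
 *	if (!ctx_node)
 *		return -EINVAL;
 */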
59 
60 #define VMW_DECLARE_CMD_VAR(__var, __type)                                    \
61 	struct {                                                              \
62 		SVGA3dCmdHeader header;                                       \
63 		__type body;                                                  \
64 	} __var
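
/*
 * Typical use, as in the command verifiers below: declare a pointer to the
 * combined header + body and recover it from the stream header, e.g.
 *
 *	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
 *		container_of(header, typeof(*cmd), header);
 */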
65 
66 /**
67  * struct vmw_relocation - Buffer object relocation
68  *
69  * @head: List head for the command submission context's relocation list
70  * @vbo: Non ref-counted pointer to buffer object
71  * @mob_loc: Pointer to location for mob id to be modified
72  * @location: Pointer to location for guest pointer to be modified
73  */
74 struct vmw_relocation {
75 	struct list_head head;
76 	struct vmw_buffer_object *vbo;
77 	union {
78 		SVGAMobId *mob_loc;
79 		SVGAGuestPtr *location;
80 	};
81 };
82 
83 /**
84  * enum vmw_resource_relocation_type - Relocation type for resources
85  *
86  * @vmw_res_rel_normal: Traditional relocation. The resource id in the
87  * command stream is replaced with the actual id after validation.
88  * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
89  * with a NOP.
90  * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
91  * validation is -1, the command is replaced with a NOP. Otherwise no action.
92  */
93 enum vmw_resource_relocation_type {
94 	vmw_res_rel_normal,
95 	vmw_res_rel_nop,
96 	vmw_res_rel_cond_nop,
97 	vmw_res_rel_max
98 };
99 
100 /**
101  * struct vmw_resource_relocation - Relocation info for resources
102  *
103  * @head: List head for the software context's relocation list.
104  * @res: Non-ref-counted pointer to the resource.
105  * @offset: Byte offset into the command buffer where the id that needs
106  * fixup is located.
107  * @rel_type: Type of relocation.
108  */
109 struct vmw_resource_relocation {
110 	struct list_head head;
111 	const struct vmw_resource *res;
112 	u32 offset:29;
113 	enum vmw_resource_relocation_type rel_type:3;
114 };
115 
116 /**
117  * struct vmw_ctx_validation_info - Extra validation metadata for contexts
118  *
119  * @head: List head of context list
120  * @ctx: The context resource
121  * @cur: The context's persistent binding state
122  * @staged: The binding state changes of this command buffer
123  */
124 struct vmw_ctx_validation_info {
125 	struct list_head head;
126 	struct vmw_resource *ctx;
127 	struct vmw_ctx_binding_state *cur;
128 	struct vmw_ctx_binding_state *staged;
129 };
130 
131 /**
132  * struct vmw_cmd_entry - Describe a command for the verifier
133  *
134  * @user_allow: Whether allowed from the execbuf ioctl.
135  * @gb_disable: Whether disabled if guest-backed objects are available.
136  * @gb_enable: Whether enabled iff guest-backed objects are available.
137  */
138 struct vmw_cmd_entry {
139 	int (*func) (struct vmw_private *, struct vmw_sw_context *,
140 		     SVGA3dCmdHeader *);
141 	bool user_allow;
142 	bool gb_disable;
143 	bool gb_enable;
144 	const char *cmd_name;
145 };
146 
147 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
148 	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
149 				       (_gb_disable), (_gb_enable), #_cmd}
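
/*
 * VMW_CMD_DEF expands to one designated-initializer entry of the command
 * dispatch table, indexed by the command id relative to SVGA_3D_CMD_BASE and
 * carrying the verifier function, the permission flags and the stringified
 * command name for debug output.
 */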
150 
151 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
152 					struct vmw_sw_context *sw_context,
153 					struct vmw_resource *ctx);
154 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
155 				 struct vmw_sw_context *sw_context,
156 				 SVGAMobId *id,
157 				 struct vmw_buffer_object **vmw_bo_p);
158 /**
159  * vmw_ptr_diff - Compute the offset from a to b in bytes
160  *
161  * @a: A starting pointer.
162  * @b: A pointer offset in the same address space.
163  *
164  * Returns: The offset in bytes between the two pointers.
165  */
166 static size_t vmw_ptr_diff(void *a, void *b)
167 {
168 	return (unsigned long) b - (unsigned long) a;
169 }
170 
171 /**
172  * vmw_execbuf_bindings_commit - Commit modified binding state
173  *
174  * @sw_context: The command submission context
175  * @backoff: Whether this is part of the error path and binding state changes
176  * should be ignored
177  */
178 static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
179 					bool backoff)
180 {
181 	struct vmw_ctx_validation_info *entry;
182 
183 	list_for_each_entry(entry, &sw_context->ctx_list, head) {
184 		if (!backoff)
185 			vmw_binding_state_commit(entry->cur, entry->staged);
186 
187 		if (entry->staged != sw_context->staged_bindings)
188 			vmw_binding_state_free(entry->staged);
189 		else
190 			sw_context->staged_bindings_inuse = false;
191 	}
192 
193 	/* List entries are freed with the validation context */
194 	INIT_LIST_HEAD(&sw_context->ctx_list);
195 }
196 
197 /**
198  * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
199  *
200  * @sw_context: The command submission context
201  */
202 static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
203 {
204 	if (sw_context->dx_query_mob)
205 		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
206 					  sw_context->dx_query_mob);
207 }
208 
209 /**
210  * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
211  * the validate list.
212  *
213  * @dev_priv: Pointer to the device private.
214  * @sw_context: The command submission context
215  * @node: The validation node holding the context resource metadata
216  */
217 static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
218 				   struct vmw_sw_context *sw_context,
219 				   struct vmw_resource *res,
220 				   struct vmw_ctx_validation_info *node)
221 {
222 	int ret;
223 
224 	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
225 	if (unlikely(ret != 0))
226 		goto out_err;
227 
228 	if (!sw_context->staged_bindings) {
229 		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
230 		if (IS_ERR(sw_context->staged_bindings)) {
231 			ret = PTR_ERR(sw_context->staged_bindings);
232 			sw_context->staged_bindings = NULL;
233 			goto out_err;
234 		}
235 	}
236 
237 	if (sw_context->staged_bindings_inuse) {
238 		node->staged = vmw_binding_state_alloc(dev_priv);
239 		if (IS_ERR(node->staged)) {
240 			ret = PTR_ERR(node->staged);
241 			node->staged = NULL;
242 			goto out_err;
243 		}
244 	} else {
245 		node->staged = sw_context->staged_bindings;
246 		sw_context->staged_bindings_inuse = true;
247 	}
248 
249 	node->ctx = res;
250 	node->cur = vmw_context_binding_state(res);
251 	list_add_tail(&node->head, &sw_context->ctx_list);
252 
253 	return 0;
254 
255 out_err:
256 	return ret;
257 }
258 
259 /**
260  * vmw_execbuf_res_size - Calculate the extra size for the resource validation node
261  *
262  * @dev_priv: Pointer to the device private struct.
263  * @res_type: The resource type.
264  *
265  * Guest-backed contexts and DX contexts require extra size to store execbuf
266  * private information in the validation node, typically the binding manager's
267  * associated data structures.
268  *
269  * Returns: The extra size requirement based on resource type.
270  */
271 static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
272 					 enum vmw_res_type res_type)
273 {
274 	return (res_type == vmw_res_dx_context ||
275 		(res_type == vmw_res_context && dev_priv->has_mob)) ?
276 		sizeof(struct vmw_ctx_validation_info) : 0;
277 }
278 
279 /**
280  * vmw_execbuf_rcache_update - Update a resource-node cache entry
281  *
282  * @rcache: Pointer to the entry to update.
283  * @res: Pointer to the resource.
284  * @private: Pointer to the execbuf-private space in the resource validation
285  * node.
286  */
287 static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
288 				      struct vmw_resource *res,
289 				      void *private)
290 {
291 	rcache->res = res;
292 	rcache->private = private;
293 	rcache->valid = 1;
294 	rcache->valid_handle = 0;
295 }
296 
297 /**
298  * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
299  * rcu-protected pointer to the validation list.
300  *
301  * @sw_context: Pointer to the software context.
302  * @res: Unreferenced rcu-protected pointer to the resource.
303  * @dirty: Whether to change dirty status.
304  *
305  * Returns: 0 on success. Negative error code on failure. Typical error codes
306  * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
307  */
308 static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
309 					 struct vmw_resource *res,
310 					 u32 dirty)
311 {
312 	struct vmw_private *dev_priv = res->dev_priv;
313 	int ret;
314 	enum vmw_res_type res_type = vmw_res_type(res);
315 	struct vmw_res_cache_entry *rcache;
316 	struct vmw_ctx_validation_info *ctx_info;
317 	bool first_usage;
318 	unsigned int priv_size;
319 
320 	rcache = &sw_context->res_cache[res_type];
321 	if (likely(rcache->valid && rcache->res == res)) {
322 		if (dirty)
323 			vmw_validation_res_set_dirty(sw_context->ctx,
324 						     rcache->private, dirty);
325 		vmw_user_resource_noref_release();
326 		return 0;
327 	}
328 
329 	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
330 	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
331 					  dirty, (void **)&ctx_info,
332 					  &first_usage);
333 	vmw_user_resource_noref_release();
334 	if (ret)
335 		return ret;
336 
337 	if (priv_size && first_usage) {
338 		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
339 					      ctx_info);
340 		if (ret) {
341 			VMW_DEBUG_USER("Failed first usage context setup.\n");
342 			return ret;
343 		}
344 	}
345 
346 	vmw_execbuf_rcache_update(rcache, res, ctx_info);
347 	return 0;
348 }
349 
350 /**
351  * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
352  * validation list if it's not already on it
353  *
354  * @sw_context: Pointer to the software context.
355  * @res: Pointer to the resource.
356  * @dirty: Whether to change dirty status.
357  *
358  * Returns: Zero on success. Negative error code on failure.
359  */
360 static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
361 					 struct vmw_resource *res,
362 					 u32 dirty)
363 {
364 	struct vmw_res_cache_entry *rcache;
365 	enum vmw_res_type res_type = vmw_res_type(res);
366 	void *ptr;
367 	int ret;
368 
369 	rcache = &sw_context->res_cache[res_type];
370 	if (likely(rcache->valid && rcache->res == res)) {
371 		if (dirty)
372 			vmw_validation_res_set_dirty(sw_context->ctx,
373 						     rcache->private, dirty);
374 		return 0;
375 	}
376 
377 	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
378 					  &ptr, NULL);
379 	if (ret)
380 		return ret;
381 
382 	vmw_execbuf_rcache_update(rcache, res, ptr);
383 
384 	return 0;
385 }
386 
387 /**
388  * vmw_view_res_val_add - Add a view, and the surface it points to, to the
389  * validation list
390  *
391  * @sw_context: The software context holding the validation list.
392  * @view: Pointer to the view resource.
393  *
394  * Returns 0 if success, negative error code otherwise.
395  */
396 static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
397 				struct vmw_resource *view)
398 {
399 	int ret;
400 
401 	/*
402 	 * First add the resource the view is pointing to, otherwise it may be
403 	 * swapped out when the view is validated.
404 	 */
405 	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
406 					    vmw_view_dirtying(view));
407 	if (ret)
408 		return ret;
409 
410 	return vmw_execbuf_res_noctx_val_add(sw_context, view,
411 					     VMW_RES_DIRTY_NONE);
412 }
413 
414 /**
415  * vmw_view_id_val_add - Look up a view and add it, and the surface it points
416  * to, to the validation list.
417  *
418  * @sw_context: The software context holding the validation list.
419  * @view_type: The view type to look up.
420  * @id: view id of the view.
421  *
422  * The view is represented by a view id and the DX context it's created on, or
423  * scheduled for creation on. If there is no DX context set, the function will
424  * return an -EINVAL error pointer.
425  *
426  * Returns: Unreferenced pointer to the resource on success, negative error
427  * pointer on failure.
428  */
429 static struct vmw_resource *
430 vmw_view_id_val_add(struct vmw_sw_context *sw_context,
431 		    enum vmw_view_type view_type, u32 id)
432 {
433 	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
434 	struct vmw_resource *view;
435 	int ret;
436 
437 	if (!ctx_node)
438 		return ERR_PTR(-EINVAL);
439 
440 	view = vmw_view_lookup(sw_context->man, view_type, id);
441 	if (IS_ERR(view))
442 		return view;
443 
444 	ret = vmw_view_res_val_add(sw_context, view);
445 	if (ret)
446 		return ERR_PTR(ret);
447 
448 	return view;
449 }
450 
451 /**
452  * vmw_resource_context_res_add - Put resources previously bound to a context on
453  * the validation list
454  *
455  * @dev_priv: Pointer to a device private structure
456  * @sw_context: Pointer to a software context used for this command submission
457  * @ctx: Pointer to the context resource
458  *
459  * This function puts all resources that were previously bound to @ctx on the
460  * resource validation list. This is part of the context state reemission.
461  */
462 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
463 					struct vmw_sw_context *sw_context,
464 					struct vmw_resource *ctx)
465 {
466 	struct list_head *binding_list;
467 	struct vmw_ctx_bindinfo *entry;
468 	int ret = 0;
469 	struct vmw_resource *res;
470 	u32 i;
471 
472 	/* Add all cotables to the validation list. */
473 	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
474 		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
475 			res = vmw_context_cotable(ctx, i);
476 			if (IS_ERR(res))
477 				continue;
478 
479 			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
480 							    VMW_RES_DIRTY_SET);
481 			if (unlikely(ret != 0))
482 				return ret;
483 		}
484 	}
485 
486 	/* Add all resources bound to the context to the validation list */
487 	mutex_lock(&dev_priv->binding_mutex);
488 	binding_list = vmw_context_binding_list(ctx);
489 
490 	list_for_each_entry(entry, binding_list, ctx_list) {
491 		if (vmw_res_type(entry->res) == vmw_res_view)
492 			ret = vmw_view_res_val_add(sw_context, entry->res);
493 		else
494 			ret = vmw_execbuf_res_noctx_val_add
495 				(sw_context, entry->res,
496 				 vmw_binding_dirtying(entry->bt));
497 		if (unlikely(ret != 0))
498 			break;
499 	}
500 
501 	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
502 		struct vmw_buffer_object *dx_query_mob;
503 
504 		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
505 		if (dx_query_mob)
506 			ret = vmw_validation_add_bo(sw_context->ctx,
507 						    dx_query_mob, true, false);
508 	}
509 
510 	mutex_unlock(&dev_priv->binding_mutex);
511 	return ret;
512 }
513 
514 /**
515  * vmw_resource_relocation_add - Add a relocation to the relocation list
516  *
517  * @sw_context: Pointer to the software context holding the relocation list.
518  * @res: The resource.
519  * @offset: Offset into the command buffer currently being parsed where the id
520  * that needs fixup is located. Granularity is one byte.
521  * @rel_type: Relocation type.
522  */
523 static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
524 				       const struct vmw_resource *res,
525 				       unsigned long offset,
526 				       enum vmw_resource_relocation_type
527 				       rel_type)
528 {
529 	struct vmw_resource_relocation *rel;
530 
531 	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
532 	if (unlikely(!rel)) {
533 		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
534 		return -ENOMEM;
535 	}
536 
537 	rel->res = res;
538 	rel->offset = offset;
539 	rel->rel_type = rel_type;
540 	list_add_tail(&rel->head, &sw_context->res_relocations);
541 
542 	return 0;
543 }
544 
545 /**
546  * vmw_resource_relocations_free - Free all relocations on a list
547  *
548  * @list: Pointer to the head of the relocation list
549  */
550 static void vmw_resource_relocations_free(struct list_head *list)
551 {
552 	/* Memory is validation context memory, so no need to free it */
553 	INIT_LIST_HEAD(list);
554 }
555 
556 /**
557  * vmw_resource_relocations_apply - Apply all relocations on a list
558  *
559  * @cb: Pointer to the start of the command buffer being patched. This need not be
560  * the same buffer as the one being parsed when the relocation list was built,
561  * but the contents must be the same modulo the resource ids.
562  * @list: Pointer to the head of the relocation list.
563  */
564 static void vmw_resource_relocations_apply(uint32_t *cb,
565 					   struct list_head *list)
566 {
567 	struct vmw_resource_relocation *rel;
568 
569 	/* Validate the struct vmw_resource_relocation member size */
570 	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
571 	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
572 
573 	list_for_each_entry(rel, list, head) {
574 		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
575 		switch (rel->rel_type) {
576 		case vmw_res_rel_normal:
577 			*addr = rel->res->id;
578 			break;
579 		case vmw_res_rel_nop:
580 			*addr = SVGA_3D_CMD_NOP;
581 			break;
582 		default:
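			/* Conditional NOP: patch only if the id is still invalid. */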
583 			if (rel->res->id == -1)
584 				*addr = SVGA_3D_CMD_NOP;
585 			break;
586 		}
587 	}
588 }
589 
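/*
 * vmw_cmd_invalid - Verifier stub for commands that are not accepted in this
 * configuration; always rejects the command with -EINVAL.
 */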
590 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
591 			   struct vmw_sw_context *sw_context,
592 			   SVGA3dCmdHeader *header)
593 {
594 	return -EINVAL;
595 }
596 
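/*
 * vmw_cmd_ok - Verifier stub for commands that need no further checking;
 * always accepts the command.
 */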
597 static int vmw_cmd_ok(struct vmw_private *dev_priv,
598 		      struct vmw_sw_context *sw_context,
599 		      SVGA3dCmdHeader *header)
600 {
601 	return 0;
602 }
603 
604 /**
605  * vmw_resources_reserve - Reserve all resources on the sw_context's resource
606  * list.
607  *
608  * @sw_context: Pointer to the software context.
609  *
610  * Note that since VMware's command submission is currently protected by the
611  * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
612  * only a single thread at once will attempt this.
613  */
614 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
615 {
616 	int ret;
617 
618 	ret = vmw_validation_res_reserve(sw_context->ctx, true);
619 	if (ret)
620 		return ret;
621 
622 	if (sw_context->dx_query_mob) {
623 		struct vmw_buffer_object *expected_dx_query_mob;
624 
625 		expected_dx_query_mob =
626 			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
627 		if (expected_dx_query_mob &&
628 		    expected_dx_query_mob != sw_context->dx_query_mob) {
629 			ret = -EINVAL;
630 		}
631 	}
632 
633 	return ret;
634 }
635 
636 /**
637  * vmw_cmd_res_check - Check that a resource is present and, if so, put it on
638  * the resource validation list unless it's already there.
639  *
640  * @dev_priv: Pointer to a device private structure.
641  * @sw_context: Pointer to the software context.
642  * @res_type: Resource type.
643  * @dirty: Whether to change dirty status.
644  * @converter: User-space visible type specific information.
645  * @id_loc: Pointer to the location in the command buffer currently being parsed
646  * where the user-space resource id handle is located.
647  * @p_res: Pointer to pointer to the resource validation node. Populated on
648  * exit.
649  */
650 static int
651 vmw_cmd_res_check(struct vmw_private *dev_priv,
652 		  struct vmw_sw_context *sw_context,
653 		  enum vmw_res_type res_type,
654 		  u32 dirty,
655 		  const struct vmw_user_resource_conv *converter,
656 		  uint32_t *id_loc,
657 		  struct vmw_resource **p_res)
658 {
659 	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
660 	struct vmw_resource *res;
661 	int ret;
662 
663 	if (p_res)
664 		*p_res = NULL;
665 
666 	if (*id_loc == SVGA3D_INVALID_ID) {
667 		if (res_type == vmw_res_context) {
668 			VMW_DEBUG_USER("Illegal context invalid id.\n");
669 			return -EINVAL;
670 		}
671 		return 0;
672 	}
673 
674 	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
675 		res = rcache->res;
676 		if (dirty)
677 			vmw_validation_res_set_dirty(sw_context->ctx,
678 						     rcache->private, dirty);
679 	} else {
680 		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
681 
682 		ret = vmw_validation_preload_res(sw_context->ctx, size);
683 		if (ret)
684 			return ret;
685 
686 		res = vmw_user_resource_noref_lookup_handle
687 			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
688 		if (IS_ERR(res)) {
689 			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
690 				       (unsigned int) *id_loc);
691 			return PTR_ERR(res);
692 		}
693 
694 		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
695 		if (unlikely(ret != 0))
696 			return ret;
697 
698 		if (rcache->valid && rcache->res == res) {
699 			rcache->valid_handle = true;
700 			rcache->handle = *id_loc;
701 		}
702 	}
703 
704 	ret = vmw_resource_relocation_add(sw_context, res,
705 					  vmw_ptr_diff(sw_context->buf_start,
706 						       id_loc),
707 					  vmw_res_rel_normal);
708 	if (p_res)
709 		*p_res = res;
710 
711 	return 0;
712 }
713 
714 /**
715  * vmw_rebind_all_dx_query - Rebind the DX query associated with the context
716  *
717  * @ctx_res: context the query belongs to
718  *
719  * This function assumes binding_mutex is held.
720  */
721 static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
722 {
723 	struct vmw_private *dev_priv = ctx_res->dev_priv;
724 	struct vmw_buffer_object *dx_query_mob;
725 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
726 
727 	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
728 
729 	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
730 		return 0;
731 
732 	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
733 	if (cmd == NULL)
734 		return -ENOMEM;
735 
736 	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
737 	cmd->header.size = sizeof(cmd->body);
738 	cmd->body.cid = ctx_res->id;
739 	cmd->body.mobid = dx_query_mob->base.mem.start;
740 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
741 
742 	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
743 
744 	return 0;
745 }
746 
747 /**
748  * vmw_rebind_contexts - Rebind all resources previously bound to referenced
749  * contexts.
750  *
751  * @sw_context: Pointer to the software context.
752  *
753  * Rebind context binding points that have been scrubbed because of eviction.
754  */
755 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
756 {
757 	struct vmw_ctx_validation_info *val;
758 	int ret;
759 
760 	list_for_each_entry(val, &sw_context->ctx_list, head) {
761 		ret = vmw_binding_rebind_all(val->cur);
762 		if (unlikely(ret != 0)) {
763 			if (ret != -ERESTARTSYS)
764 				VMW_DEBUG_USER("Failed to rebind context.\n");
765 			return ret;
766 		}
767 
768 		ret = vmw_rebind_all_dx_query(val->ctx);
769 		if (ret != 0) {
770 			VMW_DEBUG_USER("Failed to rebind queries.\n");
771 			return ret;
772 		}
773 	}
774 
775 	return 0;
776 }
777 
778 /**
779  * vmw_view_bindings_add - Add an array of view bindings to a context binding
780  * state tracker.
781  *
782  * @sw_context: The execbuf state used for this command.
783  * @view_type: View type for the bindings.
784  * @binding_type: Binding type for the bindings.
785  * @shader_slot: The shader slot to use for the bindings.
786  * @view_ids: Array of view ids to be bound.
787  * @num_views: Number of view ids in @view_ids.
788  * @first_slot: The binding slot to be used for the first view id in @view_ids.
789  */
790 static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
791 				 enum vmw_view_type view_type,
792 				 enum vmw_ctx_binding_type binding_type,
793 				 uint32 shader_slot,
794 				 uint32 view_ids[], u32 num_views,
795 				 u32 first_slot)
796 {
797 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
798 	u32 i;
799 
800 	if (!ctx_node)
801 		return -EINVAL;
802 
803 	for (i = 0; i < num_views; ++i) {
804 		struct vmw_ctx_bindinfo_view binding;
805 		struct vmw_resource *view = NULL;
806 
807 		if (view_ids[i] != SVGA3D_INVALID_ID) {
808 			view = vmw_view_id_val_add(sw_context, view_type,
809 						   view_ids[i]);
810 			if (IS_ERR(view)) {
811 				VMW_DEBUG_USER("View not found.\n");
812 				return PTR_ERR(view);
813 			}
814 		}
815 		binding.bi.ctx = ctx_node->ctx;
816 		binding.bi.res = view;
817 		binding.bi.bt = binding_type;
818 		binding.shader_slot = shader_slot;
819 		binding.slot = first_slot + i;
820 		vmw_binding_add(ctx_node->staged, &binding.bi,
821 				shader_slot, binding.slot);
822 	}
823 
824 	return 0;
825 }
826 
827 /**
828  * vmw_cmd_cid_check - Check a command header for valid context information.
829  *
830  * @dev_priv: Pointer to a device private structure.
831  * @sw_context: Pointer to the software context.
832  * @header: A command header with an embedded user-space context handle.
833  *
834  * Convenience function: Call vmw_cmd_res_check with the user-space context
835  * handle embedded in @header.
836  */
837 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
838 			     struct vmw_sw_context *sw_context,
839 			     SVGA3dCmdHeader *header)
840 {
841 	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
842 		container_of(header, typeof(*cmd), header);
843 
844 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
845 				 VMW_RES_DIRTY_SET, user_context_converter,
846 				 &cmd->body, NULL);
847 }
848 
849 /**
850  * vmw_execbuf_info_from_res - Get the private validation metadata for a
851  * recently validated resource
852  *
853  * @sw_context: Pointer to the command submission context
854  * @res: The resource
855  *
856  * The resource pointed to by @res needs to be present in the command submission
857  * context's resource cache and hence be the last resource of that type to be
858  * processed by the validation code.
859  *
860  * Return: a pointer to the private metadata of the resource, or NULL if it
861  * wasn't found
862  */
863 static struct vmw_ctx_validation_info *
864 vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
865 			  struct vmw_resource *res)
866 {
867 	struct vmw_res_cache_entry *rcache =
868 		&sw_context->res_cache[vmw_res_type(res)];
869 
870 	if (rcache->valid && rcache->res == res)
871 		return rcache->private;
872 
873 	WARN_ON_ONCE(true);
874 	return NULL;
875 }
876 
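/*
 * vmw_cmd_set_render_target_check - Validate a set-render-target command:
 * check the render target type, validate the context and the target surface
 * and, when MOBs are available, track the render target binding on the
 * context binding state.
 */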
877 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
878 					   struct vmw_sw_context *sw_context,
879 					   SVGA3dCmdHeader *header)
880 {
881 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
882 	struct vmw_resource *ctx;
883 	struct vmw_resource *res;
884 	int ret;
885 
886 	cmd = container_of(header, typeof(*cmd), header);
887 
888 	if (cmd->body.type >= SVGA3D_RT_MAX) {
889 		VMW_DEBUG_USER("Illegal render target type %u.\n",
890 			       (unsigned int) cmd->body.type);
891 		return -EINVAL;
892 	}
893 
894 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
895 				VMW_RES_DIRTY_SET, user_context_converter,
896 				&cmd->body.cid, &ctx);
897 	if (unlikely(ret != 0))
898 		return ret;
899 
900 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
901 				VMW_RES_DIRTY_SET, user_surface_converter,
902 				&cmd->body.target.sid, &res);
903 	if (unlikely(ret))
904 		return ret;
905 
906 	if (dev_priv->has_mob) {
907 		struct vmw_ctx_bindinfo_view binding;
908 		struct vmw_ctx_validation_info *node;
909 
910 		node = vmw_execbuf_info_from_res(sw_context, ctx);
911 		if (!node)
912 			return -EINVAL;
913 
914 		binding.bi.ctx = ctx;
915 		binding.bi.res = res;
916 		binding.bi.bt = vmw_ctx_binding_rt;
917 		binding.slot = cmd->body.type;
918 		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
919 	}
920 
921 	return 0;
922 }
923 
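/*
 * vmw_cmd_surface_copy_check - Validate a surface-copy command by adding the
 * source and destination surfaces to the validation list.
 */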
924 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
925 				      struct vmw_sw_context *sw_context,
926 				      SVGA3dCmdHeader *header)
927 {
928 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
929 	int ret;
930 
931 	cmd = container_of(header, typeof(*cmd), header);
932 
933 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
934 				VMW_RES_DIRTY_NONE, user_surface_converter,
935 				&cmd->body.src.sid, NULL);
936 	if (ret)
937 		return ret;
938 
939 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
940 				 VMW_RES_DIRTY_SET, user_surface_converter,
941 				 &cmd->body.dest.sid, NULL);
942 }
943 
944 static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
945 				     struct vmw_sw_context *sw_context,
946 				     SVGA3dCmdHeader *header)
947 {
948 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
949 	int ret;
950 
951 	cmd = container_of(header, typeof(*cmd), header);
952 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
953 				VMW_RES_DIRTY_NONE, user_surface_converter,
954 				&cmd->body.src, NULL);
955 	if (ret != 0)
956 		return ret;
957 
958 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
959 				 VMW_RES_DIRTY_SET, user_surface_converter,
960 				 &cmd->body.dest, NULL);
961 }
962 
963 static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
964 				   struct vmw_sw_context *sw_context,
965 				   SVGA3dCmdHeader *header)
966 {
967 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
968 	int ret;
969 
970 	cmd = container_of(header, typeof(*cmd), header);
971 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
972 				VMW_RES_DIRTY_NONE, user_surface_converter,
973 				&cmd->body.srcSid, NULL);
974 	if (ret != 0)
975 		return ret;
976 
977 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
978 				 VMW_RES_DIRTY_SET, user_surface_converter,
979 				 &cmd->body.dstSid, NULL);
980 }
981 
982 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
983 				     struct vmw_sw_context *sw_context,
984 				     SVGA3dCmdHeader *header)
985 {
986 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
987 	int ret;
988 
989 	cmd = container_of(header, typeof(*cmd), header);
990 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
991 				VMW_RES_DIRTY_NONE, user_surface_converter,
992 				&cmd->body.src.sid, NULL);
993 	if (unlikely(ret != 0))
994 		return ret;
995 
996 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
997 				 VMW_RES_DIRTY_SET, user_surface_converter,
998 				 &cmd->body.dest.sid, NULL);
999 }
1000 
1001 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
1002 					 struct vmw_sw_context *sw_context,
1003 					 SVGA3dCmdHeader *header)
1004 {
1005 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
1006 		container_of(header, typeof(*cmd), header);
1007 
1008 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1009 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1010 				 &cmd->body.srcImage.sid, NULL);
1011 }
1012 
1013 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
1014 				 struct vmw_sw_context *sw_context,
1015 				 SVGA3dCmdHeader *header)
1016 {
1017 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
1018 		container_of(header, typeof(*cmd), header);
1019 
1020 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1021 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1022 				 &cmd->body.sid, NULL);
1023 }
1024 
1025 /**
1026  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1027  *
1028  * @dev_priv: The device private structure.
1029  * @new_query_bo: The new buffer holding query results.
1030  * @sw_context: The software context used for this command submission.
1031  *
1032  * This function checks whether @new_query_bo is suitable for holding query
1033  * results, and if another buffer currently is pinned for query results. If so,
1034  * the function prepares the state of @sw_context for switching pinned buffers
1035  * after successful submission of the current command batch.
1036  */
1037 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1038 				       struct vmw_buffer_object *new_query_bo,
1039 				       struct vmw_sw_context *sw_context)
1040 {
1041 	struct vmw_res_cache_entry *ctx_entry =
1042 		&sw_context->res_cache[vmw_res_context];
1043 	int ret;
1044 
1045 	BUG_ON(!ctx_entry->valid);
1046 	sw_context->last_query_ctx = ctx_entry->res;
1047 
1048 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1049 
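		/* Query result buffers are small; reject anything larger than four pages. */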
1050 		if (unlikely(new_query_bo->base.num_pages > 4)) {
1051 			VMW_DEBUG_USER("Query buffer too large.\n");
1052 			return -EINVAL;
1053 		}
1054 
1055 		if (unlikely(sw_context->cur_query_bo != NULL)) {
1056 			sw_context->needs_post_query_barrier = true;
1057 			ret = vmw_validation_add_bo(sw_context->ctx,
1058 						    sw_context->cur_query_bo,
1059 						    dev_priv->has_mob, false);
1060 			if (unlikely(ret != 0))
1061 				return ret;
1062 		}
1063 		sw_context->cur_query_bo = new_query_bo;
1064 
1065 		ret = vmw_validation_add_bo(sw_context->ctx,
1066 					    dev_priv->dummy_query_bo,
1067 					    dev_priv->has_mob, false);
1068 		if (unlikely(ret != 0))
1069 			return ret;
1070 	}
1071 
1072 	return 0;
1073 }
1074 
1075 /**
1076  * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1077  *
1078  * @dev_priv: The device private structure.
1079  * @sw_context: The software context used for this command submission batch.
1080  *
1081  * This function will check if we're switching query buffers, and will then,
1082  * issue a dummy occlusion query wait used as a query barrier. When the fence
1083  * object following that query wait has signaled, we are sure that all preceding
1084  * queries have finished, and the old query buffer can be unpinned. However,
1085  * since both the new query buffer and the old one are fenced with that fence,
1086  * we can do an asynchronous unpin now, and be sure that the old query buffer
1087  * won't be moved until the fence has signaled.
1088  *
1089  * As mentioned above, both the new and the old query buffers need to be fenced
1090  * using a sequence emitted *after* calling this function.
1091  */
1092 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1093 				     struct vmw_sw_context *sw_context)
1094 {
1095 	/*
1096 	 * The validate list should still hold references to all
1097 	 * contexts here.
1098 	 */
1099 	if (sw_context->needs_post_query_barrier) {
1100 		struct vmw_res_cache_entry *ctx_entry =
1101 			&sw_context->res_cache[vmw_res_context];
1102 		struct vmw_resource *ctx;
1103 		int ret;
1104 
1105 		BUG_ON(!ctx_entry->valid);
1106 		ctx = ctx_entry->res;
1107 
1108 		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
1109 
1110 		if (unlikely(ret != 0))
1111 			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
1112 	}
1113 
1114 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1115 		if (dev_priv->pinned_bo) {
1116 			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
1117 			vmw_bo_unreference(&dev_priv->pinned_bo);
1118 		}
1119 
1120 		if (!sw_context->needs_post_query_barrier) {
1121 			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1122 
1123 			/*
1124 			 * We pin also the dummy_query_bo buffer so that we
1125 			 * don't need to validate it when emitting dummy queries
1126 			 * in context destroy paths.
1127 			 */
1128 			if (!dev_priv->dummy_query_bo_pinned) {
1129 				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1130 						    true);
1131 				dev_priv->dummy_query_bo_pinned = true;
1132 			}
1133 
1134 			BUG_ON(sw_context->last_query_ctx == NULL);
1135 			dev_priv->query_cid = sw_context->last_query_ctx->id;
1136 			dev_priv->query_cid_valid = true;
1137 			dev_priv->pinned_bo =
1138 				vmw_bo_reference(sw_context->cur_query_bo);
1139 		}
1140 	}
1141 }
1142 
1143 /**
1144  * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
1145  * to a MOB id.
1146  *
1147  * @dev_priv: Pointer to a device private structure.
1148  * @sw_context: The software context used for this command batch validation.
1149  * @id: Pointer to the user-space handle to be translated.
1150  * @vmw_bo_p: Points to a location that, on successful return will carry a
1151  * non-reference-counted pointer to the buffer object identified by the
1152  * user-space handle in @id.
1153  *
1154  * This function saves information needed to translate a user-space buffer
1155  * handle to a MOB id. The translation does not take place immediately, but
1156  * during a call to vmw_apply_relocations().
1157  *
1158  * This function builds a relocation list and a list of buffers to validate. The
1159  * former needs to be freed using either vmw_apply_relocations() or
1160  * vmw_free_relocations(). The latter needs to be freed using
1161  * vmw_clear_validations.
1162  */
1163 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1164 				 struct vmw_sw_context *sw_context,
1165 				 SVGAMobId *id,
1166 				 struct vmw_buffer_object **vmw_bo_p)
1167 {
1168 	struct vmw_buffer_object *vmw_bo;
1169 	uint32_t handle = *id;
1170 	struct vmw_relocation *reloc;
1171 	int ret;
1172 
1173 	vmw_validation_preload_bo(sw_context->ctx);
1174 	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
1175 	if (IS_ERR(vmw_bo)) {
1176 		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
1177 		return PTR_ERR(vmw_bo);
1178 	}
1179 
1180 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
1181 	vmw_user_bo_noref_release();
1182 	if (unlikely(ret != 0))
1183 		return ret;
1184 
1185 	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1186 	if (!reloc)
1187 		return -ENOMEM;
1188 
1189 	reloc->mob_loc = id;
1190 	reloc->vbo = vmw_bo;
1191 
1192 	*vmw_bo_p = vmw_bo;
1193 	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1194 
1195 	return 0;
1196 }
1197 
1198 /**
1199  * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
1200  * to a valid SVGAGuestPtr
1201  *
1202  * @dev_priv: Pointer to a device private structure.
1203  * @sw_context: The software context used for this command batch validation.
1204  * @ptr: Pointer to the user-space handle to be translated.
1205  * @vmw_bo_p: Points to a location that, on successful return will carry a
1206  * non-reference-counted pointer to the DMA buffer identified by the user-space
1207  * handle in @ptr.
1208  *
1209  * This function saves information needed to translate a user-space buffer
1210  * handle to a valid SVGAGuestPtr. The translation does not take place
1211  * immediately, but during a call to vmw_apply_relocations().
1212  *
1213  * This function builds a relocation list and a list of buffers to validate.
1214  * The former needs to be freed using either vmw_apply_relocations() or
1215  * vmw_free_relocations(). The latter needs to be freed using
1216  * vmw_clear_validations.
1217  */
1218 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1219 				   struct vmw_sw_context *sw_context,
1220 				   SVGAGuestPtr *ptr,
1221 				   struct vmw_buffer_object **vmw_bo_p)
1222 {
1223 	struct vmw_buffer_object *vmw_bo;
1224 	uint32_t handle = ptr->gmrId;
1225 	struct vmw_relocation *reloc;
1226 	int ret;
1227 
1228 	vmw_validation_preload_bo(sw_context->ctx);
1229 	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
1230 	if (IS_ERR(vmw_bo)) {
1231 		VMW_DEBUG_USER("Could not find or use GMR region.\n");
1232 		return PTR_ERR(vmw_bo);
1233 	}
1234 
1235 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
1236 	vmw_user_bo_noref_release();
1237 	if (unlikely(ret != 0))
1238 		return ret;
1239 
1240 	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1241 	if (!reloc)
1242 		return -ENOMEM;
1243 
1244 	reloc->location = ptr;
1245 	reloc->vbo = vmw_bo;
1246 	*vmw_bo_p = vmw_bo;
1247 	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1248 
1249 	return 0;
1250 }
1251 
1252 /**
1253  * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
1254  *
1255  * @dev_priv: Pointer to a device private struct.
1256  * @sw_context: The software context used for this command submission.
1257  * @header: Pointer to the command header in the command stream.
1258  *
1259  * This function adds the new query into the query COTABLE
1260  */
1261 static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1262 				   struct vmw_sw_context *sw_context,
1263 				   SVGA3dCmdHeader *header)
1264 {
1265 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
1266 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
1267 	struct vmw_resource *cotable_res;
1268 	int ret;
1269 
1270 	if (!ctx_node)
1271 		return -EINVAL;
1272 
1273 	cmd = container_of(header, typeof(*cmd), header);
1274 
1275 	if (cmd->body.type <  SVGA3D_QUERYTYPE_MIN ||
1276 	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
1277 		return -EINVAL;
1278 
1279 	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
1280 	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
1281 
1282 	return ret;
1283 }
1284 
1285 /**
1286  * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
1287  *
1288  * @dev_priv: Pointer to a device private struct.
1289  * @sw_context: The software context used for this command submission.
1290  * @header: Pointer to the command header in the command stream.
1291  *
1292  * The query bind operation will eventually associate the query ID with its
1293  * backing MOB.  In this function, we take the user mode MOB ID and use
1294  * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
1295  */
1296 static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1297 				 struct vmw_sw_context *sw_context,
1298 				 SVGA3dCmdHeader *header)
1299 {
1300 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
1301 	struct vmw_buffer_object *vmw_bo;
1302 	int ret;
1303 
1304 	cmd = container_of(header, typeof(*cmd), header);
1305 
1306 	/*
1307 	 * Look up the buffer pointed to by q.mobid, put it on the relocation
1308 	 * list so its kernel mode MOB ID can be filled in later
1309 	 */
1310 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1311 				    &vmw_bo);
1312 
1313 	if (ret != 0)
1314 		return ret;
1315 
1316 	sw_context->dx_query_mob = vmw_bo;
1317 	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
1318 	return 0;
1319 }
1320 
1321 /**
1322  * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
1323  *
1324  * @dev_priv: Pointer to a device private struct.
1325  * @sw_context: The software context used for this command submission.
1326  * @header: Pointer to the command header in the command stream.
1327  */
1328 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1329 				  struct vmw_sw_context *sw_context,
1330 				  SVGA3dCmdHeader *header)
1331 {
1332 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
1333 		container_of(header, typeof(*cmd), header);
1334 
1335 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1336 				 VMW_RES_DIRTY_SET, user_context_converter,
1337 				 &cmd->body.cid, NULL);
1338 }
1339 
1340 /**
1341  * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
1342  *
1343  * @dev_priv: Pointer to a device private struct.
1344  * @sw_context: The software context used for this command submission.
1345  * @header: Pointer to the command header in the command stream.
1346  */
1347 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1348 			       struct vmw_sw_context *sw_context,
1349 			       SVGA3dCmdHeader *header)
1350 {
1351 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
1352 		container_of(header, typeof(*cmd), header);
1353 
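	/*
	 * With MOBs available, rewrite the legacy BEGIN_QUERY command in place
	 * into its guest-backed equivalent and validate it as such.
	 */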
1354 	if (unlikely(dev_priv->has_mob)) {
1355 		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);
1356 
1357 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1358 
1359 		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1360 		gb_cmd.header.size = cmd->header.size;
1361 		gb_cmd.body.cid = cmd->body.cid;
1362 		gb_cmd.body.type = cmd->body.type;
1363 
1364 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1365 		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1366 	}
1367 
1368 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1369 				 VMW_RES_DIRTY_SET, user_context_converter,
1370 				 &cmd->body.cid, NULL);
1371 }
1372 
1373 /**
1374  * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
1375  *
1376  * @dev_priv: Pointer to a device private struct.
1377  * @sw_context: The software context used for this command submission.
1378  * @header: Pointer to the command header in the command stream.
1379  */
1380 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1381 				struct vmw_sw_context *sw_context,
1382 				SVGA3dCmdHeader *header)
1383 {
1384 	struct vmw_buffer_object *vmw_bo;
1385 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
1386 	int ret;
1387 
1388 	cmd = container_of(header, typeof(*cmd), header);
1389 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1390 	if (unlikely(ret != 0))
1391 		return ret;
1392 
1393 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1394 				    &vmw_bo);
1395 	if (unlikely(ret != 0))
1396 		return ret;
1397 
1398 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1399 
1400 	return ret;
1401 }
1402 
1403 /**
1404  * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
1405  *
1406  * @dev_priv: Pointer to a device private struct.
1407  * @sw_context: The software context used for this command submission.
1408  * @header: Pointer to the command header in the command stream.
1409  */
1410 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1411 			     struct vmw_sw_context *sw_context,
1412 			     SVGA3dCmdHeader *header)
1413 {
1414 	struct vmw_buffer_object *vmw_bo;
1415 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
1416 	int ret;
1417 
1418 	cmd = container_of(header, typeof(*cmd), header);
1419 	if (dev_priv->has_mob) {
1420 		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);
1421 
1422 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1423 
1424 		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1425 		gb_cmd.header.size = cmd->header.size;
1426 		gb_cmd.body.cid = cmd->body.cid;
1427 		gb_cmd.body.type = cmd->body.type;
1428 		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1429 		gb_cmd.body.offset = cmd->body.guestResult.offset;
1430 
1431 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1432 		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1433 	}
1434 
1435 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1436 	if (unlikely(ret != 0))
1437 		return ret;
1438 
1439 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1440 				      &cmd->body.guestResult, &vmw_bo);
1441 	if (unlikely(ret != 0))
1442 		return ret;
1443 
1444 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1445 
1446 	return ret;
1447 }
1448 
1449 /**
1450  * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
1451  *
1452  * @dev_priv: Pointer to a device private struct.
1453  * @sw_context: The software context used for this command submission.
1454  * @header: Pointer to the command header in the command stream.
1455  */
1456 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1457 				 struct vmw_sw_context *sw_context,
1458 				 SVGA3dCmdHeader *header)
1459 {
1460 	struct vmw_buffer_object *vmw_bo;
1461 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
1462 	int ret;
1463 
1464 	cmd = container_of(header, typeof(*cmd), header);
1465 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1466 	if (unlikely(ret != 0))
1467 		return ret;
1468 
1469 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1470 				    &vmw_bo);
1471 	if (unlikely(ret != 0))
1472 		return ret;
1473 
1474 	return 0;
1475 }
1476 
1477 /**
1478  * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
1479  *
1480  * @dev_priv: Pointer to a device private struct.
1481  * @sw_context: The software context used for this command submission.
1482  * @header: Pointer to the command header in the command stream.
1483  */
1484 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1485 			      struct vmw_sw_context *sw_context,
1486 			      SVGA3dCmdHeader *header)
1487 {
1488 	struct vmw_buffer_object *vmw_bo;
1489 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
1490 	int ret;
1491 
1492 	cmd = container_of(header, typeof(*cmd), header);
1493 	if (dev_priv->has_mob) {
1494 		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);
1495 
1496 		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1497 
1498 		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1499 		gb_cmd.header.size = cmd->header.size;
1500 		gb_cmd.body.cid = cmd->body.cid;
1501 		gb_cmd.body.type = cmd->body.type;
1502 		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1503 		gb_cmd.body.offset = cmd->body.guestResult.offset;
1504 
1505 		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1506 		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1507 	}
1508 
1509 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1510 	if (unlikely(ret != 0))
1511 		return ret;
1512 
1513 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1514 				      &cmd->body.guestResult, &vmw_bo);
1515 	if (unlikely(ret != 0))
1516 		return ret;
1517 
1518 	return 0;
1519 }
1520 
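/*
 * vmw_cmd_dma - Validate a surface DMA command: translate the guest pointer,
 * check that the transfer stays within the backing buffer object, validate
 * the host surface and hand the command to the cursor snooping code.
 */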
1521 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1522 		       struct vmw_sw_context *sw_context,
1523 		       SVGA3dCmdHeader *header)
1524 {
1525 	struct vmw_buffer_object *vmw_bo = NULL;
1526 	struct vmw_surface *srf = NULL;
1527 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
1528 	int ret;
1529 	SVGA3dCmdSurfaceDMASuffix *suffix;
1530 	uint32_t bo_size;
1531 	bool dirty;
1532 
1533 	cmd = container_of(header, typeof(*cmd), header);
1534 	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
1535 					       header->size - sizeof(*suffix));
1536 
1537 	/* Make sure device and verifier stay in sync. */
1538 	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1539 		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
1540 		return -EINVAL;
1541 	}
1542 
1543 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1544 				      &cmd->body.guest.ptr, &vmw_bo);
1545 	if (unlikely(ret != 0))
1546 		return ret;
1547 
1548 	/* Make sure DMA doesn't cross BO boundaries. */
1549 	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1550 	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
1551 		VMW_DEBUG_USER("Invalid DMA offset.\n");
1552 		return -EINVAL;
1553 	}
1554 
1555 	bo_size -= cmd->body.guest.ptr.offset;
1556 	if (unlikely(suffix->maximumOffset > bo_size))
1557 		suffix->maximumOffset = bo_size;
1558 
1559 	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
1560 		VMW_RES_DIRTY_SET : 0;
1561 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1562 				dirty, user_surface_converter,
1563 				&cmd->body.host.sid, NULL);
1564 	if (unlikely(ret != 0)) {
1565 		if (unlikely(ret != -ERESTARTSYS))
1566 			VMW_DEBUG_USER("could not find surface for DMA.\n");
1567 		return ret;
1568 	}
1569 
1570 	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1571 
1572 	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);
1573 
1574 	return 0;
1575 }
1576 
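/**
 * vmw_cmd_draw - Validate SVGA_3D_CMD_DRAW_PRIMITIVES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Validates the context and every surface referenced by the vertex
 * declarations and index ranges that trail the command body.
 */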
1577 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1578 			struct vmw_sw_context *sw_context,
1579 			SVGA3dCmdHeader *header)
1580 {
1581 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
1582 	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1583 		(unsigned long)header + sizeof(*cmd));
1584 	SVGA3dPrimitiveRange *range;
1585 	uint32_t i;
1586 	uint32_t maxnum;
1587 	int ret;
1588 
1589 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1590 	if (unlikely(ret != 0))
1591 		return ret;
1592 
1593 	cmd = container_of(header, typeof(*cmd), header);
1594 	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1595 
1596 	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1597 		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
1598 		return -EINVAL;
1599 	}
1600 
1601 	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1602 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1603 					VMW_RES_DIRTY_NONE,
1604 					user_surface_converter,
1605 					&decl->array.surfaceId, NULL);
1606 		if (unlikely(ret != 0))
1607 			return ret;
1608 	}
1609 
1610 	maxnum = (header->size - sizeof(cmd->body) -
1611 		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1612 	if (unlikely(cmd->body.numRanges > maxnum)) {
1613 		VMW_DEBUG_USER("Illegal number of index ranges.\n");
1614 		return -EINVAL;
1615 	}
1616 
1617 	range = (SVGA3dPrimitiveRange *) decl;
1618 	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1619 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1620 					VMW_RES_DIRTY_NONE,
1621 					user_surface_converter,
1622 					&range->indexArray.surfaceId, NULL);
1623 		if (unlikely(ret != 0))
1624 			return ret;
1625 	}
1626 	return 0;
1627 }
1628 
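/**
 * vmw_cmd_tex_state - Validate SVGA_3D_CMD_SETTEXTURESTATE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Validates the context and each bound texture surface and, on guest-backed
 * devices, records the texture bindings in the context's staged binding
 * state.
 */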
1629 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1630 			     struct vmw_sw_context *sw_context,
1631 			     SVGA3dCmdHeader *header)
1632 {
1633 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
1634 	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1635 	  ((unsigned long) header + header->size + sizeof(header));
1636 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1637 		((unsigned long) header + sizeof(*cmd));
1638 	struct vmw_resource *ctx;
1639 	struct vmw_resource *res;
1640 	int ret;
1641 
1642 	cmd = container_of(header, typeof(*cmd), header);
1643 
1644 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1645 				VMW_RES_DIRTY_SET, user_context_converter,
1646 				&cmd->body.cid, &ctx);
1647 	if (unlikely(ret != 0))
1648 		return ret;
1649 
1650 	for (; cur_state < last_state; ++cur_state) {
1651 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1652 			continue;
1653 
1654 		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1655 			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
1656 				       (unsigned int) cur_state->stage);
1657 			return -EINVAL;
1658 		}
1659 
1660 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1661 					VMW_RES_DIRTY_NONE,
1662 					user_surface_converter,
1663 					&cur_state->value, &res);
1664 		if (unlikely(ret != 0))
1665 			return ret;
1666 
1667 		if (dev_priv->has_mob) {
1668 			struct vmw_ctx_bindinfo_tex binding;
1669 			struct vmw_ctx_validation_info *node;
1670 
1671 			node = vmw_execbuf_info_from_res(sw_context, ctx);
1672 			if (!node)
1673 				return -EINVAL;
1674 
1675 			binding.bi.ctx = ctx;
1676 			binding.bi.res = res;
1677 			binding.bi.bt = vmw_ctx_binding_tex;
1678 			binding.texture_stage = cur_state->stage;
1679 			vmw_binding_add(node->staged, &binding.bi, 0,
1680 					binding.texture_stage);
1681 		}
1682 	}
1683 
1684 	return 0;
1685 }
1686 
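/**
 * vmw_cmd_check_define_gmrfb - Validate SVGA_CMD_DEFINE_GMRFB command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 */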
1687 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1688 				      struct vmw_sw_context *sw_context,
1689 				      void *buf)
1690 {
1691 	struct vmw_buffer_object *vmw_bo;
1692 
1693 	struct {
1694 		uint32_t header;
1695 		SVGAFifoCmdDefineGMRFB body;
1696 	} *cmd = buf;
1697 
1698 	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1699 				       &vmw_bo);
1700 }
1701 
1702 /**
1703  * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1704  * switching
1705  *
1706  * @dev_priv: Pointer to a device private struct.
1707  * @sw_context: The software context being used for this batch.
1708  * @res: Pointer to the resource.
1709  * @buf_id: Pointer to the user-space backup buffer handle in the command
1710  * stream.
1711  * @backup_offset: Offset of backup into MOB.
1712  *
1713  * This function prepares for registering a switch of backup buffers in the
1714  * resource metadata just prior to unreserving. The actual switch is carried
1715  * out by vmw_validation_res_switch_backup().
1716  */
1717 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1718 				     struct vmw_sw_context *sw_context,
1719 				     struct vmw_resource *res, uint32_t *buf_id,
1720 				     unsigned long backup_offset)
1721 {
1722 	struct vmw_buffer_object *vbo;
1723 	void *info;
1724 	int ret;
1725 
1726 	info = vmw_execbuf_info_from_res(sw_context, res);
1727 	if (!info)
1728 		return -EINVAL;
1729 
1730 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1731 	if (ret)
1732 		return ret;
1733 
1734 	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1735 					 backup_offset);
1736 	return 0;
1737 }
1738 
1739 /**
1740  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1741  *
1742  * @dev_priv: Pointer to a device private struct.
1743  * @sw_context: The software context being used for this batch.
1744  * @res_type: The resource type.
1745  * @converter: Information about user-space binding for this resource type.
1746  * @res_id: Pointer to the user-space resource handle in the command stream.
1747  * @buf_id: Pointer to the user-space backup buffer handle in the command
1748  * stream.
1749  * @backup_offset: Offset of backup into MOB.
1750  *
1751  * This function prepares for registering a switch of backup buffers in the
1752  * resource metadata just prior to unreserving. It's basically a wrapper around
1753  * vmw_cmd_res_switch_backup with a different interface.
1754  */
1755 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1756 				 struct vmw_sw_context *sw_context,
1757 				 enum vmw_res_type res_type,
1758 				 const struct vmw_user_resource_conv
1759 				 *converter, uint32_t *res_id, uint32_t *buf_id,
1760 				 unsigned long backup_offset)
1761 {
1762 	struct vmw_resource *res;
1763 	int ret;
1764 
1765 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1766 				VMW_RES_DIRTY_NONE, converter, res_id, &res);
1767 	if (ret)
1768 		return ret;
1769 
1770 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1771 					 backup_offset);
1772 }
1773 
1774 /**
1775  * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1776  *
1777  * @dev_priv: Pointer to a device private struct.
1778  * @sw_context: The software context being used for this batch.
1779  * @header: Pointer to the command header in the command stream.
1780  */
1781 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1782 				   struct vmw_sw_context *sw_context,
1783 				   SVGA3dCmdHeader *header)
1784 {
1785 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1786 		container_of(header, typeof(*cmd), header);
1787 
1788 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1789 				     user_surface_converter, &cmd->body.sid,
1790 				     &cmd->body.mobid, 0);
1791 }
1792 
1793 /**
1794  * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1795  *
1796  * @dev_priv: Pointer to a device private struct.
1797  * @sw_context: The software context being used for this batch.
1798  * @header: Pointer to the command header in the command stream.
1799  */
1800 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1801 				   struct vmw_sw_context *sw_context,
1802 				   SVGA3dCmdHeader *header)
1803 {
1804 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1805 		container_of(header, typeof(*cmd), header);
1806 
1807 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1808 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1809 				 &cmd->body.image.sid, NULL);
1810 }
1811 
1812 /**
1813  * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1814  *
1815  * @dev_priv: Pointer to a device private struct.
1816  * @sw_context: The software context being used for this batch.
1817  * @header: Pointer to the command header in the command stream.
1818  */
1819 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1820 				     struct vmw_sw_context *sw_context,
1821 				     SVGA3dCmdHeader *header)
1822 {
1823 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1824 		container_of(header, typeof(*cmd), header);
1825 
1826 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1827 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1828 				 &cmd->body.sid, NULL);
1829 }
1830 
1831 /**
1832  * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1833  *
1834  * @dev_priv: Pointer to a device private struct.
1835  * @sw_context: The software context being used for this batch.
1836  * @header: Pointer to the command header in the command stream.
1837  */
1838 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1839 				     struct vmw_sw_context *sw_context,
1840 				     SVGA3dCmdHeader *header)
1841 {
1842 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1843 		container_of(header, typeof(*cmd), header);
1844 
1845 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1846 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1847 				 &cmd->body.image.sid, NULL);
1848 }
1849 
1850 /**
1851  * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1852  * command
1853  *
1854  * @dev_priv: Pointer to a device private struct.
1855  * @sw_context: The software context being used for this batch.
1856  * @header: Pointer to the command header in the command stream.
1857  */
1858 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1859 				       struct vmw_sw_context *sw_context,
1860 				       SVGA3dCmdHeader *header)
1861 {
1862 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1863 		container_of(header, typeof(*cmd), header);
1864 
1865 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1866 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1867 				 &cmd->body.sid, NULL);
1868 }
1869 
1870 /**
1871  * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1872  * command
1873  *
1874  * @dev_priv: Pointer to a device private struct.
1875  * @sw_context: The software context being used for this batch.
1876  * @header: Pointer to the command header in the command stream.
1877  */
1878 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1879 				       struct vmw_sw_context *sw_context,
1880 				       SVGA3dCmdHeader *header)
1881 {
1882 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1883 		container_of(header, typeof(*cmd), header);
1884 
1885 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1886 				 VMW_RES_DIRTY_NONE, user_surface_converter,
1887 				 &cmd->body.image.sid, NULL);
1888 }
1889 
1890 /**
1891  * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1892  * command
1893  *
1894  * @dev_priv: Pointer to a device private struct.
1895  * @sw_context: The software context being used for this batch.
1896  * @header: Pointer to the command header in the command stream.
1897  */
1898 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1899 					 struct vmw_sw_context *sw_context,
1900 					 SVGA3dCmdHeader *header)
1901 {
1902 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1903 		container_of(header, typeof(*cmd), header);
1904 
1905 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1906 				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1907 				 &cmd->body.sid, NULL);
1908 }
1909 
1910 /**
1911  * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1912  *
1913  * @dev_priv: Pointer to a device private struct.
1914  * @sw_context: The software context being used for this batch.
1915  * @header: Pointer to the command header in the command stream.
1916  */
1917 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1918 				 struct vmw_sw_context *sw_context,
1919 				 SVGA3dCmdHeader *header)
1920 {
1921 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1922 	int ret;
1923 	size_t size;
1924 	struct vmw_resource *ctx;
1925 
1926 	cmd = container_of(header, typeof(*cmd), header);
1927 
1928 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1929 				VMW_RES_DIRTY_SET, user_context_converter,
1930 				&cmd->body.cid, &ctx);
1931 	if (unlikely(ret != 0))
1932 		return ret;
1933 
1934 	if (unlikely(!dev_priv->has_mob))
1935 		return 0;
1936 
1937 	size = cmd->header.size - sizeof(cmd->body);
1938 	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1939 				    cmd->body.shid, cmd + 1, cmd->body.type,
1940 				    size, &sw_context->staged_cmd_res);
1941 	if (unlikely(ret != 0))
1942 		return ret;
1943 
1944 	return vmw_resource_relocation_add(sw_context, NULL,
1945 					   vmw_ptr_diff(sw_context->buf_start,
1946 							&cmd->header.id),
1947 					   vmw_res_rel_nop);
1948 }
1949 
1950 /**
1951  * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1952  *
1953  * @dev_priv: Pointer to a device private struct.
1954  * @sw_context: The software context being used for this batch.
1955  * @header: Pointer to the command header in the command stream.
1956  */
1957 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1958 				  struct vmw_sw_context *sw_context,
1959 				  SVGA3dCmdHeader *header)
1960 {
1961 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1962 	int ret;
1963 	struct vmw_resource *ctx;
1964 
1965 	cmd = container_of(header, typeof(*cmd), header);
1966 
1967 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1968 				VMW_RES_DIRTY_SET, user_context_converter,
1969 				&cmd->body.cid, &ctx);
1970 	if (unlikely(ret != 0))
1971 		return ret;
1972 
1973 	if (unlikely(!dev_priv->has_mob))
1974 		return 0;
1975 
1976 	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1977 				cmd->body.type, &sw_context->staged_cmd_res);
1978 	if (unlikely(ret != 0))
1979 		return ret;
1980 
1981 	return vmw_resource_relocation_add(sw_context, NULL,
1982 					   vmw_ptr_diff(sw_context->buf_start,
1983 							&cmd->header.id),
1984 					   vmw_res_rel_nop);
1985 }
1986 
1987 /**
1988  * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1989  *
1990  * @dev_priv: Pointer to a device private struct.
1991  * @sw_context: The software context being used for this batch.
1992  * @header: Pointer to the command header in the command stream.
1993  */
1994 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1995 			      struct vmw_sw_context *sw_context,
1996 			      SVGA3dCmdHeader *header)
1997 {
1998 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1999 	struct vmw_ctx_bindinfo_shader binding;
2000 	struct vmw_resource *ctx, *res = NULL;
2001 	struct vmw_ctx_validation_info *ctx_info;
2002 	int ret;
2003 
2004 	cmd = container_of(header, typeof(*cmd), header);
2005 
2006 	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2007 		VMW_DEBUG_USER("Illegal shader type %u.\n",
2008 			       (unsigned int) cmd->body.type);
2009 		return -EINVAL;
2010 	}
2011 
2012 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2013 				VMW_RES_DIRTY_SET, user_context_converter,
2014 				&cmd->body.cid, &ctx);
2015 	if (unlikely(ret != 0))
2016 		return ret;
2017 
2018 	if (!dev_priv->has_mob)
2019 		return 0;
2020 
2021 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2022 		/*
2023 		 * This is the compat shader path - Per device guest-backed
2024 		 * shaders, but user-space thinks it's per context host-
2025 		 * backed shaders.
2026 		 */
2027 		res = vmw_shader_lookup(vmw_context_res_man(ctx),
2028 					cmd->body.shid, cmd->body.type);
2029 		if (!IS_ERR(res)) {
2030 			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2031 							    VMW_RES_DIRTY_NONE);
2032 			if (unlikely(ret != 0))
2033 				return ret;
2034 
2035 			ret = vmw_resource_relocation_add
2036 				(sw_context, res,
2037 				 vmw_ptr_diff(sw_context->buf_start,
2038 					      &cmd->body.shid),
2039 				 vmw_res_rel_normal);
2040 			if (unlikely(ret != 0))
2041 				return ret;
2042 		}
2043 	}
2044 
2045 	if (IS_ERR_OR_NULL(res)) {
2046 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2047 					VMW_RES_DIRTY_NONE,
2048 					user_shader_converter, &cmd->body.shid,
2049 					&res);
2050 		if (unlikely(ret != 0))
2051 			return ret;
2052 	}
2053 
2054 	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2055 	if (!ctx_info)
2056 		return -EINVAL;
2057 
2058 	binding.bi.ctx = ctx;
2059 	binding.bi.res = res;
2060 	binding.bi.bt = vmw_ctx_binding_shader;
2061 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2062 	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2063 
2064 	return 0;
2065 }
2066 
2067 /**
2068  * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2069  *
2070  * @dev_priv: Pointer to a device private struct.
2071  * @sw_context: The software context being used for this batch.
2072  * @header: Pointer to the command header in the command stream.
2073  */
2074 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2075 				    struct vmw_sw_context *sw_context,
2076 				    SVGA3dCmdHeader *header)
2077 {
2078 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2079 	int ret;
2080 
2081 	cmd = container_of(header, typeof(*cmd), header);
2082 
2083 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2084 				VMW_RES_DIRTY_SET, user_context_converter,
2085 				&cmd->body.cid, NULL);
2086 	if (unlikely(ret != 0))
2087 		return ret;
2088 
2089 	if (dev_priv->has_mob)
2090 		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2091 
2092 	return 0;
2093 }
2094 
2095 /**
2096  * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2097  *
2098  * @dev_priv: Pointer to a device private struct.
2099  * @sw_context: The software context being used for this batch.
2100  * @header: Pointer to the command header in the command stream.
2101  */
2102 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2103 				  struct vmw_sw_context *sw_context,
2104 				  SVGA3dCmdHeader *header)
2105 {
2106 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2107 		container_of(header, typeof(*cmd), header);
2108 
2109 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2110 				     user_shader_converter, &cmd->body.shid,
2111 				     &cmd->body.mobid, cmd->body.offsetInBytes);
2112 }
2113 
2114 /**
2115  * vmw_cmd_dx_set_single_constant_buffer - Validate
2116  * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2117  *
2118  * @dev_priv: Pointer to a device private struct.
2119  * @sw_context: The software context being used for this batch.
2120  * @header: Pointer to the command header in the command stream.
2121  */
2122 static int
2123 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2124 				      struct vmw_sw_context *sw_context,
2125 				      SVGA3dCmdHeader *header)
2126 {
2127 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2128 	struct vmw_resource *res = NULL;
2129 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2130 	struct vmw_ctx_bindinfo_cb binding;
2131 	int ret;
2132 
2133 	if (!ctx_node)
2134 		return -EINVAL;
2135 
2136 	cmd = container_of(header, typeof(*cmd), header);
2137 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2138 				VMW_RES_DIRTY_NONE, user_surface_converter,
2139 				&cmd->body.sid, &res);
2140 	if (unlikely(ret != 0))
2141 		return ret;
2142 
2143 	binding.bi.ctx = ctx_node->ctx;
2144 	binding.bi.res = res;
2145 	binding.bi.bt = vmw_ctx_binding_cb;
2146 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2147 	binding.offset = cmd->body.offsetInBytes;
2148 	binding.size = cmd->body.sizeInBytes;
2149 	binding.slot = cmd->body.slot;
2150 
2151 	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2152 	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2153 		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2154 			       (unsigned int) cmd->body.type,
2155 			       (unsigned int) binding.slot);
2156 		return -EINVAL;
2157 	}
2158 
2159 	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2160 			binding.slot);
2161 
2162 	return 0;
2163 }
2164 
2165 /**
2166  * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2167  * command
2168  *
2169  * @dev_priv: Pointer to a device private struct.
2170  * @sw_context: The software context being used for this batch.
2171  * @header: Pointer to the command header in the command stream.
2172  */
2173 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2174 				     struct vmw_sw_context *sw_context,
2175 				     SVGA3dCmdHeader *header)
2176 {
2177 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2178 		container_of(header, typeof(*cmd), header);
2179 	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2180 		sizeof(SVGA3dShaderResourceViewId);
2181 
2182 	if ((u64) cmd->body.startView + (u64) num_sr_view >
2183 	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2184 	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2185 		VMW_DEBUG_USER("Invalid shader binding.\n");
2186 		return -EINVAL;
2187 	}
2188 
2189 	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2190 				     vmw_ctx_binding_sr,
2191 				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2192 				     (void *) &cmd[1], num_sr_view,
2193 				     cmd->body.startView);
2194 }
2195 
2196 /**
2197  * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2198  *
2199  * @dev_priv: Pointer to a device private struct.
2200  * @sw_context: The software context being used for this batch.
2201  * @header: Pointer to the command header in the command stream.
2202  */
2203 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2204 				 struct vmw_sw_context *sw_context,
2205 				 SVGA3dCmdHeader *header)
2206 {
2207 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2208 	struct vmw_resource *res = NULL;
2209 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2210 	struct vmw_ctx_bindinfo_shader binding;
2211 	int ret = 0;
2212 
2213 	if (!ctx_node)
2214 		return -EINVAL;
2215 
2216 	cmd = container_of(header, typeof(*cmd), header);
2217 
2218 	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
2219 	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
2220 		VMW_DEBUG_USER("Illegal shader type %u.\n",
2221 			       (unsigned int) cmd->body.type);
2222 		return -EINVAL;
2223 	}
2224 
2225 	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2226 		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2227 		if (IS_ERR(res)) {
2228 			VMW_DEBUG_USER("Could not find shader for binding.\n");
2229 			return PTR_ERR(res);
2230 		}
2231 
2232 		ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2233 						    VMW_RES_DIRTY_NONE);
2234 		if (ret)
2235 			return ret;
2236 	}
2237 
2238 	binding.bi.ctx = ctx_node->ctx;
2239 	binding.bi.res = res;
2240 	binding.bi.bt = vmw_ctx_binding_dx_shader;
2241 	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2242 
2243 	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2244 
2245 	return 0;
2246 }
2247 
2248 /**
2249  * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2250  * command
2251  *
2252  * @dev_priv: Pointer to a device private struct.
2253  * @sw_context: The software context being used for this batch.
2254  * @header: Pointer to the command header in the command stream.
2255  */
2256 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2257 					 struct vmw_sw_context *sw_context,
2258 					 SVGA3dCmdHeader *header)
2259 {
2260 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2261 	struct vmw_ctx_bindinfo_vb binding;
2262 	struct vmw_resource *res;
2263 	struct {
2264 		SVGA3dCmdHeader header;
2265 		SVGA3dCmdDXSetVertexBuffers body;
2266 		SVGA3dVertexBuffer buf[];
2267 	} *cmd;
2268 	int i, ret, num;
2269 
2270 	if (!ctx_node)
2271 		return -EINVAL;
2272 
2273 	cmd = container_of(header, typeof(*cmd), header);
2274 	num = (cmd->header.size - sizeof(cmd->body)) /
2275 		sizeof(SVGA3dVertexBuffer);
2276 	if ((u64)num + (u64)cmd->body.startBuffer >
2277 	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2278 		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2279 		return -EINVAL;
2280 	}
2281 
2282 	for (i = 0; i < num; i++) {
2283 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2284 					VMW_RES_DIRTY_NONE,
2285 					user_surface_converter,
2286 					&cmd->buf[i].sid, &res);
2287 		if (unlikely(ret != 0))
2288 			return ret;
2289 
2290 		binding.bi.ctx = ctx_node->ctx;
2291 		binding.bi.bt = vmw_ctx_binding_vb;
2292 		binding.bi.res = res;
2293 		binding.offset = cmd->buf[i].offset;
2294 		binding.stride = cmd->buf[i].stride;
2295 		binding.slot = i + cmd->body.startBuffer;
2296 
2297 		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2298 	}
2299 
2300 	return 0;
2301 }
2302 
2303 /**
2304  * vmw_cmd_dx_set_index_buffer - Validate
2305  * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2306  *
2307  * @dev_priv: Pointer to a device private struct.
2308  * @sw_context: The software context being used for this batch.
2309  * @header: Pointer to the command header in the command stream.
2310  */
2311 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2312 				       struct vmw_sw_context *sw_context,
2313 				       SVGA3dCmdHeader *header)
2314 {
2315 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2316 	struct vmw_ctx_bindinfo_ib binding;
2317 	struct vmw_resource *res;
2318 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2319 	int ret;
2320 
2321 	if (!ctx_node)
2322 		return -EINVAL;
2323 
2324 	cmd = container_of(header, typeof(*cmd), header);
2325 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2326 				VMW_RES_DIRTY_NONE, user_surface_converter,
2327 				&cmd->body.sid, &res);
2328 	if (unlikely(ret != 0))
2329 		return ret;
2330 
2331 	binding.bi.ctx = ctx_node->ctx;
2332 	binding.bi.res = res;
2333 	binding.bi.bt = vmw_ctx_binding_ib;
2334 	binding.offset = cmd->body.offset;
2335 	binding.format = cmd->body.format;
2336 
2337 	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2338 
2339 	return 0;
2340 }
2341 
2342 /**
2343  * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2344  * command
2345  *
2346  * @dev_priv: Pointer to a device private struct.
2347  * @sw_context: The software context being used for this batch.
2348  * @header: Pointer to the command header in the command stream.
2349  */
2350 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2351 					struct vmw_sw_context *sw_context,
2352 					SVGA3dCmdHeader *header)
2353 {
2354 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2355 		container_of(header, typeof(*cmd), header);
2356 	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2357 		sizeof(SVGA3dRenderTargetViewId);
2358 	int ret;
2359 
2360 	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2361 		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2362 		return -EINVAL;
2363 	}
2364 
2365 	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2366 				    0, &cmd->body.depthStencilViewId, 1, 0);
2367 	if (ret)
2368 		return ret;
2369 
2370 	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2371 				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2372 				     num_rt_view, 0);
2373 }
2374 
2375 /**
2376  * vmw_cmd_dx_clear_rendertarget_view - Validate
2377  * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2378  *
2379  * @dev_priv: Pointer to a device private struct.
2380  * @sw_context: The software context being used for this batch.
2381  * @header: Pointer to the command header in the command stream.
2382  */
2383 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2384 					      struct vmw_sw_context *sw_context,
2385 					      SVGA3dCmdHeader *header)
2386 {
2387 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2388 		container_of(header, typeof(*cmd), header);
2389 	struct vmw_resource *ret;
2390 
2391 	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
2392 				  cmd->body.renderTargetViewId);
2393 
2394 	return PTR_ERR_OR_ZERO(ret);
2395 }
2396 
2397 /**
2398  * vmw_cmd_dx_clear_depthstencil_view - Validate
2399  * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2400  *
2401  * @dev_priv: Pointer to a device private struct.
2402  * @sw_context: The software context being used for this batch.
2403  * @header: Pointer to the command header in the command stream.
2404  */
2405 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2406 					      struct vmw_sw_context *sw_context,
2407 					      SVGA3dCmdHeader *header)
2408 {
2409 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2410 		container_of(header, typeof(*cmd), header);
2411 	struct vmw_resource *ret;
2412 
2413 	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
2414 				  cmd->body.depthStencilViewId);
2415 
2416 	return PTR_ERR_OR_ZERO(ret);
2417 }
2418 
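/**
 * vmw_cmd_dx_view_define - Validate SVGA_3D_CMD_DX_DEFINE_*_VIEW commands
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Common validation for the view define commands: checks the backing surface,
 * notifies the matching view cotable and stages the new view for addition.
 */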
2419 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2420 				  struct vmw_sw_context *sw_context,
2421 				  SVGA3dCmdHeader *header)
2422 {
2423 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2424 	struct vmw_resource *srf;
2425 	struct vmw_resource *res;
2426 	enum vmw_view_type view_type;
2427 	int ret;
2428 	/*
2429 	 * This is based on the fact that all affected define commands have the
2430 	 * same initial command body layout.
2431 	 */
2432 	struct {
2433 		SVGA3dCmdHeader header;
2434 		uint32 defined_id;
2435 		uint32 sid;
2436 	} *cmd;
2437 
2438 	if (!ctx_node)
2439 		return -EINVAL;
2440 
2441 	view_type = vmw_view_cmd_to_type(header->id);
2442 	if (view_type == vmw_view_max)
2443 		return -EINVAL;
2444 
2445 	cmd = container_of(header, typeof(*cmd), header);
2446 	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2447 		VMW_DEBUG_USER("Invalid surface id.\n");
2448 		return -EINVAL;
2449 	}
2450 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2451 				VMW_RES_DIRTY_NONE, user_surface_converter,
2452 				&cmd->sid, &srf);
2453 	if (unlikely(ret != 0))
2454 		return ret;
2455 
2456 	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2457 	ret = vmw_cotable_notify(res, cmd->defined_id);
2458 	if (unlikely(ret != 0))
2459 		return ret;
2460 
2461 	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2462 			    cmd->defined_id, header,
2463 			    header->size + sizeof(*header),
2464 			    &sw_context->staged_cmd_res);
2465 }
2466 
2467 /**
2468  * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2469  *
2470  * @dev_priv: Pointer to a device private struct.
2471  * @sw_context: The software context being used for this batch.
2472  * @header: Pointer to the command header in the command stream.
2473  */
2474 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2475 				     struct vmw_sw_context *sw_context,
2476 				     SVGA3dCmdHeader *header)
2477 {
2478 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2479 	struct vmw_ctx_bindinfo_so binding;
2480 	struct vmw_resource *res;
2481 	struct {
2482 		SVGA3dCmdHeader header;
2483 		SVGA3dCmdDXSetSOTargets body;
2484 		SVGA3dSoTarget targets[];
2485 	} *cmd;
2486 	int i, ret, num;
2487 
2488 	if (!ctx_node)
2489 		return -EINVAL;
2490 
2491 	cmd = container_of(header, typeof(*cmd), header);
2492 	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2493 
2494 	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2495 		VMW_DEBUG_USER("Invalid DX SO binding.\n");
2496 		return -EINVAL;
2497 	}
2498 
2499 	for (i = 0; i < num; i++) {
2500 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2501 					VMW_RES_DIRTY_SET,
2502 					user_surface_converter,
2503 					&cmd->targets[i].sid, &res);
2504 		if (unlikely(ret != 0))
2505 			return ret;
2506 
2507 		binding.bi.ctx = ctx_node->ctx;
2508 		binding.bi.res = res;
2509 		binding.bi.bt = vmw_ctx_binding_so;
2510 		binding.offset = cmd->targets[i].offset;
2511 		binding.size = cmd->targets[i].sizeInBytes;
2512 		binding.slot = i;
2513 
2514 		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2515 	}
2516 
2517 	return 0;
2518 }
2519 
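/**
 * vmw_cmd_dx_so_define - Validate DX state-object define commands
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Common validation for the DX define commands that create state objects
 * backed by a context cotable: looks up the matching cotable and notifies it
 * of the newly defined id.
 */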
2520 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2521 				struct vmw_sw_context *sw_context,
2522 				SVGA3dCmdHeader *header)
2523 {
2524 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2525 	struct vmw_resource *res;
2526 	/*
2527 	 * This is based on the fact that all affected define commands have
2528 	 * the same initial command body layout.
2529 	 */
2530 	struct {
2531 		SVGA3dCmdHeader header;
2532 		uint32 defined_id;
2533 	} *cmd;
2534 	enum vmw_so_type so_type;
2535 	int ret;
2536 
2537 	if (!ctx_node)
2538 		return -EINVAL;
2539 
2540 	so_type = vmw_so_cmd_to_type(header->id);
2541 	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2542 	cmd = container_of(header, typeof(*cmd), header);
2543 	ret = vmw_cotable_notify(res, cmd->defined_id);
2544 
2545 	return ret;
2546 }
2547 
2548 /**
2549  * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2550  * command
2551  *
2552  * @dev_priv: Pointer to a device private struct.
2553  * @sw_context: The software context being used for this batch.
2554  * @header: Pointer to the command header in the command stream.
2555  */
2556 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2557 					struct vmw_sw_context *sw_context,
2558 					SVGA3dCmdHeader *header)
2559 {
2560 	struct {
2561 		SVGA3dCmdHeader header;
2562 		union {
2563 			SVGA3dCmdDXReadbackSubResource r_body;
2564 			SVGA3dCmdDXInvalidateSubResource i_body;
2565 			SVGA3dCmdDXUpdateSubResource u_body;
2566 			SVGA3dSurfaceId sid;
2567 		};
2568 	} *cmd;
2569 
2570 	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2571 		     offsetof(typeof(*cmd), sid));
2572 	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2573 		     offsetof(typeof(*cmd), sid));
2574 	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2575 		     offsetof(typeof(*cmd), sid));
2576 
2577 	cmd = container_of(header, typeof(*cmd), header);
2578 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2579 				 VMW_RES_DIRTY_NONE, user_surface_converter,
2580 				 &cmd->sid, NULL);
2581 }
2582 
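/**
 * vmw_cmd_dx_cid_check - Validate a DX command that carries no resource ids
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Merely checks that a DX context has been set for this command batch.
 */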
2583 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2584 				struct vmw_sw_context *sw_context,
2585 				SVGA3dCmdHeader *header)
2586 {
2587 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2588 
2589 	if (!ctx_node)
2590 		return -EINVAL;
2591 
2592 	return 0;
2593 }
2594 
2595 /**
2596  * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
2597  * resource for removal.
2598  *
2599  * @dev_priv: Pointer to a device private struct.
2600  * @sw_context: The software context being used for this batch.
2601  * @header: Pointer to the command header in the command stream.
2602  *
2603  * Check that the view exists, and if it was not created using this command
2604  * batch, conditionally make this command a NOP.
2605  */
2606 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2607 				  struct vmw_sw_context *sw_context,
2608 				  SVGA3dCmdHeader *header)
2609 {
2610 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2611 	struct {
2612 		SVGA3dCmdHeader header;
2613 		union vmw_view_destroy body;
2614 	} *cmd = container_of(header, typeof(*cmd), header);
2615 	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2616 	struct vmw_resource *view;
2617 	int ret;
2618 
2619 	if (!ctx_node)
2620 		return -EINVAL;
2621 
2622 	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2623 			      &sw_context->staged_cmd_res, &view);
2624 	if (ret || !view)
2625 		return ret;
2626 
2627 	/*
2628 	 * If the view wasn't created during this command batch, it might
2629 	 * have been removed due to a context swapout, so add a
2630 	 * relocation to conditionally make this command a NOP to avoid
2631 	 * device errors.
2632 	 */
2633 	return vmw_resource_relocation_add(sw_context, view,
2634 					   vmw_ptr_diff(sw_context->buf_start,
2635 							&cmd->header.id),
2636 					   vmw_res_rel_cond_nop);
2637 }
2638 
2639 /**
2640  * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2641  *
2642  * @dev_priv: Pointer to a device private struct.
2643  * @sw_context: The software context being used for this batch.
2644  * @header: Pointer to the command header in the command stream.
2645  */
2646 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2647 				    struct vmw_sw_context *sw_context,
2648 				    SVGA3dCmdHeader *header)
2649 {
2650 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2651 	struct vmw_resource *res;
2652 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2653 		container_of(header, typeof(*cmd), header);
2654 	int ret;
2655 
2656 	if (!ctx_node)
2657 		return -EINVAL;
2658 
2659 	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2660 	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2661 	if (ret)
2662 		return ret;
2663 
2664 	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2665 				 cmd->body.shaderId, cmd->body.type,
2666 				 &sw_context->staged_cmd_res);
2667 }
2668 
2669 /**
2670  * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2671  *
2672  * @dev_priv: Pointer to a device private struct.
2673  * @sw_context: The software context being used for this batch.
2674  * @header: Pointer to the command header in the command stream.
2675  */
2676 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2677 				     struct vmw_sw_context *sw_context,
2678 				     SVGA3dCmdHeader *header)
2679 {
2680 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2681 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2682 		container_of(header, typeof(*cmd), header);
2683 	int ret;
2684 
2685 	if (!ctx_node)
2686 		return -EINVAL;
2687 
2688 	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2689 				&sw_context->staged_cmd_res);
2690 
2691 	return ret;
2692 }
2693 
2694 /**
2695  * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2696  *
2697  * @dev_priv: Pointer to a device private struct.
2698  * @sw_context: The software context being used for this batch.
2699  * @header: Pointer to the command header in the command stream.
2700  */
2701 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2702 				  struct vmw_sw_context *sw_context,
2703 				  SVGA3dCmdHeader *header)
2704 {
2705 	struct vmw_resource *ctx;
2706 	struct vmw_resource *res;
2707 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2708 		container_of(header, typeof(*cmd), header);
2709 	int ret;
2710 
2711 	if (cmd->body.cid != SVGA3D_INVALID_ID) {
2712 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2713 					VMW_RES_DIRTY_SET,
2714 					user_context_converter, &cmd->body.cid,
2715 					&ctx);
2716 		if (ret)
2717 			return ret;
2718 	} else {
2719 		struct vmw_ctx_validation_info *ctx_node =
2720 			VMW_GET_CTX_NODE(sw_context);
2721 
2722 		if (!ctx_node)
2723 			return -EINVAL;
2724 
2725 		ctx = ctx_node->ctx;
2726 	}
2727 
2728 	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2729 	if (IS_ERR(res)) {
2730 		VMW_DEBUG_USER("Could not find shader to bind.\n");
2731 		return PTR_ERR(res);
2732 	}
2733 
2734 	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2735 					    VMW_RES_DIRTY_NONE);
2736 	if (ret) {
2737 		VMW_DEBUG_USER("Error creating resource validation node.\n");
2738 		return ret;
2739 	}
2740 
2741 	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2742 					 &cmd->body.mobid,
2743 					 cmd->body.offsetInBytes);
2744 }
2745 
2746 /**
2747  * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2748  *
2749  * @dev_priv: Pointer to a device private struct.
2750  * @sw_context: The software context being used for this batch.
2751  * @header: Pointer to the command header in the command stream.
2752  */
2753 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2754 			      struct vmw_sw_context *sw_context,
2755 			      SVGA3dCmdHeader *header)
2756 {
2757 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2758 		container_of(header, typeof(*cmd), header);
2759 	struct vmw_resource *ret;
2760 
2761 	ret = vmw_view_id_val_add(sw_context, vmw_view_sr,
2762 				  cmd->body.shaderResourceViewId);
2763 
2764 	return PTR_ERR_OR_ZERO(ret);
2765 }
2766 
2767 /**
2768  * vmw_cmd_dx_transfer_from_buffer - Validate
2769  * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2770  *
2771  * @dev_priv: Pointer to a device private struct.
2772  * @sw_context: The software context being used for this batch.
2773  * @header: Pointer to the command header in the command stream.
2774  */
2775 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2776 					   struct vmw_sw_context *sw_context,
2777 					   SVGA3dCmdHeader *header)
2778 {
2779 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2780 		container_of(header, typeof(*cmd), header);
2781 	int ret;
2782 
2783 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2784 				VMW_RES_DIRTY_NONE, user_surface_converter,
2785 				&cmd->body.srcSid, NULL);
2786 	if (ret != 0)
2787 		return ret;
2788 
2789 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2790 				 VMW_RES_DIRTY_SET, user_surface_converter,
2791 				 &cmd->body.destSid, NULL);
2792 }
2793 
2794 /**
2795  * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2796  *
2797  * @dev_priv: Pointer to a device private struct.
2798  * @sw_context: The software context being used for this batch.
2799  * @header: Pointer to the command header in the command stream.
2800  */
2801 static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2802 					   struct vmw_sw_context *sw_context,
2803 					   SVGA3dCmdHeader *header)
2804 {
2805 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2806 		container_of(header, typeof(*cmd), header);
2807 
2808 	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2809 		return -EINVAL;
2810 
2811 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2812 				 VMW_RES_DIRTY_SET, user_surface_converter,
2813 				 &cmd->body.surface.sid, NULL);
2814 }
2815 
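/**
 * vmw_cmd_check_not_3d - Validate a command from the 2D (non-3D) command range
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: number of bytes left in the command stream. Out: size of the
 * command at @buf.
 *
 * Only a small set of kernel-only SVGA commands is accepted; the command size
 * is derived from the command id and checked against the remaining stream
 * size before SVGA_CMD_DEFINE_GMRFB is handed to its dedicated checker.
 */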
2816 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
2817 				struct vmw_sw_context *sw_context,
2818 				void *buf, uint32_t *size)
2819 {
2820 	uint32_t size_remaining = *size;
2821 	uint32_t cmd_id;
2822 
2823 	cmd_id = ((uint32_t *)buf)[0];
2824 	switch (cmd_id) {
2825 	case SVGA_CMD_UPDATE:
2826 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
2827 		break;
2828 	case SVGA_CMD_DEFINE_GMRFB:
2829 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
2830 		break;
2831 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
2832 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
2833 		break;
2834 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
2835 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
2836 		break;
2837 	default:
2838 		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
2839 		return -EINVAL;
2840 	}
2841 
2842 	if (*size > size_remaining) {
2843 		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
2844 			       cmd_id);
2845 		return -EINVAL;
2846 	}
2847 
2848 	if (unlikely(!sw_context->kernel)) {
2849 		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
2850 		return -EPERM;
2851 	}
2852 
2853 	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
2854 		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
2855 
2856 	return 0;
2857 }
2858 
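/*
 * Command verifier dispatch table. The three booleans of each VMW_CMD_DEF
 * entry state, in order, whether the command may be submitted from
 * user-space, whether it is rejected when guest-backed objects are enabled,
 * and whether it requires guest-backed objects to be enabled.
 */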
2859 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
2860 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
2861 		    false, false, false),
2862 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
2863 		    false, false, false),
2864 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
2865 		    true, false, false),
2866 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
2867 		    true, false, false),
2868 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
2869 		    true, false, false),
2870 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
2871 		    false, false, false),
2872 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
2873 		    false, false, false),
2874 	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
2875 		    true, false, false),
2876 	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
2877 		    true, false, false),
2878 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
2879 		    true, false, false),
2880 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
2881 		    &vmw_cmd_set_render_target_check, true, false, false),
2882 	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
2883 		    true, false, false),
2884 	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
2885 		    true, false, false),
2886 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
2887 		    true, false, false),
2888 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
2889 		    true, false, false),
2890 	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
2891 		    true, false, false),
2892 	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
2893 		    true, false, false),
2894 	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
2895 		    true, false, false),
2896 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
2897 		    false, false, false),
2898 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
2899 		    true, false, false),
2900 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
2901 		    true, false, false),
2902 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
2903 		    true, false, false),
2904 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
2905 		    true, false, false),
2906 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
2907 		    true, false, false),
2908 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
2909 		    true, false, false),
2910 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
2911 		    true, false, false),
2912 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
2913 		    true, false, false),
2914 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
2915 		    true, false, false),
2916 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
2917 		    true, false, false),
2918 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
2919 		    &vmw_cmd_blt_surf_screen_check, false, false, false),
2920 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
2921 		    false, false, false),
2922 	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
2923 		    false, false, false),
2924 	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
2925 		    false, false, false),
2926 	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
2927 		    false, false, false),
2928 	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
2929 		    false, false, false),
2930 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
2931 		    false, false, false),
2932 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
2933 		    false, false, false),
2934 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
2935 		    false, false, false),
2936 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
2937 		    false, false, false),
2938 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
2939 		    false, false, false),
2940 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
2941 		    false, false, false),
2942 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
2943 		    false, false, false),
2944 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
2945 		    false, false, false),
2946 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
2947 		    false, false, true),
2948 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
2949 		    false, false, true),
2950 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
2951 		    false, false, true),
2952 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
2953 		    false, false, true),
2954 	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
2955 		    false, false, true),
2956 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
2957 		    false, false, true),
2958 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
2959 		    false, false, true),
2960 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
2961 		    false, false, true),
2962 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
2963 		    true, false, true),
2964 	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
2965 		    false, false, true),
2966 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
2967 		    true, false, true),
2968 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
2969 		    &vmw_cmd_update_gb_surface, true, false, true),
2970 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
2971 		    &vmw_cmd_readback_gb_image, true, false, true),
2972 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
2973 		    &vmw_cmd_readback_gb_surface, true, false, true),
2974 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
2975 		    &vmw_cmd_invalidate_gb_image, true, false, true),
2976 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
2977 		    &vmw_cmd_invalidate_gb_surface, true, false, true),
2978 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
2979 		    false, false, true),
2980 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
2981 		    false, false, true),
2982 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
2983 		    false, false, true),
2984 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
2985 		    false, false, true),
2986 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
2987 		    false, false, true),
2988 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
2989 		    false, false, true),
2990 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
2991 		    true, false, true),
2992 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
2993 		    false, false, true),
2994 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
2995 		    false, false, false),
2996 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
2997 		    true, false, true),
2998 	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2999 		    true, false, true),
3000 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3001 		    true, false, true),
3002 	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3003 		    true, false, true),
3004 	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3005 		    true, false, true),
3006 	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3007 		    false, false, true),
3008 	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3009 		    false, false, true),
3010 	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3011 		    false, false, true),
3012 	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3013 		    false, false, true),
3014 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3015 		    false, false, true),
3016 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3017 		    false, false, true),
3018 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3019 		    false, false, true),
3020 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3021 		    false, false, true),
3022 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3023 		    false, false, true),
3024 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3025 		    false, false, true),
3026 	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3027 		    true, false, true),
3028 	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3029 		    false, false, true),
3030 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3031 		    false, false, true),
3032 	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3033 		    false, false, true),
3034 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3035 		    false, false, true),
3036 
3037 	/* SM commands */
3038 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3039 		    false, false, true),
3040 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3041 		    false, false, true),
3042 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3043 		    false, false, true),
3044 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3045 		    false, false, true),
3046 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3047 		    false, false, true),
3048 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3049 		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3050 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3051 		    &vmw_cmd_dx_set_shader_res, true, false, true),
3052 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3053 		    true, false, true),
3054 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3055 		    true, false, true),
3056 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3057 		    true, false, true),
3058 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3059 		    true, false, true),
3060 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3061 		    true, false, true),
3062 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3063 		    &vmw_cmd_dx_cid_check, true, false, true),
3064 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3065 		    true, false, true),
3066 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3067 		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3068 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3069 		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3070 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3071 		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3072 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3073 		    true, false, true),
3074 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3075 		    &vmw_cmd_dx_cid_check, true, false, true),
3076 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3077 		    &vmw_cmd_dx_cid_check, true, false, true),
3078 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3079 		    true, false, true),
3080 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3081 		    true, false, true),
3082 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3083 		    true, false, true),
3084 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3085 		    &vmw_cmd_dx_cid_check, true, false, true),
3086 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3087 		    true, false, true),
3088 	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3089 		    true, false, true),
3090 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3091 		    true, false, true),
3092 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3093 		    true, false, true),
3094 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3095 		    true, false, true),
3096 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3097 		    true, false, true),
3098 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3099 		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3100 	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3101 		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3102 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3103 		    true, false, true),
3104 	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3105 		    true, false, true),
3106 	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3107 		    &vmw_cmd_dx_check_subresource, true, false, true),
3108 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3109 		    &vmw_cmd_dx_check_subresource, true, false, true),
3110 	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3111 		    &vmw_cmd_dx_check_subresource, true, false, true),
3112 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3113 		    &vmw_cmd_dx_view_define, true, false, true),
3114 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3115 		    &vmw_cmd_dx_view_remove, true, false, true),
3116 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3117 		    &vmw_cmd_dx_view_define, true, false, true),
3118 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3119 		    &vmw_cmd_dx_view_remove, true, false, true),
3120 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3121 		    &vmw_cmd_dx_view_define, true, false, true),
3122 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3123 		    &vmw_cmd_dx_view_remove, true, false, true),
3124 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3125 		    &vmw_cmd_dx_so_define, true, false, true),
3126 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3127 		    &vmw_cmd_dx_cid_check, true, false, true),
3128 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3129 		    &vmw_cmd_dx_so_define, true, false, true),
3130 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3131 		    &vmw_cmd_dx_cid_check, true, false, true),
3132 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3133 		    &vmw_cmd_dx_so_define, true, false, true),
3134 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3135 		    &vmw_cmd_dx_cid_check, true, false, true),
3136 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3137 		    &vmw_cmd_dx_so_define, true, false, true),
3138 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3139 		    &vmw_cmd_dx_cid_check, true, false, true),
3140 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3141 		    &vmw_cmd_dx_so_define, true, false, true),
3142 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3143 		    &vmw_cmd_dx_cid_check, true, false, true),
3144 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3145 		    &vmw_cmd_dx_define_shader, true, false, true),
3146 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3147 		    &vmw_cmd_dx_destroy_shader, true, false, true),
3148 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3149 		    &vmw_cmd_dx_bind_shader, true, false, true),
3150 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3151 		    &vmw_cmd_dx_so_define, true, false, true),
3152 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3153 		    &vmw_cmd_dx_cid_check, true, false, true),
3154 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3155 		    true, false, true),
3156 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3157 		    &vmw_cmd_dx_set_so_targets, true, false, true),
3158 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3159 		    &vmw_cmd_dx_cid_check, true, false, true),
3160 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3161 		    &vmw_cmd_dx_cid_check, true, false, true),
3162 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3163 		    &vmw_cmd_buffer_copy_check, true, false, true),
3164 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3165 		    &vmw_cmd_pred_copy_check, true, false, true),
3166 	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3167 		    &vmw_cmd_dx_transfer_from_buffer,
3168 		    true, false, true),
3169 	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3170 		    true, false, true),
3171 };
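
/*
 * Note on the boolean arguments to VMW_CMD_DEF above: as consumed by
 * vmw_cmd_check() below, they correspond, in order, to user_allow (the
 * command may be submitted from user-space), gb_disable (the command is
 * disallowed when guest-backed objects are in use) and gb_enable (the
 * command requires guest-backed objects).
 */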
3172 
3173 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3174 {
3175 	u32 cmd_id = ((u32 *) buf)[0];
3176 
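	/*
	 * Commands numbered SVGA_CMD_MAX and above are 3D commands carrying an
	 * SVGA3dCmdHeader; the legacy FIFO commands below that are named in
	 * the switch further down.
	 */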
3177 	if (cmd_id >= SVGA_CMD_MAX) {
3178 		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3179 		const struct vmw_cmd_entry *entry;
3180 
3181 		*size = header->size + sizeof(SVGA3dCmdHeader);
3182 		cmd_id = header->id;
3183 		if (cmd_id >= SVGA_3D_CMD_MAX)
3184 			return false;
3185 
3186 		cmd_id -= SVGA_3D_CMD_BASE;
3187 		entry = &vmw_cmd_entries[cmd_id];
3188 		*cmd = entry->cmd_name;
3189 		return true;
3190 	}
3191 
3192 	switch (cmd_id) {
3193 	case SVGA_CMD_UPDATE:
3194 		*cmd = "SVGA_CMD_UPDATE";
3195 		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3196 		break;
3197 	case SVGA_CMD_DEFINE_GMRFB:
3198 		*cmd = "SVGA_CMD_DEFINE_GMRFB";
3199 		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3200 		break;
3201 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3202 		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3203 		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3204 		break;
3205 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3206 		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3207 		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3208 		break;
3209 	default:
3210 		*cmd = "UNKNOWN";
3211 		*size = 0;
3212 		return false;
3213 	}
3214 
3215 	return true;
3216 }
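
/*
 * Illustrative sketch (not part of the driver): a debugging aid could use
 * vmw_cmd_describe() to walk and name a raw command stream, e.g.
 *
 *	const char *name;
 *	u32 len;
 *
 *	while (remaining >= sizeof(u32) &&
 *	       vmw_cmd_describe(buf, &len, &name) && len && len <= remaining) {
 *		printk(KERN_DEBUG "vmwgfx cmd: %s (%u bytes)\n", name, len);
 *		buf = (const char *)buf + len;
 *		remaining -= len;
 *	}
 *
 * where buf and remaining are hypothetical locals describing the batch being
 * inspected.
 */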
3217 
3218 static int vmw_cmd_check(struct vmw_private *dev_priv,
3219 			 struct vmw_sw_context *sw_context, void *buf,
3220 			 uint32_t *size)
3221 {
3222 	uint32_t cmd_id;
3223 	uint32_t size_remaining = *size;
3224 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3225 	int ret;
3226 	const struct vmw_cmd_entry *entry;
3227 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3228 
3229 	cmd_id = ((uint32_t *)buf)[0];
3230 	/* Handle any non-3D commands */
3231 	if (unlikely(cmd_id < SVGA_CMD_MAX))
3232 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3233 
3234 
3235 	cmd_id = header->id;
3236 	*size = header->size + sizeof(SVGA3dCmdHeader);
3237 
3238 	cmd_id -= SVGA_3D_CMD_BASE;
3239 	if (unlikely(*size > size_remaining))
3240 		goto out_invalid;
3241 
3242 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3243 		goto out_invalid;
3244 
3245 	entry = &vmw_cmd_entries[cmd_id];
3246 	if (unlikely(!entry->func))
3247 		goto out_invalid;
3248 
3249 	if (unlikely(!entry->user_allow && !sw_context->kernel))
3250 		goto out_privileged;
3251 
3252 	if (unlikely(entry->gb_disable && gb))
3253 		goto out_old;
3254 
3255 	if (unlikely(entry->gb_enable && !gb))
3256 		goto out_new;
3257 
3258 	ret = entry->func(dev_priv, sw_context, header);
3259 	if (unlikely(ret != 0)) {
3260 		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3261 			       cmd_id + SVGA_3D_CMD_BASE, ret);
3262 		return ret;
3263 	}
3264 
3265 	return 0;
3266 out_invalid:
3267 	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3268 		       cmd_id + SVGA_3D_CMD_BASE);
3269 	return -EINVAL;
3270 out_privileged:
3271 	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3272 		       cmd_id + SVGA_3D_CMD_BASE);
3273 	return -EPERM;
3274 out_old:
3275 	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3276 		       cmd_id + SVGA_3D_CMD_BASE);
3277 	return -EINVAL;
3278 out_new:
3279 	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3280 		       cmd_id + SVGA_3D_CMD_BASE);
3281 	return -EINVAL;
3282 }
3283 
3284 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3285 			     struct vmw_sw_context *sw_context, void *buf,
3286 			     uint32_t size)
3287 {
3288 	int32_t cur_size = size;
3289 	int ret;
3290 
3291 	sw_context->buf_start = buf;
3292 
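	/*
	 * Walk the batch one command at a time; vmw_cmd_check() updates size
	 * to the full length of the command it just validated so the loop can
	 * advance to the next one.
	 */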
3293 	while (cur_size > 0) {
3294 		size = cur_size;
3295 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3296 		if (unlikely(ret != 0))
3297 			return ret;
3298 		buf = (void *)((unsigned long) buf + size);
3299 		cur_size -= size;
3300 	}
3301 
3302 	if (unlikely(cur_size != 0)) {
3303 		VMW_DEBUG_USER("Command verifier out of sync.\n");
3304 		return -EINVAL;
3305 	}
3306 
3307 	return 0;
3308 }
3309 
3310 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3311 {
3312 	/* Memory is validation context memory, so no need to free it */
3313 	INIT_LIST_HEAD(&sw_context->bo_relocations);
3314 }
3315 
3316 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3317 {
3318 	struct vmw_relocation *reloc;
3319 	struct ttm_buffer_object *bo;
3320 
3321 	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3322 		bo = &reloc->vbo->base;
3323 		switch (bo->mem.mem_type) {
3324 		case TTM_PL_VRAM:
3325 			reloc->location->offset += bo->offset;
3326 			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3327 			break;
3328 		case VMW_PL_GMR:
3329 			reloc->location->gmrId = bo->mem.start;
3330 			break;
3331 		case VMW_PL_MOB:
3332 			*reloc->mob_loc = bo->mem.start;
3333 			break;
3334 		default:
3335 			BUG();
3336 		}
3337 	}
3338 	vmw_free_relocations(sw_context);
3339 }
3340 
3341 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3342 				 uint32_t size)
3343 {
3344 	if (likely(sw_context->cmd_bounce_size >= size))
3345 		return 0;
3346 
3347 	if (sw_context->cmd_bounce_size == 0)
3348 		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3349 
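	/* Grow geometrically (roughly 1.5x per step, page-aligned) until it fits. */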
3350 	while (sw_context->cmd_bounce_size < size) {
3351 		sw_context->cmd_bounce_size =
3352 			PAGE_ALIGN(sw_context->cmd_bounce_size +
3353 				   (sw_context->cmd_bounce_size >> 1));
3354 	}
3355 
3356 	vfree(sw_context->cmd_bounce);
3357 	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3358 
3359 	if (sw_context->cmd_bounce == NULL) {
3360 		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3361 		sw_context->cmd_bounce_size = 0;
3362 		return -ENOMEM;
3363 	}
3364 
3365 	return 0;
3366 }
3367 
3368 /**
3369  * vmw_execbuf_fence_commands - create and submit a command stream fence
3370  *
3371  * Creates a fence object and submits a command stream marker.
3372  * If this fails for some reason, we sync the fifo and return NULL.
3373  * It is then safe to fence buffers with a NULL pointer.
3374  *
3375  * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3376  * user-space handle is created; otherwise no handle is created.
3377  */
3378 
3379 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3380 			       struct vmw_private *dev_priv,
3381 			       struct vmw_fence_obj **p_fence,
3382 			       uint32_t *p_handle)
3383 {
3384 	uint32_t sequence;
3385 	int ret;
3386 	bool synced = false;
3387 
3388 	/* p_handle implies file_priv. */
3389 	BUG_ON(p_handle != NULL && file_priv == NULL);
3390 
3391 	ret = vmw_fifo_send_fence(dev_priv, &sequence);
3392 	if (unlikely(ret != 0)) {
3393 		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3394 		synced = true;
3395 	}
3396 
3397 	if (p_handle != NULL)
3398 		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3399 					    sequence, p_fence, p_handle);
3400 	else
3401 		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3402 
3403 	if (unlikely(ret != 0 && !synced)) {
3404 		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
3405 					 false, VMW_FENCE_WAIT_TIMEOUT);
3406 		*p_fence = NULL;
3407 	}
3408 
3409 	return ret;
3410 }
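
/*
 * Minimal usage sketch (illustrative only): kernel-internal callers pass NULL
 * for @file_priv and @p_handle so that no user-space handle is created, e.g.
 *
 *	struct vmw_fence_obj *fence = NULL;
 *
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *	if (fence != NULL) {
 *		vmw_validation_bo_fence(&val_ctx, fence);
 *		vmw_fence_obj_unreference(&fence);
 *	}
 *
 * mirroring the pattern in __vmw_execbuf_release_pinned_bo() below; dev_priv
 * and val_ctx are assumed to be in scope.
 */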
3411 
3412 /**
3413  * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3414  *
3415  * @dev_priv: Pointer to a vmw_private struct.
3416  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3417  * @ret: Return value from fence object creation.
3418  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3419  * the information should be copied.
3420  * @fence: Pointer to the fence object.
3421  * @fence_handle: User-space fence handle.
3422  * @out_fence_fd: Exported file descriptor for the fence, or -1 if not used.
3423  * @sync_file: Only used to clean up in case of an error in this function.
3424  *
3425  * This function copies fence information to user-space. If copying fails, the
3426  * user-space struct drm_vmw_fence_rep::error member is most likely left
3427  * untouched; since user-space typically preloads it with -EFAULT, the failure
3428  * can still be detected there.
3429  *
3430  * Also if copying fails, user-space will be unable to signal the fence object
3431  * so we wait for it immediately, and then unreference the user-space reference.
3432  */
3433 void
3434 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3435 			    struct vmw_fpriv *vmw_fp, int ret,
3436 			    struct drm_vmw_fence_rep __user *user_fence_rep,
3437 			    struct vmw_fence_obj *fence, uint32_t fence_handle,
3438 			    int32_t out_fence_fd, struct sync_file *sync_file)
3439 {
3440 	struct drm_vmw_fence_rep fence_rep;
3441 
3442 	if (user_fence_rep == NULL)
3443 		return;
3444 
3445 	memset(&fence_rep, 0, sizeof(fence_rep));
3446 
3447 	fence_rep.error = ret;
3448 	fence_rep.fd = out_fence_fd;
3449 	if (ret == 0) {
3450 		BUG_ON(fence == NULL);
3451 
3452 		fence_rep.handle = fence_handle;
3453 		fence_rep.seqno = fence->base.seqno;
3454 		spin_lock(&dev_priv->fence_lock);
3455 		vmw_update_seqno(dev_priv, &dev_priv->fifo);
3456 		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3457 		spin_unlock(&dev_priv->fence_lock);
3458 	}
3459 
3460 	/*
3461 	 * copy_to_user errors will be detected by user space not seeing
3462 	 * fence_rep::error filled in. Typically user-space would have pre-set
3463 	 * that member to -EFAULT.
3464 	 */
3465 	ret = copy_to_user(user_fence_rep, &fence_rep,
3466 			   sizeof(fence_rep));
3467 
3468 	/*
3469 	 * User-space lost the fence object. We need to sync and unreference the
3470 	 * handle.
3471 	 */
3472 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3473 #ifdef __NetBSD__
3474 		if (fd_getfile(fence_rep.fd))
3475 			(void)fd_close(fence_rep.fd);
3476 #else
3477 		if (sync_file)
3478 			fput(sync_file->file);
3479 
3480 		if (fence_rep.fd != -1) {
3481 			put_unused_fd(fence_rep.fd);
3482 			fence_rep.fd = -1;
3483 		}
3484 #endif
3485 
3486 		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
3487 					  TTM_REF_USAGE);
3488 		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3489 		(void) vmw_fence_obj_wait(fence, false, false,
3490 					  VMW_FENCE_WAIT_TIMEOUT);
3491 	}
3492 }
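
/*
 * Illustrative user-space counterpart (sketch only): callers typically
 * pre-seed the error member with -EFAULT so that a failed copy_to_user() in
 * the kernel is still observable, e.g.
 *
 *	struct drm_vmw_fence_rep rep;
 *
 *	memset(&rep, 0, sizeof(rep));
 *	rep.error = -EFAULT;
 *
 * The execbuf argument's fence_rep field then points at &rep; after the ioctl
 * returns, rep.error == 0 means the rest of the structure is valid.
 */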
3493 
3494 /**
3495  * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3496  *
3497  * @dev_priv: Pointer to a device private structure.
3498  * @kernel_commands: Pointer to the unpatched command batch.
3499  * @command_size: Size of the unpatched command batch.
3500  * @sw_context: Structure holding the relocation lists.
3501  *
3502  * Side effects: If this function returns 0, then the command batch pointed to
3503  * by @kernel_commands will have been modified.
3504  */
3505 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3506 				   void *kernel_commands, u32 command_size,
3507 				   struct vmw_sw_context *sw_context)
3508 {
3509 	void *cmd;
3510 
3511 	if (sw_context->dx_ctx_node)
3512 		cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
3513 					  sw_context->dx_ctx_node->ctx->id);
3514 	else
3515 		cmd = VMW_FIFO_RESERVE(dev_priv, command_size);
3516 
3517 	if (!cmd)
3518 		return -ENOMEM;
3519 
3520 	vmw_apply_relocations(sw_context);
3521 	memcpy(cmd, kernel_commands, command_size);
3522 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3523 	vmw_resource_relocations_free(&sw_context->res_relocations);
3524 	vmw_fifo_commit(dev_priv, command_size);
3525 
3526 	return 0;
3527 }
3528 
3529 /**
3530  * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3531  * command buffer manager.
3532  *
3533  * @dev_priv: Pointer to a device private structure.
3534  * @header: Opaque handle to the command buffer allocation.
3535  * @command_size: Size of the unpatched command batch.
3536  * @sw_context: Structure holding the relocation lists.
3537  *
3538  * Side effects: If this function returns 0, then the command buffer represented
3539  * by @header will have been modified.
3540  */
3541 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3542 				     struct vmw_cmdbuf_header *header,
3543 				     u32 command_size,
3544 				     struct vmw_sw_context *sw_context)
3545 {
3546 	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3547 		  SVGA3D_INVALID_ID);
3548 	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3549 				       header);
3550 
3551 	vmw_apply_relocations(sw_context);
3552 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3553 	vmw_resource_relocations_free(&sw_context->res_relocations);
3554 	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3555 
3556 	return 0;
3557 }
3558 
3559 /**
3560  * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3561  * submission using a command buffer.
3562  *
3563  * @dev_priv: Pointer to a device private structure.
3564  * @user_commands: User-space pointer to the commands to be submitted.
3565  * @command_size: Size of the unpatched command batch.
3566  * @header: Out parameter returning the opaque pointer to the command buffer.
3567  *
3568  * This function checks whether we can use the command buffer manager for
3569  * submission and if so, creates a command buffer of suitable size and copies
3570  * the user data into that buffer.
3571  *
3572  * On successful return, the function returns a pointer to the data in the
3573  * command buffer and *@header is set to non-NULL.
3574  *
3575  * If command buffers could not be used, the function returns the value of
3576  * @kernel_commands as passed in. That value may be NULL. In that case, the
3577  * value of *@header will be set to NULL.
3578  *
3579  * If an error is encountered, the function will return a pointer error value.
3580  * If the function is interrupted by a signal while sleeping, it will return
3581  * -ERESTARTSYS cast to a pointer error value.
3582  */
3583 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3584 				void __user *user_commands,
3585 				void *kernel_commands, u32 command_size,
3586 				struct vmw_cmdbuf_header **header)
3587 {
3588 	size_t cmdbuf_size;
3589 	int ret;
3590 
3591 	*header = NULL;
3592 	if (command_size > SVGA_CB_MAX_SIZE) {
3593 		VMW_DEBUG_USER("Command buffer is too large.\n");
3594 		return ERR_PTR(-EINVAL);
3595 	}
3596 
3597 	if (!dev_priv->cman || kernel_commands)
3598 		return kernel_commands;
3599 
3600 	/* If possible, add a little space for fencing. */
3601 	cmdbuf_size = command_size + 512;
3602 	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3603 	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
3604 					   header);
3605 	if (IS_ERR(kernel_commands))
3606 		return kernel_commands;
3607 
3608 	ret = copy_from_user(kernel_commands, user_commands, command_size);
3609 	if (ret) {
3610 		VMW_DEBUG_USER("Failed copying commands.\n");
3611 		vmw_cmdbuf_header_free(*header);
3612 		*header = NULL;
3613 		return ERR_PTR(-EFAULT);
3614 	}
3615 
3616 	return kernel_commands;
3617 }
3618 
3619 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3620 				   struct vmw_sw_context *sw_context,
3621 				   uint32_t handle)
3622 {
3623 	struct vmw_resource *res;
3624 	int ret;
3625 	unsigned int size;
3626 
3627 	if (handle == SVGA3D_INVALID_ID)
3628 		return 0;
3629 
3630 	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
3631 	ret = vmw_validation_preload_res(sw_context->ctx, size);
3632 	if (ret)
3633 		return ret;
3634 
3635 	res = vmw_user_resource_noref_lookup_handle
3636 		(dev_priv, sw_context->fp->tfile, handle,
3637 		 user_context_converter);
3638 	if (IS_ERR(res)) {
3639 		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
3640 			       (unsigned int) handle);
3641 		return PTR_ERR(res);
3642 	}
3643 
3644 	ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
3645 	if (unlikely(ret != 0))
3646 		return ret;
3647 
3648 	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
3649 	sw_context->man = vmw_context_res_man(res);
3650 
3651 	return 0;
3652 }
3653 
3654 int vmw_execbuf_process(struct drm_file *file_priv,
3655 			struct vmw_private *dev_priv,
3656 			void __user *user_commands, void *kernel_commands,
3657 			uint32_t command_size, uint64_t throttle_us,
3658 			uint32_t dx_context_handle,
3659 			struct drm_vmw_fence_rep __user *user_fence_rep,
3660 			struct vmw_fence_obj **out_fence, uint32_t flags)
3661 {
3662 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
3663 	struct vmw_fence_obj *fence = NULL;
3664 	struct vmw_cmdbuf_header *header;
3665 	uint32_t handle = 0;
3666 	int ret;
3667 #ifdef __NetBSD__
3668 	int out_fence_fd = -1;
3669 	struct file *out_fence_fp = NULL;
3670 #else
3671 	int32_t out_fence_fd = -1;
3672 #endif
3673 	struct sync_file *sync_file = NULL;
3674 	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
3675 
3676 	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
3677 
3678 	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3679 #ifdef __NetBSD__
3680 		ret = -fd_allocfile(&out_fence_fp, &out_fence_fd);
3681 		if (ret)
3682 			return ret;
3683 #else
3684 		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
3685 		if (out_fence_fd < 0) {
3686 			VMW_DEBUG_USER("Failed to get a fence fd.\n");
3687 			return out_fence_fd;
3688 		}
3689 #endif
3690 	}
3691 
3692 	if (throttle_us) {
3693 		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3694 				   throttle_us);
3695 
3696 		if (ret)
3697 			goto out_free_fence_fd;
3698 	}
3699 
3700 	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3701 					     kernel_commands, command_size,
3702 					     &header);
3703 	if (IS_ERR(kernel_commands)) {
3704 		ret = PTR_ERR(kernel_commands);
3705 		goto out_free_fence_fd;
3706 	}
3707 
3708 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
3709 	if (ret) {
3710 		ret = -ERESTARTSYS;
3711 		goto out_free_header;
3712 	}
3713 
3714 	sw_context->kernel = false;
3715 	if (kernel_commands == NULL) {
3716 		ret = vmw_resize_cmd_bounce(sw_context, command_size);
3717 		if (unlikely(ret != 0))
3718 			goto out_unlock;
3719 
3720 		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
3721 				     command_size);
3722 		if (unlikely(ret != 0)) {
3723 			ret = -EFAULT;
3724 			VMW_DEBUG_USER("Failed copying commands.\n");
3725 			goto out_unlock;
3726 		}
3727 
3728 		kernel_commands = sw_context->cmd_bounce;
3729 	} else if (!header) {
3730 		sw_context->kernel = true;
3731 	}
3732 
3733 	sw_context->fp = vmw_fpriv(file_priv);
3734 	INIT_LIST_HEAD(&sw_context->ctx_list);
3735 	sw_context->cur_query_bo = dev_priv->pinned_bo;
3736 	sw_context->last_query_ctx = NULL;
3737 	sw_context->needs_post_query_barrier = false;
3738 	sw_context->dx_ctx_node = NULL;
3739 	sw_context->dx_query_mob = NULL;
3740 	sw_context->dx_query_ctx = NULL;
3741 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
3742 	INIT_LIST_HEAD(&sw_context->res_relocations);
3743 	INIT_LIST_HEAD(&sw_context->bo_relocations);
3744 
3745 	if (sw_context->staged_bindings)
3746 		vmw_binding_state_reset(sw_context->staged_bindings);
3747 
3748 	if (!sw_context->res_ht_initialized) {
3749 		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
3750 		if (unlikely(ret != 0))
3751 			goto out_unlock;
3752 
3753 		sw_context->res_ht_initialized = true;
3754 	}
3755 
3756 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
3757 	sw_context->ctx = &val_ctx;
3758 	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
3759 	if (unlikely(ret != 0))
3760 		goto out_err_nores;
3761 
3762 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
3763 				command_size);
3764 	if (unlikely(ret != 0))
3765 		goto out_err_nores;
3766 
3767 	ret = vmw_resources_reserve(sw_context);
3768 	if (unlikely(ret != 0))
3769 		goto out_err_nores;
3770 
3771 	ret = vmw_validation_bo_reserve(&val_ctx, true);
3772 	if (unlikely(ret != 0))
3773 		goto out_err_nores;
3774 
3775 	ret = vmw_validation_bo_validate(&val_ctx, true);
3776 	if (unlikely(ret != 0))
3777 		goto out_err;
3778 
3779 	ret = vmw_validation_res_validate(&val_ctx, true);
3780 	if (unlikely(ret != 0))
3781 		goto out_err;
3782 
3783 	vmw_validation_drop_ht(&val_ctx);
3784 
3785 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
3786 	if (unlikely(ret != 0)) {
3787 		ret = -ERESTARTSYS;
3788 		goto out_err;
3789 	}
3790 
3791 	if (dev_priv->has_mob) {
3792 		ret = vmw_rebind_contexts(sw_context);
3793 		if (unlikely(ret != 0))
3794 			goto out_unlock_binding;
3795 	}
3796 
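	/*
	 * Submit through the command buffer manager when a header was
	 * allocated by vmw_execbuf_cmdbuf() above; otherwise fall back to the
	 * plain FIFO path.
	 */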
3797 	if (!header) {
3798 		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
3799 					      command_size, sw_context);
3800 	} else {
3801 		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
3802 						sw_context);
3803 		header = NULL;
3804 	}
3805 	mutex_unlock(&dev_priv->binding_mutex);
3806 	if (ret)
3807 		goto out_err;
3808 
3809 	vmw_query_bo_switch_commit(dev_priv, sw_context);
3810 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
3811 					 (user_fence_rep) ? &handle : NULL);
3812 	/*
3813 	 * This error is harmless, because if fence submission fails,
3814 	 * vmw_fifo_send_fence will sync. The error will be propagated to
3815 	 * user-space in @user_fence_rep.
3816 	 */
3817 	if (ret != 0)
3818 		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3819 
3820 	vmw_execbuf_bindings_commit(sw_context, false);
3821 	vmw_bind_dx_query_mob(sw_context);
3822 	vmw_validation_res_unreserve(&val_ctx, false);
3823 
3824 	vmw_validation_bo_fence(sw_context->ctx, fence);
3825 
3826 	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
3827 		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
3828 
3829 	/*
3830 	 * If anything fails here, give up trying to export the fence and do a
3831 	 * sync since the user mode will not be able to sync the fence itself.
3832 	 * This ensures we are still functionally correct.
3833 	 */
3834 	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3835 
3836 #ifdef __NetBSD__
3837 		sync_file = sync_file_create(&fence->base, out_fence_fp);
3838 #else
3839 		sync_file = sync_file_create(&fence->base);
3840 #endif
3841 		if (!sync_file) {
3842 			VMW_DEBUG_USER("Sync file create failed for fence\n");
3843 #ifdef __NetBSD__
3844 			fd_abort(curproc, out_fence_fp, out_fence_fd);
3845 			out_fence_fp = NULL;
3846 #else
3847 			put_unused_fd(out_fence_fd);
3848 #endif
3849 			out_fence_fd = -1;
3850 
3851 			(void) vmw_fence_obj_wait(fence, false, false,
3852 						  VMW_FENCE_WAIT_TIMEOUT);
3853 		} else {
3854 			/* Link the fence with the FD created earlier */
3855 #ifdef __NetBSD__
3856 			fd_affix(curproc, out_fence_fp, out_fence_fd);
3857 #else
3858 			fd_install(out_fence_fd, sync_file->file);
3859 #endif
3860 		}
3861 	}
3862 
3863 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
3864 				    user_fence_rep, fence, handle, out_fence_fd,
3865 				    sync_file);
3866 
3867 	/* Don't unreference when handing fence out */
3868 	if (unlikely(out_fence != NULL)) {
3869 		*out_fence = fence;
3870 		fence = NULL;
3871 	} else if (likely(fence != NULL)) {
3872 		vmw_fence_obj_unreference(&fence);
3873 	}
3874 
3875 	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
3876 	mutex_unlock(&dev_priv->cmdbuf_mutex);
3877 
3878 	/*
3879 	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
3880 	 * in resource destruction paths.
3881 	 */
3882 	vmw_validation_unref_lists(&val_ctx);
3883 
3884 	return 0;
3885 
3886 out_unlock_binding:
3887 	mutex_unlock(&dev_priv->binding_mutex);
3888 out_err:
3889 	vmw_validation_bo_backoff(&val_ctx);
3890 out_err_nores:
3891 	vmw_execbuf_bindings_commit(sw_context, true);
3892 	vmw_validation_res_unreserve(&val_ctx, true);
3893 	vmw_resource_relocations_free(&sw_context->res_relocations);
3894 	vmw_free_relocations(sw_context);
3895 	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
3896 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
3897 out_unlock:
3898 	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
3899 	vmw_validation_drop_ht(&val_ctx);
3900 	WARN_ON(!list_empty(&sw_context->ctx_list));
3901 	mutex_unlock(&dev_priv->cmdbuf_mutex);
3902 
3903 	/*
3904 	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
3905 	 * in resource destruction paths.
3906 	 */
3907 	vmw_validation_unref_lists(&val_ctx);
3908 out_free_header:
3909 	if (header)
3910 		vmw_cmdbuf_header_free(header);
3911 out_free_fence_fd:
3912 	if (out_fence_fd >= 0)
3913 #ifdef __NetBSD__
3914 		fd_abort(curproc, out_fence_fp, out_fence_fd);
3915 #else
3916 		put_unused_fd(out_fence_fd);
3917 #endif
3918 
3919 	return ret;
3920 }
3921 
3922 /**
3923  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
3924  *
3925  * @dev_priv: The device private structure.
3926  *
3927  * This function is called to idle the fifo and unpin the query buffer if the
3928  * normal way to do this hits an error, which should typically be extremely
3929  * rare.
3930  */
3931 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
3932 {
3933 	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
3934 
3935 	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
3936 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
3937 	if (dev_priv->dummy_query_bo_pinned) {
3938 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
3939 		dev_priv->dummy_query_bo_pinned = false;
3940 	}
3941 }
3942 
3943 
3944 /**
3945  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
3946  * bo.
3947  *
3948  * @dev_priv: The device private structure.
3949  * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
3950  * query barrier that flushes all queries touching the current buffer pointed to
3951  * by @dev_priv->pinned_bo
3952  *
3953  * This function should be used to unpin the pinned query bo, or as a query
3954  * barrier when we need to make sure that all queries have finished before the
3955  * next fifo command. (For example on hardware context destructions where the
3956  * hardware may otherwise leak unfinished queries).
3957  *
3958  * This function does not return any failure codes, but makes attempts to do safe
3959  * unpinning in case of errors.
3960  *
3961  * The function will synchronize on the previous query barrier, and will thus
3962  * not finish until that barrier has executed.
3963  *
3964  * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
3965  * calling this function.
3966  */
3967 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
3968 				     struct vmw_fence_obj *fence)
3969 {
3970 	int ret = 0;
3971 	struct vmw_fence_obj *lfence = NULL;
3972 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
3973 
3974 	if (dev_priv->pinned_bo == NULL)
3975 		goto out_unlock;
3976 
3977 	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
3978 				    false);
3979 	if (ret)
3980 		goto out_no_reserve;
3981 
3982 	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
3983 				    false);
3984 	if (ret)
3985 		goto out_no_reserve;
3986 
3987 	ret = vmw_validation_bo_reserve(&val_ctx, false);
3988 	if (ret)
3989 		goto out_no_reserve;
3990 
3991 	if (dev_priv->query_cid_valid) {
3992 		BUG_ON(fence != NULL);
3993 		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
3994 		if (ret)
3995 			goto out_no_emit;
3996 		dev_priv->query_cid_valid = false;
3997 	}
3998 
3999 	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4000 	if (dev_priv->dummy_query_bo_pinned) {
4001 		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4002 		dev_priv->dummy_query_bo_pinned = false;
4003 	}
4004 	if (fence == NULL) {
4005 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4006 						  NULL);
4007 		fence = lfence;
4008 	}
4009 	vmw_validation_bo_fence(&val_ctx, fence);
4010 	if (lfence != NULL)
4011 		vmw_fence_obj_unreference(&lfence);
4012 
4013 	vmw_validation_unref_lists(&val_ctx);
4014 	vmw_bo_unreference(&dev_priv->pinned_bo);
4015 
4016 out_unlock:
4017 	return;
4018 out_no_emit:
4019 	vmw_validation_bo_backoff(&val_ctx);
4020 out_no_reserve:
4021 	vmw_validation_unref_lists(&val_ctx);
4022 	vmw_execbuf_unpin_panic(dev_priv);
4023 	vmw_bo_unreference(&dev_priv->pinned_bo);
4024 }
4025 
4026 /**
4027  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
4028  *
4029  * @dev_priv: The device private structure.
4030  *
4031  * This function should be used to unpin the pinned query bo, or as a query
4032  * barrier when we need to make sure that all queries have finished before the
4033  * next fifo command. (For example on hardware context destructions where the
4034  * hardware may otherwise leak unfinished queries).
4035  *
4036  * This function does not return any failure codes, but makes attempts to do safe
4037  * unpinning in case of errors.
4038  *
4039  * The function will synchronize on the previous query barrier, and will thus
4040  * not finish until that barrier has executed.
4041  */
4042 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4043 {
4044 	mutex_lock(&dev_priv->cmdbuf_mutex);
4045 	if (dev_priv->query_cid_valid)
4046 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4047 	mutex_unlock(&dev_priv->cmdbuf_mutex);
4048 }
4049 
4050 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4051 		      struct drm_file *file_priv)
4052 {
4053 	struct vmw_private *dev_priv = vmw_priv(dev);
4054 	struct drm_vmw_execbuf_arg *arg = data;
4055 	int ret;
4056 	struct dma_fence *in_fence = NULL;
4057 
4058 	/*
4059 	 * Extend the ioctl argument while maintaining backwards compatibility:
4060 	 * We take different code paths depending on the value of arg->version.
4061 	 *
4062 	 * Note: The ioctl argument is extended and zeropadded by core DRM.
4063 	 */
4064 	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4065 		     arg->version == 0)) {
4066 		VMW_DEBUG_USER("Incorrect execbuf version.\n");
4067 		return -EINVAL;
4068 	}
4069 
4070 	switch (arg->version) {
4071 	case 1:
4072 		/* For v1, core DRM has extended + zero-padded the data. */
4073 		arg->context_handle = (uint32_t) -1;
4074 		break;
4075 	case 2:
4076 	default:
4077 		/* For v2 and later, core DRM has correctly copied it. */
4078 		break;
4079 	}
4080 
4081 	/* If imported a fence FD from elsewhere, then wait on it */
4082 	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4083 		in_fence = sync_file_get_fence(arg->imported_fence_fd);
4084 
4085 		if (!in_fence) {
4086 			VMW_DEBUG_USER("Cannot get imported fence\n");
4087 			return -EINVAL;
4088 		}
4089 
4090 		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
4091 		if (ret)
4092 			goto out;
4093 	}
4094 
4095 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4096 	if (unlikely(ret != 0))
4097 		goto out;
4098 
4099 	ret = vmw_execbuf_process(file_priv, dev_priv,
4100 				  (void __user *)(unsigned long)arg->commands,
4101 				  NULL, arg->command_size, arg->throttle_us,
4102 				  arg->context_handle,
4103 				  (void __user *)(unsigned long)arg->fence_rep,
4104 				  NULL, arg->flags);
4105 
4106 	ttm_read_unlock(&dev_priv->reservation_sem);
4107 	if (unlikely(ret != 0))
4108 		goto out;
4109 
4110 	vmw_kms_cursor_post_execbuf(dev_priv);
4111 
4112 out:
4113 	if (in_fence)
4114 		dma_fence_put(in_fence);
4115 	return ret;
4116 }
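
/*
 * Illustrative user-space sketch (not part of the driver): submitting a batch
 * through this ioctl roughly amounts to filling in a struct
 * drm_vmw_execbuf_arg with the fields consumed above, e.g.
 *
 *	struct drm_vmw_execbuf_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.commands = (uintptr_t)cmd_buf;
 *	arg.command_size = cmd_len;
 *	arg.version = DRM_VMW_EXECBUF_VERSION;
 *	arg.context_handle = (uint32_t)-1;
 *	arg.fence_rep = (uintptr_t)&rep;
 *
 * followed by the DRM_VMW_EXECBUF ioctl. cmd_buf, cmd_len and rep are
 * hypothetical caller-side variables, and the exact ioctl plumbing is elided.
 */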
4117