/*	$NetBSD: vmwgfx_validation.c,v 1.2 2021/12/18 23:45:45 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_validation.c,v 1.2 2021/12/18 23:45:45 riastradh Exp $");

#include <linux/slab.h>
#include "vmwgfx_validation.h"
#include "vmwgfx_drv.h"

/**
 * struct vmw_validation_bo_node - Buffer object validation metadata.
 * @base: Metadata used for TTM reservation and validation.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @coherent_count: If switching backup buffers, number of new coherent
 * resources that will have this buffer as a backup buffer.
 * @as_mob: Validate as mob.
 * @cpu_blit: Validate for cpu blit access.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_bo_node {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	unsigned int coherent_count;
	u32 as_mob : 1;
	u32 cpu_blit : 1;
};

/**
 * struct vmw_validation_res_node - Resource validation metadata.
 * @head: List head for the resource validation list.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @res: Reference counted resource pointer.
 * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
 * to a resource.
 * @new_backup_offset: Offset into the new backup mob for resources that can
 * share MOBs.
 * @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
 * the command stream provides a mob bind operation.
 * @switching_backup: The validation process is switching backup MOB.
 * @first_usage: True iff the resource has been seen only once in the current
 * validation batch.
 * @reserved: Whether the resource is currently reserved by this process.
 * @dirty: Whether the resource should be marked dirty (VMW_RES_DIRTY_SET)
 * after validation. Only valid when @dirty_set is set.
 * @dirty_set: Whether dirty information was registered for this resource.
 * @private: Optionally additional memory for caller-private data.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_res_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_buffer_object *new_backup;
	unsigned long new_backup_offset;
	u32 no_buffer_needed : 1;
	u32 switching_backup : 1;
	u32 first_usage : 1;
	u32 reserved : 1;
	u32 dirty : 1;
	u32 dirty_set : 1;
	unsigned long private[];
};
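
/*
 * Note (illustrative sketch, not part of the original file): the trailing
 * @private flexible array is how vmw_validation_add_resource() hands back
 * @priv_size bytes of caller-owned scratch space, allocated together with
 * the node itself. A hypothetical caller might do:
 *
 *	struct my_priv { u32 cmd_offset; };	// hypothetical caller data
 *	void *p;
 *
 *	if (!vmw_validation_add_resource(ctx, res, sizeof(struct my_priv),
 *					 0, &p, NULL)) {
 *		struct my_priv *priv = p;
 *
 *		priv->cmd_offset = 0;
 *	}
 */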

/**
 * vmw_validation_mem_alloc - Allocate kernel memory from the validation
 * context-based allocator
 * @ctx: The validation context
 * @size: The number of bytes to allocate.
 *
 * The memory allocated may not exceed PAGE_SIZE, and the returned
 * address is aligned to sizeof(long). All memory allocated this way is
 * reclaimed after validation when calling any of the exported functions:
 * vmw_validation_unref_lists()
 * vmw_validation_revert()
 * vmw_validation_done()
 *
 * Return: Pointer to the allocated memory on success. NULL on failure.
 */
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	void *addr;

	size = vmw_validation_align(size);
	if (size > PAGE_SIZE)
		return NULL;

	if (ctx->mem_size_left < size) {
		struct page *page;

		if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
			int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);

			if (ret)
				return NULL;

			ctx->vm_size_left += ctx->vm->gran;
			ctx->total_mem += ctx->vm->gran;
		}

		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return NULL;

		if (ctx->vm)
			ctx->vm_size_left -= PAGE_SIZE;

		list_add_tail(&page->lru, &ctx->page_list);
		ctx->page_address = page_address(page);
		ctx->mem_size_left = PAGE_SIZE;
	}

	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
	ctx->mem_size_left -= size;

	return addr;
}
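
/*
 * Usage sketch (illustrative only): allocations are carved out of zeroed
 * pages owned by the context, so there is no per-allocation free; all of
 * it is reclaimed in bulk when the context is torn down:
 *
 *	struct foo *node;	// hypothetical node type
 *
 *	node = vmw_validation_mem_alloc(ctx, sizeof(*node));
 *	if (!node)
 *		return -ENOMEM;
 *	// ... node remains valid until vmw_validation_unref_lists(),
 *	// vmw_validation_revert() or vmw_validation_done() is called.
 */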

/**
 * vmw_validation_mem_free - Free all memory allocated using
 * vmw_validation_mem_alloc()
 * @ctx: The validation context
 *
 * All memory previously allocated for this context using
 * vmw_validation_mem_alloc() is freed.
 */
static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
{
	struct page *entry, *next;

	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
		list_del_init(&entry->lru);
		__free_page(entry);
	}

	ctx->mem_size_left = 0;
	if (ctx->vm && ctx->total_mem) {
		ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
		ctx->total_mem = 0;
		ctx->vm_size_left = 0;
	}
}

/**
 * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @vbo: The buffer object to search for.
 *
 * Return: Pointer to the struct vmw_validation_bo_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
			   struct vmw_buffer_object *vbo)
{
	struct vmw_validation_bo_node *bo_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct drm_hash_item *hash;

		if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
			bo_node = container_of(hash, typeof(*bo_node), hash);
	} else {
		struct vmw_validation_bo_node *entry;

		list_for_each_entry(entry, &ctx->bo_list, base.head) {
			if (entry->base.bo == &vbo->base) {
				bo_node = entry;
				break;
			}
		}
	}

	return bo_node;
}

/**
 * vmw_validation_find_res_dup - Find a duplicate resource entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @res: The resource to search for.
 *
 * Return: Pointer to the struct vmw_validation_res_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_res_node *
vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
			    struct vmw_resource *res)
{
	struct vmw_validation_res_node *res_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct drm_hash_item *hash;

		if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
			res_node = container_of(hash, typeof(*res_node), hash);
	} else {
		struct vmw_validation_res_node *entry;

		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
			if (entry->res == res) {
				res_node = entry;
				goto out;
			}
		}

		list_for_each_entry(entry, &ctx->resource_list, head) {
			if (entry->res == res) {
				res_node = entry;
				break;
			}
		}
	}
out:
	return res_node;
}

/**
 * vmw_validation_add_bo - Add a buffer object to the validation context.
 * @ctx: The validation context.
 * @vbo: The buffer object.
 * @as_mob: Validate as mob, otherwise suitable for GMR operations.
 * @cpu_blit: Validate in a page-mappable location.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
			  struct vmw_buffer_object *vbo,
			  bool as_mob,
			  bool cpu_blit)
{
	struct vmw_validation_bo_node *bo_node;

	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
	if (bo_node) {
		if (bo_node->as_mob != as_mob ||
		    bo_node->cpu_blit != cpu_blit) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
	} else {
		struct ttm_validate_buffer *val_buf;
		int ret;

		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
		if (!bo_node)
			return -ENOMEM;

		if (ctx->ht) {
			bo_node->hash.key = (unsigned long) vbo;
			ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
			if (ret) {
				DRM_ERROR("Failed to initialize a buffer "
					  "validation entry.\n");
				return ret;
			}
		}
		val_buf = &bo_node->base;
		val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
		if (!val_buf->bo)
			return -ESRCH;
		val_buf->num_shared = 0;
		list_add_tail(&val_buf->head, &ctx->bo_list);
		bo_node->as_mob = as_mob;
		bo_node->cpu_blit = cpu_blit;
	}

	return 0;
}
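
/*
 * Usage sketch (illustrative only): adding the same buffer object twice
 * merges the entries as long as the usage flags agree; conflicting flags
 * are rejected:
 *
 *	ret = vmw_validation_add_bo(ctx, vbo, true, false);	// as MOB
 *	if (!ret)
 *		ret = vmw_validation_add_bo(ctx, vbo, true, false); // dup, ok
 *	// A third call with as_mob == false would return -EINVAL.
 */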

/**
 * vmw_validation_add_resource - Add a resource to the validation context.
 * @ctx: The validation context.
 * @res: The resource.
 * @priv_size: Size of private, additional metadata.
 * @dirty: Whether to change dirty status.
 * @p_node: Output pointer of additional metadata address.
 * @first_usage: Whether this was the first time this resource was seen.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
				struct vmw_resource *res,
				size_t priv_size,
				u32 dirty,
				void **p_node,
				bool *first_usage)
{
	struct vmw_validation_res_node *node;
	int ret;

	node = vmw_validation_find_res_dup(ctx, res);
	if (node) {
		node->first_usage = 0;
		goto out_fill;
	}

	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
	if (!node) {
		VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
		return -ENOMEM;
	}

	if (ctx->ht) {
		node->hash.key = (unsigned long) res;
		ret = drm_ht_insert_item(ctx->ht, &node->hash);
		if (ret) {
			DRM_ERROR("Failed to initialize a resource validation "
				  "entry.\n");
			return ret;
		}
	}
	node->res = vmw_resource_reference_unless_doomed(res);
	if (!node->res)
		return -ESRCH;

	node->first_usage = 1;
	if (!res->dev_priv->has_mob) {
		list_add_tail(&node->head, &ctx->resource_list);
	} else {
		switch (vmw_res_type(res)) {
		case vmw_res_context:
		case vmw_res_dx_context:
			list_add(&node->head, &ctx->resource_ctx_list);
			break;
		case vmw_res_cotable:
			list_add_tail(&node->head, &ctx->resource_ctx_list);
			break;
		default:
			list_add_tail(&node->head, &ctx->resource_list);
			break;
		}
	}

out_fill:
	if (dirty) {
		node->dirty_set = 1;
		/* Overwriting previous information here is intentional! */
		node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
	}
	if (first_usage)
		*first_usage = node->first_usage;
	if (p_node)
		*p_node = &node->private;

	return 0;
}
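
/*
 * Usage sketch (illustrative only): @first_usage lets the caller perform
 * one-time per-batch setup for a resource:
 *
 *	bool first;
 *	int ret;
 *
 *	ret = vmw_validation_add_resource(ctx, res, 0, VMW_RES_DIRTY_SET,
 *					  NULL, &first);
 *	if (!ret && first) {
 *		// hypothetical one-time setup for this validation batch
 *	}
 */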

/**
 * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @dirty: Dirty information VMW_RES_DIRTY_XX
 */
void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
				  void *val_private, u32 dirty)
{
	struct vmw_validation_res_node *val;

	if (!dirty)
		return;

	val = container_of(val_private, typeof(*val), private);
	val->dirty_set = 1;
	/* Overwriting previous information here is intentional! */
	val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
}

/**
 * vmw_validation_res_switch_backup - Register a backup MOB switch during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @vbo: The new backup buffer object MOB. This buffer object needs to have
 * already been registered with the validation context.
 * @backup_offset: Offset into the new backup MOB.
 */
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
				      void *val_private,
				      struct vmw_buffer_object *vbo,
				      unsigned long backup_offset)
{
	struct vmw_validation_res_node *val;

	val = container_of(val_private, typeof(*val), private);

	val->switching_backup = 1;
	if (val->first_usage)
		val->no_buffer_needed = 1;

	val->new_backup = vbo;
	val->new_backup_offset = backup_offset;
}
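
/*
 * Usage sketch (illustrative only; the flag choices are assumptions):
 * the new backup buffer must already have been registered with the
 * context before the switch is recorded:
 *
 *	void *val_private;
 *
 *	ret = vmw_validation_add_resource(ctx, res, 0, 0, &val_private, NULL);
 *	if (!ret)
 *		ret = vmw_validation_add_bo(ctx, new_vbo, true, false);
 *	if (!ret)
 *		vmw_validation_res_switch_backup(ctx, val_private, new_vbo, 0);
 */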

/**
 * vmw_validation_res_reserve - Reserve all resources registered with this
 * validation context.
 * @ctx: The validation context.
 * @intr: Use interruptible waits when possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
			       bool intr)
{
	struct vmw_validation_res_node *val;
	int ret = 0;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
		if (ret)
			goto out_unreserve;

		val->reserved = 1;
		if (res->backup) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				goto out_unreserve;
		}

		if (val->switching_backup && val->new_backup &&
		    res->coherent) {
			struct vmw_validation_bo_node *bo_node =
				vmw_validation_find_bo_dup(ctx,
							   val->new_backup);

			if (WARN_ON(!bo_node)) {
				ret = -EINVAL;
				goto out_unreserve;
			}
			bo_node->coherent_count++;
		}
	}

	return 0;

out_unreserve:
	vmw_validation_res_unreserve(ctx, true);
	return ret;
}

/**
 * vmw_validation_res_unreserve - Unreserve all reserved resources
 * registered with this validation context.
 * @ctx: The validation context.
 * @backoff: Whether this is a backoff- or a commit-type operation. This
 * is used to determine whether to switch backup MOBs or not.
 */
void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
				  bool backoff)
{
	struct vmw_validation_res_node *val;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	if (backoff)
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       false, false, false,
						       NULL, 0);
		}
	else
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       val->dirty_set,
						       val->dirty,
						       val->switching_backup,
						       val->new_backup,
						       val->new_backup_offset);
		}
}

/**
 * vmw_validation_bo_validate_single - Validate a single buffer object.
 * @bo: The TTM buffer object base.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @validate_as_mob: Whether to validate in MOB memory.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob)
{
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	int ret;

	if (atomic_read(&vbo->cpu_writers))
		return -EBUSY;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (ret == 0 || ret == -ERESTARTSYS)
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
	return ret;
}

/**
 * vmw_validation_bo_validate - Validate all buffer objects registered with
 * the validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_bo_node *entry;
	int ret;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		struct vmw_buffer_object *vbo =
			container_of(entry->base.bo, typeof(*vbo), base);

		if (entry->cpu_blit) {
			struct ttm_operation_ctx ctx = {
				.interruptible = intr,
				.no_wait_gpu = false
			};

			ret = ttm_bo_validate(entry->base.bo,
					      &vmw_nonfixed_placement, &ctx);
		} else {
			ret = vmw_validation_bo_validate_single
			(entry->base.bo, intr, entry->as_mob);
		}
		if (ret)
			return ret;

		/*
		 * Rather than having the resource code allocate the bo
		 * dirty tracker in resource_unreserve(), where we can't fail,
		 * do it here when validating the buffer object.
		 */
		if (entry->coherent_count) {
			unsigned int coherent_count = entry->coherent_count;

			while (coherent_count) {
				ret = vmw_bo_dirty_add(vbo);
				if (ret)
					return ret;

				coherent_count--;
			}
			entry->coherent_count -= coherent_count;
		}

		if (vbo->dirty)
			vmw_bo_dirty_scan(vbo);
	}
	return 0;
}

/**
 * vmw_validation_res_validate - Validate all resources registered with the
 * validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_res_node *val;
	int ret;

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_buffer_object *backup = res->backup;

		ret = vmw_resource_validate(res, intr, val->dirty_set &&
					    val->dirty);
		if (ret) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
 * and unregister it from this validation context.
 * @ctx: The validation context.
 *
 * The hash table used for duplicate finding is an expensive resource and
 * may be protected by mutexes that can cause deadlocks during resource
 * unreferencing if held. After resource- and buffer object registering,
 * there is no longer any use for this hash table, so allow freeing it
 * early, either to shorten mutex hold times, or before resources and
 * buffer objects are freed during validation context cleanup.
 */
void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	if (!ctx->ht)
		return;

	list_for_each_entry(entry, &ctx->bo_list, base.head)
		(void) drm_ht_remove_item(ctx->ht, &entry->hash);

	list_for_each_entry(val, &ctx->resource_list, head)
		(void) drm_ht_remove_item(ctx->ht, &val->hash);

	list_for_each_entry(val, &ctx->resource_ctx_list, head)
		(void) drm_ht_remove_item(ctx->ht, &val->hash);

	ctx->ht = NULL;
}

/**
 * vmw_validation_unref_lists - Unregister previously registered buffer
 * objects and resources.
 * @ctx: The validation context.
 *
 * Note that this function may cause buffer object- and resource destructors
 * to be invoked.
 */
void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		ttm_bo_put(entry->base.bo);
		entry->base.bo = NULL;
	}

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	list_for_each_entry(val, &ctx->resource_list, head)
		vmw_resource_unreference(&val->res);

	/*
	 * No need to detach each list entry since they are all freed with
	 * vmw_validation_mem_free(). Just make them inaccessible.
	 */
	INIT_LIST_HEAD(&ctx->bo_list);
	INIT_LIST_HEAD(&ctx->resource_list);

	vmw_validation_mem_free(ctx);
}

/**
 * vmw_validation_prepare - Prepare a validation context for command
 * submission.
 * @ctx: The validation context.
 * @mutex: The mutex used to protect resource reservation.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Note that the single reservation mutex @mutex is an unfortunate
 * construct. Ideally resource reservation should be moved to per-resource
 * ww_mutexes.
 * If this function doesn't return zero to indicate success, all resources
 * are left unreserved but still referenced.
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on error.
 */
int vmw_validation_prepare(struct vmw_validation_context *ctx,
			   struct mutex *mutex,
			   bool intr)
{
	int ret = 0;

	if (mutex) {
		if (intr)
			ret = mutex_lock_interruptible(mutex);
		else
			mutex_lock(mutex);
		if (ret)
			return -ERESTARTSYS;
	}

	ctx->res_mutex = mutex;
	ret = vmw_validation_res_reserve(ctx, intr);
	if (ret)
		goto out_no_res_reserve;

	ret = vmw_validation_bo_reserve(ctx, intr);
	if (ret)
		goto out_no_bo_reserve;

	ret = vmw_validation_bo_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	ret = vmw_validation_res_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	return 0;

out_no_validate:
	vmw_validation_bo_backoff(ctx);
out_no_bo_reserve:
	vmw_validation_res_unreserve(ctx, true);
out_no_res_reserve:
	if (mutex)
		mutex_unlock(mutex);

	return ret;
}
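
/*
 * Typical lifecycle sketch (illustrative only; submit_commands() and
 * res_mutex are hypothetical placeholders for the caller's command
 * submission and reservation mutex):
 *
 *	ret = vmw_validation_prepare(ctx, res_mutex, intr);
 *	if (ret)
 *		goto out_unref;	// unreserved, but still referenced
 *	ret = submit_commands();
 *	if (ret) {
 *		vmw_validation_revert(ctx);
 *		return ret;
 *	}
 *	vmw_validation_done(ctx, fence);
 *	return 0;
 * out_unref:
 *	vmw_validation_unref_lists(ctx);
 *	return ret;
 */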

/**
 * vmw_validation_revert - Revert validation actions if command submission
 * failed.
 *
 * @ctx: The validation context.
 *
 * The caller still needs to unref resources after a call to this function.
 */
void vmw_validation_revert(struct vmw_validation_context *ctx)
{
	vmw_validation_bo_backoff(ctx);
	vmw_validation_res_unreserve(ctx, true);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_done - Commit validation actions after command submission
 * success.
 * @ctx: The validation context.
 * @fence: Fence with which to fence all buffer objects taking part in the
 * command submission.
 *
 * The caller does NOT need to unref resources after a call to this function.
 */
void vmw_validation_done(struct vmw_validation_context *ctx,
			 struct vmw_fence_obj *fence)
{
	vmw_validation_bo_fence(ctx, fence);
	vmw_validation_res_unreserve(ctx, false);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_preload_bo - Preload the validation memory allocator for a
 * call to vmw_validation_add_bo().
 * @ctx: Pointer to the validation context.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
 * but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
{
	unsigned int size = sizeof(struct vmw_validation_bo_node);

	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}
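
/*
 * Usage sketch (illustrative only): preload before taking a lock so that
 * the subsequent vmw_validation_add_bo() cannot sleep while it is held
 * ("some_lock" is a hypothetical caller-side spinlock):
 *
 *	ret = vmw_validation_preload_bo(ctx);
 *	if (ret)
 *		return ret;
 *	spin_lock(&some_lock);
 *	ret = vmw_validation_add_bo(ctx, vbo, false, false);
 *	spin_unlock(&some_lock);
 */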

/**
 * vmw_validation_preload_res - Preload the validation memory allocator for a
 * call to vmw_validation_add_resource().
 * @ctx: Pointer to the validation context.
 * @size: Size of the validation node extra data. See below.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_resource() with the same or smaller @size is guaranteed
 * not to sleep. An error is not fatal but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
				    size) +
		vmw_validation_align(sizeof(struct vmw_validation_bo_node));
	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}
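
/*
 * Usage sketch (illustrative only), analogous to the buffer object case:
 * preload with the largest extra data size that the later call will
 * request ("struct my_priv" is hypothetical):
 *
 *	ret = vmw_validation_preload_res(ctx, sizeof(struct my_priv));
 *	if (ret)
 *		return ret;
 *	// ... later, under a lock, guaranteed not to sleep:
 *	ret = vmw_validation_add_resource(ctx, res, sizeof(struct my_priv),
 *					  0, &p, NULL);
 */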

/**
 * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
 * validation context.
 * @ctx: The validation context.
 *
 * This function unreserves the buffer objects previously reserved using
 * vmw_validation_bo_reserve(). It's typically used as part of an error path.
 */
void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;

	/*
	 * Switching coherent resource backup buffers failed.
	 * Release corresponding buffer object dirty trackers.
	 */
	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		if (entry->coherent_count) {
			unsigned int coherent_count = entry->coherent_count;
			struct vmw_buffer_object *vbo =
				container_of(entry->base.bo, typeof(*vbo),
					     base);

			while (coherent_count--)
				vmw_bo_dirty_release(vbo);
		}
	}

	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
}