/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};


static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

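/*
 * Illustrative sketch (not part of the driver): a caller that can race
 * with the final unreference should use the _unless_doomed variant and
 * handle a NULL return. The unlocked lookup below is a hypothetical
 * placeholder.
 *
 *	res = some_unlocked_lookup(...);
 *	res = vmw_resource_reference_unless_doomed(res);
 *	if (!res)
 *		return -ENOENT;
 *	(use the resource)
 *	vmw_resource_unreference(&res);
 */
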
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, 0);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		res->hw_destroy(res);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_context_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);

	if (id != -1)
		idr_remove(idr, id);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and
 * set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

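/*
 * Illustrative sketch (not part of the driver): the expected lifecycle is
 * init, then hardware setup, then activate, as vmw_stream_init() below
 * demonstrates. my_res_free(), my_func and my_hw_destroy() are
 * hypothetical callbacks.
 *
 *	ret = vmw_resource_init(dev_priv, res, false, my_res_free, &my_func);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	(set up the hardware object here)
 *	vmw_resource_activate(res, my_hw_destroy);
 */
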
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	return res;
}

/**
 * vmw_user_resource_lookup_handle - look up a struct vmw_resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

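/*
 * Illustrative sketch (not part of the driver): resolving a user-space
 * surface handle to a refcounted resource, using the
 * user_surface_converter referenced elsewhere in this file. The caller
 * owns a reference on success and must drop it when done.
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter, &res);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	(use the resource)
 *	vmw_resource_unreference(&res);
 */
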
/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
	return ret;
}

/**
 * Buffer management.
 */

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
				  bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}

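/*
 * Worked example (illustrative only, assuming 4 KiB pages and 8-byte
 * pointers): a 64 KiB request gives num_pages = 16, so the page array
 * accounts for ttm_round_pot(16 * sizeof(void *)) = 128 bytes, plus
 * another ttm_round_pot(16 * sizeof(dma_addr_t)) when the device uses
 * coherent DMA mappings, on top of the rounded struct and backend sizes.
 */
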
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_prime_object_kfree(vmw_user_bo, prime);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_dmabuf_destroy);

	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, bo_free);
	return ret;
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
					    enum ttm_ref_type ref_type)
{
	struct vmw_user_dma_buffer *user_bo;
	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->dma.base);
		break;
	default:
		BUG();
	}
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct vmw_private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      (dev_priv->has_mob) ?
			      &vmw_sys_placement :
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_dmabuf_release,
				    &vmw_user_dmabuf_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	*handle = user_bo->prime.base.hash.key;

out_no_base_object:
	return ret;
}

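/*
 * Illustrative sketch (not part of the driver): allocating a user buffer
 * and dropping the local reference once the handle has been reported to
 * user space, mirroring what vmw_dmabuf_alloc_ioctl() below does.
 *
 *	ret = vmw_user_dmabuf_alloc(dev_priv, tfile, size, false,
 *				    &handle, &dma_buf);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	(report handle to user space)
 *	vmw_dmabuf_unreference(&dma_buf);
 */
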
/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
					struct ttm_object_file *tfile,
					uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->dma.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		struct ttm_bo_device *bdev = bo->bdev;

		spin_lock(&bdev->fence_lock);
		ret = ttm_bo_wait(bo, false, true,
				  !!(flags & drm_vmw_synccpu_dontblock));
		spin_unlock(&bdev->fence_lock);
		return ret;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->dma.base);

	return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
					   struct ttm_object_file *tfile,
					   uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}

/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
				       dma);
		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_dmabuf_unreference(&dma_buf);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
						      arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

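/*
 * Illustrative user-space flow (sketch only; the ioctl wrapper itself is
 * assumed, not shown): CPU writes are bracketed by a grab and a release
 * with matching flags on the same handle.
 *
 *	struct drm_vmw_synccpu_arg arg = { 0 };
 *
 *	arg.handle = handle;
 *	arg.flags = drm_vmw_synccpu_write;
 *	arg.op = drm_vmw_synccpu_grab;
 *	(submit the ioctl, then write to the buffer)
 *	arg.op = drm_vmw_synccpu_release;
 *	(submit the ioctl again)
 */
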
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf,
			      uint32_t *handle)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

	*handle = user_bo->prime.base.hash.key;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL);
}

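/*
 * Illustrative sketch (not part of the driver): exporting an existing
 * user buffer back to user space as a handle. The TTM_REF_USAGE
 * reference added here is dropped when user space unrefs the handle.
 *
 *	ret = vmw_user_dmabuf_reference(tfile, dma_buf, &handle);
 *	if (unlikely(ret != 0))
 *		return ret;
 */
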
/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */

	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");
		goto out_unlock;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_dma_buffer *dma_buf;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    args->size, false, &args->handle,
				    &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

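/*
 * Worked example (illustrative only): for a 1024x768 dumb buffer at
 * 32 bpp, pitch = 1024 * ((32 + 7) / 8) = 4096 bytes and
 * size = 4096 * 768 = 3145728 bytes, which is what gets handed to
 * vmw_user_dmabuf_alloc() above.
 */
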
/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (new_backup && new_backup != res->backup) {

		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		res->backup = vmw_dmabuf_reference(new_backup);
		lockdep_assert_held(&new_backup->base.resv->lock.base);
		list_add_tail(&res->mob_head, &new_backup->res_list);
	}
	if (new_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(NULL, &val_list);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, true);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

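/*
 * Illustrative sketch (not part of the driver): the reserve / validate /
 * unreserve sequence a command-submission path is expected to follow.
 * Fencing of the backup buffer in between is elided.
 *
 *	ret = vmw_resource_reserve(res, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = vmw_resource_validate(res);
 *	(on success: submit commands, fence the backup buffer)
 *	vmw_resource_unreserve(res, NULL, 0);
 */
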
/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(NULL, &val_list);
	ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (likely(!res->func->may_evict))
		return 0;

	val_buf.bo = NULL;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct vmw_fence_obj *old_fence_obj;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		driver->sync_obj_ref(fence);

	spin_lock(&bdev->fence_lock);

	old_fence_obj = bo->sync_obj;
	bo->sync_obj = fence;

	spin_unlock(&bdev->fence_lock);

	if (old_fence_obj)
		vmw_fence_obj_unreference(&old_fence_obj);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo:             The TTM buffer object about to move.
 * @mem:            The struct ttm_mem_reg indicating to what memory
 *                  region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo::res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dma_buf;

	if (mem == NULL)
		return;

	if (bo->destroy != vmw_dmabuf_bo_free &&
	    bo->destroy != vmw_user_dmabuf_destroy)
		return;

	dma_buf = container_of(bo, struct vmw_dma_buffer, base);

	if (mem->mem_type != VMW_PL_MOB) {
		struct vmw_resource *res, *n;
		struct ttm_bo_device *bdev = bo->bdev;
		struct ttm_validate_buffer val_buf;

		val_buf.bo = bo;

		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {

			if (unlikely(res->func->unbind == NULL))
				continue;

			(void) res->func->unbind(res, true, &val_buf);
			res->backup_dirty = true;
			res->res_dirty = false;
			list_del_init(&res->mob_head);
		}

		spin_lock(&bdev->fence_lock);
		(void) ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
	}
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}