1 /*	$NetBSD: vmwgfx_bo.c,v 1.3 2022/10/25 23:39:13 riastradh Exp $	*/
2 
3 // SPDX-License-Identifier: GPL-2.0 OR MIT
4 /**************************************************************************
5  *
6  * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
7  * All Rights Reserved.
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the
11  * "Software"), to deal in the Software without restriction, including
12  * without limitation the rights to use, copy, modify, merge, publish,
13  * distribute, sub license, and/or sell copies of the Software, and to
14  * permit persons to whom the Software is furnished to do so, subject to
15  * the following conditions:
16  *
17  * The above copyright notice and this permission notice (including the
18  * next paragraph) shall be included in all copies or substantial portions
19  * of the Software.
20  *
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
24  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
25  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
26  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
27  * USE OR OTHER DEALINGS IN THE SOFTWARE.
28  *
29  **************************************************************************/
30 
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: vmwgfx_bo.c,v 1.3 2022/10/25 23:39:13 riastradh Exp $");
33 
34 #include <drm/ttm/ttm_placement.h>
35 
36 #include "vmwgfx_drv.h"
37 #include "ttm_object.h"
38 
39 
40 /**
41  * struct vmw_user_buffer_object - User-space-visible buffer object
42  *
43  * @prime: The prime object providing user visibility.
44  * @vbo: The struct vmw_buffer_object
45  */
46 struct vmw_user_buffer_object {
47 	struct ttm_prime_object prime;
48 	struct vmw_buffer_object vbo;
49 };
50 
51 
52 /**
53  * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
54  * vmw_buffer_object.
55  *
56  * @bo: Pointer to the TTM buffer object.
57  * Return: Pointer to the struct vmw_buffer_object embedding the
58  * TTM buffer object.
59  */
60 static struct vmw_buffer_object *
61 vmw_buffer_object(struct ttm_buffer_object *bo)
62 {
63 	return container_of(bo, struct vmw_buffer_object, base);
64 }
65 
66 
67 /**
68  * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
69  * vmw_user_buffer_object.
70  *
71  * @bo: Pointer to the TTM buffer object.
72  * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
73  * buffer object.
74  */
75 static struct vmw_user_buffer_object *
76 vmw_user_buffer_object(struct ttm_buffer_object *bo)
77 {
78 	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
79 
80 	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
81 }
82 
83 
84 /**
85  * vmw_bo_pin_in_placement - Validate a buffer to placement.
86  *
87  * @dev_priv:  Driver private.
88  * @buf:  DMA buffer to move.
89  * @placement:  The placement to pin it.
90  * @interruptible:  Use interruptible wait.
91  * Return: Zero on success, negative error code on failure. In particular,
92  * -ERESTARTSYS if interrupted by a signal.
93  */
94 int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
95 			    struct vmw_buffer_object *buf,
96 			    struct ttm_placement *placement,
97 			    bool interruptible)
98 {
99 	struct ttm_operation_ctx ctx = {interruptible, false };
100 	struct ttm_buffer_object *bo = &buf->base;
101 	int ret;
102 	uint32_t new_flags;
103 
104 	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
105 	if (unlikely(ret != 0))
106 		return ret;
107 
108 	vmw_execbuf_release_pinned_bo(dev_priv);
109 
110 	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
111 	if (unlikely(ret != 0))
112 		goto err;
113 
114 	if (buf->pin_count > 0)
115 		ret = ttm_bo_mem_compat(placement, &bo->mem,
116 					&new_flags) == true ? 0 : -EINVAL;
117 	else
118 		ret = ttm_bo_validate(bo, placement, &ctx);
119 
120 	if (!ret)
121 		vmw_bo_pin_reserved(buf, true);
122 
123 	ttm_bo_unreserve(bo);
124 
125 err:
126 	ttm_write_unlock(&dev_priv->reservation_sem);
127 	return ret;
128 }
129 
130 
131 /**
132  * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
133  *
134  * This function takes the reservation_sem in write mode.
135  * Flushes and unpins the query bo to avoid failures.
136  *
137  * @dev_priv:  Driver private.
138  * @buf:  DMA buffer to move.
140  * @interruptible:  Use interruptible wait.
141  * Return: Zero on success, negative error code on failure. In particular,
142  * -ERESTARTSYS if interrupted by a signal.
143  */
144 int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
145 			      struct vmw_buffer_object *buf,
146 			      bool interruptible)
147 {
148 	struct ttm_operation_ctx ctx = {interruptible, false };
149 	struct ttm_buffer_object *bo = &buf->base;
150 	int ret;
151 	uint32_t new_flags;
152 
153 	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
154 	if (unlikely(ret != 0))
155 		return ret;
156 
157 	vmw_execbuf_release_pinned_bo(dev_priv);
158 
159 	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
160 	if (unlikely(ret != 0))
161 		goto err;
162 
163 	if (buf->pin_count > 0) {
164 		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
165 					&new_flags) == true ? 0 : -EINVAL;
166 		goto out_unreserve;
167 	}
168 
169 	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
170 	if (likely(ret == 0) || ret == -ERESTARTSYS)
171 		goto out_unreserve;
172 
173 	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
174 
175 out_unreserve:
176 	if (!ret)
177 		vmw_bo_pin_reserved(buf, true);
178 
179 	ttm_bo_unreserve(bo);
180 err:
181 	ttm_write_unlock(&dev_priv->reservation_sem);
182 	return ret;
183 }
184 
185 
186 /**
187  * vmw_bo_pin_in_vram - Move a buffer to vram.
188  *
189  * This function takes the reservation_sem in write mode.
190  * Flushes and unpins the query bo to avoid failures.
191  *
192  * @dev_priv:  Driver private.
193  * @buf:  DMA buffer to move.
194  * @interruptible:  Use interruptible wait.
195  * Return: Zero on success, negative error code on failure. In particular,
196  * -ERESTARTSYS if interrupted by a signal.
197  */
198 int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
199 		       struct vmw_buffer_object *buf,
200 		       bool interruptible)
201 {
202 	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
203 				       interruptible);
204 }
205 
206 
207 /**
208  * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
209  *
210  * This function takes the reservation_sem in write mode.
211  * Flushes and unpins the query bo to avoid failures.
212  *
213  * @dev_priv:  Driver private.
214  * @buf:  DMA buffer to pin.
215  * @interruptible:  Use interruptible wait.
216  * Return: Zero on success, negative error code on failure. In particular,
217  * -ERESTARTSYS if interrupted by a signal.
218  */
219 int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
220 				struct vmw_buffer_object *buf,
221 				bool interruptible)
222 {
223 	struct ttm_operation_ctx ctx = {interruptible, false };
224 	struct ttm_buffer_object *bo = &buf->base;
225 	struct ttm_placement placement;
226 	struct ttm_place place;
227 	int ret = 0;
228 	uint32_t new_flags;
229 
230 	place = vmw_vram_placement.placement[0];
231 	place.lpfn = bo->num_pages;
232 	placement.num_placement = 1;
233 	placement.placement = &place;
234 	placement.num_busy_placement = 1;
235 	placement.busy_placement = &place;
236 
237 	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
238 	if (unlikely(ret != 0))
239 		return ret;
240 
241 	vmw_execbuf_release_pinned_bo(dev_priv);
242 	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
243 	if (unlikely(ret != 0))
244 		goto err_unlock;
245 
246 	/*
247 	 * Is this buffer already in vram but not at the start of it?
248 	 * In that case, evict it first because TTM isn't good at handling
249 	 * that situation.
250 	 */
251 	if (bo->mem.mem_type == TTM_PL_VRAM &&
252 	    bo->mem.start < bo->num_pages &&
253 	    bo->mem.start > 0 &&
254 	    buf->pin_count == 0) {
255 		ctx.interruptible = false;
256 		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
257 	}
258 
259 	if (buf->pin_count > 0)
260 		ret = ttm_bo_mem_compat(&placement, &bo->mem,
261 					&new_flags) == true ? 0 : -EINVAL;
262 	else
263 		ret = ttm_bo_validate(bo, &placement, &ctx);
264 
265 	/* For some reason we didn't end up at the start of vram */
266 	WARN_ON(ret == 0 && bo->offset != 0);
267 	if (!ret)
268 		vmw_bo_pin_reserved(buf, true);
269 
270 	ttm_bo_unreserve(bo);
271 err_unlock:
272 	ttm_write_unlock(&dev_priv->reservation_sem);
273 
274 	return ret;
275 }
276 
277 
278 /**
279  * vmw_bo_unpin - Unpin the given buffer object without moving it.
280  *
281  * This function takes the reservation_sem in read mode.
282  *
283  * @dev_priv:  Driver private.
284  * @buf:  DMA buffer to unpin.
285  * @interruptible:  Use interruptible wait.
286  * Return: Zero on success, negative error code on failure. In particular,
287  * -ERESTARTSYS if interrupted by a signal.
288  */
289 int vmw_bo_unpin(struct vmw_private *dev_priv,
290 		 struct vmw_buffer_object *buf,
291 		 bool interruptible)
292 {
293 	struct ttm_buffer_object *bo = &buf->base;
294 	int ret;
295 
296 	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
297 	if (unlikely(ret != 0))
298 		return ret;
299 
300 	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
301 	if (unlikely(ret != 0))
302 		goto err;
303 
304 	vmw_bo_pin_reserved(buf, false);
305 
306 	ttm_bo_unreserve(bo);
307 
308 err:
309 	ttm_read_unlock(&dev_priv->reservation_sem);
310 	return ret;
311 }
312 
313 /**
314  * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
315  * of a buffer.
316  *
317  * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
318  * @ptr: SVGAGuestPtr returning the result.
319  */
320 void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
321 			  SVGAGuestPtr *ptr)
322 {
323 	if (bo->mem.mem_type == TTM_PL_VRAM) {
324 		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
325 		ptr->offset = bo->offset;
326 	} else {
327 		ptr->gmrId = bo->mem.start;
328 		ptr->offset = 0;
329 	}
330 }
331 
332 
333 /**
334  * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
335  *
336  * @vbo: The buffer object. Must be reserved.
337  * @pin: Whether to pin or unpin.
338  *
339  */
340 void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
341 {
342 	struct ttm_operation_ctx ctx = { false, true };
343 	struct ttm_place pl;
344 	struct ttm_placement placement;
345 	struct ttm_buffer_object *bo = &vbo->base;
346 	uint32_t old_mem_type = bo->mem.mem_type;
347 	int ret;
348 
349 	dma_resv_assert_held(bo->base.resv);
350 
351 	if (pin) {
352 		if (vbo->pin_count++ > 0)
353 			return;
354 	} else {
355 		WARN_ON(vbo->pin_count <= 0);
356 		if (--vbo->pin_count > 0)
357 			return;
358 	}
359 
360 	pl.fpfn = 0;
361 	pl.lpfn = 0;
362 	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
363 		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
364 	if (pin)
365 		pl.flags |= TTM_PL_FLAG_NO_EVICT;
366 
367 	memset(&placement, 0, sizeof(placement));
368 	placement.num_placement = 1;
369 	placement.placement = &pl;
370 
371 	ret = ttm_bo_validate(bo, &placement, &ctx);
372 
373 	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
374 }
375 
376 
377 /**
378  * vmw_bo_map_and_cache - Map a buffer object and cache the map
379  *
380  * @vbo: The buffer object to map
381  * Return: A kernel virtual address or NULL if mapping failed.
382  *
383  * This function maps a buffer object into the kernel address space, or
384  * returns the virtual kernel address of an already existing map. The virtual
385  * address remains valid as long as the buffer object is pinned or reserved.
386  * The cached map is torn down on either
387  * 1) Buffer object move
388  * 2) Buffer object swapout
389  * 3) Buffer object destruction
390  *
391  */
392 void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
393 {
394 	struct ttm_buffer_object *bo = &vbo->base;
395 	bool not_used;
396 	void *virtual;
397 	int ret;
398 
399 	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
400 	if (virtual)
401 		return virtual;
402 
403 	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
404 	if (ret)
405 		DRM_ERROR("Buffer object map failed: %d.\n", ret);
406 
407 	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
408 }
409 
410 
411 /**
412  * vmw_bo_unmap - Tear down a cached buffer object map.
413  *
414  * @vbo: The buffer object whose map we are tearing down.
415  *
416  * This function tears down a cached map set up using
417  * vmw_bo_map_and_cache().
418  */
419 void vmw_bo_unmap(struct vmw_buffer_object *vbo)
420 {
421 	if (vbo->map.bo == NULL)
422 		return;
423 
424 	ttm_bo_kunmap(&vbo->map);
425 }
426 
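/*
 * Illustrative sketch (not part of the driver): a minimal usage pairing of
 * vmw_bo_map_and_cache() and vmw_bo_unmap(), assuming vbo is a pointer to a
 * struct vmw_buffer_object that the caller has already pinned or reserved.
 *
 *	void *virtual = vmw_bo_map_and_cache(vbo);
 *
 *	if (virtual != NULL)
 *		memset(virtual, 0, vbo->base.num_pages << PAGE_SHIFT);
 *
 *	vmw_bo_unmap(vbo);
 *
 * The explicit unmap is optional; the cached map is also torn down
 * automatically on buffer object move, swapout or destruction.
 */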
427 
428 /**
429  * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
430  *
431  * @dev_priv: Pointer to a struct vmw_private identifying the device.
432  * @size: The requested buffer size.
433  * @user: Whether this is an ordinary dma buffer or a user dma buffer.
434  */
435 static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
436 			      bool user)
437 {
438 	static size_t struct_size, user_struct_size;
439 	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
440 	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
441 
442 	if (unlikely(struct_size == 0)) {
443 		size_t backend_size = ttm_round_pot(vmw_tt_size);
444 
445 		struct_size = backend_size +
446 			ttm_round_pot(sizeof(struct vmw_buffer_object));
447 		user_struct_size = backend_size +
448 		  ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
449 				      TTM_OBJ_EXTRA_SIZE;
450 	}
451 
452 	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
453 		page_array_size +=
454 			ttm_round_pot(num_pages * sizeof(dma_addr_t));
455 
456 	return ((user) ? user_struct_size : struct_size) +
457 		page_array_size;
458 }
459 
460 
461 /**
462  * vmw_bo_bo_free - vmw buffer object destructor
463  *
464  * @bo: Pointer to the embedded struct ttm_buffer_object
465  */
466 void vmw_bo_bo_free(struct ttm_buffer_object *bo)
467 {
468 	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
469 
470 	WARN_ON(vmw_bo->dirty);
471 	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
472 	vmw_bo_unmap(vmw_bo);
473 	kfree(vmw_bo);
474 }
475 
476 
477 /**
478  * vmw_user_bo_destroy - vmw user buffer object destructor
479  *
480  * @bo: Pointer to the embedded struct ttm_buffer_object
481  */
482 static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
483 {
484 	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
485 	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
486 
487 	WARN_ON(vbo->dirty);
488 	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
489 	vmw_bo_unmap(vbo);
490 	ttm_prime_object_kfree(vmw_user_bo, prime);
491 }
492 
493 #ifdef __NetBSD__
494 extern rb_tree_ops_t vmwgfx_res_rb_ops;
495 #endif
496 
497 /**
498  * vmw_bo_init - Initialize a vmw buffer object
499  *
500  * @dev_priv: Pointer to the device private struct
501  * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
502  * @size: Buffer object size in bytes.
503  * @placement: Initial placement.
504  * @interruptible: Whether waits should be performed interruptibly.
505  * @bo_free: The buffer object destructor.
506  * Return: Zero on success, negative error code on error.
507  *
508  * Note that on error, the code will free the buffer object.
509  */
510 int vmw_bo_init(struct vmw_private *dev_priv,
511 		struct vmw_buffer_object *vmw_bo,
512 		size_t size, struct ttm_placement *placement,
513 		bool interruptible,
514 		void (*bo_free)(struct ttm_buffer_object *bo))
515 {
516 	struct ttm_bo_device *bdev = &dev_priv->bdev;
517 	size_t acc_size;
518 	int ret;
519 	bool user = (bo_free == &vmw_user_bo_destroy);
520 
521 	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
522 
523 	acc_size = vmw_bo_acc_size(dev_priv, size, user);
524 	memset(vmw_bo, 0, sizeof(*vmw_bo));
525 	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
526 	vmw_bo->base.priority = 3;
527 #ifdef __NetBSD__
528 	rb_tree_init(&vmw_bo->res_tree.rbr_tree, &vmwgfx_res_rb_ops);
529 #else
530 	vmw_bo->res_tree = RB_ROOT;
531 #endif
532 
533 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
534 			  ttm_bo_type_device, placement,
535 			  0, interruptible, acc_size,
536 			  NULL, NULL, bo_free);
537 	return ret;
538 }
539 
540 
541 /**
542  * vmw_user_bo_release - TTM reference base object release callback for
543  * vmw user buffer objects
544  *
545  * @p_base: The TTM base object pointer about to be unreferenced.
546  *
547  * Clears the TTM base object pointer and drops the reference the
548  * base object has on the underlying struct vmw_buffer_object.
549  */
550 static void vmw_user_bo_release(struct ttm_base_object **p_base)
551 {
552 	struct vmw_user_buffer_object *vmw_user_bo;
553 	struct ttm_base_object *base = *p_base;
554 
555 	*p_base = NULL;
556 
557 	if (unlikely(base == NULL))
558 		return;
559 
560 	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
561 				   prime.base);
562 	ttm_bo_put(&vmw_user_bo->vbo.base);
563 }
564 
565 
566 /**
567  * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
568  * for vmw user buffer objects
569  *
570  * @base: Pointer to the TTM base object
571  * @ref_type: Reference type of the reference reaching zero.
572  *
573  * Called when user-space drops its last synccpu reference on the buffer
574  * object, either explicitly or as part of a cleanup file close.
575  */
576 static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
577 					enum ttm_ref_type ref_type)
578 {
579 	struct vmw_user_buffer_object *user_bo;
580 
581 	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
582 
583 	switch (ref_type) {
584 	case TTM_REF_SYNCCPU_WRITE:
585 		atomic_dec(&user_bo->vbo.cpu_writers);
586 		break;
587 	default:
588 		WARN_ONCE(true, "Undefined buffer object reference release.\n");
589 	}
590 }
591 
592 
593 /**
594  * vmw_user_bo_alloc - Allocate a user buffer object
595  *
596  * @dev_priv: Pointer to a struct device private.
597  * @tfile: Pointer to a struct ttm_object_file on which to register the user
598  * object.
599  * @size: Size of the buffer object.
600  * @shareable: Boolean whether the buffer is shareable with other open files.
601  * @handle: Pointer to where the handle value should be assigned.
602  * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
603  * should be assigned.
604  * Return: Zero on success, negative error code on error.
605  */
606 int vmw_user_bo_alloc(struct vmw_private *dev_priv,
607 		      struct ttm_object_file *tfile,
608 		      uint32_t size,
609 		      bool shareable,
610 		      uint32_t *handle,
611 		      struct vmw_buffer_object **p_vbo,
612 		      struct ttm_base_object **p_base)
613 {
614 	struct vmw_user_buffer_object *user_bo;
615 	int ret;
616 
617 	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
618 	if (unlikely(!user_bo)) {
619 		DRM_ERROR("Failed to allocate a buffer.\n");
620 		return -ENOMEM;
621 	}
622 
623 	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
624 			  (dev_priv->has_mob) ?
625 			  &vmw_sys_placement :
626 			  &vmw_vram_sys_placement, true,
627 			  &vmw_user_bo_destroy);
628 	if (unlikely(ret != 0))
629 		return ret;
630 
631 	ttm_bo_get(&user_bo->vbo.base);
632 	ret = ttm_prime_object_init(tfile,
633 				    size,
634 				    &user_bo->prime,
635 				    shareable,
636 				    ttm_buffer_type,
637 				    &vmw_user_bo_release,
638 				    &vmw_user_bo_ref_obj_release);
639 	if (unlikely(ret != 0)) {
640 		ttm_bo_put(&user_bo->vbo.base);
641 		goto out_no_base_object;
642 	}
643 
644 	*p_vbo = &user_bo->vbo;
645 	if (p_base) {
646 		*p_base = &user_bo->prime.base;
647 		kref_get(&(*p_base)->refcount);
648 	}
649 	*handle = user_bo->prime.base.handle;
650 
651 out_no_base_object:
652 	return ret;
653 }
654 
655 
656 /**
657  * vmw_user_bo_verify_access - verify access permissions on this
658  * buffer object.
659  *
660  * @bo: Pointer to the buffer object being accessed
661  * @tfile: Identifying the caller.
662  */
663 int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
664 			      struct ttm_object_file *tfile)
665 {
666 	struct vmw_user_buffer_object *vmw_user_bo;
667 
668 	if (unlikely(bo->destroy != vmw_user_bo_destroy))
669 		return -EPERM;
670 
671 	vmw_user_bo = vmw_user_buffer_object(bo);
672 
673 	/* Check that the caller has opened the object. */
674 	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
675 		return 0;
676 
677 	DRM_ERROR("Could not grant buffer access.\n");
678 	return -EPERM;
679 }
680 
681 
682 /**
683  * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
684  * access, idling previous GPU operations on the buffer and optionally
685  * blocking it for further command submissions.
686  *
687  * @user_bo: Pointer to the buffer object being grabbed for CPU access
688  * @tfile: Identifying the caller.
689  * @flags: Flags indicating how the grab should be performed.
690  * Return: Zero on success, negative error code on error. In particular,
691  * -EBUSY will be returned if a dontblock operation is requested and the
692  * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
693  * interrupted by a signal.
694  *
695  * A blocking grab will be automatically released when @tfile is closed.
696  */
697 static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
698 				    struct ttm_object_file *tfile,
699 				    uint32_t flags)
700 {
701 	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
702 	struct ttm_buffer_object *bo = &user_bo->vbo.base;
703 	bool existed;
704 	int ret;
705 
706 	if (flags & drm_vmw_synccpu_allow_cs) {
707 		long lret;
708 
709 		lret = dma_resv_wait_timeout_rcu
710 			(bo->base.resv, true, true,
711 			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
712 		if (!lret)
713 			return -EBUSY;
714 		else if (lret < 0)
715 			return lret;
716 		return 0;
717 	}
718 
719 	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
720 	if (unlikely(ret != 0))
721 		return ret;
722 
723 	ret = ttm_bo_wait(bo, true, nonblock);
724 	if (likely(ret == 0))
725 		atomic_inc(&user_bo->vbo.cpu_writers);
726 
727 	ttm_bo_unreserve(bo);
728 	if (unlikely(ret != 0))
729 		return ret;
730 
731 	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
732 				 TTM_REF_SYNCCPU_WRITE, &existed, false);
733 	if (ret != 0 || existed)
734 		atomic_dec(&user_bo->vbo.cpu_writers);
735 
736 	return ret;
737 }
738 
739 /**
740  * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
741  * and unblock command submission on the buffer if blocked.
742  *
743  * @handle: Handle identifying the buffer object.
744  * @tfile: Identifying the caller.
745  * @flags: Flags indicating the type of release.
746  */
747 static int vmw_user_bo_synccpu_release(uint32_t handle,
748 					   struct ttm_object_file *tfile,
749 					   uint32_t flags)
750 {
751 	if (!(flags & drm_vmw_synccpu_allow_cs))
752 		return ttm_ref_object_base_unref(tfile, handle,
753 						 TTM_REF_SYNCCPU_WRITE);
754 
755 	return 0;
756 }
757 
758 
759 /**
760  * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
761  * functionality.
762  *
763  * @dev: Identifies the drm device.
764  * @data: Pointer to the ioctl argument.
765  * @file_priv: Identifies the caller.
766  * Return: Zero on success, negative error code on error.
767  *
768  * This function checks the ioctl arguments for validity and calls the
769  * relevant synccpu functions.
770  */
771 int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
772 			      struct drm_file *file_priv)
773 {
774 	struct drm_vmw_synccpu_arg *arg =
775 		(struct drm_vmw_synccpu_arg *) data;
776 	struct vmw_buffer_object *vbo;
777 	struct vmw_user_buffer_object *user_bo;
778 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
779 	struct ttm_base_object *buffer_base;
780 	int ret;
781 
782 	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
783 	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
784 			       drm_vmw_synccpu_dontblock |
785 			       drm_vmw_synccpu_allow_cs)) != 0) {
786 		DRM_ERROR("Illegal synccpu flags.\n");
787 		return -EINVAL;
788 	}
789 
790 	switch (arg->op) {
791 	case drm_vmw_synccpu_grab:
792 		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
793 					     &buffer_base);
794 		if (unlikely(ret != 0))
795 			return ret;
796 
797 		user_bo = container_of(vbo, struct vmw_user_buffer_object,
798 				       vbo);
799 		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
800 		vmw_bo_unreference(&vbo);
801 		ttm_base_object_unref(&buffer_base);
802 		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
803 			     ret != -EBUSY)) {
804 			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
805 				  (unsigned int) arg->handle);
806 			return ret;
807 		}
808 		break;
809 	case drm_vmw_synccpu_release:
810 		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
811 						  arg->flags);
812 		if (unlikely(ret != 0)) {
813 			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
814 				  (unsigned int) arg->handle);
815 			return ret;
816 		}
817 		break;
818 	default:
819 		DRM_ERROR("Invalid synccpu operation.\n");
820 		return -EINVAL;
821 	}
822 
823 	return 0;
824 }
825 
826 
827 /**
828  * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
829  * allocation functionality.
830  *
831  * @dev: Identifies the drm device.
832  * @data: Pointer to the ioctl argument.
833  * @file_priv: Identifies the caller.
834  * Return: Zero on success, negative error code on error.
835  *
836  * This function checks the ioctl arguments for validity and allocates a
837  * struct vmw_user_buffer_object bo.
838  */
839 int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
840 		       struct drm_file *file_priv)
841 {
842 	struct vmw_private *dev_priv = vmw_priv(dev);
843 	union drm_vmw_alloc_dmabuf_arg *arg =
844 	    (union drm_vmw_alloc_dmabuf_arg *)data;
845 	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
846 	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
847 	struct vmw_buffer_object *vbo;
848 	uint32_t handle;
849 	int ret;
850 
851 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
852 	if (unlikely(ret != 0))
853 		return ret;
854 
855 	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
856 				req->size, false, &handle, &vbo,
857 				NULL);
858 	if (unlikely(ret != 0))
859 		goto out_no_bo;
860 
861 	rep->handle = handle;
862 	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
863 	rep->cur_gmr_id = handle;
864 	rep->cur_gmr_offset = 0;
865 
866 	vmw_bo_unreference(&vbo);
867 
868 out_no_bo:
869 	ttm_read_unlock(&dev_priv->reservation_sem);
870 
871 	return ret;
872 }
873 
874 
875 /**
876  * vmw_bo_unref_ioctl - Generic handle close ioctl.
877  *
878  * @dev: Identifies the drm device.
879  * @data: Pointer to the ioctl argument.
880  * @file_priv: Identifies the caller.
881  * Return: Zero on success, negative error code on error.
882  *
883  * This function checks the ioctl arguments for validity and closes a
884  * handle to a TTM base object, optionally freeing the object.
885  */
886 int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
887 		       struct drm_file *file_priv)
888 {
889 	struct drm_vmw_unref_dmabuf_arg *arg =
890 	    (struct drm_vmw_unref_dmabuf_arg *)data;
891 
892 	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
893 					 arg->handle,
894 					 TTM_REF_USAGE);
895 }
896 
897 
898 /**
899  * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
900  *
901  * @tfile: The TTM object file the handle is registered with.
902  * @handle: The user buffer object handle
903  * @out: Pointer to where a pointer to the embedded
904  * struct vmw_buffer_object should be placed.
905  * @p_base: Pointer to where a pointer to the TTM base object should be
906  * placed, or NULL if no such pointer is required.
907  * Return: Zero on success, negative error code on error.
908  *
909  * Both the output base object pointer and the vmw buffer object pointer
910  * will be refcounted.
911  */
912 int vmw_user_bo_lookup(struct ttm_object_file *tfile,
913 		       uint32_t handle, struct vmw_buffer_object **out,
914 		       struct ttm_base_object **p_base)
915 {
916 	struct vmw_user_buffer_object *vmw_user_bo;
917 	struct ttm_base_object *base;
918 
919 	base = ttm_base_object_lookup(tfile, handle);
920 	if (unlikely(base == NULL)) {
921 		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
922 			  (unsigned long)handle);
923 		return -ESRCH;
924 	}
925 
926 	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
927 		ttm_base_object_unref(&base);
928 		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
929 			  (unsigned long)handle);
930 		return -EINVAL;
931 	}
932 
933 	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
934 				   prime.base);
935 	ttm_bo_get(&vmw_user_bo->vbo.base);
936 	if (p_base)
937 		*p_base = base;
938 	else
939 		ttm_base_object_unref(&base);
940 	*out = &vmw_user_bo->vbo;
941 
942 	return 0;
943 }
944 
945 /**
946  * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
947  * @tfile: The TTM object file the handle is registered with.
948  * @handle: The user buffer object handle.
949  *
950  * This function looks up a struct vmw_user_bo and returns a pointer to the
951  * struct vmw_buffer_object it derives from without refcounting the pointer.
952  * The returned pointer is only valid until vmw_user_bo_noref_release() is
953  * called, and the object pointed to by the returned pointer may be doomed.
954  * Any persistent usage of the object requires a refcount to be taken using
955  * ttm_bo_reference_unless_doomed(). Iff this function returns successfully, it
956  * needs to be paired with vmw_user_bo_noref_release(), and no sleeping or
957  * scheduling functions may be called in between these function calls.
958  *
959  * Return: A struct vmw_buffer_object pointer if successful or negative
960  * error pointer on failure.
961  */
962 struct vmw_buffer_object *
963 vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
964 {
965 	struct vmw_user_buffer_object *vmw_user_bo;
966 	struct ttm_base_object *base;
967 
968 	base = ttm_base_object_noref_lookup(tfile, handle);
969 	if (!base) {
970 		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
971 			  (unsigned long)handle);
972 		return ERR_PTR(-ESRCH);
973 	}
974 
975 	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
976 		ttm_base_object_noref_release();
977 		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
978 			  (unsigned long)handle);
979 		return ERR_PTR(-EINVAL);
980 	}
981 
982 	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
983 				   prime.base);
984 	return &vmw_user_bo->vbo;
985 }
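
/*
 * Illustrative sketch (not part of the driver): the lookup/release pairing
 * described above, assuming tfile and handle come from the calling ioctl.
 *
 *	struct vmw_buffer_object *vbo;
 *
 *	vbo = vmw_user_bo_noref_lookup(tfile, handle);
 *	if (IS_ERR(vbo))
 *		return PTR_ERR(vbo);
 *
 *	(short, non-sleeping use of vbo, or a reference taken with
 *	 ttm_bo_reference_unless_doomed() for persistent use)
 *
 *	vmw_user_bo_noref_release();
 */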
986 
987 /**
988  * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
989  *
990  * @tfile: The TTM object file to register the handle with.
991  * @vbo: The embedded vmw buffer object.
992  * @handle: Pointer to where the new handle should be placed.
993  * Return: Zero on success, negative error code on error.
994  */
995 int vmw_user_bo_reference(struct ttm_object_file *tfile,
996 			  struct vmw_buffer_object *vbo,
997 			  uint32_t *handle)
998 {
999 	struct vmw_user_buffer_object *user_bo;
1000 
1001 	if (vbo->base.destroy != vmw_user_bo_destroy)
1002 		return -EINVAL;
1003 
1004 	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
1005 
1006 	*handle = user_bo->prime.base.handle;
1007 	return ttm_ref_object_add(tfile, &user_bo->prime.base,
1008 				  TTM_REF_USAGE, NULL, false);
1009 }
1010 
1011 
1012 /**
1013  * vmw_bo_fence_single - Utility function to fence a single TTM buffer
1014  *                       object without unreserving it.
1015  *
1016  * @bo:             Pointer to the struct ttm_buffer_object to fence.
1017  * @fence:          Pointer to the fence. If NULL, this function will
1018  *                  insert a fence into the command stream.
1019  *
1020  * Contrary to the ttm_eu version of this function, it takes only
1021  * a single buffer object instead of a list, and it also doesn't
1022  * unreserve the buffer object, which needs to be done separately.
1023  */
1024 void vmw_bo_fence_single(struct ttm_buffer_object *bo,
1025 			 struct vmw_fence_obj *fence)
1026 {
1027 	struct ttm_bo_device *bdev = bo->bdev;
1028 
1029 	struct vmw_private *dev_priv =
1030 		container_of(bdev, struct vmw_private, bdev);
1031 
1032 	if (fence == NULL) {
1033 		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1034 		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
1035 		dma_fence_put(&fence->base);
1036 	} else
1037 		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
1038 }
1039 
1040 
1041 /**
1042  * vmw_dumb_create - Create a dumb kms buffer
1043  *
1044  * @file_priv: Pointer to a struct drm_file identifying the caller.
1045  * @dev: Pointer to the drm device.
1046  * @args: Pointer to a struct drm_mode_create_dumb structure
1047  * Return: Zero on success, negative error code on failure.
1048  *
1049  * This is a driver callback for the core drm create_dumb functionality.
1050  * Note that this is very similar to the vmw_bo_alloc ioctl, except
1051  * that the arguments have a different format.
1052  */
1053 int vmw_dumb_create(struct drm_file *file_priv,
1054 		    struct drm_device *dev,
1055 		    struct drm_mode_create_dumb *args)
1056 {
1057 	struct vmw_private *dev_priv = vmw_priv(dev);
1058 	struct vmw_buffer_object *vbo;
1059 	int ret;
1060 
1061 	args->pitch = args->width * ((args->bpp + 7) / 8);
1062 	args->size = args->pitch * args->height;
1063 
1064 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1065 	if (unlikely(ret != 0))
1066 		return ret;
1067 
1068 	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1069 				    args->size, false, &args->handle,
1070 				    &vbo, NULL);
1071 	if (unlikely(ret != 0))
1072 		goto out_no_bo;
1073 
1074 	vmw_bo_unreference(&vbo);
1075 out_no_bo:
1076 	ttm_read_unlock(&dev_priv->reservation_sem);
1077 	return ret;
1078 }
1079 
1080 
1081 /**
1082  * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
1083  *
1084  * @file_priv: Pointer to a struct drm_file identifying the caller.
1085  * @dev: Pointer to the drm device.
1086  * @handle: Handle identifying the dumb buffer.
1087  * @offset: The address space offset returned.
1088  * Return: Zero on success, negative error code on failure.
1089  *
1090  * This is a driver callback for the core drm dumb_map_offset functionality.
1091  */
1092 int vmw_dumb_map_offset(struct drm_file *file_priv,
1093 			struct drm_device *dev, uint32_t handle,
1094 			uint64_t *offset)
1095 {
1096 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1097 	struct vmw_buffer_object *out_buf;
1098 	int ret;
1099 
1100 	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
1101 	if (ret != 0)
1102 		return -EINVAL;
1103 
1104 	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
1105 	vmw_bo_unreference(&out_buf);
1106 	return 0;
1107 }
1108 
1109 
1110 /**
1111  * vmw_dumb_destroy - Destroy a dumb buffer
1112  *
1113  * @file_priv: Pointer to a struct drm_file identifying the caller.
1114  * @dev: Pointer to the drm device.
1115  * @handle: Handle identifying the dumb buffer.
1116  * Return: Zero on success, negative error code on failure.
1117  *
1118  * This is a driver callback for the core drm dumb_destroy functionality.
1119  */
1120 int vmw_dumb_destroy(struct drm_file *file_priv,
1121 		     struct drm_device *dev,
1122 		     uint32_t handle)
1123 {
1124 	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1125 					 handle, TTM_REF_USAGE);
1126 }
1127 
1128 
1129 /**
1130  * vmw_bo_swap_notify - swapout notify callback.
1131  *
1132  * @bo: The buffer object to be swapped out.
1133  */
1134 void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
1135 {
1136 	/* Is @bo embedded in a struct vmw_buffer_object? */
1137 	if (bo->destroy != vmw_bo_bo_free &&
1138 	    bo->destroy != vmw_user_bo_destroy)
1139 		return;
1140 
1141 	/* Kill any cached kernel maps before swapout */
1142 	vmw_bo_unmap(vmw_buffer_object(bo));
1143 }
1144 
1145 
1146 /**
1147  * vmw_bo_move_notify - TTM move_notify_callback
1148  *
1149  * @bo: The TTM buffer object about to move.
1150  * @mem: The struct ttm_mem_reg indicating to what memory
1151  *       region the move is taking place.
1152  *
1153  * Detaches cached maps and device bindings that require that the
1154  * buffer doesn't move.
1155  */
1156 void vmw_bo_move_notify(struct ttm_buffer_object *bo,
1157 			struct ttm_mem_reg *mem)
1158 {
1159 	struct vmw_buffer_object *vbo;
1160 
1161 	if (mem == NULL)
1162 		return;
1163 
1164 	/* Make sure @bo is embedded in a struct vmw_buffer_object. */
1165 	if (bo->destroy != vmw_bo_bo_free &&
1166 	    bo->destroy != vmw_user_bo_destroy)
1167 		return;
1168 
1169 	vbo = container_of(bo, struct vmw_buffer_object, base);
1170 
1171 	/*
1172 	 * Kill any cached kernel maps before move to or from VRAM.
1173 	 * With other types of moves, the underlying pages stay the same,
1174 	 * and the map can be kept.
1175 	 */
1176 	if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
1177 		vmw_bo_unmap(vbo);
1178 
1179 	/*
1180 	 * If we're moving a backup MOB out of MOB placement, then make sure we
1181 	 * read back all resource content first, and unbind the MOB from
1182 	 * the resource.
1183 	 */
1184 	if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
1185 		vmw_resource_unbind_list(vbo);
1186 }
1187