// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"


/**
 * struct vmw_user_buffer_object - User-space-visible buffer object
 *
 * @prime: The prime object providing user visibility.
 * @vbo: The struct vmw_buffer_object.
 */
struct vmw_user_buffer_object {
	struct ttm_prime_object prime;
	struct vmw_buffer_object vbo;
};


/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_buffer_object, base);
}


/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
 * buffer object.
 */
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}
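
/*
 * Illustrative sketch (not part of the driver): the two helpers above rely
 * purely on struct embedding, so both enclosing structures can be recovered
 * from the core TTM object. This assumes @bo is known to be embedded in a
 * struct vmw_user_buffer_object.
 */
static inline void vmw_bo_embedding_example(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vbo = vmw_buffer_object(bo);
	struct vmw_user_buffer_object *ubo = vmw_user_buffer_object(bo);

	/* Both embedded TTM objects point back at @bo itself. */
	WARN_ON(&vbo->base != bo || &ubo->vbo.base != bo);
}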


/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @placement: The placement to pin it in.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
			    struct vmw_buffer_object *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->base.pin_count > 0)
		ret = ttm_bo_mem_compat(placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);

err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}


/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to VRAM or GMR.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->base.pin_count > 0) {
		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
		goto out_unreserve;
	}

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}


/**
 * vmw_bo_pin_in_vram - Move a buffer to VRAM.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}


/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of VRAM.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;
	uint32_t new_flags;

	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->mem.num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in VRAM but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->mem.num_pages &&
	    bo->mem.start > 0 &&
	    buf->base.pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->base.pin_count > 0)
		ret = ttm_bo_mem_compat(&placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* For some reason we didn't end up at the start of VRAM */
	WARN_ON(ret == 0 && bo->mem.start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}


/**
 * vmw_bo_unpin - Unpin the given buffer, does not move the buffer.
 *
 * This function takes the reservation_sem in read mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_buffer_object *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
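
/*
 * Usage sketch (illustrative only, not part of the driver): a typical
 * pin/unpin pairing around work that needs the buffer at a stable VRAM
 * offset. @dev_priv and @buf are assumed valid and unpinned on entry.
 */
static inline int vmw_bo_pin_usage_example(struct vmw_private *dev_priv,
					   struct vmw_buffer_object *buf)
{
	int ret;

	ret = vmw_bo_pin_in_vram(dev_priv, buf, true);
	if (ret)
		return ret; /* May be -ERESTARTSYS if a signal arrived. */

	/* ... work that relies on a fixed VRAM placement goes here ... */

	return vmw_bo_unpin(dev_priv, buf, false);
}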

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->mem.start << PAGE_SHIFT;
	} else {
		ptr->gmrId = bo->mem.start;
		ptr->offset = 0;
	}
}
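
/*
 * Example (illustrative, derived directly from the code above): a buffer
 * pinned at VRAM page 16 yields
 * { .gmrId = SVGA_GMR_FRAMEBUFFER, .offset = 16 << PAGE_SHIFT },
 * while a buffer placed in GMR 5 yields { .gmrId = 5, .offset = 0 }.
 */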


/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin == !!bo->pin_count)
		return;

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->mem.mem_type;
	pl.flags = bo->mem.placement;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);

	if (pin)
		ttm_bo_pin(bo);
	else
		ttm_bo_unpin(bo);
}

/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 *
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
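
/*
 * Usage sketch (illustrative only): CPU access through the cached map.
 * The buffer is assumed pinned or reserved so the returned address stays
 * valid while it is being used.
 */
static inline void vmw_bo_map_usage_example(struct vmw_buffer_object *vbo)
{
	u32 *cpu_addr = vmw_bo_map_and_cache(vbo);

	if (!cpu_addr)
		return; /* Mapping failed. */

	cpu_addr[0] = 0xdeadbeef; /* Any CPU-side read or write. */

	/*
	 * No explicit unmap is required here; the cached map is torn down
	 * automatically on move, swapout, or destruction. vmw_bo_unmap()
	 * may be called to drop it eagerly.
	 */
}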


/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
}


/**
 * vmw_bo_acc_size - Calculate the memory accounting size of a buffer object
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 * Return: The size to account against the TTM memory global for the buffer.
 */
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
			      bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_buffer_object));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
			TTM_OBJ_EXTRA_SIZE;
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}


/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	WARN_ON(vmw_bo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
	vmw_bo_unmap(vmw_bo);
	kfree(vmw_bo);
}


/**
 * vmw_user_bo_destroy - vmw user buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;

	WARN_ON(vbo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	vmw_bo_unmap(vbo);
	ttm_prime_object_kfree(vmw_user_bo, prime);
}

/**
 * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
 *
 * @dev_priv: Pointer to the device private struct
 * @size: size of the BO we need
 * @placement: where to put it
 * @p_bo: resulting BO
 *
 * Creates and pins a simple BO for in-kernel use.
 */
int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
			 struct ttm_placement *placement,
			 struct ttm_buffer_object **p_bo)
{
	unsigned npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(!bo))
		return -ENOMEM;

	acc_size = ttm_round_pot(sizeof(*bo));
	acc_size += ttm_round_pot(npages * sizeof(void *));
	acc_size += ttm_round_pot(sizeof(struct ttm_tt));

	ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
	if (unlikely(ret))
		goto error_free;

	ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
				   ttm_bo_type_device, placement, 0,
				   &ctx, NULL, NULL, NULL);
	if (unlikely(ret))
		goto error_account;

	ttm_bo_pin(bo);
	ttm_bo_unreserve(bo);
	*p_bo = bo;

	return 0;

error_account:
	ttm_mem_global_free(&ttm_mem_glob, acc_size);

error_free:
	kfree(bo);
	return ret;
}
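
/*
 * Usage sketch (illustrative only, assuming the usual TTM teardown of
 * ttm_bo_unpin()/ttm_bo_put()): create a page-sized kernel BO in system
 * memory and release it again.
 */
static inline int vmw_bo_create_kernel_example(struct vmw_private *dev_priv)
{
	struct ttm_buffer_object *bo;
	int ret;

	ret = vmw_bo_create_kernel(dev_priv, PAGE_SIZE, &vmw_sys_placement,
				   &bo);
	if (ret)
		return ret;

	/* ... use the pinned BO ... */

	/* Pin state is protected by the reservation lock. */
	if (ttm_bo_reserve(bo, false, false, NULL) == 0) {
		ttm_bo_unpin(bo);
		ttm_bo_unreserve(bo);
	}
	ttm_bo_put(bo);
	return 0;
}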

/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptibly.
 * @pin: If the BO should be created pinned at a fixed location.
 * @bo_free: The buffer object destructor.
 * Return: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_buffer_object *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible, bool pin,
		void (*bo_free)(struct ttm_buffer_object *bo))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_bo_destroy);

	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

	acc_size = vmw_bo_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
	if (unlikely(ret))
		return ret;

	ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
				   ttm_bo_type_device, placement,
				   0, &ctx, NULL, NULL, bo_free);
	if (unlikely(ret)) {
		ttm_mem_global_free(&ttm_mem_glob, acc_size);
		return ret;
	}

	if (pin)
		ttm_bo_pin(&vmw_bo->base);
	ttm_bo_unreserve(&vmw_bo->base);
	return 0;
}
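
/*
 * Usage sketch (illustrative only): allocating and initializing a plain
 * (non-user) vmw buffer object with the vmw_bo_bo_free() destructor, which
 * is one of the pairings vmw_bo_init() checks for above.
 */
static inline struct vmw_buffer_object *
vmw_bo_init_example(struct vmw_private *dev_priv, size_t size)
{
	struct vmw_buffer_object *vbo;
	int ret;

	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return ERR_PTR(-ENOMEM);

	ret = vmw_bo_init(dev_priv, vbo, size, &vmw_sys_placement,
			  true, false, &vmw_bo_bo_free);
	if (ret)
		return ERR_PTR(ret); /* Freeing on error; see note above. */

	return vbo;
}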


/**
 * vmw_user_bo_release - TTM reference base object release callback for
 * vmw user buffer objects
 *
 * @p_base: The TTM base object pointer about to be unreferenced.
 *
 * Clears the TTM base object pointer and drops the reference the
 * base object has on the underlying struct vmw_buffer_object.
 */
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_put(&vmw_user_bo->vbo.base);
}


/**
 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 * for vmw user buffer objects
 *
 * @base: Pointer to the TTM base object
 * @ref_type: Reference type of the reference reaching zero.
 *
 * Called when user-space drops its last synccpu reference on the buffer
 * object, either explicitly or as part of a cleanup file close.
 */
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
					enum ttm_ref_type ref_type)
{
	struct vmw_user_buffer_object *user_bo;

	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		atomic_dec(&user_bo->vbo.cpu_writers);
		break;
	default:
		WARN_ONCE(true, "Undefined buffer object reference release.\n");
	}
}


/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct vmw_private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: Pointer to where a pointer to the newly created TTM base object
 * should be placed, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t size,
		      bool shareable,
		      uint32_t *handle,
		      struct vmw_buffer_object **p_vbo,
		      struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *user_bo;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(!user_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
			  (dev_priv->has_mob) ?
			  &vmw_sys_placement :
			  &vmw_vram_sys_placement, true, false,
			  &vmw_user_bo_destroy);
	if (unlikely(ret != 0))
		return ret;

	ttm_bo_get(&user_bo->vbo.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_bo_release,
				    &vmw_user_bo_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_put(&user_bo->vbo.base);
		goto out_no_base_object;
	}

	*p_vbo = &user_bo->vbo;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.handle;

out_no_base_object:
	return ret;
}
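
/*
 * Usage sketch (illustrative only): allocate a non-shareable user buffer
 * object for the calling file, then drop the local reference once the
 * handle has been handed to user-space. This mirrors the pattern used by
 * vmw_bo_alloc_ioctl() below.
 */
static inline int vmw_user_bo_alloc_example(struct vmw_private *dev_priv,
					    struct ttm_object_file *tfile,
					    uint32_t size, uint32_t *handle)
{
	struct vmw_buffer_object *vbo;
	int ret;

	ret = vmw_user_bo_alloc(dev_priv, tfile, size, false, handle,
				&vbo, NULL);
	if (ret)
		return ret;

	/* The handle keeps the object alive; the local ref can go. */
	vmw_bo_unreference(&vbo);
	return 0;
}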


/**
 * vmw_user_bo_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 * Return: Zero if access was granted, -EPERM otherwise.
 */
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
			      struct ttm_object_file *tfile)
{
	struct vmw_user_buffer_object *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_bo_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_buffer_object(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}


/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
				    struct ttm_object_file *tfile,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &user_bo->vbo.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout_rcu
			(bo->base.resv, true, true,
			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&user_bo->vbo.cpu_writers);

	ttm_bo_unreserve(bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed, false);
	if (ret != 0 || existed)
		atomic_dec(&user_bo->vbo.cpu_writers);

	return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_bo_synccpu_release(uint32_t handle,
				       struct ttm_object_file *tfile,
				       uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}


/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_buffer_object *vbo;
	struct vmw_user_buffer_object *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
					 &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(vbo, struct vmw_user_buffer_object,
				       vbo);
		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_bo_unreference(&vbo);
		ttm_base_object_unref(&buffer_base);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}


/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and allocates a
 * struct vmw_user_buffer_object bo.
 */
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_buffer_object *vbo;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				req->size, false, &handle, &vbo,
				NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_bo_unreference(&vbo);

out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}


/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}


/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 *
 * Both the output base object pointer and the vmw buffer object pointer
 * will be refcounted.
 */
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
		       uint32_t handle, struct vmw_buffer_object **out,
		       struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_get(&vmw_user_bo->vbo.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->vbo;

	return 0;
}
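
/*
 * Usage sketch (illustrative only): resolve a user handle to a refcounted
 * buffer object, use it, and drop the reference. This is the same pattern
 * vmw_user_bo_synccpu_ioctl() follows above.
 */
static inline int vmw_user_bo_lookup_example(struct ttm_object_file *tfile,
					     uint32_t handle)
{
	struct vmw_buffer_object *vbo;
	int ret;

	ret = vmw_user_bo_lookup(tfile, handle, &vbo, NULL);
	if (ret)
		return ret;

	/* ... operate on @vbo while holding the reference ... */

	vmw_bo_unreference(&vbo);
	return 0;
}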

/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without
 * taking a reference
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * This function looks up a struct vmw_user_bo and returns a pointer to the
 * struct vmw_buffer_object it derives from without refcounting the pointer.
 * The returned pointer is only valid until vmw_user_bo_noref_release() is
 * called, and the object pointed to by the returned pointer may be doomed.
 * Any persistent usage of the object requires a refcount to be taken using
 * ttm_bo_reference_unless_doomed(). If this function returns successfully it
 * needs to be paired with vmw_user_bo_noref_release(), and no sleeping
 * or scheduling functions may be called in between these function calls.
 *
 * Return: A struct vmw_buffer_object pointer if successful or negative
 * error pointer on failure.
 */
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-ESRCH);
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_noref_release();
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	return &vmw_user_bo->vbo;
}
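
/*
 * Usage sketch (illustrative only, assuming vmw_user_bo_noref_release()
 * from vmwgfx_drv.h as the documented pairing): a short, non-sleeping
 * critical section over the looked-up object.
 */
static inline int vmw_user_bo_noref_example(struct ttm_object_file *tfile,
					    u32 handle)
{
	struct vmw_buffer_object *vbo;

	vbo = vmw_user_bo_noref_lookup(tfile, handle);
	if (IS_ERR(vbo))
		return PTR_ERR(vbo);

	/* ... brief, non-sleeping use of @vbo only ... */

	vmw_user_bo_noref_release();
	return 0;
}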

/**
 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 *
 * @tfile: The TTM object file to register the handle with.
 * @vbo: The embedded vmw buffer object.
 * @handle: Pointer to where the new handle should be placed.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_reference(struct ttm_object_file *tfile,
			  struct vmw_buffer_object *vbo,
			  uint32_t *handle)
{
	struct vmw_user_buffer_object *user_bo;

	if (vbo->base.destroy != vmw_user_bo_destroy)
		return -EINVAL;

	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

	*handle = user_bo->prime.base.handle;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL, false);
}


/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
		dma_fence_put(&fence->base);
	} else
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
}
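
/*
 * Usage sketch (illustrative only): fence a reserved buffer after GPU work
 * has been queued against it. As documented above, unreserving is the
 * caller's responsibility.
 */
static inline void vmw_bo_fence_example(struct ttm_buffer_object *bo)
{
	int ret = ttm_bo_reserve(bo, false, false, NULL);

	if (ret)
		return;

	/* ... queue GPU commands touching @bo ... */

	vmw_bo_fence_single(bo, NULL); /* NULL: insert a new fence. */
	ttm_bo_unreserve(bo);
}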


/**
 * vmw_dumb_create - Create a dumb KMS buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_buffer_object *vbo;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				args->size, false, &args->handle,
				&vbo, NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	vmw_bo_unreference(&vbo);
out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}


/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_buffer_object *out_buf;
	int ret;

	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
	vmw_bo_unreference(&out_buf);
	return 0;
}
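
/*
 * User-space-side sketch (illustrative only, assuming a DRM fd and standard
 * POSIX mmap()): the offset returned above is what user-space passes to
 * mmap() on the DRM device file to map the dumb buffer:
 *
 *	uint64_t offset;	// from DRM_IOCTL_MODE_MAP_DUMB
 *	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, offset);
 */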


/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}


/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Is @bo embedded in a struct vmw_buffer_object? */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(vmw_buffer_object(bo));
}


/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 * region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem)
{
	struct vmw_buffer_object *vbo;

	/* Make sure @bo is embedded in a struct vmw_buffer_object. */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	vbo = container_of(bo, struct vmw_buffer_object, base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
	 * With other types of moves, the underlying pages stay the same,
	 * and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, then make sure we
	 * read back all resource content first, and unbind the MOB from
	 * the resource.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}