/**********************************************************
 * Copyright 2009-2015 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

/**
 * @file
 *
 * Wrappers for DRM ioctl functionality used by the rest of the vmw
 * drm winsys.
 *
 * Based on svgaicd_escape.c
 */


#include "svga_cmd.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "svgadump/svga_dump.h"
#include "frontend/drm_driver.h"
#include "vmw_screen.h"
#include "vmw_context.h"
#include "vmw_fence.h"
#include "xf86drm.h"
#include "vmwgfx_drm.h"
#include "svga3d_caps.h"
#include "svga3d_reg.h"

#include "os/os_mman.h"

#include <errno.h>
#include <unistd.h>

#define VMW_MAX_DEFAULT_TEXTURE_SIZE   (128 * 1024 * 1024)
#define VMW_FENCE_TIMEOUT_SECONDS 3600UL

#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
   (svga3d_flags & ((uint64_t)UINT32_MAX))
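/*
 * Worked example (for illustration): SVGA3D_FLAGS_64(0x1, 0x2) yields the
 * 64-bit value 0x0000000100000002, and SVGA3D_FLAGS_UPPER_32() /
 * SVGA3D_FLAGS_LOWER_32() recover 0x1 and 0x2 from it again.
 */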
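/**
 * struct vmw_region - Data describing a kernel buffer object
 *
 * @handle: Kernel handle of the underlying buffer object
 * @map_handle: Offset used when mmapping the buffer through the drm fd
 * @data: Pointer to the current CPU mapping, or NULL if unmapped
 * @map_count: Number of outstanding map requests
 * @drm_fd: File descriptor of the drm device the buffer was allocated on
 * @size: Size of the buffer object in bytes
 */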
struct vmw_region
{
   uint32_t handle;
   uint64_t map_handle;
   void *data;
   uint32_t map_count;
   int drm_fd;
   uint32_t size;
};

uint32_t
vmw_region_size(struct vmw_region *region)
{
   return region->size;
}

#if defined(__DragonFly__) || defined(__FreeBSD__) || \
    defined(__NetBSD__) || defined(__OpenBSD__)
#define ERESTART EINTR
#endif

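/**
 * vmw_ioctl_context_create - Create a legacy rendering context
 *
 * @vws: Winsys screen
 *
 * Returns the id of the new context on success, (uint32) -1 on failure.
 */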
uint32
vmw_ioctl_context_create(struct vmw_winsys_screen *vws)
{
   struct drm_vmw_context_arg c_arg;
   int ret;

   VMW_FUNC;

   ret = drmCommandRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_CONTEXT,
                        &c_arg, sizeof(c_arg));

   if (ret)
      return -1;

   vmw_printf("Context id is %d\n", c_arg.cid);
   return c_arg.cid;
}

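/**
 * vmw_ioctl_extended_context_create - Create a rendering context,
 * requesting either a DX (vgpu10) or a legacy context
 *
 * @vws: Winsys screen
 * @vgpu10: Whether to request a DX context
 *
 * Returns the id of the new context on success, (uint32) -1 on failure.
 */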
uint32
vmw_ioctl_extended_context_create(struct vmw_winsys_screen *vws,
                                  boolean vgpu10)
{
   union drm_vmw_extended_context_arg c_arg;
   int ret;

   VMW_FUNC;
   memset(&c_arg, 0, sizeof(c_arg));
   c_arg.req = (vgpu10 ? drm_vmw_context_dx : drm_vmw_context_legacy);
   ret = drmCommandWriteRead(vws->ioctl.drm_fd,
                             DRM_VMW_CREATE_EXTENDED_CONTEXT,
                             &c_arg, sizeof(c_arg));

   if (ret)
      return -1;

   vmw_printf("Context id is %d\n", c_arg.rep.cid);
   return c_arg.rep.cid;
}

void
vmw_ioctl_context_destroy(struct vmw_winsys_screen *vws, uint32 cid)
{
   struct drm_vmw_context_arg c_arg;

   VMW_FUNC;

   memset(&c_arg, 0, sizeof(c_arg));
   c_arg.cid = cid;

   (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_CONTEXT,
                         &c_arg, sizeof(c_arg));
}

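/**
 * vmw_ioctl_surface_create - Create a non-guest-backed surface
 *
 * @vws: Winsys screen
 * @flags: SVGA3D surface flags
 * @format: SVGA3D surface format
 * @usage: SVGA_SURFACE_USAGE_* flags
 * @size: Size of the base mip level
 * @numFaces: Number of faces
 * @numMipLevels: Number of mipmap levels per face
 * @sampleCount: Unused by this path
 *
 * Builds the per-face, per-mip-level size array expected by the kernel
 * and issues DRM_VMW_CREATE_SURFACE. Returns the surface id on success,
 * (uint32) -1 on failure.
 */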
uint32
vmw_ioctl_surface_create(struct vmw_winsys_screen *vws,
                         SVGA3dSurface1Flags flags,
                         SVGA3dSurfaceFormat format,
                         unsigned usage,
                         SVGA3dSize size,
                         uint32_t numFaces, uint32_t numMipLevels,
                         unsigned sampleCount)
{
   union drm_vmw_surface_create_arg s_arg;
   struct drm_vmw_surface_create_req *req = &s_arg.req;
   struct drm_vmw_surface_arg *rep = &s_arg.rep;
   struct drm_vmw_size sizes[DRM_VMW_MAX_SURFACE_FACES*
                             DRM_VMW_MAX_MIP_LEVELS];
   struct drm_vmw_size *cur_size;
   uint32_t iFace;
   uint32_t iMipLevel;
   int ret;

   vmw_printf("%s flags %d format %d\n", __FUNCTION__, flags, format);

   memset(&s_arg, 0, sizeof(s_arg));
   req->flags = (uint32_t) flags;
   req->scanout = !!(usage & SVGA_SURFACE_USAGE_SCANOUT);
   req->format = (uint32_t) format;
   req->shareable = !!(usage & SVGA_SURFACE_USAGE_SHARED);

   assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
          DRM_VMW_MAX_MIP_LEVELS);
   cur_size = sizes;
   for (iFace = 0; iFace < numFaces; ++iFace) {
      SVGA3dSize mipSize = size;

      req->mip_levels[iFace] = numMipLevels;
      for (iMipLevel = 0; iMipLevel < numMipLevels; ++iMipLevel) {
         cur_size->width = mipSize.width;
         cur_size->height = mipSize.height;
         cur_size->depth = mipSize.depth;
         mipSize.width = MAX2(mipSize.width >> 1, 1);
         mipSize.height = MAX2(mipSize.height >> 1, 1);
         mipSize.depth = MAX2(mipSize.depth >> 1, 1);
         cur_size++;
      }
   }
   for (iFace = numFaces; iFace < SVGA3D_MAX_SURFACE_FACES; ++iFace) {
      req->mip_levels[iFace] = 0;
   }

   req->size_addr = (unsigned long)&sizes;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_SURFACE,
                             &s_arg, sizeof(s_arg));

   if (ret)
      return -1;

   vmw_printf("Surface id is %d\n", rep->sid);

   return rep->sid;
}


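/**
 * vmw_ioctl_gb_surface_create - Create a guest-backed surface
 *
 * @vws: Winsys screen
 * @flags: SVGA3D surface flags
 * @format: SVGA3D surface format
 * @usage: SVGA_SURFACE_USAGE_* flags
 * @size: Size of the base mip level
 * @numFaces: Number of faces
 * @numMipLevels: Number of mipmap levels per face
 * @sampleCount: Multisample count for vgpu10 surfaces
 * @buffer_handle: Handle of an existing backing buffer, or 0 to have the
 * kernel create one
 * @multisamplePattern: Multisample pattern (drm 2.15+ only)
 * @qualityLevel: Multisample quality level (drm 2.15+ only)
 * @p_region: If non-NULL, receives a newly allocated struct vmw_region
 * referencing the backing buffer
 *
 * Uses DRM_VMW_GB_SURFACE_CREATE_EXT when the kernel is new enough
 * (drm 2.15+), falling back to DRM_VMW_GB_SURFACE_CREATE otherwise.
 * Returns the surface handle on success, SVGA3D_INVALID_ID on failure.
 */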
uint32
vmw_ioctl_gb_surface_create(struct vmw_winsys_screen *vws,
                            SVGA3dSurfaceAllFlags flags,
                            SVGA3dSurfaceFormat format,
                            unsigned usage,
                            SVGA3dSize size,
                            uint32_t numFaces,
                            uint32_t numMipLevels,
                            unsigned sampleCount,
                            uint32_t buffer_handle,
                            SVGA3dMSPattern multisamplePattern,
                            SVGA3dMSQualityLevel qualityLevel,
                            struct vmw_region **p_region)
{
   union {
      union drm_vmw_gb_surface_create_ext_arg ext_arg;
      union drm_vmw_gb_surface_create_arg arg;
   } s_arg;
   struct drm_vmw_gb_surface_create_rep *rep;
   struct vmw_region *region = NULL;
   int ret;

   vmw_printf("%s flags %d format %d\n", __FUNCTION__, flags, format);

   if (p_region) {
      region = CALLOC_STRUCT(vmw_region);
      if (!region)
         return SVGA3D_INVALID_ID;
   }

   memset(&s_arg, 0, sizeof(s_arg));

   if (vws->ioctl.have_drm_2_15) {
      struct drm_vmw_gb_surface_create_ext_req *req = &s_arg.ext_arg.req;
      rep = &s_arg.ext_arg.rep;

      req->version = drm_vmw_gb_surface_v1;
      req->multisample_pattern = multisamplePattern;
      req->quality_level = qualityLevel;
      req->buffer_byte_stride = 0;
      req->must_be_zero = 0;
      req->base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(flags);
      req->svga3d_flags_upper_32_bits = SVGA3D_FLAGS_UPPER_32(flags);
      req->base.format = (uint32_t) format;

      if (usage & SVGA_SURFACE_USAGE_SCANOUT)
         req->base.drm_surface_flags |= drm_vmw_surface_flag_scanout;

      if (usage & SVGA_SURFACE_USAGE_SHARED)
         req->base.drm_surface_flags |= drm_vmw_surface_flag_shareable;

      if ((usage & SVGA_SURFACE_USAGE_COHERENT) || vws->force_coherent)
         req->base.drm_surface_flags |= drm_vmw_surface_flag_coherent;

      req->base.drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
      req->base.base_size.width = size.width;
      req->base.base_size.height = size.height;
      req->base.base_size.depth = size.depth;
      req->base.mip_levels = numMipLevels;
      req->base.multisample_count = 0;
      req->base.autogen_filter = SVGA3D_TEX_FILTER_NONE;

      if (vws->base.have_vgpu10) {
         req->base.array_size = numFaces;
         req->base.multisample_count = sampleCount;
      } else {
         assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
                DRM_VMW_MAX_MIP_LEVELS);
         req->base.array_size = 0;
      }

      req->base.buffer_handle = buffer_handle ?
         buffer_handle : SVGA3D_INVALID_ID;

      ret = drmCommandWriteRead(vws->ioctl.drm_fd,
                                DRM_VMW_GB_SURFACE_CREATE_EXT, &s_arg.ext_arg,
                                sizeof(s_arg.ext_arg));

      if (ret)
         goto out_fail_create;
   } else {
      struct drm_vmw_gb_surface_create_req *req = &s_arg.arg.req;
      rep = &s_arg.arg.rep;

      req->svga3d_flags = (uint32_t) flags;
      req->format = (uint32_t) format;

      if (usage & SVGA_SURFACE_USAGE_SCANOUT)
         req->drm_surface_flags |= drm_vmw_surface_flag_scanout;

      if (usage & SVGA_SURFACE_USAGE_SHARED)
         req->drm_surface_flags |= drm_vmw_surface_flag_shareable;

      req->drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
      req->base_size.width = size.width;
      req->base_size.height = size.height;
      req->base_size.depth = size.depth;
      req->mip_levels = numMipLevels;
      req->multisample_count = 0;
      req->autogen_filter = SVGA3D_TEX_FILTER_NONE;

      if (vws->base.have_vgpu10) {
         req->array_size = numFaces;
         req->multisample_count = sampleCount;
      } else {
         assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
                DRM_VMW_MAX_MIP_LEVELS);
         req->array_size = 0;
      }

      req->buffer_handle = buffer_handle ?
         buffer_handle : SVGA3D_INVALID_ID;

      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_CREATE,
                                &s_arg.arg, sizeof(s_arg.arg));

      if (ret)
         goto out_fail_create;
   }

   if (p_region) {
      region->handle = rep->buffer_handle;
      region->map_handle = rep->buffer_map_handle;
      region->drm_fd = vws->ioctl.drm_fd;
      region->size = rep->backup_size;
      *p_region = region;
   }

   vmw_printf("Surface id is %d\n", rep->handle);
   return rep->handle;

out_fail_create:
   FREE(region);
   return SVGA3D_INVALID_ID;
}

/**
 * vmw_ioctl_surface_req - Fill in a struct surface_req
 *
 * @vws: Winsys screen
 * @whandle: Surface handle
 * @req: The struct surface req to fill in
 * @needs_unref: This call takes a kernel surface reference that needs to
 * be unreferenced.
 *
 * Returns 0 on success, negative error type otherwise.
 * Fills in the surface_req structure according to handle type and kernel
 * capabilities.
 */
static int
vmw_ioctl_surface_req(const struct vmw_winsys_screen *vws,
                      const struct winsys_handle *whandle,
                      struct drm_vmw_surface_arg *req,
                      boolean *needs_unref)
{
   int ret;

   switch (whandle->type) {
   case WINSYS_HANDLE_TYPE_SHARED:
   case WINSYS_HANDLE_TYPE_KMS:
      *needs_unref = FALSE;
      req->handle_type = DRM_VMW_HANDLE_LEGACY;
      req->sid = whandle->handle;
      break;
   case WINSYS_HANDLE_TYPE_FD:
      if (!vws->ioctl.have_drm_2_6) {
         uint32_t handle;

         ret = drmPrimeFDToHandle(vws->ioctl.drm_fd, whandle->handle, &handle);
         if (ret) {
            vmw_error("Failed to get handle from prime fd %d.\n",
                      (int) whandle->handle);
            return -EINVAL;
         }

         *needs_unref = TRUE;
         req->handle_type = DRM_VMW_HANDLE_LEGACY;
         req->sid = handle;
      } else {
         *needs_unref = FALSE;
         req->handle_type = DRM_VMW_HANDLE_PRIME;
         req->sid = whandle->handle;
      }
      break;
   default:
      vmw_error("Attempt to import unsupported handle type %d.\n",
                whandle->type);
      return -EINVAL;
   }

   return 0;
}

/**
 * vmw_ioctl_gb_surface_ref - Put a reference on a guest-backed surface and
 * get surface information
 *
 * @vws: Screen to register the reference on
 * @whandle: Winsys handle of the guest-backed surface
 * @flags: Flags used when the surface was created
 * @format: Format used when the surface was created
 * @numMipLevels: Number of mipmap levels of the surface
 * @handle: On successful return, the kernel handle of the surface
 * @p_region: On successful return points to a newly allocated
 * struct vmw_region holding a reference to the surface backup buffer.
 *
 * Returns 0 on success, a system error on failure.
 */
int
vmw_ioctl_gb_surface_ref(struct vmw_winsys_screen *vws,
                         const struct winsys_handle *whandle,
                         SVGA3dSurfaceAllFlags *flags,
                         SVGA3dSurfaceFormat *format,
                         uint32_t *numMipLevels,
                         uint32_t *handle,
                         struct vmw_region **p_region)
{
   struct vmw_region *region = NULL;
   boolean needs_unref = FALSE;
   int ret;

   assert(p_region != NULL);
   region = CALLOC_STRUCT(vmw_region);
   if (!region)
      return -ENOMEM;

   if (vws->ioctl.have_drm_2_15) {
      union drm_vmw_gb_surface_reference_ext_arg s_arg;
      struct drm_vmw_surface_arg *req = &s_arg.req;
      struct drm_vmw_gb_surface_ref_ext_rep *rep = &s_arg.rep;

      memset(&s_arg, 0, sizeof(s_arg));
      ret = vmw_ioctl_surface_req(vws, whandle, req, &needs_unref);
      if (ret)
         goto out_fail_req;

      *handle = req->sid;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_REF_EXT,
                                &s_arg, sizeof(s_arg));

      if (ret)
         goto out_fail_ref;

      region->handle = rep->crep.buffer_handle;
      region->map_handle = rep->crep.buffer_map_handle;
      region->drm_fd = vws->ioctl.drm_fd;
      region->size = rep->crep.backup_size;
      *p_region = region;

      *handle = rep->crep.handle;
      *flags = SVGA3D_FLAGS_64(rep->creq.svga3d_flags_upper_32_bits,
                               rep->creq.base.svga3d_flags);
      *format = rep->creq.base.format;
      *numMipLevels = rep->creq.base.mip_levels;
   } else {
      union drm_vmw_gb_surface_reference_arg s_arg;
      struct drm_vmw_surface_arg *req = &s_arg.req;
      struct drm_vmw_gb_surface_ref_rep *rep = &s_arg.rep;

      memset(&s_arg, 0, sizeof(s_arg));
      ret = vmw_ioctl_surface_req(vws, whandle, req, &needs_unref);
      if (ret)
         goto out_fail_req;

      *handle = req->sid;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_REF,
                                &s_arg, sizeof(s_arg));

      if (ret)
         goto out_fail_ref;

      region->handle = rep->crep.buffer_handle;
      region->map_handle = rep->crep.buffer_map_handle;
      region->drm_fd = vws->ioctl.drm_fd;
      region->size = rep->crep.backup_size;
      *p_region = region;

      *handle = rep->crep.handle;
      *flags = rep->creq.svga3d_flags;
      *format = rep->creq.format;
      *numMipLevels = rep->creq.mip_levels;
   }

   vmw_printf("%s flags %d format %d\n", __FUNCTION__, *flags, *format);

   if (needs_unref)
      vmw_ioctl_surface_destroy(vws, *handle);

   return 0;
out_fail_ref:
   if (needs_unref)
      vmw_ioctl_surface_destroy(vws, *handle);
out_fail_req:
   FREE(region);
   return ret;
}

void
vmw_ioctl_surface_destroy(struct vmw_winsys_screen *vws, uint32 sid)
{
   struct drm_vmw_surface_arg s_arg;

   VMW_FUNC;

   memset(&s_arg, 0, sizeof(s_arg));
   s_arg.sid = sid;

   (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_SURFACE,
                         &s_arg, sizeof(s_arg));
}

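/**
 * vmw_ioctl_command - Submit a command buffer for execution
 *
 * @vws: Winsys screen
 * @cid: Context id to submit to (used for vgpu10 only)
 * @throttle_us: Throttling hint passed to the kernel
 * @commands: Pointer to the command data
 * @size: Size of the command data in bytes
 * @pfence: If non-NULL, receives a fence for the submission, or NULL if
 * the kernel has already synced or fence creation failed
 * @imported_fence_fd: Fence fd the submission should wait on, or -1
 * @flags: SVGA_HINT_FLAG_* submission flags
 *
 * Issues DRM_VMW_EXECBUF, retrying on -ERESTART and -EBUSY. Aborts the
 * process if the submission itself fails.
 */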
void
vmw_ioctl_command(struct vmw_winsys_screen *vws, int32_t cid,
                  uint32_t throttle_us, void *commands, uint32_t size,
                  struct pipe_fence_handle **pfence, int32_t imported_fence_fd,
                  uint32_t flags)
{
   struct drm_vmw_execbuf_arg arg;
   struct drm_vmw_fence_rep rep;
   int ret;
   int argsize;

#ifdef DEBUG
   {
      static boolean firsttime = TRUE;
      static boolean debug = FALSE;
      static boolean skip = FALSE;
      if (firsttime) {
         debug = debug_get_bool_option("SVGA_DUMP_CMD", FALSE);
         skip = debug_get_bool_option("SVGA_SKIP_CMD", FALSE);
      }
      if (debug) {
         VMW_FUNC;
         svga_dump_commands(commands, size);
      }
      firsttime = FALSE;
      if (skip) {
         size = 0;
      }
   }
#endif

   memset(&arg, 0, sizeof(arg));
   memset(&rep, 0, sizeof(rep));

   if (flags & SVGA_HINT_FLAG_EXPORT_FENCE_FD) {
      arg.flags |= DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD;
   }

   if (imported_fence_fd != -1) {
      arg.flags |= DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD;
   }

   rep.error = -EFAULT;
   if (pfence)
      arg.fence_rep = (unsigned long)&rep;
   arg.commands = (unsigned long)commands;
   arg.command_size = size;
   arg.throttle_us = throttle_us;
   arg.version = vws->ioctl.drm_execbuf_version;
   arg.context_handle = (vws->base.have_vgpu10 ? cid : SVGA3D_INVALID_ID);

   /* Older DRM module requires this to be zero */
   if (vws->base.have_fence_fd)
      arg.imported_fence_fd = imported_fence_fd;

   /* In DRM_VMW_EXECBUF_VERSION 1, the drm_vmw_execbuf_arg structure ends with
    * the flags field. The structure size sent to drmCommandWrite must match
    * the drm_execbuf_version. Otherwise, an invalid value will be returned.
    */
   argsize = vws->ioctl.drm_execbuf_version > 1 ? sizeof(arg) :
      offsetof(struct drm_vmw_execbuf_arg, context_handle);
   do {
      ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_EXECBUF, &arg, argsize);
      if (ret == -EBUSY)
         usleep(1000);
   } while (ret == -ERESTART || ret == -EBUSY);
   if (ret) {
      vmw_error("%s error %s.\n", __FUNCTION__, strerror(-ret));
      abort();
   }

   if (rep.error) {

      /*
       * Kernel has already synced, or caller requested no fence.
       */
      if (pfence)
         *pfence = NULL;
   } else {
      if (pfence) {
         vmw_fences_signal(vws->fence_ops, rep.passed_seqno, rep.seqno,
                           TRUE);

         /* Older DRM module will set this to zero, but -1 is the proper FD
          * to use for no fence FD support */
         if (!vws->base.have_fence_fd)
            rep.fd = -1;

         *pfence = vmw_fence_create(vws->fence_ops, rep.handle,
                                    rep.seqno, rep.mask, rep.fd);
         if (*pfence == NULL) {
            /*
             * Fence creation failed. Need to sync.
             */
            (void) vmw_ioctl_fence_finish(vws, rep.handle, rep.mask);
            vmw_ioctl_fence_unref(vws, rep.handle);
         }
      }
   }
}


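/**
 * vmw_ioctl_region_create - Allocate a kernel buffer object
 *
 * @vws: Winsys screen
 * @size: Size of the buffer in bytes
 *
 * Returns a new struct vmw_region describing the buffer, or NULL on
 * failure. The allocation retries while the kernel returns -ERESTART.
 */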
struct vmw_region *
vmw_ioctl_region_create(struct vmw_winsys_screen *vws, uint32_t size)
{
   struct vmw_region *region;
   union drm_vmw_alloc_dmabuf_arg arg;
   struct drm_vmw_alloc_dmabuf_req *req = &arg.req;
   struct drm_vmw_dmabuf_rep *rep = &arg.rep;
   int ret;

   vmw_printf("%s: size = %u\n", __FUNCTION__, size);

   region = CALLOC_STRUCT(vmw_region);
   if (!region)
      goto out_err1;

   memset(&arg, 0, sizeof(arg));
   req->size = size;
   do {
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_ALLOC_DMABUF, &arg,
                                sizeof(arg));
   } while (ret == -ERESTART);

   if (ret) {
      vmw_error("IOCTL failed %d: %s\n", ret, strerror(-ret));
      goto out_err1;
   }

   region->data = NULL;
   region->handle = rep->handle;
   region->map_handle = rep->map_handle;
   region->map_count = 0;
   region->size = size;
   region->drm_fd = vws->ioctl.drm_fd;

   vmw_printf("   gmrId = %u, offset = %u\n",
              region->handle, 0);

   return region;

out_err1:
   FREE(region);
   return NULL;
}

void
vmw_ioctl_region_destroy(struct vmw_region *region)
{
   struct drm_vmw_unref_dmabuf_arg arg;

   vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
              region->handle, 0);

   if (region->data) {
      os_munmap(region->data, region->size);
      region->data = NULL;
   }

   memset(&arg, 0, sizeof(arg));
   arg.handle = region->handle;
   drmCommandWrite(region->drm_fd, DRM_VMW_UNREF_DMABUF, &arg, sizeof(arg));

   FREE(region);
}

SVGAGuestPtr
vmw_ioctl_region_ptr(struct vmw_region *region)
{
   SVGAGuestPtr ptr = {region->handle, 0};
   return ptr;
}

void *
vmw_ioctl_region_map(struct vmw_region *region)
{
   void *map;

   vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
              region->handle, 0);

   if (region->data == NULL) {
      map = os_mmap(NULL, region->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    region->drm_fd, region->map_handle);
      if (map == MAP_FAILED) {
         vmw_error("%s: Map failed.\n", __FUNCTION__);
         return NULL;
      }

      // MADV_HUGEPAGE only exists on Linux
#ifdef MADV_HUGEPAGE
      (void) madvise(map, region->size, MADV_HUGEPAGE);
#endif
      region->data = map;
   }

   ++region->map_count;

   return region->data;
}

void
vmw_ioctl_region_unmap(struct vmw_region *region)
{
   vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
              region->handle, 0);

   --region->map_count;
   os_munmap(region->data, region->size);
   region->data = NULL;
}

/**
 * vmw_ioctl_syncforcpu - Synchronize a buffer object for CPU usage
 *
 * @region: Pointer to a struct vmw_region representing the buffer object.
 * @dont_block: Don't wait for GPU idle, but rather return -EBUSY if the
 * GPU is busy with the buffer object.
 * @readonly: Hint that the CPU access is read-only.
 * @allow_cs: Allow concurrent command submission while the buffer is
 * synchronized for CPU. If FALSE command submissions referencing the
 * buffer will block until a corresponding call to vmw_ioctl_releasefromcpu.
 *
 * This function idles any GPU activities touching the buffer and blocks
 * command submission of commands referencing the buffer, even from
 * other processes.
 */
int
vmw_ioctl_syncforcpu(struct vmw_region *region,
                     boolean dont_block,
                     boolean readonly,
                     boolean allow_cs)
{
   struct drm_vmw_synccpu_arg arg;

   memset(&arg, 0, sizeof(arg));
   arg.op = drm_vmw_synccpu_grab;
   arg.handle = region->handle;
   arg.flags = drm_vmw_synccpu_read;
   if (!readonly)
      arg.flags |= drm_vmw_synccpu_write;
   if (dont_block)
      arg.flags |= drm_vmw_synccpu_dontblock;
   if (allow_cs)
      arg.flags |= drm_vmw_synccpu_allow_cs;

   return drmCommandWrite(region->drm_fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}

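/*
 * Illustrative sketch (not part of the winsys API): a read-only CPU access
 * to a region, pairing vmw_ioctl_syncforcpu() with
 * vmw_ioctl_releasefromcpu() using matching @readonly / @allow_cs values:
 *
 *    void *data = vmw_ioctl_region_map(region);
 *    if (data && vmw_ioctl_syncforcpu(region, FALSE, TRUE, FALSE) == 0) {
 *       ... read from data ...
 *       vmw_ioctl_releasefromcpu(region, TRUE, FALSE);
 *    }
 *    vmw_ioctl_region_unmap(region);
 */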
/**
 * vmw_ioctl_releasefromcpu - Undo a previous syncforcpu.
 *
 * @region: Pointer to a struct vmw_region representing the buffer object.
 * @readonly: Should hold the same value as the matching syncforcpu call.
 * @allow_cs: Should hold the same value as the matching syncforcpu call.
 */
void
vmw_ioctl_releasefromcpu(struct vmw_region *region,
                         boolean readonly,
                         boolean allow_cs)
{
   struct drm_vmw_synccpu_arg arg;

   memset(&arg, 0, sizeof(arg));
   arg.op = drm_vmw_synccpu_release;
   arg.handle = region->handle;
   arg.flags = drm_vmw_synccpu_read;
   if (!readonly)
      arg.flags |= drm_vmw_synccpu_write;
   if (allow_cs)
      arg.flags |= drm_vmw_synccpu_allow_cs;

   (void) drmCommandWrite(region->drm_fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}

void
vmw_ioctl_fence_unref(struct vmw_winsys_screen *vws,
                      uint32_t handle)
{
   struct drm_vmw_fence_arg arg;
   int ret;

   memset(&arg, 0, sizeof(arg));
   arg.handle = handle;

   ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_FENCE_UNREF,
                         &arg, sizeof(arg));
   if (ret != 0)
      vmw_error("%s Failed\n", __FUNCTION__);
}

static inline uint32_t
vmw_drm_fence_flags(uint32_t flags)
{
   uint32_t dflags = 0;

   if (flags & SVGA_FENCE_FLAG_EXEC)
      dflags |= DRM_VMW_FENCE_FLAG_EXEC;
   if (flags & SVGA_FENCE_FLAG_QUERY)
      dflags |= DRM_VMW_FENCE_FLAG_QUERY;

   return dflags;
}


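/**
 * vmw_ioctl_fence_signalled - Check whether a fence object has signaled
 *
 * @vws: Winsys screen
 * @handle: Kernel handle of the fence object
 * @flags: SVGA_FENCE_FLAG_* flags to check for
 *
 * Returns 0 if the fence has signaled, -1 if it has not, or the ioctl
 * error if the query itself failed.
 */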
int
vmw_ioctl_fence_signalled(struct vmw_winsys_screen *vws,
                          uint32_t handle,
                          uint32_t flags)
{
   struct drm_vmw_fence_signaled_arg arg;
   uint32_t vflags = vmw_drm_fence_flags(flags);
   int ret;

   memset(&arg, 0, sizeof(arg));
   arg.handle = handle;
   arg.flags = vflags;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_FENCE_SIGNALED,
                             &arg, sizeof(arg));

   if (ret != 0)
      return ret;

   vmw_fences_signal(vws->fence_ops, arg.passed_seqno, 0, FALSE);

   return (arg.signaled) ? 0 : -1;
}


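/**
 * vmw_ioctl_fence_finish - Wait for a fence object to signal
 *
 * @vws: Winsys screen
 * @handle: Kernel handle of the fence object
 * @flags: SVGA_FENCE_FLAG_* flags to wait for
 *
 * Waits with a timeout of VMW_FENCE_TIMEOUT_SECONDS, logging an error if
 * the wait fails. Always returns 0.
 */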
int
vmw_ioctl_fence_finish(struct vmw_winsys_screen *vws,
                       uint32_t handle,
                       uint32_t flags)
{
   struct drm_vmw_fence_wait_arg arg;
   uint32_t vflags = vmw_drm_fence_flags(flags);
   int ret;

   memset(&arg, 0, sizeof(arg));

   arg.handle = handle;
   arg.timeout_us = VMW_FENCE_TIMEOUT_SECONDS*1000000;
   arg.lazy = 0;
   arg.flags = vflags;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_FENCE_WAIT,
                             &arg, sizeof(arg));

   if (ret != 0)
      vmw_error("%s Failed\n", __FUNCTION__);

   return 0;
}

uint32
vmw_ioctl_shader_create(struct vmw_winsys_screen *vws,
                        SVGA3dShaderType type,
                        uint32 code_len)
{
   struct drm_vmw_shader_create_arg sh_arg;
   int ret;

   VMW_FUNC;

   memset(&sh_arg, 0, sizeof(sh_arg));

   sh_arg.size = code_len;
   sh_arg.buffer_handle = SVGA3D_INVALID_ID;
   sh_arg.shader_handle = SVGA3D_INVALID_ID;
   switch (type) {
   case SVGA3D_SHADERTYPE_VS:
      sh_arg.shader_type = drm_vmw_shader_type_vs;
      break;
   case SVGA3D_SHADERTYPE_PS:
      sh_arg.shader_type = drm_vmw_shader_type_ps;
      break;
   default:
      assert(!"Invalid shader type.");
      break;
   }

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_SHADER,
                             &sh_arg, sizeof(sh_arg));

   if (ret)
      return SVGA3D_INVALID_ID;

   return sh_arg.shader_handle;
}

void
vmw_ioctl_shader_destroy(struct vmw_winsys_screen *vws, uint32 shid)
{
   struct drm_vmw_shader_arg sh_arg;

   VMW_FUNC;

   memset(&sh_arg, 0, sizeof(sh_arg));
   sh_arg.handle = shid;

   (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_SHADER,
                         &sh_arg, sizeof(sh_arg));
}

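/**
 * vmw_ioctl_parse_caps - Parse a 3D capability buffer into the winsys screen
 *
 * @vws: Winsys screen to store the capabilities on
 * @cap_buffer: Capability buffer as returned by DRM_VMW_GET_3D_CAP
 *
 * With guest-backed objects the buffer is a plain array of devcap values
 * indexed by capability. Otherwise it is a sequence of SVGA3dCapsRecords
 * that is searched for the newest devcaps record. Returns 0 on success,
 * -1 if no usable caps record was found.
 */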
static int
vmw_ioctl_parse_caps(struct vmw_winsys_screen *vws,
                     const uint32_t *cap_buffer)
{
   int i;

   if (vws->base.have_gb_objects) {
      for (i = 0; i < vws->ioctl.num_cap_3d; ++i) {
         vws->ioctl.cap_3d[i].has_cap = TRUE;
         vws->ioctl.cap_3d[i].result.u = cap_buffer[i];
      }
      return 0;
   } else {
      const uint32 *capsBlock;
      const SVGA3dCapsRecord *capsRecord = NULL;
      uint32 offset;
      const SVGA3dCapPair *capArray;
      int numCaps, index;

      /*
       * Search linearly through the caps block records for the specified type.
       */
      capsBlock = cap_buffer;
      for (offset = 0; capsBlock[offset] != 0; offset += capsBlock[offset]) {
         const SVGA3dCapsRecord *record;
         assert(offset < SVGA_FIFO_3D_CAPS_SIZE);
         record = (const SVGA3dCapsRecord *) (capsBlock + offset);
         if ((record->header.type >= SVGA3DCAPS_RECORD_DEVCAPS_MIN) &&
             (record->header.type <= SVGA3DCAPS_RECORD_DEVCAPS_MAX) &&
             (!capsRecord || (record->header.type > capsRecord->header.type))) {
            capsRecord = record;
         }
      }

      if (!capsRecord)
         return -1;

      /*
       * Calculate the number of caps from the size of the record.
       */
      capArray = (const SVGA3dCapPair *) capsRecord->data;
      numCaps = (int) ((capsRecord->header.length * sizeof(uint32) -
                        sizeof capsRecord->header) / (2 * sizeof(uint32)));

      for (i = 0; i < numCaps; i++) {
         index = capArray[i][0];
         if (index < vws->ioctl.num_cap_3d) {
            vws->ioctl.cap_3d[index].has_cap = TRUE;
            vws->ioctl.cap_3d[index].result.u = capArray[i][1];
         } else {
            debug_printf("Unknown devcaps seen: %d\n", index);
         }
      }
   }
   return 0;
}

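/**
 * vmw_ioctl_init - Query kernel driver version, parameters and capabilities
 *
 * @vws: Winsys screen to initialize
 *
 * Determines the drm interface version, queries device parameters such as
 * 3D support, guest-backed object support, memory limits and optional
 * features, and retrieves and parses the 3D capability buffer. Returns
 * TRUE on success, FALSE on failure.
 */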
boolean
vmw_ioctl_init(struct vmw_winsys_screen *vws)
{
   struct drm_vmw_getparam_arg gp_arg;
   struct drm_vmw_get_3d_cap_arg cap_arg;
   unsigned int size;
   int ret;
   uint32_t *cap_buffer;
   drmVersionPtr version;
   boolean drm_gb_capable;
   boolean have_drm_2_5;
   const char *getenv_val;

   VMW_FUNC;

   version = drmGetVersion(vws->ioctl.drm_fd);
   if (!version)
      goto out_no_version;

   have_drm_2_5 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 4);
   vws->ioctl.have_drm_2_6 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 5);
   vws->ioctl.have_drm_2_9 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 8);
   vws->ioctl.have_drm_2_15 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 14);
   vws->ioctl.have_drm_2_16 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 15);
   vws->ioctl.have_drm_2_17 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 16);
   vws->ioctl.have_drm_2_18 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 17);
   vws->ioctl.have_drm_2_19 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 18);

   vws->ioctl.drm_execbuf_version = vws->ioctl.have_drm_2_9 ? 2 : 1;

   drm_gb_capable = have_drm_2_5;

   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_3D;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                             &gp_arg, sizeof(gp_arg));
   if (ret || gp_arg.value == 0) {
      vmw_error("No 3D enabled (%i, %s).\n", ret, strerror(-ret));
      goto out_no_3d;
   }

   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_FIFO_HW_VERSION;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                             &gp_arg, sizeof(gp_arg));
   if (ret) {
      vmw_error("Failed to get fifo hw version (%i, %s).\n",
                ret, strerror(-ret));
      goto out_no_3d;
   }
   vws->ioctl.hwversion = gp_arg.value;

   getenv_val = getenv("SVGA_FORCE_HOST_BACKED");
   if (!getenv_val || strcmp(getenv_val, "0") == 0) {
      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_HW_CAPS;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
   } else {
      ret = -EINVAL;
   }
   if (ret)
      vws->base.have_gb_objects = FALSE;
   else
      vws->base.have_gb_objects =
         !!(gp_arg.value & (uint64_t) SVGA_CAP_GBOBJECTS);

   if (vws->base.have_gb_objects && !drm_gb_capable)
      goto out_no_3d;

   vws->base.have_vgpu10 = FALSE;
   vws->base.have_sm4_1 = FALSE;
   vws->base.have_intra_surface_copy = FALSE;

   if (vws->base.have_gb_objects) {
      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_MOB_MEMORY;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret) {
         /* Just guess a large enough value. */
         vws->ioctl.max_mob_memory = 256*1024*1024;
      } else {
         vws->ioctl.max_mob_memory = gp_arg.value;
      }

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_MOB_SIZE;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));

      if (ret || gp_arg.value == 0) {
         vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;
      } else {
         vws->ioctl.max_texture_size = gp_arg.value;
      }

      /* Never early flush surfaces, mobs do accounting. */
      vws->ioctl.max_surface_memory = -1;

      if (vws->ioctl.have_drm_2_9) {
         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_DX;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            const char *vgpu10_val;

            debug_printf("Have VGPU10 interface and hardware.\n");
            vws->base.have_vgpu10 = TRUE;
            vgpu10_val = getenv("SVGA_VGPU10");
            if (vgpu10_val && strcmp(vgpu10_val, "0") == 0) {
               debug_printf("Disabling VGPU10 interface.\n");
               vws->base.have_vgpu10 = FALSE;
            } else {
               debug_printf("Enabling VGPU10 interface.\n");
            }
         }
      }

      if (vws->ioctl.have_drm_2_15 && vws->base.have_vgpu10) {
         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_HW_CAPS2;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            vws->base.have_intra_surface_copy = TRUE;
         }

         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_SM4_1;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            vws->base.have_sm4_1 = TRUE;
         }
      }

      if (vws->ioctl.have_drm_2_18 && vws->base.have_sm4_1) {
         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_SM5;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            vws->base.have_sm5 = TRUE;
         }
      }

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_3D_CAPS_SIZE;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret)
         size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
      else
         size = gp_arg.value;

      if (vws->base.have_gb_objects)
         vws->ioctl.num_cap_3d = size / sizeof(uint32_t);
      else
         vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;

      if (vws->ioctl.have_drm_2_16) {
         vws->base.have_coherent = TRUE;
         getenv_val = getenv("SVGA_FORCE_COHERENT");
         if (getenv_val && strcmp(getenv_val, "0") != 0)
            vws->force_coherent = TRUE;
      }
   } else {
      vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_SURF_MEMORY;
      if (have_drm_2_5)
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
      if (!have_drm_2_5 || ret) {
         /* Just guess a large enough value, around 800mb. */
         vws->ioctl.max_surface_memory = 0x30000000;
      } else {
         vws->ioctl.max_surface_memory = gp_arg.value;
      }

      vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;

      size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
   }

   debug_printf("VGPU10 interface is %s.\n",
                vws->base.have_vgpu10 ? "on" : "off");

   cap_buffer = calloc(1, size);
   if (!cap_buffer) {
      debug_printf("Failed alloc fifo 3D caps buffer.\n");
      goto out_no_3d;
   }

   vws->ioctl.cap_3d = calloc(vws->ioctl.num_cap_3d,
                              sizeof(*vws->ioctl.cap_3d));
   if (!vws->ioctl.cap_3d) {
      debug_printf("Failed alloc fifo 3D caps buffer.\n");
      goto out_no_caparray;
   }

   memset(&cap_arg, 0, sizeof(cap_arg));
   cap_arg.buffer = (uint64_t) (unsigned long) (cap_buffer);
   cap_arg.max_size = size;

   /*
    * This call must always be after DRM_VMW_PARAM_MAX_MOB_MEMORY and
    * DRM_VMW_PARAM_SM4_1. This is because, based on these calls, the
    * kernel driver sends the supported caps.
    */
   ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_GET_3D_CAP,
                         &cap_arg, sizeof(cap_arg));

   if (ret) {
      debug_printf("Failed to get 3D capabilities"
                   " (%i, %s).\n", ret, strerror(-ret));
      goto out_no_caps;
   }

   ret = vmw_ioctl_parse_caps(vws, cap_buffer);
   if (ret) {
      debug_printf("Failed to parse 3D capabilities"
                   " (%i, %s).\n", ret, strerror(-ret));
      goto out_no_caps;
   }

   if (((version->version_major == 2 && version->version_minor >= 10)
        || version->version_major > 2) && vws->base.have_vgpu10) {

      /* Support for these commands didn't make it into vmwgfx kernel
       * modules before 2.10.
       */
      vws->base.have_generate_mipmap_cmd = TRUE;
      vws->base.have_set_predication_cmd = TRUE;
   }

   if (version->version_major == 2 && version->version_minor >= 14) {
      vws->base.have_fence_fd = TRUE;
   }

   free(cap_buffer);
   drmFreeVersion(version);
   vmw_printf("%s OK\n", __FUNCTION__);
   return TRUE;
out_no_caps:
   free(vws->ioctl.cap_3d);
out_no_caparray:
   free(cap_buffer);
out_no_3d:
   drmFreeVersion(version);
out_no_version:
   vws->ioctl.num_cap_3d = 0;
   debug_printf("%s Failed\n", __FUNCTION__);
   return FALSE;
}

void
vmw_ioctl_cleanup(struct vmw_winsys_screen *vws)
{
   VMW_FUNC;

   free(vws->ioctl.cap_3d);
}