/*
 * Copyright 2011 VMWare, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Jakob Bornecrantz <wallbraker@gmail.com>
 * Author: Thomas Hellstrom <thellstrom@vmware.com>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdint.h>
#include <errno.h>
#include <sys/mman.h>
#include "vmwgfx_drm.h"
#include <xf86drm.h>
#include "vmwgfx_drmi.h"

#define uint32 uint32_t
#define int32 int32_t
#define uint16 uint16_t
#define uint8 uint8_t

#include "svga3d_reg.h"
#include "vmwgfx_driver.h"

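/**
 * vmwgfx_fence_wait - Wait for a vmwgfx fence to signal
 *
 * @drm_fd: File descriptor for the drm connection.
 * @handle: Handle of the fence object to wait on.
 * @unref: If TRUE, also ask the kernel to drop its fence reference
 * once the wait has completed.
 *
 * Blocks until the fence signals or the 10 second timeout expires.
 * Returns 0 on success or a negative error code from the
 * DRM_VMW_FENCE_WAIT ioctl.
 */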
static int
vmwgfx_fence_wait(int drm_fd, uint32_t handle, Bool unref)
{
	struct drm_vmw_fence_wait_arg farg;
	memset(&farg, 0, sizeof(farg));

	farg.handle = handle;
	farg.flags = DRM_VMW_FENCE_FLAG_EXEC;
	farg.timeout_us = 10*1000000;
	farg.cookie_valid = 0;

	if (unref)
	    farg.wait_options |= DRM_VMW_WAIT_OPTION_UNREF;

	return drmCommandWriteRead(drm_fd, DRM_VMW_FENCE_WAIT, &farg,
				   sizeof(farg));
}

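/**
 * vmwgfx_fence_unref - Drop a reference on a vmwgfx fence object
 *
 * @drm_fd: File descriptor for the drm connection.
 * @handle: Handle of the fence object.
 *
 * Any error from the DRM_VMW_FENCE_UNREF ioctl is silently ignored.
 */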
static void
vmwgfx_fence_unref(int drm_fd, uint32_t handle)
{
	struct drm_vmw_fence_arg farg;
	memset(&farg, 0, sizeof(farg));

	farg.handle = handle;

	(void) drmCommandWrite(drm_fd, DRM_VMW_FENCE_UNREF, &farg,
			       sizeof(farg));
}


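/**
 * vmwgfx_present_readback - Read back presented contents into a framebuffer
 *
 * @drm_fd: File descriptor for the drm connection.
 * @fb_id: KMS id of the framebuffer to read back into.
 * @region: Region to read back.
 *
 * Issues a DRM_VMW_PRESENT_READBACK ioctl for the cliprects of @region
 * and then waits on the returned fence so that Xorg software rendering
 * doesn't race with the readback. Ioctl errors are only logged;
 * returns -1 if cliprect allocation failed, otherwise 0.
 */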
int
vmwgfx_present_readback(int drm_fd, uint32_t fb_id, RegionPtr region)
{
    BoxPtr clips = REGION_RECTS(region);
    unsigned int num_clips = REGION_NUM_RECTS(region);
    struct drm_vmw_fence_rep rep;
    struct drm_vmw_present_readback_arg arg;
    int ret;
    unsigned i;
    struct drm_vmw_rect *rects, *r;

    rects = calloc(num_clips, sizeof(*rects));
    if (!rects) {
	LogMessage(X_ERROR, "Failed to alloc cliprects for "
		   "present readback.\n");
	return -1;
    }

    memset(&arg, 0, sizeof(arg));
    memset(&rep, 0, sizeof(rep));

    arg.fb_id = fb_id;
    arg.num_clips = num_clips;
    arg.clips_ptr = (unsigned long) rects;
    arg.fence_rep = (unsigned long) &rep;
    rep.error = -EFAULT;

    for (i = 0, r = rects; i < num_clips; ++i, ++r, ++clips) {
	r->x = clips->x1;
	r->y = clips->y1;
	r->w = clips->x2 - clips->x1;
	r->h = clips->y2 - clips->y1;
    }

    ret = drmCommandWrite(drm_fd, DRM_VMW_PRESENT_READBACK, &arg, sizeof(arg));
    if (ret)
	LogMessage(X_ERROR, "Present readback error %s.\n", strerror(-ret));
    free(rects);

    /*
     * Sync to avoid racing with Xorg SW rendering.
     */

    if (rep.error == 0) {
	ret = vmwgfx_fence_wait(drm_fd, rep.handle, TRUE);
	if (ret) {
	    LogMessage(X_ERROR, "Present readback fence wait error %s.\n",
		       strerror(-ret));
	    vmwgfx_fence_unref(drm_fd, rep.handle);
	}
    }

    return 0;
}


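/**
 * vmwgfx_present - Present the contents of a surface to a framebuffer
 *
 * @drm_fd: File descriptor for the drm connection.
 * @fb_id: KMS id of the destination framebuffer.
 * @dst_x: Destination x offset.
 * @dst_y: Destination y offset.
 * @region: Region to present.
 * @handle: Handle (sid) of the source surface.
 *
 * Issues a DRM_VMW_PRESENT ioctl with one cliprect per rectangle in
 * @region. Returns 0 on success or if @region is empty, -1 on failure.
 */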
int
vmwgfx_present(int drm_fd, uint32_t fb_id, unsigned int dst_x,
	       unsigned int dst_y, RegionPtr region, uint32_t handle)
{
    BoxPtr clips = REGION_RECTS(region);
    unsigned int num_clips = REGION_NUM_RECTS(region);
    struct drm_vmw_present_arg arg;
    unsigned int i;
    struct drm_vmw_rect *rects, *r;
    int ret;

    if (num_clips == 0)
	return 0;

    rects = calloc(num_clips, sizeof(*rects));
    if (!rects) {
	LogMessage(X_ERROR, "Failed to alloc cliprects for "
		   "present.\n");
	return -1;
    }

    memset(&arg, 0, sizeof(arg));
    arg.fb_id = fb_id;
    arg.sid = handle;
    arg.dest_x = dst_x;
    arg.dest_y = dst_y;
    arg.num_clips = num_clips;
    arg.clips_ptr = (unsigned long) rects;

    for (i = 0, r = rects; i < num_clips; ++i, ++r, ++clips) {
	r->x = clips->x1;
	r->y = clips->y1;
	r->w = clips->x2 - clips->x1;
	r->h = clips->y2 - clips->y1;
    }

    ret = drmCommandWrite(drm_fd, DRM_VMW_PRESENT, &arg, sizeof(arg));
    if (ret) {
	LogMessage(X_ERROR, "Present error %s.\n", strerror(-ret));
    }

    free(rects);
    return ((ret != 0) ? -1 : 0);
}


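/*
 * Driver-private representation of a dma buffer. The public struct
 * vmwgfx_dmabuf must remain the first member so that the cast in
 * vmwgfx_int_dmabuf() below is valid.
 */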
struct vmwgfx_int_dmabuf {
    struct vmwgfx_dmabuf buf;
    uint64_t map_handle;
    uint64_t sync_handle;
    int sync_valid;
    int drm_fd;
    uint32_t map_count;
    void *addr;
};

static inline struct vmwgfx_int_dmabuf *
vmwgfx_int_dmabuf(struct vmwgfx_dmabuf *buf)
{
    return (struct vmwgfx_int_dmabuf *) buf;
}

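/**
 * vmwgfx_dmabuf_alloc - Allocate a vmwgfx dma buffer
 *
 * @drm_fd: File descriptor for the drm connection.
 * @size: Requested buffer size in bytes.
 *
 * Allocates a kernel dma buffer through DRM_VMW_ALLOC_DMABUF and
 * returns a pointer to the buffer description, or NULL on failure.
 */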
struct vmwgfx_dmabuf*
vmwgfx_dmabuf_alloc(int drm_fd, size_t size)
{
    union drm_vmw_alloc_dmabuf_arg arg;
    struct vmwgfx_dmabuf *buf;
    struct vmwgfx_int_dmabuf *ibuf;
    int ret;

    ibuf = calloc(1, sizeof(*ibuf));
    if (!ibuf)
	return NULL;

    buf = &ibuf->buf;
    memset(&arg, 0, sizeof(arg));
    arg.req.size = size;

    ret = drmCommandWriteRead(drm_fd, DRM_VMW_ALLOC_DMABUF, &arg,
			      sizeof(arg));
    if (ret)
	goto out_kernel_fail;

    ibuf = vmwgfx_int_dmabuf(buf);
    ibuf->map_handle = arg.rep.map_handle;
    ibuf->drm_fd = drm_fd;
    buf->handle = arg.rep.handle;
    buf->gmr_id = arg.rep.cur_gmr_id;
    buf->gmr_offset = arg.rep.cur_gmr_offset;
    buf->size = size;

    return buf;
  out_kernel_fail:
    free(buf);
    return NULL;
}

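/**
 * vmwgfx_dmabuf_map - Map a vmwgfx dma buffer into the process address space
 *
 * @buf: The buffer to map.
 *
 * Returns the virtual address of the mapping, or NULL on failure.
 * If the buffer is already mapped, the existing mapping is reused.
 */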
void *
vmwgfx_dmabuf_map(struct vmwgfx_dmabuf *buf)
{
    struct vmwgfx_int_dmabuf *ibuf = vmwgfx_int_dmabuf(buf);

    if (ibuf->addr)
	return ibuf->addr;

    ibuf->addr = mmap(NULL, buf->size, PROT_READ | PROT_WRITE, MAP_SHARED,
		      ibuf->drm_fd, ibuf->map_handle);

    if (ibuf->addr == MAP_FAILED) {
	ibuf->addr = NULL;
	return NULL;
    }

    ibuf->map_count++;
    return ibuf->addr;
}

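/**
 * vmwgfx_dmabuf_unmap - Drop a mapping reference on a vmwgfx dma buffer
 *
 * @buf: The buffer to unmap.
 *
 * Note that the virtual mapping is kept around until the buffer is
 * destroyed; see the comment in the function body.
 */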
void
vmwgfx_dmabuf_unmap(struct vmwgfx_dmabuf *buf)
{
    struct vmwgfx_int_dmabuf *ibuf = vmwgfx_int_dmabuf(buf);

    if (--ibuf->map_count)
	return;

    /*
     * It's a pretty important performance optimization not to call
     * munmap here, although we should watch out for cases where we
     * might fill the virtual memory space of the process.
     */
}

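/**
 * vmwgfx_dmabuf_destroy - Destroy a vmwgfx dma buffer
 *
 * @buf: The buffer to destroy.
 *
 * Unmaps the buffer if it is mapped, tells the kernel to drop its
 * reference and frees the buffer description.
 */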
void
vmwgfx_dmabuf_destroy(struct vmwgfx_dmabuf *buf)
{
    struct vmwgfx_int_dmabuf *ibuf = vmwgfx_int_dmabuf(buf);
    struct drm_vmw_unref_dmabuf_arg arg;

    if (ibuf->addr) {
	munmap(ibuf->addr, buf->size);
	ibuf->addr = NULL;
    }

    memset(&arg, 0, sizeof(arg));
    arg.handle = buf->handle;

    (void) drmCommandWrite(ibuf->drm_fd, DRM_VMW_UNREF_DMABUF, &arg,
			   sizeof(arg));
    free(buf);
}

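/**
 * vmwgfx_dma - DMA between a vmwgfx dma buffer and a host surface
 *
 * @host_x: X offset added to the cliprects on the host surface side.
 * @host_y: Y offset added to the cliprects on the host surface side.
 * @region: Region to transfer.
 * @buf: The guest dma buffer.
 * @buf_pitch: Pitch in bytes of the guest buffer.
 * @surface_handle: Handle (sid) of the host surface.
 * @to_surface: If nonzero, transfer from @buf to the surface,
 * otherwise from the surface to @buf.
 *
 * Builds an SVGA_3D_CMD_SURFACE_DMA command with one copy box per
 * cliprect and submits it through DRM_VMW_EXECBUF. For transfers from
 * the surface, waits on the returned fence so that the buffer contents
 * are valid on return. Ioctl errors are only logged; returns -1 on
 * allocation failure, otherwise 0.
 */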
int
vmwgfx_dma(int host_x, int host_y,
	   RegionPtr region, struct vmwgfx_dmabuf *buf,
	   uint32_t buf_pitch, uint32_t surface_handle, int to_surface)
{
    BoxPtr clips = REGION_RECTS(region);
    unsigned int num_clips = REGION_NUM_RECTS(region);
    struct drm_vmw_execbuf_arg arg;
    struct drm_vmw_fence_rep rep;
    int ret;
    unsigned int size;
    unsigned i;
    SVGA3dCopyBox *cb;
    SVGA3dCmdSurfaceDMASuffix *suffix;
    SVGA3dCmdSurfaceDMA *body;
    struct vmwgfx_int_dmabuf *ibuf = vmwgfx_int_dmabuf(buf);

    struct {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
    } *cmd;

    if (num_clips == 0)
	return 0;

    size = sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cb) +
	sizeof(*suffix);
    cmd = malloc(size);
    if (!cmd)
	return -1;

    cmd->header.id = SVGA_3D_CMD_SURFACE_DMA;
    cmd->header.size = sizeof(cmd->body) + num_clips * sizeof(cmd->cb) +
	sizeof(*suffix);
    cb = &cmd->cb;

    suffix = (SVGA3dCmdSurfaceDMASuffix *) &cb[num_clips];
    suffix->suffixSize = sizeof(*suffix);
    suffix->maximumOffset = (uint32_t) -1;
    suffix->flags.discard = 0;
    suffix->flags.unsynchronized = 0;
    suffix->flags.reserved = 0;

    body = &cmd->body;
    body->guest.ptr.gmrId = buf->gmr_id;
    body->guest.ptr.offset = buf->gmr_offset;
    body->guest.pitch = buf_pitch;
    body->host.sid = surface_handle;
    body->host.face = 0;
    body->host.mipmap = 0;

    body->transfer = (to_surface ? SVGA3D_WRITE_HOST_VRAM :
		      SVGA3D_READ_HOST_VRAM);


    for (i = 0; i < num_clips; i++, cb++, clips++) {
	cb->x = (uint16_t) clips->x1 + host_x;
	cb->y = (uint16_t) clips->y1 + host_y;
	cb->z = 0;
	cb->srcx = (uint16_t) clips->x1;
	cb->srcy = (uint16_t) clips->y1;
	cb->srcz = 0;
	cb->w = (uint16_t) (clips->x2 - clips->x1);
	cb->h = (uint16_t) (clips->y2 - clips->y1);
	cb->d = 1;
#if 0
	LogMessage(X_INFO, "DMA! x: %u y: %u srcx: %u srcy: %u w: %u h: %u %s\n",
		   cb->x, cb->y, cb->srcx, cb->srcy, cb->w, cb->h,
		   to_surface ? "to" : "from");
#endif

    }

    memset(&arg, 0, sizeof(arg));
    memset(&rep, 0, sizeof(rep));

    rep.error = -EFAULT;
    arg.fence_rep = ((to_surface) ? 0UL : (unsigned long)&rep);
    arg.commands = (unsigned long)cmd;
    arg.command_size = size;
    arg.throttle_us = 0;
    arg.version = DRM_VMW_EXECBUF_VERSION;

    ret = drmCommandWrite(ibuf->drm_fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
    if (ret) {
	LogMessage(X_ERROR, "DMA error %s.\n", strerror(-ret));
    }

    free(cmd);

    if (rep.error == 0) {
	ret = vmwgfx_fence_wait(ibuf->drm_fd, rep.handle, TRUE);
	if (ret) {
	    LogMessage(X_ERROR, "DMA from host fence wait error %s.\n",
		       strerror(-ret));
	    vmwgfx_fence_unref(ibuf->drm_fd, rep.handle);
	}
    }

    return 0;
}

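/**
 * vmwgfx_get_param - Query a vmwgfx device parameter
 *
 * @drm_fd: File descriptor for the drm connection.
 * @param: DRM_VMW_PARAM_* identifier of the parameter to query.
 * @out: Pointer to returned value.
 *
 * Returns 0 on success or a negative error code from the
 * DRM_VMW_GET_PARAM ioctl.
 */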
int
vmwgfx_get_param(int drm_fd, uint32_t param, uint64_t *out)
{
    struct drm_vmw_getparam_arg gp_arg;
    int ret;

    memset(&gp_arg, 0, sizeof(gp_arg));
    gp_arg.param = param;
    ret = drmCommandWriteRead(drm_fd, DRM_VMW_GET_PARAM,
	    &gp_arg, sizeof(gp_arg));

    if (ret == 0) {
	*out = gp_arg.value;
    }

    return ret;
}

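/**
 * vmwgfx_num_streams - Query the number of video overlay streams
 *
 * @drm_fd: File descriptor for the drm connection.
 * @ntot: Pointer to returned total number of streams.
 * @nfree: Pointer to returned number of free streams.
 *
 * Returns 0 on success or a negative error code on failure.
 */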
int
vmwgfx_num_streams(int drm_fd, uint32_t *ntot, uint32_t *nfree)
{
    uint64_t v1, v2;
    int ret;

    ret = vmwgfx_get_param(drm_fd, DRM_VMW_PARAM_NUM_STREAMS, &v1);
    if (ret)
	return ret;

    ret = vmwgfx_get_param(drm_fd, DRM_VMW_PARAM_NUM_FREE_STREAMS, &v2);
    if (ret)
	return ret;

    *ntot = (uint32_t)v1;
    *nfree = (uint32_t)v2;

    return 0;
}

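/**
 * vmwgfx_claim_stream - Claim a video overlay stream
 *
 * @drm_fd: File descriptor for the drm connection.
 * @out: Pointer to returned stream id.
 *
 * Returns 0 on success, -1 on failure.
 */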
int
vmwgfx_claim_stream(int drm_fd, uint32_t *out)
{
    struct drm_vmw_stream_arg s_arg;
    int ret;

    ret = drmCommandRead(drm_fd, DRM_VMW_CLAIM_STREAM,
			 &s_arg, sizeof(s_arg));

    if (ret)
	return -1;

    *out = s_arg.stream_id;
    return 0;
}

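/**
 * vmwgfx_unref_stream - Release a previously claimed video overlay stream
 *
 * @drm_fd: File descriptor for the drm connection.
 * @stream_id: Id of the stream to release.
 *
 * Returns 0 on success, -1 on failure.
 */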
int
vmwgfx_unref_stream(int drm_fd, uint32_t stream_id)
{
    struct drm_vmw_stream_arg s_arg;
    int ret;

    memset(&s_arg, 0, sizeof(s_arg));
    s_arg.stream_id = stream_id;

    ret = drmCommandWrite(drm_fd, DRM_VMW_UNREF_STREAM,
			  &s_arg, sizeof(s_arg));

    return (ret != 0) ? -1 : 0;
}

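/**
 * vmwgfx_cursor_bypass - Set the cursor bypass hotspot
 *
 * @drm_fd: File descriptor for the drm connection.
 * @xhot: Cursor hotspot x coordinate.
 * @yhot: Cursor hotspot y coordinate.
 *
 * Applies the hotspot to all cursors using DRM_VMW_CURSOR_BYPASS_ALL.
 * Returns 0 on success or a negative error code on failure.
 */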
int
vmwgfx_cursor_bypass(int drm_fd, int xhot, int yhot)
{
    struct drm_vmw_cursor_bypass_arg arg;
    int ret;

    memset(&arg, 0, sizeof(arg));
    arg.flags = DRM_VMW_CURSOR_BYPASS_ALL;
    arg.xhot = xhot;
    arg.yhot = yhot;

    ret = drmCommandWrite(drm_fd, DRM_VMW_CURSOR_BYPASS,
			  &arg, sizeof(arg));

    return ret;
}

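/**
 * vmwgfx_update_gui_layout - Report the current gui layout to the kernel
 *
 * @drm_fd: File descriptor for the drm connection.
 * @num_rects: Number of output rectangles.
 * @rects: Array of output rectangles.
 *
 * Returns 0 on success or a negative error code from the
 * DRM_VMW_UPDATE_LAYOUT ioctl.
 */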
int
vmwgfx_update_gui_layout(int drm_fd, unsigned int num_rects,
			 struct drm_vmw_rect *rects)
{
    struct drm_vmw_update_layout_arg arg;

    memset(&arg, 0, sizeof(arg));

    arg.num_outputs = num_rects;
    arg.rects = (unsigned long) rects;

    return drmCommandWrite(drm_fd, DRM_VMW_UPDATE_LAYOUT, &arg,
			   sizeof(arg));
}


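/**
 * vmwgfx_max_fb_size - Query the maximum framebuffer size
 *
 * @drm_fd: File descriptor for the drm connection.
 * @size: Pointer to returned size in bytes.
 *
 * Returns 0 on success, -1 on failure.
 */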
int
vmwgfx_max_fb_size(int drm_fd, size_t *size)
{
    uint64_t tmp_size;

    if (vmwgfx_get_param(drm_fd, DRM_VMW_PARAM_MAX_FB_SIZE, &tmp_size) != 0)
	return -1;

    *size = tmp_size;

    return 0;
}

#ifdef HAVE_LIBDRM_2_4_38
/**
 * vmwgfx_prime_fd_to_handle - Return a TTM handle to a prime object
 *
 * @drm_fd: File descriptor for the drm connection.
 * @prime_fd: File descriptor identifying the prime object.
 * @handle: Pointer to returned TTM handle.
 *
 * Takes a reference on the underlying object and returns a TTM handle to it.
 */
int
vmwgfx_prime_fd_to_handle(int drm_fd, int prime_fd, uint32_t *handle)
{
    *handle = 0;

    return drmPrimeFDToHandle(drm_fd, prime_fd, handle);
}

/**
 * vmwgfx_prime_release_handle - Release a reference on a TTM object
 *
 * @drm_fd: File descriptor for the drm connection.
 * @handle: TTM handle as returned by vmwgfx_prime_fd_to_handle.
 *
 * Releases the reference obtained by vmwgfx_prime_fd_to_handle().
 */
void
vmwgfx_prime_release_handle(int drm_fd, uint32_t handle)
{
    struct drm_vmw_surface_arg s_arg;

    memset(&s_arg, 0, sizeof(s_arg));
    s_arg.sid = handle;

    (void) drmCommandWrite(drm_fd, DRM_VMW_UNREF_SURFACE, &s_arg,
			   sizeof(s_arg));
}
#endif /* HAVE_LIBDRM_2_4_38 */