xref: /qemu/contrib/vhost-user-gpu/vugbm.c (revision f917eed3)
/*
 * Virtio vhost-user GPU Device
 *
 * DRM helpers
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "vugbm.h"

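/*
 * malloc() fallback backend: provides CPU-only buffers and cannot export
 * dma-buf fds, so vugbm_device_init() reports failure when it is selected.
 */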
static bool
mem_alloc_bo(struct vugbm_buffer *buf)
{
    buf->mmap = g_malloc(buf->width * buf->height * 4);
    buf->stride = buf->width * 4;
    return true;
}

static void
mem_free_bo(struct vugbm_buffer *buf)
{
    g_free(buf->mmap);
}

static bool
mem_map_bo(struct vugbm_buffer *buf)
{
    return buf->mmap != NULL;
}

static void
mem_unmap_bo(struct vugbm_buffer *buf)
{
}

static void
mem_device_destroy(struct vugbm_device *dev)
{
}

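/*
 * udmabuf backend: buffers live in sealed memfd memory and are turned into
 * dma-buf fds with the UDMABUF_CREATE ioctl on /dev/udmabuf.
 */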
#ifdef CONFIG_MEMFD
struct udmabuf_create {
        uint32_t memfd;
        uint32_t flags;
        uint64_t offset;
        uint64_t size;
};

#define UDMABUF_CREATE _IOW('u', 0x42, struct udmabuf_create)

static size_t
udmabuf_get_size(struct vugbm_buffer *buf)
{
    return ROUND_UP(buf->width * buf->height * 4, qemu_real_host_page_size);
}

static bool
udmabuf_alloc_bo(struct vugbm_buffer *buf)
{
    int ret;

    buf->memfd = memfd_create("udmabuf-bo", MFD_ALLOW_SEALING);
    if (buf->memfd < 0) {
        return false;
    }

    ret = ftruncate(buf->memfd, udmabuf_get_size(buf));
    if (ret < 0) {
        close(buf->memfd);
        return false;
    }

    ret = fcntl(buf->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
    if (ret < 0) {
        close(buf->memfd);
        return false;
    }

    buf->stride = buf->width * 4;

    return true;
}

static void
udmabuf_free_bo(struct vugbm_buffer *buf)
{
    close(buf->memfd);
}

static bool
udmabuf_map_bo(struct vugbm_buffer *buf)
{
    buf->mmap = mmap(NULL, udmabuf_get_size(buf),
                     PROT_READ | PROT_WRITE, MAP_SHARED, buf->memfd, 0);
    if (buf->mmap == MAP_FAILED) {
        return false;
    }

    return true;
}

static bool
udmabuf_get_fd(struct vugbm_buffer *buf, int *fd)
{
    struct udmabuf_create create = {
        .memfd = buf->memfd,
        .offset = 0,
        .size = udmabuf_get_size(buf),
    };

    *fd = ioctl(buf->dev->fd, UDMABUF_CREATE, &create);

    return *fd >= 0;
}

static void
udmabuf_unmap_bo(struct vugbm_buffer *buf)
{
    munmap(buf->mmap, udmabuf_get_size(buf));
}

static void
udmabuf_device_destroy(struct vugbm_device *dev)
{
    close(dev->fd);
}
#endif

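/*
 * GBM backend: buffer objects are allocated on a gbm_device created from the
 * caller's DRM fd, mapped with gbm_bo_map() and exported with gbm_bo_get_fd().
 */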
#ifdef CONFIG_GBM
static bool
alloc_bo(struct vugbm_buffer *buf)
{
    struct gbm_device *dev = buf->dev->dev;

    assert(!buf->bo);

    buf->bo = gbm_bo_create(dev, buf->width, buf->height,
                            buf->format,
                            GBM_BO_USE_RENDERING | GBM_BO_USE_LINEAR);

    if (buf->bo) {
        buf->stride = gbm_bo_get_stride(buf->bo);
        return true;
    }

    return false;
}

static void
free_bo(struct vugbm_buffer *buf)
{
    gbm_bo_destroy(buf->bo);
}

static bool
map_bo(struct vugbm_buffer *buf)
{
    uint32_t stride;

    buf->mmap = gbm_bo_map(buf->bo, 0, 0, buf->width, buf->height,
                           GBM_BO_TRANSFER_READ_WRITE, &stride,
                           &buf->mmap_data);

    assert(stride == buf->stride);

    return buf->mmap != NULL;
}

static void
unmap_bo(struct vugbm_buffer *buf)
{
    gbm_bo_unmap(buf->bo, buf->mmap_data);
}

static bool
get_fd(struct vugbm_buffer *buf, int *fd)
{
    *fd = gbm_bo_get_fd(buf->bo);

    return *fd >= 0;
}

static void
device_destroy(struct vugbm_device *dev)
{
    gbm_device_destroy(dev->dev);
}
#endif

void
vugbm_device_destroy(struct vugbm_device *dev)
{
    if (!dev->inited) {
        return;
    }

    dev->device_destroy(dev);
}

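/*
 * Select a backend in order of preference: GBM if a gbm_device can be created
 * from @fd, then udmabuf if /dev/udmabuf is present, otherwise the malloc
 * fallback (which cannot export dma-bufs, hence the false return).
 */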
bool
vugbm_device_init(struct vugbm_device *dev, int fd)
{
    dev->fd = fd;

#ifdef CONFIG_GBM
    dev->dev = gbm_create_device(fd);
#endif

    if (0) {
        /* nothing */
    }
#ifdef CONFIG_GBM
    else if (dev->dev != NULL) {
        dev->alloc_bo = alloc_bo;
        dev->free_bo = free_bo;
        dev->get_fd = get_fd;
        dev->map_bo = map_bo;
        dev->unmap_bo = unmap_bo;
        dev->device_destroy = device_destroy;
    }
#endif
#ifdef CONFIG_MEMFD
    else if (g_file_test("/dev/udmabuf", G_FILE_TEST_EXISTS)) {
        dev->fd = open("/dev/udmabuf", O_RDWR);
        if (dev->fd < 0) {
            return false;
        }
        g_debug("Using experimental udmabuf backend");
        dev->alloc_bo = udmabuf_alloc_bo;
        dev->free_bo = udmabuf_free_bo;
        dev->get_fd = udmabuf_get_fd;
        dev->map_bo = udmabuf_map_bo;
        dev->unmap_bo = udmabuf_unmap_bo;
        dev->device_destroy = udmabuf_device_destroy;
    }
#endif
    else {
        g_debug("Using mem fallback");
        dev->alloc_bo = mem_alloc_bo;
        dev->free_bo = mem_free_bo;
        dev->map_bo = mem_map_bo;
        dev->unmap_bo = mem_unmap_bo;
        dev->device_destroy = mem_device_destroy;
        return false;
    }

    dev->inited = true;
    return true;
}

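/* Internal helpers dispatching through the selected backend's callbacks. */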
static bool
vugbm_buffer_map(struct vugbm_buffer *buf)
{
    struct vugbm_device *dev = buf->dev;

    return dev->map_bo(buf);
}

static void
vugbm_buffer_unmap(struct vugbm_buffer *buf)
{
    struct vugbm_device *dev = buf->dev;

    dev->unmap_bo(buf);
}

bool
vugbm_buffer_can_get_dmabuf_fd(struct vugbm_buffer *buffer)
{
    if (!buffer->dev->get_fd) {
        return false;
    }

    return true;
}

bool
vugbm_buffer_get_dmabuf_fd(struct vugbm_buffer *buffer, int *fd)
{
    if (!vugbm_buffer_can_get_dmabuf_fd(buffer) ||
        !buffer->dev->get_fd(buffer, fd)) {
        g_warning("Failed to get dmabuf");
        return false;
    }

    if (*fd < 0) {
        g_warning("error: dmabuf_fd < 0");
        return false;
    }

    return true;
}

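/*
 * Allocate an XRGB8888 buffer of the requested size with the selected backend
 * and map it for CPU access; on mapping failure the buffer object is released.
 */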
bool
vugbm_buffer_create(struct vugbm_buffer *buffer, struct vugbm_device *dev,
                    uint32_t width, uint32_t height)
{
    buffer->dev = dev;
    buffer->width = width;
    buffer->height = height;
    buffer->format = GBM_FORMAT_XRGB8888;
    buffer->stride = 0; /* modified during alloc */
    if (!dev->alloc_bo(buffer)) {
        g_warning("alloc_bo failed");
        return false;
    }

    if (!vugbm_buffer_map(buffer)) {
        g_warning("map_bo failed");
        goto err;
    }

    return true;

err:
    dev->free_bo(buffer);
    return false;
}

void
vugbm_buffer_destroy(struct vugbm_buffer *buffer)
{
    struct vugbm_device *dev = buffer->dev;

    vugbm_buffer_unmap(buffer);
    dev->free_bo(buffer);
}
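
/*
 * Example usage (a minimal sketch, not taken from a real caller): drive the
 * API above with a caller-provided DRM render-node fd.  The names drm_fd,
 * width and height are placeholders.
 *
 *     struct vugbm_device gdev = { 0 };
 *     struct vugbm_buffer buf;
 *     int dmabuf_fd = -1;
 *
 *     // returns false when no dma-buf capable backend could be set up
 *     vugbm_device_init(&gdev, drm_fd);
 *
 *     if (vugbm_buffer_create(&buf, &gdev, width, height)) {
 *         memset(buf.mmap, 0, buf.stride * height);   // CPU access via the mapping
 *
 *         if (vugbm_buffer_can_get_dmabuf_fd(&buf) &&
 *             vugbm_buffer_get_dmabuf_fd(&buf, &dmabuf_fd)) {
 *             // hand dmabuf_fd to the consumer, close() it when done
 *         }
 *
 *         vugbm_buffer_destroy(&buf);
 *     }
 *
 *     vugbm_device_destroy(&gdev);
 */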