/*
 * Virtio vhost-user GPU Device
 *
 * DRM helpers
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "vugbm.h"

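/*
 * Fallback backend: buffers are plain g_malloc()ed memory.  No get_fd
 * callback is installed for this backend, so
 * vugbm_buffer_can_get_dmabuf_fd() returns false for these buffers.
 */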
static bool
mem_alloc_bo(struct vugbm_buffer *buf)
{
    buf->mmap = g_malloc(buf->width * buf->height * 4);
    buf->stride = buf->width * 4;
    return true;
}

static void
mem_free_bo(struct vugbm_buffer *buf)
{
    g_free(buf->mmap);
}

static bool
mem_map_bo(struct vugbm_buffer *buf)
{
    return buf->mmap != NULL;
}

static void
mem_unmap_bo(struct vugbm_buffer *buf)
{
}

static void
mem_device_destroy(struct vugbm_device *dev)
{
}

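/*
 * udmabuf backend: buffers live in a sealed memfd and are turned into
 * dmabuf fds through the /dev/udmabuf device opened in
 * vugbm_device_init(), so they can be exported without a GBM device.
 */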
#ifdef CONFIG_MEMFD
struct udmabuf_create {
        uint32_t memfd;
        uint32_t flags;
        uint64_t offset;
        uint64_t size;
};

#define UDMABUF_CREATE _IOW('u', 0x42, struct udmabuf_create)

static size_t
udmabuf_get_size(struct vugbm_buffer *buf)
{
    return ROUND_UP(buf->width * buf->height * 4, qemu_real_host_page_size());
}

static bool
udmabuf_alloc_bo(struct vugbm_buffer *buf)
{
    int ret;

    buf->memfd = memfd_create("udmabuf-bo", MFD_ALLOW_SEALING);
    if (buf->memfd < 0) {
        return false;
    }

    ret = ftruncate(buf->memfd, udmabuf_get_size(buf));
    if (ret < 0) {
        close(buf->memfd);
        return false;
    }

    ret = fcntl(buf->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
    if (ret < 0) {
        close(buf->memfd);
        return false;
    }

    buf->stride = buf->width * 4;

    return true;
}

static void
udmabuf_free_bo(struct vugbm_buffer *buf)
{
    close(buf->memfd);
}

static bool
udmabuf_map_bo(struct vugbm_buffer *buf)
{
    buf->mmap = mmap(NULL, udmabuf_get_size(buf),
                     PROT_READ | PROT_WRITE, MAP_SHARED, buf->memfd, 0);
    if (buf->mmap == MAP_FAILED) {
        return false;
    }

    return true;
}

static bool
udmabuf_get_fd(struct vugbm_buffer *buf, int *fd)
{
    struct udmabuf_create create = {
        .memfd = buf->memfd,
        .offset = 0,
        .size = udmabuf_get_size(buf),
    };

    *fd = ioctl(buf->dev->fd, UDMABUF_CREATE, &create);

    return *fd >= 0;
}

static void
udmabuf_unmap_bo(struct vugbm_buffer *buf)
{
    munmap(buf->mmap, udmabuf_get_size(buf));
}

static void
udmabuf_device_destroy(struct vugbm_device *dev)
{
    close(dev->fd);
}
#endif

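/*
 * GBM backend: buffers are GPU buffer objects allocated on the DRM
 * device passed to vugbm_device_init(), mapped with gbm_bo_map() and
 * exported as dmabuf fds with gbm_bo_get_fd().
 */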
#ifdef CONFIG_GBM
static bool
alloc_bo(struct vugbm_buffer *buf)
{
    struct gbm_device *dev = buf->dev->dev;

    assert(!buf->bo);

    buf->bo = gbm_bo_create(dev, buf->width, buf->height,
                            buf->format,
                            GBM_BO_USE_RENDERING | GBM_BO_USE_LINEAR);

    if (buf->bo) {
        buf->stride = gbm_bo_get_stride(buf->bo);
        return true;
    }

    return false;
}

static void
free_bo(struct vugbm_buffer *buf)
{
    gbm_bo_destroy(buf->bo);
}

static bool
map_bo(struct vugbm_buffer *buf)
{
    uint32_t stride;

    buf->mmap = gbm_bo_map(buf->bo, 0, 0, buf->width, buf->height,
                           GBM_BO_TRANSFER_READ_WRITE, &stride,
                           &buf->mmap_data);

    assert(stride == buf->stride);

    return buf->mmap != NULL;
}

static void
unmap_bo(struct vugbm_buffer *buf)
{
    gbm_bo_unmap(buf->bo, buf->mmap_data);
}

static bool
get_fd(struct vugbm_buffer *buf, int *fd)
{
    *fd = gbm_bo_get_fd(buf->bo);

    return *fd >= 0;
}

static void
device_destroy(struct vugbm_device *dev)
{
    gbm_device_destroy(dev->dev);
}
#endif

void
vugbm_device_destroy(struct vugbm_device *dev)
{
    if (!dev->inited) {
        return;
    }

    dev->device_destroy(dev);
}

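/*
 * Pick a backend in order of preference: GBM when a DRM fd is provided
 * and gbm_create_device() succeeds, then the experimental udmabuf path
 * when /dev/udmabuf can be opened, and finally the malloc fallback.
 */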
void
vugbm_device_init(struct vugbm_device *dev, int fd)
{
    assert(!dev->inited);

#ifdef CONFIG_GBM
    if (fd >= 0) {
        dev->dev = gbm_create_device(fd);
    }
    if (dev->dev != NULL) {
        dev->fd = fd;
        dev->alloc_bo = alloc_bo;
        dev->free_bo = free_bo;
        dev->get_fd = get_fd;
        dev->map_bo = map_bo;
        dev->unmap_bo = unmap_bo;
        dev->device_destroy = device_destroy;
        dev->inited = true;
    }
#endif
#ifdef CONFIG_MEMFD
    if (!dev->inited && g_file_test("/dev/udmabuf", G_FILE_TEST_EXISTS)) {
        dev->fd = open("/dev/udmabuf", O_RDWR);
        if (dev->fd >= 0) {
            g_debug("Using experimental udmabuf backend");
            dev->alloc_bo = udmabuf_alloc_bo;
            dev->free_bo = udmabuf_free_bo;
            dev->get_fd = udmabuf_get_fd;
            dev->map_bo = udmabuf_map_bo;
            dev->unmap_bo = udmabuf_unmap_bo;
            dev->device_destroy = udmabuf_device_destroy;
            dev->inited = true;
        }
    }
#endif
    if (!dev->inited) {
        g_debug("Using mem fallback");
        dev->alloc_bo = mem_alloc_bo;
        dev->free_bo = mem_free_bo;
        dev->map_bo = mem_map_bo;
        dev->unmap_bo = mem_unmap_bo;
        dev->device_destroy = mem_device_destroy;
        dev->inited = true;
    }
    assert(dev->inited);
}

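/*
 * Typical caller flow (illustrative sketch only, not taken from this
 * file; the render node path and dimensions are placeholders):
 *
 *     struct vugbm_device dev = { 0 };
 *     struct vugbm_buffer buf = { 0 };
 *     int drm_fd = open("/dev/dri/renderD128", O_RDWR);
 *     int dmabuf_fd;
 *
 *     vugbm_device_init(&dev, drm_fd);
 *     if (vugbm_buffer_create(&buf, &dev, 640, 480)) {
 *         if (vugbm_buffer_get_dmabuf_fd(&buf, &dmabuf_fd)) {
 *             ... hand dmabuf_fd to the consumer, draw into buf.mmap ...
 *         }
 *         vugbm_buffer_destroy(&buf);
 *     }
 *     vugbm_device_destroy(&dev);
 */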
static bool
vugbm_buffer_map(struct vugbm_buffer *buf)
{
    struct vugbm_device *dev = buf->dev;

    return dev->map_bo(buf);
}

static void
vugbm_buffer_unmap(struct vugbm_buffer *buf)
{
    struct vugbm_device *dev = buf->dev;

    dev->unmap_bo(buf);
}

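/*
 * Dmabuf export helpers: only the GBM and udmabuf backends install a
 * get_fd callback, so callers should check
 * vugbm_buffer_can_get_dmabuf_fd() before requesting an fd.
 */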
264 
265 bool
266 vugbm_buffer_can_get_dmabuf_fd(struct vugbm_buffer *buffer)
267 {
268     if (!buffer->dev->get_fd) {
269         return false;
270     }
271 
272     return true;
273 }
274 
275 bool
276 vugbm_buffer_get_dmabuf_fd(struct vugbm_buffer *buffer, int *fd)
277 {
278     if (!vugbm_buffer_can_get_dmabuf_fd(buffer) ||
279         !buffer->dev->get_fd(buffer, fd)) {
280         g_warning("Failed to get dmabuf");
281         return false;
282     }
283 
284     if (*fd < 0) {
285         g_warning("error: dmabuf_fd < 0");
286         return false;
287     }
288 
289     return true;
290 }
291 
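/*
 * Allocate and map a linear XRGB8888 buffer of the given size using the
 * backend selected at vugbm_device_init() time.
 */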
bool
vugbm_buffer_create(struct vugbm_buffer *buffer, struct vugbm_device *dev,
                    uint32_t width, uint32_t height)
{
    buffer->dev = dev;
    buffer->width = width;
    buffer->height = height;
    buffer->format = GBM_FORMAT_XRGB8888;
    buffer->stride = 0; /* modified during alloc */
    if (!dev->alloc_bo(buffer)) {
        g_warning("alloc_bo failed");
        return false;
    }

    if (!vugbm_buffer_map(buffer)) {
        g_warning("map_bo failed");
        goto err;
    }

    return true;

err:
    dev->free_bo(buffer);
    return false;
}

void
vugbm_buffer_destroy(struct vugbm_buffer *buffer)
{
    struct vugbm_device *dev = buffer->dev;

    vugbm_buffer_unmap(buffer);
    dev->free_bo(buffer);
}