1 #ifdef HAVE_CONFIG_H
2 # include <config.h>
3 #endif
4
5 #include "ecore_wl2_private.h"
6
7 #include <sys/types.h>
8 #include <sys/stat.h>
9 #include <fcntl.h>
10 #include <dlfcn.h>
11 #include <drm_fourcc.h>
12 #include <intel_bufmgr.h>
13 #include <i915_drm.h>
14 #include <vc4_drm.h>
15 #include <exynos_drm.h>
16 #include <exynos_drmif.h>
17 #include <sys/mman.h>
18 #include <sys/ioctl.h>
19
20 #if defined(__linux__)
21 #include <linux/dma-buf.h>
22 #elif defined(__FreeBSD__)
23 /* begin/end dma-buf functions used for userspace mmap. */
24 struct dma_buf_sync {
25 __u64 flags;
26 };
27
28 #define DMA_BUF_SYNC_READ (1 << 0)
29 #define DMA_BUF_SYNC_WRITE (2 << 0)
30 #define DMA_BUF_SYNC_RW (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE)
31 #define DMA_BUF_SYNC_START (0 << 2)
32 #define DMA_BUF_SYNC_END (1 << 2)
33 #define DMA_BUF_SYNC_VALID_FLAGS_MASK \
34 (DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END)
35
36 #define DMA_BUF_BASE 'b'
37 #define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
38 #endif
39
40 #include "linux-dmabuf-unstable-v1-client-protocol.h"
41
42 #define SYM(lib, xx) \
43 do { \
44 sym_## xx = dlsym(lib, #xx); \
45 if (!(sym_ ## xx)) { \
46 fail = EINA_TRUE; \
47 } \
48 } while (0)
49
/* File descriptor of the opened DRM render node; -1 while unset. */
static int drm_fd = -1;

typedef struct _Ecore_Wl2_Buffer Ecore_Wl2_Buffer;
typedef struct _Buffer_Handle Buffer_Handle;
typedef struct _Buffer_Manager Buffer_Manager;
/* Vtable abstracting the buffer backend (intel/exynos/vc4 dmabuf, or
 * plain wl_shm).  Exactly one instance exists at a time; see the
 * buffer_manager global below. */
struct _Buffer_Manager
{
   /* Allocate a w x h 32bpp buffer; fills *stride and *fd on success. */
   Buffer_Handle *(*alloc)(Buffer_Manager *self, const char *name, int w, int h, unsigned long *stride, int32_t *fd);
   /* Wrap an allocated buffer into a wl_buffer proxy for the display. */
   struct wl_buffer *(*to_buffer)(Ecore_Wl2_Display *ewd, Ecore_Wl2_Buffer *db);
   void *(*map)(Ecore_Wl2_Buffer *buf);    /* map for CPU access */
   void (*unmap)(Ecore_Wl2_Buffer *buf);   /* undo map() */
   void (*discard)(Ecore_Wl2_Buffer *buf); /* free the backing storage */
   void (*lock)(Ecore_Wl2_Buffer *buf);    /* begin CPU access window (dmabuf sync); may be NULL */
   void (*unlock)(Ecore_Wl2_Buffer *buf);  /* end CPU access window; may be NULL */
   void (*manager_destroy)(void);          /* tear down backend-global state; may be NULL */
   void *priv;          /* backend-private data (bufmgr / device) */
   void *dl_handle;     /* dlopen() handle of the backend library */
   int refcount;        /* one ref from init plus one per outstanding allocation/map */
   Eina_Bool destroyed; /* destruction requested; freed once refcount drops to 0 */
};
70
/* The single active buffer manager; NULL until ecore_wl2_buffer_init(). */
static Buffer_Manager *buffer_manager = NULL;

/* libdrm_intel entry points, resolved at runtime via dlopen()/SYM(). */
static drm_intel_bufmgr *(*sym_drm_intel_bufmgr_gem_init)(int fd, int batch_size) = NULL;
static int (*sym_drm_intel_bo_unmap)(drm_intel_bo *bo) = NULL;
static int (*sym_drm_intel_bo_map)(drm_intel_bo *bo) = NULL;
static drm_intel_bo *(*sym_drm_intel_bo_alloc_tiled)(drm_intel_bufmgr *mgr, const char *name, int x, int y, int cpp, uint32_t *tile, unsigned long *pitch, unsigned long flags) = NULL;
static void (*sym_drm_intel_bo_unreference)(drm_intel_bo *bo) = NULL;
/* Plain libdrm symbol - also (re)resolved by the exynos and vc4 setups. */
static int (*sym_drmPrimeHandleToFD)(int fd, uint32_t handle, uint32_t flags, int *prime_fd) = NULL;
static void (*sym_drm_intel_bufmgr_destroy)(drm_intel_bufmgr *) = NULL;

/* libdrm_exynos entry points, resolved at runtime via dlopen()/SYM(). */
static struct exynos_device *(*sym_exynos_device_create)(int fd) = NULL;
static struct exynos_bo *(*sym_exynos_bo_create)(struct exynos_device *dev, size_t size, uint32_t flags) = NULL;
static void *(*sym_exynos_bo_map)(struct exynos_bo *bo) = NULL;
static void (*sym_exynos_bo_destroy)(struct exynos_bo *bo) = NULL;
static void (*sym_exynos_device_destroy)(struct exynos_device *) = NULL;
86
87 static void
buffer_release(void * data,struct wl_buffer * buffer EINA_UNUSED)88 buffer_release(void *data, struct wl_buffer *buffer EINA_UNUSED)
89 {
90 Ecore_Wl2_Buffer *b = data;
91
92 b->busy = EINA_FALSE;
93 if (b->orphaned) ecore_wl2_buffer_destroy(b);
94 }
95
/* wl_buffer only has the single "release" event. */
static const struct wl_buffer_listener buffer_listener =
{
   buffer_release
};
100
/* Wrap a single-plane dmabuf (db->fd) into a wl_buffer through the
 * zwp_linux_dmabuf_v1 protocol.  Format is ARGB/XRGB8888 depending on
 * db->alpha; plane 0 uses offset 0, db->stride and a zero modifier.
 * NOTE(review): create_immed cannot report failure here - a
 * compositor-side rejection surfaces later as a protocol error. */
static struct wl_buffer *
_evas_dmabuf_wl_buffer_from_dmabuf(Ecore_Wl2_Display *ewd, Ecore_Wl2_Buffer *db)
{
   struct wl_buffer *buf;
   struct zwp_linux_dmabuf_v1 *dmabuf;
   struct zwp_linux_buffer_params_v1 *dp;
   uint32_t flags = 0;
   uint32_t format;

   if (db->alpha)
     format = DRM_FORMAT_ARGB8888;
   else
     format = DRM_FORMAT_XRGB8888;

   dmabuf = ecore_wl2_display_dmabuf_get(ewd);
   dp = zwp_linux_dmabuf_v1_create_params(dmabuf);
   /* args: fd, plane idx, offset, stride, modifier hi, modifier lo */
   zwp_linux_buffer_params_v1_add(dp, db->fd, 0, 0, db->stride, 0, 0);
   buf = zwp_linux_buffer_params_v1_create_immed(dp, db->w, db->h,
                                                 format, flags);
   wl_buffer_add_listener(buf, &buffer_listener, db);
   zwp_linux_buffer_params_v1_destroy(dp);

   return buf;
}
125
126 static void
_dmabuf_lock(Ecore_Wl2_Buffer * b)127 _dmabuf_lock(Ecore_Wl2_Buffer *b)
128 {
129 int ret;
130 struct dma_buf_sync s;
131
132 s.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
133 do
134 {
135 ret = ioctl(b->fd, DMA_BUF_IOCTL_SYNC, &s);
136 } while (ret && ((errno == EAGAIN) || (errno == EINTR)));
137
138 if (ret) WRN("Failed to lock dmabuf");
139 }
140
141 static void
_dmabuf_unlock(Ecore_Wl2_Buffer * b)142 _dmabuf_unlock(Ecore_Wl2_Buffer *b)
143 {
144 int ret;
145 struct dma_buf_sync s;
146
147 s.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
148 do
149 {
150 ret = ioctl(b->fd, DMA_BUF_IOCTL_SYNC, &s);
151 } while (ret && ((errno == EAGAIN) || (errno == EINTR)));
152
153 if (ret) WRN("Failed to unlock dmabuf");
154 }
155
/* Allocate an untiled w x h 32bpp intel bo and export it as a dmabuf
 * fd.  Fills *stride and *fd; returns NULL on failure. */
static Buffer_Handle *
_intel_alloc(Buffer_Manager *self, const char *name, int w, int h, unsigned long *stride, int32_t *fd)
{
   uint32_t tile = I915_TILING_NONE;
   drm_intel_bo *out;

   out = sym_drm_intel_bo_alloc_tiled(self->priv, name, w, h, 4, &tile,
                                      stride, 0);

   if (!out) return NULL;

   /* The allocator may still pick a tiled layout; CPU rendering can't
    * use that, so reject it. */
   if (tile != I915_TILING_NONE) goto err;
   /* First try to allocate an mmapable buffer with O_RDWR,
    * if that fails retry unmappable - if the compositor is
    * using GL it won't need to mmap the buffer and this can
    * work - otherwise it'll reject this buffer and we'll
    * have to fall back to shm rendering.
    */
   if (sym_drmPrimeHandleToFD(drm_fd, out->handle,
                              DRM_CLOEXEC | O_RDWR, fd) != 0)
     if (sym_drmPrimeHandleToFD(drm_fd, out->handle,
                                DRM_CLOEXEC, fd) != 0) goto err;

   return (Buffer_Handle *)out;

err:
   sym_drm_intel_bo_unreference(out);
   return NULL;
}
185
186 static void *
_intel_map(Ecore_Wl2_Buffer * buf)187 _intel_map(Ecore_Wl2_Buffer *buf)
188 {
189 drm_intel_bo *bo;
190
191 bo = (drm_intel_bo *)buf->bh;
192 if (sym_drm_intel_bo_map(bo) != 0) return NULL;
193 return bo->virtual;
194 }
195
196 static void
_intel_unmap(Ecore_Wl2_Buffer * buf)197 _intel_unmap(Ecore_Wl2_Buffer *buf)
198 {
199 drm_intel_bo *bo;
200
201 bo = (drm_intel_bo *)buf->bh;
202 sym_drm_intel_bo_unmap(bo);
203 }
204
205 static void
_intel_discard(Ecore_Wl2_Buffer * buf)206 _intel_discard(Ecore_Wl2_Buffer *buf)
207 {
208 drm_intel_bo *bo;
209
210 bo = (drm_intel_bo *)buf->bh;
211 sym_drm_intel_bo_unreference(bo);
212 }
213
214 static void
_intel_manager_destroy()215 _intel_manager_destroy()
216 {
217 sym_drm_intel_bufmgr_destroy(buffer_manager->priv);
218 }
219
/* dlopen() libdrm_intel, resolve the required entry points and create
 * a GEM bufmgr on @fd.  Installs the intel backend vtable on success;
 * on any failure the library is closed again and EINA_FALSE returned. */
static Eina_Bool
_intel_buffer_manager_setup(int fd)
{
   Eina_Bool fail = EINA_FALSE;
   void *drm_intel_lib;

   drm_intel_lib = dlopen("libdrm_intel.so", RTLD_LAZY | RTLD_GLOBAL);
   if (!drm_intel_lib) return EINA_FALSE;

   /* SYM() sets fail on any unresolved symbol. */
   SYM(drm_intel_lib, drm_intel_bufmgr_gem_init);
   SYM(drm_intel_lib, drm_intel_bo_unmap);
   SYM(drm_intel_lib, drm_intel_bo_map);
   SYM(drm_intel_lib, drm_intel_bo_alloc_tiled);
   SYM(drm_intel_lib, drm_intel_bo_unreference);
   SYM(drm_intel_lib, drm_intel_bufmgr_destroy);
   SYM(drm_intel_lib, drmPrimeHandleToFD);

   if (fail) goto err;

   /* 32 is the batch size handed to the GEM bufmgr. */
   buffer_manager->priv = sym_drm_intel_bufmgr_gem_init(fd, 32);
   if (!buffer_manager->priv) goto err;

   buffer_manager->alloc = _intel_alloc;
   buffer_manager->to_buffer = _evas_dmabuf_wl_buffer_from_dmabuf;
   buffer_manager->map = _intel_map;
   buffer_manager->unmap = _intel_unmap;
   buffer_manager->discard = _intel_discard;
   buffer_manager->lock = _dmabuf_lock;
   buffer_manager->unlock = _dmabuf_unlock;
   buffer_manager->manager_destroy = _intel_manager_destroy;
   buffer_manager->dl_handle = drm_intel_lib;

   return EINA_TRUE;

err:
   dlclose(drm_intel_lib);
   return EINA_FALSE;
}
258
/* Allocate a w x h 32bpp exynos bo and export it as a dmabuf fd.
 * Fills *stride and *fd; returns NULL on failure.
 * NOTE(review): w * h * 4 is evaluated in int before widening to
 * size_t - confirm callers bound the dimensions. */
static Buffer_Handle *
_exynos_alloc(Buffer_Manager *self, const char *name EINA_UNUSED, int w, int h, unsigned long *stride, int32_t *fd)
{
   size_t size = w * h * 4;
   struct exynos_bo *out;

   *stride = w * 4;
   out = sym_exynos_bo_create(self->priv, size, 0);
   if (!out) return NULL;
   /* First try to allocate an mmapable buffer with O_RDWR,
    * if that fails retry unmappable - if the compositor is
    * using GL it won't need to mmap the buffer and this can
    * work - otherwise it'll reject this buffer and we'll
    * have to fall back to shm rendering.
    */
   if (sym_drmPrimeHandleToFD(drm_fd, out->handle,
                              DRM_CLOEXEC | O_RDWR, fd) != 0)
     if (sym_drmPrimeHandleToFD(drm_fd, out->handle,
                                DRM_CLOEXEC, fd) != 0) goto err;

   return (Buffer_Handle *)out;

err:
   sym_exynos_bo_destroy(out);
   return NULL;
}
285
286 static void *
_exynos_map(Ecore_Wl2_Buffer * buf)287 _exynos_map(Ecore_Wl2_Buffer *buf)
288 {
289 struct exynos_bo *bo;
290 void *ptr;
291
292 bo = (struct exynos_bo *)buf->bh;
293 ptr = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED, buf->fd, 0);
294 if (ptr == MAP_FAILED) return NULL;
295 return ptr;
296 }
297
298 static void
_exynos_unmap(Ecore_Wl2_Buffer * buf)299 _exynos_unmap(Ecore_Wl2_Buffer *buf)
300 {
301 struct exynos_bo *bo;
302
303 bo = (struct exynos_bo *)buf->bh;
304 munmap(buf->mapping, bo->size);
305 }
306
307 static void
_exynos_discard(Ecore_Wl2_Buffer * buf)308 _exynos_discard(Ecore_Wl2_Buffer *buf)
309 {
310 struct exynos_bo *bo;
311
312 bo = (struct exynos_bo *)buf->bh;
313 sym_exynos_bo_destroy(bo);
314 }
315
316 static void
_exynos_manager_destroy()317 _exynos_manager_destroy()
318 {
319 sym_exynos_device_destroy(buffer_manager->priv);
320 }
321
/* dlopen() libdrm_exynos, resolve the needed symbols and create an
 * exynos device on @fd.  Because device creation succeeds on any
 * hardware, a tiny test allocation weeds out false positives.
 * Installs the exynos backend vtable on success; closes the library
 * and returns EINA_FALSE otherwise. */
static Eina_Bool
_exynos_buffer_manager_setup(int fd)
{
   Eina_Bool fail = EINA_FALSE;
   void *drm_exynos_lib;
   struct exynos_bo *bo;

   drm_exynos_lib = dlopen("libdrm_exynos.so", RTLD_LAZY | RTLD_GLOBAL);
   if (!drm_exynos_lib) return EINA_FALSE;

   /* SYM() sets fail on any unresolved symbol. */
   SYM(drm_exynos_lib, exynos_device_create);
   SYM(drm_exynos_lib, exynos_bo_create);
   SYM(drm_exynos_lib, exynos_bo_map);
   SYM(drm_exynos_lib, exynos_bo_destroy);
   SYM(drm_exynos_lib, exynos_device_destroy);
   SYM(drm_exynos_lib, drmPrimeHandleToFD);

   if (fail) goto err;

   buffer_manager->priv = sym_exynos_device_create(fd);
   if (!buffer_manager->priv) goto err;

   /* _device_create succeeds on any arch, test harder */
   bo = sym_exynos_bo_create(buffer_manager->priv, 32, 0);
   if (!bo) goto err;

   sym_exynos_bo_destroy(bo);

   buffer_manager->alloc = _exynos_alloc;
   buffer_manager->to_buffer = _evas_dmabuf_wl_buffer_from_dmabuf;
   buffer_manager->map = _exynos_map;
   buffer_manager->unmap = _exynos_unmap;
   buffer_manager->discard = _exynos_discard;
   buffer_manager->lock = _dmabuf_lock;
   buffer_manager->unlock = _dmabuf_unlock;
   buffer_manager->manager_destroy = _exynos_manager_destroy;
   buffer_manager->dl_handle = drm_exynos_lib;
   return EINA_TRUE;

err:
   dlclose(drm_exynos_lib);
   return EINA_FALSE;
}
365
366 static Buffer_Handle *
_wl_shm_alloc(Buffer_Manager * self EINA_UNUSED,const char * name EINA_UNUSED,int w,int h,unsigned long * stride,int32_t * fd)367 _wl_shm_alloc(Buffer_Manager *self EINA_UNUSED, const char *name EINA_UNUSED, int w, int h, unsigned long *stride, int32_t *fd)
368 {
369 Eina_Tmpstr *fullname;
370 size_t size = w * h * 4;
371 void *out = NULL;
372 char *tmp;
373
374 // XXX try memfd, then shm open then the below...
375 tmp = eina_vpath_resolve("(:usr.run:)/evas-wayland_shm-XXXXXX");
376 *fd = eina_file_mkstemp(tmp, &fullname);
377 free(tmp);
378
379 if (*fd < 0) return NULL;
380
381 unlink(fullname);
382 eina_tmpstr_del(fullname);
383
384 *stride = w * 4;
385 if (ftruncate(*fd, size) < 0) goto err;
386
387 out = mmap(NULL, size, (PROT_READ | PROT_WRITE), MAP_SHARED, *fd, 0);
388 if (out == MAP_FAILED) goto err;
389
390 return out;
391
392 err:
393 close(*fd);
394 return NULL;
395 }
396
397 static void *
_wl_shm_map(Ecore_Wl2_Buffer * buf)398 _wl_shm_map(Ecore_Wl2_Buffer *buf)
399 {
400 return buf->bh;
401 }
402
403 static void
_wl_shm_unmap(Ecore_Wl2_Buffer * buf EINA_UNUSED)404 _wl_shm_unmap(Ecore_Wl2_Buffer *buf EINA_UNUSED)
405 {
406 /* wl_shm is mapped for its lifetime */
407 }
408
409 static void
_wl_shm_discard(Ecore_Wl2_Buffer * buf)410 _wl_shm_discard(Ecore_Wl2_Buffer *buf)
411 {
412 munmap(buf->bh, buf->size);
413 }
414
/* The wl_shm backend holds no global state, so this is a no-op.
 * Fixed: empty parameter list () is an old-style (unprototyped)
 * declaration in C; use (void) to match the manager_destroy
 * function-pointer type. */
static void
_wl_shm_manager_destroy(void)
{
   /* Nop. */
}
420
/* Wrap @db in a wl_shm pool + buffer pair.  The fd is only needed to
 * create the pool, so it is closed here and db->fd invalidated. */
static struct wl_buffer *
_wl_shm_to_buffer(Ecore_Wl2_Display *ewd, Ecore_Wl2_Buffer *db)
{
   struct wl_buffer *buf;
   struct wl_shm_pool *pool;
   struct wl_shm *shm;
   uint32_t format;

   if (db->alpha)
     format = WL_SHM_FORMAT_ARGB8888;
   else
     format = WL_SHM_FORMAT_XRGB8888;

   shm = ecore_wl2_display_shm_get(ewd);
   pool = wl_shm_create_pool(shm, db->fd, db->size);
   buf = wl_shm_pool_create_buffer(pool, 0, db->w, db->h, db->stride, format);
   /* The wl_buffer keeps the pool's storage alive; the pool proxy and
    * the fd can go right away. */
   wl_shm_pool_destroy(pool);
   close(db->fd);
   db->fd = -1;
   wl_buffer_add_listener(buf, &buffer_listener, db);
   return buf;
}
443
/* Install the plain wl_shm fallback backend.  lock/unlock, priv and
 * dl_handle remain NULL (buffer_manager is calloc()ed by the caller).
 * Never fails. */
static Eina_Bool
_wl_shm_buffer_manager_setup(int fd EINA_UNUSED)
{
   buffer_manager->alloc = _wl_shm_alloc;
   buffer_manager->to_buffer = _wl_shm_to_buffer;
   buffer_manager->map = _wl_shm_map;
   buffer_manager->unmap = _wl_shm_unmap;
   buffer_manager->discard = _wl_shm_discard;
   buffer_manager->manager_destroy = _wl_shm_manager_destroy;
   return EINA_TRUE;
}
455
/* Minimal bookkeeping for a vc4 GEM bo: the kernel handle, its byte
 * size and the exported dmabuf fd. */
struct internal_vc4_bo
{
   __u32 handle;
   int size;
   int fd;
};
462
/* Round v up to the next multiple of a (a must be a power of two). */
static int
align(int v, int a)
{
   uint64_t mask = (uint64_t)a - 1;

   return (int)((v + mask) & ~mask);
}
468
/* Allocate a w x h 32bpp bo via the vc4 CREATE_BO ioctl (stride
 * rounded up to 16 bytes) and export it as a dmabuf fd.  Fills
 * *stride and *fd; returns NULL on failure. */
static Buffer_Handle *
_vc4_alloc(Buffer_Manager *self EINA_UNUSED, const char *name EINA_UNUSED, int w, int h, unsigned long *stride, int32_t *fd)
{
   struct drm_vc4_create_bo bo;
   struct internal_vc4_bo *obo;
   struct drm_gem_close cl;
   size_t size;
   int ret;

   obo = malloc(sizeof(struct internal_vc4_bo));
   if (!obo) return NULL;

   *stride = align(w * 4, 16);
   size = *stride * h;
   memset(&bo, 0, sizeof(bo));
   bo.size = size;
   ret = ioctl(drm_fd, DRM_IOCTL_VC4_CREATE_BO, &bo);
   if (ret)
     {
        free(obo);
        return NULL;
     }

   obo->handle = bo.handle;
   obo->size = size;
   /* First try to allocate an mmapable buffer with O_RDWR,
    * if that fails retry unmappable - if the compositor is
    * using GL it won't need to mmap the buffer and this can
    * work - otherwise it'll reject this buffer and we'll
    * have to fall back to shm rendering.
    */
   if (sym_drmPrimeHandleToFD(drm_fd, bo.handle,
                              DRM_CLOEXEC | O_RDWR, fd) != 0)
     if (sym_drmPrimeHandleToFD(drm_fd, bo.handle,
                                DRM_CLOEXEC, fd) != 0) goto err;

   obo->fd = *fd;
   return (Buffer_Handle *)obo;

err:
   /* Close the GEM handle so the kernel frees the bo. */
   memset(&cl, 0, sizeof(cl));
   cl.handle = bo.handle;
   ioctl(drm_fd, DRM_IOCTL_GEM_CLOSE, &cl);
   free(obo);
   return NULL;
}
515
/* Map the vc4 bo for CPU access: DRM_IOCTL_VC4_MMAP_BO yields a fake
 * offset that is then mmap()ed through the drm fd.  NULL on failure. */
static void *
_vc4_map(Ecore_Wl2_Buffer *buf)
{
   struct drm_vc4_mmap_bo map;
   struct internal_vc4_bo *bo;
   void *ptr;
   int ret;

   bo = (struct internal_vc4_bo *)buf->bh;

   memset(&map, 0, sizeof(map));
   map.handle = bo->handle;
   ret = ioctl(drm_fd, DRM_IOCTL_VC4_MMAP_BO, &map);
   if (ret) return NULL;

   ptr = mmap(NULL, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED, drm_fd,
              map.offset);
   if (ptr == MAP_FAILED) return NULL;

   return ptr;
}
537
538 static void
_vc4_unmap(Ecore_Wl2_Buffer * buf)539 _vc4_unmap(Ecore_Wl2_Buffer *buf)
540 {
541 struct internal_vc4_bo *bo;
542
543 bo = (struct internal_vc4_bo *)buf->bh;
544 munmap(buf->mapping, bo->size);
545 }
546
547 static void
_vc4_discard(Ecore_Wl2_Buffer * buf)548 _vc4_discard(Ecore_Wl2_Buffer *buf)
549 {
550 struct drm_gem_close cl;
551 struct internal_vc4_bo *bo;
552
553 bo = (struct internal_vc4_bo *)buf->bh;
554
555 memset(&cl, 0, sizeof(cl));
556 cl.handle = bo->handle;
557 ioctl(drm_fd, DRM_IOCTL_GEM_CLOSE, &cl);
558 }
559
/* Probe for a vc4 device by test-allocating (and immediately closing)
 * a tiny bo on @fd, then resolve drmPrimeHandleToFD from libdrm and
 * install the vc4 backend.  manager_destroy stays NULL: there is no
 * per-device state to free for vc4. */
static Eina_Bool
_vc4_buffer_manager_setup(int fd)
{
   struct drm_gem_close cl;
   struct drm_vc4_create_bo bo;
   Eina_Bool fail = EINA_FALSE;
   void *drm_lib;

   memset(&bo, 0, sizeof(bo));
   bo.size = 32;
   /* If the ioctl is unknown this is not a vc4 device. */
   if (ioctl(fd, DRM_IOCTL_VC4_CREATE_BO, &bo)) return EINA_FALSE;

   memset(&cl, 0, sizeof(cl));
   cl.handle = bo.handle;
   ioctl(fd, DRM_IOCTL_GEM_CLOSE, &cl);

   drm_lib = dlopen("libdrm.so", RTLD_LAZY | RTLD_GLOBAL);
   if (!drm_lib) return EINA_FALSE;

   /* SYM() sets fail on an unresolved symbol. */
   SYM(drm_lib, drmPrimeHandleToFD);

   if (fail) goto err;

   buffer_manager->alloc = _vc4_alloc;
   buffer_manager->to_buffer = _evas_dmabuf_wl_buffer_from_dmabuf;
   buffer_manager->map = _vc4_map;
   buffer_manager->unmap = _vc4_unmap;
   buffer_manager->discard = _vc4_discard;
   buffer_manager->lock = _dmabuf_lock;
   buffer_manager->unlock = _dmabuf_unlock;
   buffer_manager->manager_destroy = NULL;
   buffer_manager->dl_handle = drm_lib;
   return EINA_TRUE;
err:
   dlclose(drm_lib);
   return EINA_FALSE;
}
597
/* Set up the global buffer manager for @ewd.  Tries the dmabuf
 * backends (intel, exynos, vc4) over /dev/dri/renderD128 first, then
 * falls back to wl_shm.  Returns EINA_TRUE on success; subsequent
 * calls just take another reference on the existing manager. */
EAPI Eina_Bool
ecore_wl2_buffer_init(Ecore_Wl2_Display *ewd, Ecore_Wl2_Buffer_Type types)
{
   int fd = -1;
   Eina_Bool dmabuf = ewd->wl.dmabuf && (types & ECORE_WL2_BUFFER_DMABUF);
   Eina_Bool shm = ewd->wl.shm && (types & ECORE_WL2_BUFFER_SHM);
   Eina_Bool success = EINA_FALSE;

   if (buffer_manager)
     {
        buffer_manager->refcount++;
        return EINA_TRUE;
     }

   buffer_manager = calloc(1, sizeof(Buffer_Manager));
   if (!buffer_manager) goto err_alloc;

   if (!getenv("EVAS_WAYLAND_SHM_DISABLE_DMABUF") && dmabuf)
     {
        fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
        if (fd < 0)
          {
             ERR("Tried to use dmabufs, but can't find /dev/dri/renderD128 . Falling back to regular SHM");
             goto fallback_shm;
          }

        /* Probe backends in order until one claims the device. */
        success = _intel_buffer_manager_setup(fd);
        if (!success) success = _exynos_buffer_manager_setup(fd);
        if (!success) success = _vc4_buffer_manager_setup(fd);
     }
fallback_shm:
   if (!success) success = shm && _wl_shm_buffer_manager_setup(0);
   if (!success) goto err_bm;

   /* NOTE(review): if every dmabuf probe failed but shm succeeded, fd
    * is still open and stored here even though the shm backend never
    * uses it - confirm whether keeping the render node open is
    * intentional. */
   drm_fd = fd;
   buffer_manager->refcount = 1;
   return EINA_TRUE;

err_bm:
   if (fd >= 0) close(fd);
   free(buffer_manager);
   buffer_manager = NULL;
err_alloc:
   return EINA_FALSE;
}
643
644 static void
_buffer_manager_ref(void)645 _buffer_manager_ref(void)
646 {
647 buffer_manager->refcount++;
648 }
649
650 static void
_buffer_manager_deref(void)651 _buffer_manager_deref(void)
652 {
653 buffer_manager->refcount--;
654 if (buffer_manager->refcount || !buffer_manager->destroyed) return;
655
656 if (buffer_manager->manager_destroy) buffer_manager->manager_destroy();
657 free(buffer_manager);
658 buffer_manager = NULL;
659 if (drm_fd >=0) close(drm_fd);
660 }
661
662 static void
_buffer_manager_destroy(void)663 _buffer_manager_destroy(void)
664 {
665 if (buffer_manager->destroyed) return;
666 buffer_manager->destroyed = EINA_TRUE;
667 _buffer_manager_deref();
668 }
669
670 static Buffer_Handle *
_buffer_manager_alloc(const char * name,int w,int h,unsigned long * stride,int32_t * fd)671 _buffer_manager_alloc(const char *name, int w, int h, unsigned long *stride, int32_t *fd)
672 {
673 Buffer_Handle *out;
674
675 _buffer_manager_ref();
676 out = buffer_manager->alloc(buffer_manager, name, w, h, stride, fd);
677 if (!out) _buffer_manager_deref();
678 return out;
679 }
680
681 EAPI struct wl_buffer *
ecore_wl2_buffer_wl_buffer_get(Ecore_Wl2_Buffer * buf)682 ecore_wl2_buffer_wl_buffer_get(Ecore_Wl2_Buffer *buf)
683 {
684 return buf->wl_buffer;
685 }
686
/* Map @buf for CPU access and return the pixel memory, optionally
 * reporting width/height/stride.  The first map takes a manager
 * reference and caches the mapping; later calls return the cache.
 * Returns NULL if the backend mapping fails. */
EAPI void *
ecore_wl2_buffer_map(Ecore_Wl2_Buffer *buf, int *w, int *h, int *stride)
{
   void *out;

   EINA_SAFETY_ON_NULL_RETURN_VAL(buf, NULL);

   if (buf->mapping)
     {
        out = buf->mapping;
     }
   else
     {
        _buffer_manager_ref();
        out = buffer_manager->map(buf);
        if (!out)
          {
             _buffer_manager_deref();
             return NULL;
          }
        /* NOTE(review): a fresh mapping only sets the locked flag, so
         * the backend lock() below is skipped for it - confirm this is
         * intended. */
        buf->locked = EINA_TRUE;
        buf->mapping = out;
     }
   if (w) *w = buf->w;
   if (h) *h = buf->h;
   if (stride) *stride = (int)buf->stride;

   if (!buf->locked) ecore_wl2_buffer_lock(buf);

   return out;
}
718
719 EAPI void
ecore_wl2_buffer_unmap(Ecore_Wl2_Buffer * buf)720 ecore_wl2_buffer_unmap(Ecore_Wl2_Buffer *buf)
721 {
722 buffer_manager->unmap(buf);
723 _buffer_manager_deref();
724 }
725
726 EAPI void
ecore_wl2_buffer_discard(Ecore_Wl2_Buffer * buf)727 ecore_wl2_buffer_discard(Ecore_Wl2_Buffer *buf)
728 {
729 buffer_manager->discard(buf);
730 _buffer_manager_deref();
731 }
732
733 EAPI void
ecore_wl2_buffer_lock(Ecore_Wl2_Buffer * b)734 ecore_wl2_buffer_lock(Ecore_Wl2_Buffer *b)
735 {
736 if (b->locked) ERR("Buffer already locked\n");
737 if (buffer_manager->lock) buffer_manager->lock(b);
738 b->locked = EINA_TRUE;
739 }
740
741 EAPI void
ecore_wl2_buffer_unlock(Ecore_Wl2_Buffer * b)742 ecore_wl2_buffer_unlock(Ecore_Wl2_Buffer *b)
743 {
744 if (!b->locked) ERR("Buffer already unlocked\n");
745 if (buffer_manager->unlock) buffer_manager->unlock(b);
746 b->locked = EINA_FALSE;
747 }
748
/* Destroy @b.  If the buffer is still locked or held by the
 * compositor it is only flagged "orphaned"; destruction then re-runs
 * from buffer_release() once the compositor lets go.  NULL is a
 * no-op. */
EAPI void
ecore_wl2_buffer_destroy(Ecore_Wl2_Buffer *b)
{
   if (!b) return;

   if (b->locked || b->busy)
     {
        b->orphaned = EINA_TRUE;
        return;
     }
   if (b->fd != -1) close(b->fd);
   if (b->mapping) ecore_wl2_buffer_unmap(b);
   ecore_wl2_buffer_discard(b);
   if (b->wl_buffer) wl_buffer_destroy(b->wl_buffer);
   b->wl_buffer = NULL;
   free(b);
}
766
767 EAPI Eina_Bool
ecore_wl2_buffer_busy_get(Ecore_Wl2_Buffer * buffer)768 ecore_wl2_buffer_busy_get(Ecore_Wl2_Buffer *buffer)
769 {
770 EINA_SAFETY_ON_NULL_RETURN_VAL(buffer, EINA_FALSE);
771
772 return (buffer->locked) || (buffer->busy);
773 }
774
775 EAPI void
ecore_wl2_buffer_busy_set(Ecore_Wl2_Buffer * buffer)776 ecore_wl2_buffer_busy_set(Ecore_Wl2_Buffer *buffer)
777 {
778 EINA_SAFETY_ON_NULL_RETURN(buffer);
779
780 buffer->busy = EINA_TRUE;
781 }
782
783 EAPI int
ecore_wl2_buffer_age_get(Ecore_Wl2_Buffer * buffer)784 ecore_wl2_buffer_age_get(Ecore_Wl2_Buffer *buffer)
785 {
786 EINA_SAFETY_ON_NULL_RETURN_VAL(buffer, 0);
787
788 return buffer->age;
789 }
790
ecore_wl2_buffer_age_set(Ecore_Wl2_Buffer * buffer,int age)791 EAPI void ecore_wl2_buffer_age_set(Ecore_Wl2_Buffer *buffer, int age)
792 {
793 EINA_SAFETY_ON_NULL_RETURN(buffer);
794
795 buffer->age = age;
796 }
797
798 EAPI void
ecore_wl2_buffer_age_inc(Ecore_Wl2_Buffer * buffer)799 ecore_wl2_buffer_age_inc(Ecore_Wl2_Buffer *buffer)
800 {
801 EINA_SAFETY_ON_NULL_RETURN(buffer);
802
803 buffer->age++;
804 }
805
/* The only user of this function has been removed, but it
 * will likely come back later. The problem is that
 * a dmabuf buffer needs to be resized on the compositor
 * even if the allocation still fits. Doing the resize
 * properly isn't something that will be fixed in the 1.21
 * timeframe, so the optimization has been (temporarily)
 * removed.
 *
 * This is currently beta api - don't move it out of beta
 * with no users...
 */
/* Try to re-fit @b to w x h without reallocating: allowed only when
 * the new width is not smaller than the current one, still fits
 * within the existing stride (stride/4 pixels per row) and the height
 * is unchanged.  Returns EINA_TRUE if the buffer was adjusted. */
EAPI Eina_Bool
ecore_wl2_buffer_fit(Ecore_Wl2_Buffer *b, int w, int h)
{
   int stride;

   EINA_SAFETY_ON_NULL_RETURN_VAL(b, EINA_FALSE);

   stride = b->stride;
   if ((w >= b->w) && (w <= stride / 4) && (h == b->h))
     {
        b->w = w;
        return EINA_TRUE;
     }

   return EINA_FALSE;
}
833
/* Allocate an Ecore_Wl2_Buffer without its wl_buffer proxy; the proxy
 * is attached afterwards (see ecore_wl2_buffer_create()).  Returns
 * NULL on allocation failure. */
static Ecore_Wl2_Buffer *
_ecore_wl2_buffer_partial_create(int w, int h, Eina_Bool alpha)
{
   Ecore_Wl2_Buffer *out;

   out = calloc(1, sizeof(Ecore_Wl2_Buffer));
   if (!out) return NULL;

   out->fd = -1;
   out->alpha = alpha;
   /* The "name" string is only used by the intel backend (debugging). */
   out->bh = _buffer_manager_alloc("name", w, h, &out->stride, &out->fd);
   if (!out->bh)
     {
        free(out);
        return NULL;
     }
   out->w = w;
   out->h = h;
   out->size = out->stride * h;

   return out;
}
856
857 EAPI Ecore_Wl2_Buffer *
ecore_wl2_buffer_create(Ecore_Wl2_Display * ewd,int w,int h,Eina_Bool alpha)858 ecore_wl2_buffer_create(Ecore_Wl2_Display *ewd, int w, int h, Eina_Bool alpha)
859 {
860 Ecore_Wl2_Buffer *out;
861
862 out = _ecore_wl2_buffer_partial_create(w, h, alpha);
863 if (!out) return NULL;
864
865 out->wl_buffer = buffer_manager->to_buffer(ewd, out);
866
867 return out;
868 }
869
870 static void
_create_succeeded(void * data EINA_UNUSED,struct zwp_linux_buffer_params_v1 * params,struct wl_buffer * new_buffer)871 _create_succeeded(void *data EINA_UNUSED,
872 struct zwp_linux_buffer_params_v1 *params,
873 struct wl_buffer *new_buffer)
874 {
875 wl_buffer_destroy(new_buffer);
876 zwp_linux_buffer_params_v1_destroy(params);
877 }
878
879 static void
_create_failed(void * data,struct zwp_linux_buffer_params_v1 * params)880 _create_failed(void *data, struct zwp_linux_buffer_params_v1 *params)
881 {
882 Ecore_Wl2_Display *ewd = data;
883
884 zwp_linux_buffer_params_v1_destroy(params);
885 _buffer_manager_destroy();
886 ewd->wl.dmabuf = NULL;
887 }
888
/* Listener for the dmabuf feature test in _ecore_wl2_buffer_test(). */
static const struct zwp_linux_buffer_params_v1_listener params_listener =
{
   _create_succeeded,
   _create_failed
};
894
/* Round-trip a 1x1 dmabuf through the compositor to verify that
 * dmabuf support really works.  Uses the non-immediate create request
 * so the compositor replies through params_listener; on failure the
 * display's dmabuf global is cleared so later setup falls back to
 * shm. */
void
_ecore_wl2_buffer_test(Ecore_Wl2_Display *ewd)
{
   struct zwp_linux_buffer_params_v1 *dp;
   Ecore_Wl2_Buffer *buf;

   if (!ecore_wl2_buffer_init(ewd, ECORE_WL2_BUFFER_DMABUF)) return;

   buf = _ecore_wl2_buffer_partial_create(1, 1, EINA_TRUE);
   if (!buf) goto fail;

   dp = zwp_linux_dmabuf_v1_create_params(ewd->wl.dmabuf);
   /* args: fd, plane idx, offset, stride, modifier hi, modifier lo */
   zwp_linux_buffer_params_v1_add(dp, buf->fd, 0, 0, buf->stride, 0, 0);
   zwp_linux_buffer_params_v1_add_listener(dp, &params_listener, ewd);
   zwp_linux_buffer_params_v1_create(dp, buf->w, buf->h,
                                     DRM_FORMAT_ARGB8888, 0);

   ecore_wl2_buffer_destroy(buf);

   return;

fail:
   _buffer_manager_destroy();
   ewd->wl.dmabuf = NULL;
}
920