1 /*
2  * Copyright 2016-2020, Björn Ståhl
3  * License: 3-Clause BSD, see COPYING file in arcan source repository.
4  * Reference: http://arcan-fe.com
5  * Description: egl-dri specific render-node based backend support
6  * library for setting up a headless display, passing buffer handles
7  * and handling render-node transfer
8  */
9 #define WANT_ARCAN_SHMIF_HELPER
10 #define AGP_ENABLE_UNPURE
11 #include "../arcan_shmif.h"
12 #include "../shmif_privext.h"
13 #include "video_platform.h"
14 #include "egl-dri/egl.h"
15 #include "agp/glfun.h"
16 
17 #define EGL_EGLEXT_PROTOTYPES
18 #define GL_GLEXT_PROTOTYPES
19 #define MESA_EGL_NO_X11_HEADERS
20 #include <EGL/egl.h>
21 #include <EGL/eglext.h>
22 
23 #include <inttypes.h>
24 #include <drm.h>
25 #include <drm_fourcc.h>
26 #include <xf86drm.h>
27 #include <gbm.h>
28 #include <stdatomic.h>
29 
30 #include <sys/types.h>
31 #include <sys/stat.h>
32 #include <fcntl.h>
33 #include <sys/ioctl.h>
34 
35 #include "egl-dri/egl_gbm_helper.h"
36 
37 _Thread_local static struct arcan_shmif_cont* active_context;
38 
39 static struct agp_fenv agp_fenv;
40 static struct egl_env agp_eglenv;
41 
42 #define SHARED_DISPLAY (uintptr_t)(-1)
43 
44 /*
45  *
46  * for EGLStreams, we need:
47  *  1. egl->eglGetPlatformDisplayEXT : EGL_EXT_platform_base
48  *  2. egl->eglQueryDevicesEXT : EGL_EXT_device_base
49  *     OR get the device enumeration
50  *  THEN
51  *     when we get the buffer method moved over to streams
52  *
53  * we ALSO have:
54  *  GL_EXT_memory_object : glCreateMemoryObjectsEXT,
55  *  glMemoryObjectParameterivEXT, glTexStorageMem2DEXT,
56  *  GL_NVX_unix_allocator_import, glImportMemoryFdEXT, glTexParametervNVX,
57  *  GL_EXT_memory_object_fd,
58  *  ARB_texture_storage,
59  */
60 
61 /* note: should also get:
62  * eglCreateSyncKHR,
63  * eglDestroySyncKHR,
64  * eglWaitSyncKHR,
65  * eglClientWaitSyncKHR,
66  * eglDupNativeFenceFDANDROID
67  */
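/*
 * a minimal lookup sketch for the entry points listed in the two notes above,
 * resolved the same way the map_egl*_functions helpers do it, i.e. through
 * eglGetProcAddress. The PFN* typedefs come from <EGL/eglext.h>. Illustration
 * only, not wired into the code below:
 *
 *   PFNEGLQUERYDEVICESEXTPROC query_devices = (PFNEGLQUERYDEVICESEXTPROC)
 *     eglGetProcAddress("eglQueryDevicesEXT");
 *   PFNEGLCREATESYNCKHRPROC create_sync = (PFNEGLCREATESYNCKHRPROC)
 *     eglGetProcAddress("eglCreateSyncKHR");
 *   PFNEGLDUPNATIVEFENCEFDANDROIDPROC dup_fence =
 *     (PFNEGLDUPNATIVEFENCEFDANDROIDPROC)
 *       eglGetProcAddress("eglDupNativeFenceFDANDROID");
 *
 *   if (!query_devices || !create_sync || !dup_fence){
 *     // extension set missing, fall back to the GBM / readback paths
 *   }
 */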
68 
69 struct shmif_ext_hidden_int {
70 	struct gbm_device* dev;
71 	char* device_path;
72 
73 	struct agp_rendertarget* rtgt;
74 	struct agp_vstore buf;
75 
76 /* need to account for multiple contexts being created on the same setup */
77 	uint64_t ctx_alloc;
78 	EGLContext alt_contexts[64];
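/* bit i set in ctx_alloc marks alt_contexts[i] as live; the public API hands
 * out 1-based indices so that 0 can keep meaning 'no context' */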
79 
80 	int type;
81 	bool managed;
82 	EGLContext context;
83 	unsigned context_ind;
84 	EGLDisplay display;
85 	EGLSurface surface;
86 };
87 
88 /*
89  * These are spilled over from AGP, and ideally, we should just
90  * separate those references or linker-script erase them as they are
91  * not needed here
92 */
93 void* platform_video_gfxsym(const char* sym)
94 {
95 	return eglGetProcAddress(sym);
96 }
97 
98 bool platform_video_map_handle(struct agp_vstore* store, int64_t handle)
99 {
100 	return false;
101 }
102 
103 bool platform_video_map_buffer(
104 	struct agp_vstore* vs, struct agp_buffer_plane* planes, size_t n)
105 {
106 	return false;
107 }
108 
109 struct agp_fenv* arcan_shmifext_getfenv(struct arcan_shmif_cont* con)
110 {
111 	if (!con || !con->privext || !con->privext->internal)
112 		return NULL;
113 
114 	return &agp_fenv;
115 }
116
117 static void zap_vstore(struct agp_vstore* vstore)
118 {
119 	free(vstore->vinf.text.raw);
120 	vstore->vinf.text.raw = NULL;
121 	vstore->vinf.text.s_raw = 0;
122 }
123 
124 static void gbm_drop(struct arcan_shmif_cont* con)
125 {
126 	if (!con->privext->internal)
127 		return;
128 
129 	struct shmif_ext_hidden_int* in = con->privext->internal;
130 
131 	if (in->dev){
132 /* this will actually free the gbm- resources as well */
133 		if (in->rtgt){
134 			agp_drop_rendertarget(in->rtgt);
135 		}
136 	}
137 /* are we managing the context or is the user providing his own? */
138 	if (in->managed){
139 		agp_eglenv.make_current(in->display,
140 			EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
141 		if (in->context)
142 			agp_eglenv.destroy_context(in->display, in->context);
143 		agp_eglenv.terminate(in->display);
144 	}
145 	in->dev = NULL;
146 
147 	if (in->device_path){
148 		free(in->device_path);
149 		in->device_path = NULL;
150 	}
151 
152 	free(in);
153 	con->privext->internal = NULL;
154 	con->privext->cleanup = NULL;
155 }
156 
157 struct arcan_shmifext_setup arcan_shmifext_defaults(
158 	struct arcan_shmif_cont* con)
159 {
160 	int major = getenv("AGP_GL_MAJOR") ?
161 		strtoul(getenv("AGP_GL_MAJOR"), NULL, 10) : 2;
162 
163 	int minor = getenv("AGP_GL_MINOR") ?
164 		strtoul(getenv("AGP_GL_MINOR"), NULL, 10) : 1;
165 
166 	return (struct arcan_shmifext_setup){
167 		.red = 1, .green = 1, .blue = 1,
168 		.alpha = 1, .depth = 16,
169 		.api = API_OPENGL,
170 		.builtin_fbo = 1,
171 		.major = major, .minor = minor,
172 		.shared_context = 0
173 	};
174 }
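/*
 * a minimal usage sketch (assumptions: SEGID_APPLICATION is the right segment
 * type for the client and the GL2.1 defaults are acceptable), not a
 * definitive integration example:
 *
 *   struct arcan_shmif_cont C =
 *     arcan_shmif_open(SEGID_APPLICATION, SHMIF_ACQUIRE_FATALFAIL, NULL);
 *   struct arcan_shmifext_setup cfg = arcan_shmifext_defaults(&C);
 *
 *   if (SHMIFEXT_OK != arcan_shmifext_setup(&C, cfg))
 *     return EXIT_FAILURE;
 *
 *   arcan_shmifext_make_current(&C);
 *   // draw with GL into the builtin FBO, then submit:
 *   arcan_shmifext_signal(&C, 0, SHMIF_SIGVID, SHMIFEXT_BUILTIN);
 */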
175 
176 bool arcan_shmifext_import_buffer(
177 	struct arcan_shmif_cont* con,
178 	int format,
179 	struct shmifext_buffer_plane* planes,
180 	size_t n_planes,
181 	size_t buffer_plane_sz
182 )
183 {
184 	if (!con || !con->addr || !con->privext || !con->privext->internal)
185 		return false;
186 
187 	struct shmif_ext_hidden_int* I = con->privext->internal;
188 	EGLDisplay display = I->display;
189 
190 	if ((uintptr_t)I->display == SHARED_DISPLAY){
191 		if (!active_context)
192 			return false;
193
194 		struct shmif_ext_hidden_int* O = active_context->privext->internal;
195 		display = O->display;
196 	}
197 
198 	struct agp_vstore* vs = &I->buf;
199 	vs->txmapped = TXSTATE_TEX2D;
200 
201 	if (I->rtgt){
202 		agp_drop_rendertarget(I->rtgt);
203 		I->rtgt = NULL;
204 		memset(vs, '\0', sizeof(struct agp_vstore));
205 	}
206 
207 	EGLImage img = helper_dmabuf_eglimage(
208 		&agp_fenv, &agp_eglenv, display, planes, n_planes);
209 
210 	if (!img)
211 		return false;
212
213 /* might have an old eglImage around */
214 	if (0 != vs->vinf.text.tag){
215 		agp_eglenv.destroy_image(display, (EGLImageKHR) vs->vinf.text.tag);
216 	}
217 
218 	I->buf.w = planes[0].w;
219 	I->buf.h = planes[0].h;
220 
221 	helper_eglimage_color(&agp_fenv, &agp_eglenv, img, &vs->vinf.text.glid);
222 	vs->vinf.text.tag = (uintptr_t) img;
223
224 	return true;
225 }
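/*
 * consumer-side sketch for importing a single-plane dma-buf received from
 * elsewhere; the fd/width/stride/modifier values are placeholders that would
 * come from whoever exported the buffer, and the 'format' argument is not
 * consumed by this backend:
 *
 *   struct shmifext_buffer_plane plane = {
 *     .fd = dmabuf_fd,
 *     .fence = -1,
 *     .w = width, .h = height,
 *     .gbm = {
 *       .format = DRM_FORMAT_XRGB8888,
 *       .stride = stride,
 *       .offset = 0,
 *       .mod_hi = (modifier >> 32),
 *       .mod_lo = (modifier & 0xffffffff)
 *     }
 *   };
 *   arcan_shmifext_import_buffer(con, 0, &plane, 1, sizeof(plane));
 */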
226 
227 static void* lookup(void* tag, const char* sym)
228 {
229 	return eglGetProcAddress(sym);
230 }
231 
232 void* arcan_shmifext_lookup(
233 	struct arcan_shmif_cont* con, const char* fun)
234 {
235 	return eglGetProcAddress(fun);
236 }
237 
238 static void* lookup_fenv(void* tag, const char* sym, bool req)
239 {
240 	return eglGetProcAddress(sym);
241 }
242 
243 static bool get_egl_context(
244 	struct shmif_ext_hidden_int* ctx, unsigned ind, EGLContext* dst)
245 {
246 	if (ind >= 64)
247 		return false;
248 
249 	if (!ctx->managed || !((1ULL << ind) & ctx->ctx_alloc))
250 		return false;
251
252 	*dst = ctx->alt_contexts[ind];
253 	return true;
254 }
255 
256 static enum shmifext_setup_status add_context(
257 	struct shmif_ext_hidden_int* ctx, struct arcan_shmifext_setup* arg,
258 	unsigned* ind)
259 {
260 /* make sure the shmifext has been setup */
261 	int type;
262 	EGLint nc;
263 	switch(arg->api){
264 		case API_OPENGL: type = EGL_OPENGL_BIT; break;
265 		case API_GLES: type = EGL_OPENGL_ES2_BIT; break;
266 		default:
267 			return SHMIFEXT_NO_API;
268 		break;
269 	}
270 
271 	const EGLint attribs[] = {
272 		EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
273 		EGL_RENDERABLE_TYPE, type,
274 		EGL_RED_SIZE, arg->red,
275 		EGL_GREEN_SIZE, arg->green,
276 		EGL_BLUE_SIZE, arg->blue,
277 		EGL_ALPHA_SIZE, arg->alpha,
278 		EGL_DEPTH_SIZE, arg->depth,
279 		EGL_NONE
280 	};
281 
282 /* find first free context in bitmap */
283 	size_t i = 0;
284 	bool found = false;
285 	for (; i < 64; i++)
286 		if (!(ctx->ctx_alloc & (1ULL << i))){
287 			found = true;
288 			break;
289 		}
290 
291 /* common for GL applications to treat 0 as no context, so we do the same, have
292  * to add/subtract 1 from the index (or just XOR with a cookie) */
293 	if (!found)
294 		return SHMIFEXT_OUT_OF_MEMORY;
295 
296 	if (!agp_eglenv.get_configs(ctx->display, NULL, 0, &nc))
297 		return SHMIFEXT_NO_CONFIG;
298 
299 	if (0 == nc)
300 		return SHMIFEXT_NO_CONFIG;
301 
302 	EGLConfig cfg;
303 	if (!agp_eglenv.choose_config(ctx->display, attribs, &cfg, 1, &nc) || nc < 1)
304 		return SHMIFEXT_NO_CONFIG;
305 
306 	EGLint cas[] = {EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE,
307 		EGL_NONE, EGL_NONE, EGL_NONE, EGL_NONE, EGL_NONE, EGL_NONE,
308 		EGL_NONE, EGL_NONE, EGL_NONE};
309 
310 	int ofs = 2;
311 	if (arg->api != API_GLES){
312 		if (arg->major){
313 			cas[ofs++] = EGL_CONTEXT_MAJOR_VERSION_KHR;
314 			cas[ofs++] = arg->major;
315 			cas[ofs++] = EGL_CONTEXT_MINOR_VERSION_KHR;
316 			cas[ofs++] = arg->minor;
317 		}
318 
319 		if (arg->mask){
320 			cas[ofs++] = EGL_CONTEXT_OPENGL_PROFILE_MASK_KHR;
321 			cas[ofs++] = arg->mask;
322 		}
323 
324 		if (arg->flags){
325 			cas[ofs++] = EGL_CONTEXT_FLAGS_KHR;
326 			cas[ofs++] = arg->flags;
327 		}
328 	}
329 
330 /* pick a pre-existing / pre-added bit that has been manually
331  * added to the specific shmif context */
332 	EGLContext sctx = NULL;
333 	if (arg->shared_context)
334 		get_egl_context(ctx, arg->shared_context, &sctx);
335 
336 	ctx->alt_contexts[i] =
337 		agp_eglenv.create_context(ctx->display, cfg, sctx, cas);
338
339 	if (!ctx->alt_contexts[i])
340 		return SHMIFEXT_NO_CONTEXT;
341
342 	ctx->ctx_alloc |= 1ULL << i;
343 	*ind = i+1;
344 	return SHMIFEXT_OK;
345 }
346
347 unsigned arcan_shmifext_add_context(
348 	struct arcan_shmif_cont* con, struct arcan_shmifext_setup arg)
349 {
350 	if (!con || !con->privext || !con->privext->internal ||
351 		!con->privext->internal->display)
352 			return 0;
353 
354 	struct shmif_ext_hidden_int* ctx = con->privext->internal;
355 
356 	unsigned res;
357 	if (SHMIFEXT_OK != add_context(ctx, &arg, &res)){
358 		return 0;
359 	}
360 
361 	return res;
362 }
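/*
 * a small sketch of adding a second, resource-sharing context and switching
 * to it; assumes a connection already initialized via arcan_shmifext_setup,
 * whose context occupies index 1:
 *
 *   struct arcan_shmifext_setup cfg = arcan_shmifext_defaults(con);
 *   cfg.shared_context = 1; // share objects with the setup-created context
 *
 *   unsigned ind = arcan_shmifext_add_context(con, cfg);
 *   if (ind)
 *     arcan_shmifext_swap_context(con, ind);
 */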
363 
364 void arcan_shmifext_swap_context(
365 	struct arcan_shmif_cont* con, unsigned context)
366 {
367 	if (!con || !con->privext || !con->privext->internal ||
368 		!con->privext->internal->display || context > 64 || !context)
369 			return;
370 
371 	context--;
372 
373 	struct shmif_ext_hidden_int* ctx = con->privext->internal;
374 	EGLContext egl_ctx;
375 
376 	if (!get_egl_context(ctx, context, &egl_ctx))
377 		return;
378 
379 	ctx->context_ind = context;
380 	ctx->context = egl_ctx;
381 	agp_eglenv.make_current(ctx->display, ctx->surface, ctx->surface, ctx->context);
382 }
383 
384 static bool scanout_alloc(
385 	struct agp_rendertarget* tgt, struct agp_vstore* vs, int action, void* tag)
386 {
387 	struct agp_fenv* env = agp_env();
388 	struct arcan_shmif_cont* conn = tag;
389 
390 	if (action == RTGT_ALLOC_FREE){
391 		struct shmifext_color_buffer* buf =
392 			(struct shmifext_color_buffer*) vs->vinf.text.handle;
393 
394 		if (buf)
395 			arcan_shmifext_free_color(conn, buf);
396 		else
397 			env->delete_textures(1, &vs->vinf.text.glid);
398 
399 		vs->vinf.text.handle = 0;
400 		vs->vinf.text.glid = 0;
401 
402 		return true;
403 	}
404 
405 /* this will give a color buffer that is suitable for FBO and sharing */
406 	struct shmifext_color_buffer* buf =
407 		malloc(sizeof(struct shmifext_color_buffer));
408 
409 	if (!arcan_shmifext_alloc_color(conn, buf)){
410 		agp_empty_vstore(vs, vs->w, vs->h);
411 		free(buf);
412 		return true;
413 	}
414 
415 /* remember the metadata for free later */
416 	vs->vinf.text.glid = buf->id.gl;
417 	vs->vinf.text.handle = (uintptr_t) buf;
418 
419 	return true;
420 }
421 
422 enum shmifext_setup_status arcan_shmifext_setup(
423 	struct arcan_shmif_cont* con,
424 	struct arcan_shmifext_setup arg)
425 {
426 	struct shmif_ext_hidden_int* ctx = con->privext->internal;
427 	enum shmifext_setup_status res;
428 
429 	if (ctx && ctx->display)
430 		return SHMIFEXT_ALREADY_SETUP;
431 
432 /* don't do anything with this for the time being */
433 	if (arg.no_context){
434 		con->privext->internal = malloc(sizeof(struct shmif_ext_hidden_int));
435 		if (!con->privext->internal)
436 			return SHMIFEXT_NO_API;
437 
438 		memset(con->privext->internal, '\0', sizeof(struct shmif_ext_hidden_int));
439 		con->privext->internal->display = (EGLDisplay)((void*)SHARED_DISPLAY);
440 		return SHMIFEXT_OK;
441 	}
442 
443 /* don't use the agp_eglenv here as it has not been setup yet */
444 	switch (arg.api){
445 	case API_OPENGL:
446 		if (!((ctx && ctx->display) || eglBindAPI(EGL_OPENGL_API)))
447 			return SHMIFEXT_NO_API;
448 	break;
449 	case API_GLES:
450 		if (!((ctx && ctx->display) || eglBindAPI(EGL_OPENGL_ES_API)))
451 			return SHMIFEXT_NO_API;
452 	break;
453 	case API_VHK:
454 	default:
455 /* won't have working code here for a while, first need a working AGP_
456  * implementation that works with normal Arcan. Then there's the usual
457  * problem with getting access to a handle, for EGLStreams it should
458  * work, but with GBM? KHR vkcube has some Intel-only hack */
459 		return SHMIFEXT_NO_API;
460 	break;
461 	};
462 
463 	void* display;
464 	if (!arcan_shmifext_egl(con, &display, lookup, NULL))
465 		return SHMIFEXT_NO_DISPLAY;
466 
467 	ctx = con->privext->internal;
468 
469 	if (agp_eglenv.get_platform_display){
470 		ctx->display = agp_eglenv.get_platform_display(
471 			EGL_PLATFORM_GBM_KHR, (void*) display, NULL);
472 	}
473 	else
474 		ctx->display = agp_eglenv.get_display((EGLNativeDisplayType) display);
475 
476 	if (!ctx->display)
477 		return SHMIFEXT_NO_DISPLAY;
478 
479 	if (!agp_eglenv.initialize(ctx->display, NULL, NULL))
480 		return SHMIFEXT_NO_EGL;
481 
482 /* this needs the context to be initialized */
483 	map_eglext_functions(&agp_eglenv, lookup_fenv, NULL);
484 
485 /* we have egl and a display, build a config/context and set it as the
486  * current default context for this shmif-connection */
487 	ctx->managed = true;
488 	unsigned ind;
489 	res = add_context(ctx, &arg, &ind);
490 
491 	if (SHMIFEXT_OK != res)
492 		return res;
493 
494 	arcan_shmifext_swap_context(con, ind);
495 	ctx->surface = EGL_NO_SURFACE;
496 	active_context = con;
497 
498 /*
499  * this is likely not the best way to keep it if we try to run multiple
500  * segments on different GPUs with different GL implementations, if/when
501  * that becomes a problem, move to a context specific one - should mostly
502  * be to resolve the fenv on first make-current
503  */
504 	if (!agp_fenv.draw_buffer){
505 		agp_glinit_fenv(&agp_fenv, lookup_fenv, NULL);
506 		agp_setenv(&agp_fenv);
507 	}
508 
509 /* the built-in render targets act as a framebuffer object container that can
510  * also mutate into having a swapchain, with DOUBLEBUFFER that happens
511  * immediately */
512 	if (arg.builtin_fbo){
513 		ctx->buf = (struct agp_vstore){
514 			.txmapped = TXSTATE_TEX2D,
515 				.w = con->w,
516 				.h = con->h
517 		};
518 
519 		ctx->rtgt = agp_setup_rendertarget(
520 			&ctx->buf, RENDERTARGET_COLOR_DEPTH_STENCIL);
521 
522 		agp_rendertarget_allocator(ctx->rtgt, scanout_alloc, con);
523 	}
524 
525 	arcan_shmifext_make_current(con);
526 	return SHMIFEXT_OK;
527 }
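/*
 * with .builtin_fbo set (the default from arcan_shmifext_defaults) a client
 * only needs arcan_shmifext_bind for drawing and then
 * arcan_shmifext_signal(.., SHMIFEXT_BUILTIN) to submit; with .builtin_fbo = 0
 * the client manages its own FBO and instead passes the GL texture id of its
 * color attachment as the tex_id argument to arcan_shmifext_signal.
 */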
528 
529 bool arcan_shmifext_drop(struct arcan_shmif_cont* con)
530 {
531 	if (!con ||
532 		!con->privext || !con->privext->internal ||
533 		!con->privext->internal->display ||
534 		(uintptr_t)con->privext->internal->display == SHARED_DISPLAY
535 	)
536 		return false;
537 
538 	struct shmif_ext_hidden_int* ctx = con->privext->internal;
539 
540 	agp_eglenv.make_current(ctx->display,
541 		EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
542 
543 	for (size_t i = 0; i < 64 && ctx->ctx_alloc; i++){
544 		if ((ctx->ctx_alloc & (1ULL << i))){
545 			ctx->ctx_alloc &= ~(1ULL << i);
546 			agp_eglenv.destroy_context(ctx->display, ctx->alt_contexts[i]);
547 			ctx->alt_contexts[i] = NULL;
548 		}
549 	}
550 
551 	ctx->context = NULL;
552 	gbm_drop(con);
553 	return true;
554 }
555 
556 bool arcan_shmifext_drop_context(struct arcan_shmif_cont* con)
557 {
558 	if (!con || !con->privext || !con->privext->internal ||
559 		!con->privext->internal->display)
560 		return false;
561 
562 /* might be a different context in TLS, so switch first */
563 	struct arcan_shmif_cont* old = active_context;
564 	if (active_context != con)
565 		arcan_shmifext_make_current(con);
566 
567 	struct shmif_ext_hidden_int* ctx = con->privext->internal;
568 
569 /* it's the caller's responsibility to switch in a new ctx, but right
570  * now, we're in a state where managed = true, though there's no context */
571 	if (ctx->context){
572 		agp_eglenv.make_current(ctx->display,
573 			EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
574 		agp_eglenv.destroy_context(ctx->display, ctx->context);
575 		ctx->context = NULL;
576 	}
577 
578 /* and restore */
579 	arcan_shmifext_make_current(old);
580 	return true;
581 }
582 
583 static void authenticate_fd(struct arcan_shmif_cont* con, int fd)
584 {
585 /* is it a render node or a real device? */
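/* render nodes (/dev/dri/renderD*) have minor numbers starting at 128, hence
 * the 0x80 test on st_rdev below; they need no DRM auth, while card nodes do,
 * so for those a drm magic is fetched and smuggled to the server through the
 * vpts field with the AUTH_TOK hint raised during a resize */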
586 	struct stat nodestat;
587 	if (0 == fstat(fd, &nodestat) && !(nodestat.st_rdev & 0x80)){
588 		unsigned magic;
589 		drmGetMagic(fd, &magic);
590 		atomic_store(&con->addr->vpts, magic);
591 		con->hints |= SHMIF_RHINT_AUTH_TOK;
592 		arcan_shmif_resize(con, con->w, con->h);
593 		con->hints &= ~SHMIF_RHINT_AUTH_TOK;
594 		magic = atomic_load(&con->addr->vpts);
595 	}
596 }
597 
598 void arcan_shmifext_bufferfail(struct arcan_shmif_cont* con, bool st)
599 {
600 	if (!con || !con->privext || !con->privext->internal)
601 		return;
602 
603 	con->privext->state_fl |= STATE_NOACCEL *
604 		(getenv("ARCAN_VIDEO_NO_FDPASS") ? 1 : st);
605 }
606 
607 int arcan_shmifext_dev(struct arcan_shmif_cont* con,
608 	uintptr_t* dev, bool clone)
609 {
610 	if (!con || !con->privext || !con->privext->internal)
611 		return -1;
612 
613 	struct shmif_ext_hidden_int* I = con->privext->internal;
614 
615 	if (dev)
616 		*dev = (uintptr_t)(void*)I->dev;
617 
618 /* see the note about device_path in shmifext_egl */
619 	if (clone){
620 		int fd = -1;
621 		if (I->device_path){
622 			fd = open(I->device_path, O_RDWR);
623 		}
624 
625 		if (-1 == fd)
626 			fd = arcan_shmif_dupfd(con->privext->active_fd, -1, true);
627 
628 /* this can soon be removed entirely, the cardN path is dying */
629 		if (-1 != fd)
630 			authenticate_fd(con, fd);
631 
632 		return fd;
633 	}
634 	else
635 	  return con->privext->active_fd;
636 }
637 
638 bool arcan_shmifext_gl_handles(struct arcan_shmif_cont* con,
639 	uintptr_t* frame, uintptr_t* color, uintptr_t* depth)
640 {
641 	if (!con || !con->privext || !con->privext->internal ||
642 		!con->privext->internal->display || !con->privext->internal->rtgt)
643 		return false;
644 
645 	agp_rendertarget_ids(con->privext->internal->rtgt, frame, color, depth);
646 	return true;
647 }
648 
649 bool arcan_shmifext_egl(struct arcan_shmif_cont* con,
650 	void** display, void*(*lookup)(void*, const char*), void* tag)
651 {
652 	if (!lookup || !con || !con->addr || !display)
653 		return false;
654 
655 	int dfd = -1;
656 
657 /* case for switching to another node, we're still missing a way to extract the
658  * 'real' library paths to the GL implementation and to the EGL implementation
659  * for dynamic- GPU switching */
660 	if (con->privext->pending_fd != -1){
661 		if (-1 != con->privext->active_fd){
662 			close(con->privext->active_fd);
663 			con->privext->active_fd = -1;
664 			gbm_drop(con);
665 		}
666 		dfd = con->privext->pending_fd;
667 		con->privext->pending_fd = -1;
668 	}
669 	else if (-1 != con->privext->active_fd){
670 		dfd = con->privext->active_fd;
671 	}
672 
673 /*
674  * Or first setup without a pending_fd, with the preroll state added it is
675  * likely that we no longer need this - if we don't get a handle in the preroll
676  * state we don't have extended graphics.
677  *
678  * There is a caveat to this in that some drivers expect the render node itself
679  * to not be shared onwards to other processes. This is relevant in bridging
680  * cases like for X11 and Wayland.
681  *
682  * In particular, AMDGPU may get triggered by this and fail on context creation
683  * in the 3rd party client - bailing with a cryptic message like "failed to
684  * create context".
685  *
686  * Thus, for the 'clone' case we need to remember the backing path and open a
687  * new node rather than trying to dup the descriptor.
688  */
689 	const char* nodestr = "/dev/dri/renderD128";
690 	if (!con->privext->internal){
691 		if (getenv("ARCAN_RENDER_NODE"))
692 			nodestr = getenv("ARCAN_RENDER_NODE");
693 		dfd = open(nodestr, O_RDWR | O_CLOEXEC);
694 	}
695 /* mode-switch is a no-op in init here, but we still may need
696  * to update function pointers due to possible context changes */
697 
698 	map_egl_functions(&agp_eglenv, lookup_fenv, tag);
699 	if (!agp_eglenv.initialize)
700 		return false;
701 
702 	if (-1 == dfd)
703 		return false;
704 
705 /* special cleanup to deal with gbm_device abstraction */
706 	con->privext->cleanup = gbm_drop;
707 	con->privext->active_fd = dfd;
708 	authenticate_fd(con, dfd);
709 
710 /* finally open device */
711 	if (!con->privext->internal){
712 		con->privext->internal = malloc(sizeof(struct shmif_ext_hidden_int));
713 		if (!con->privext->internal){
714 			gbm_drop(con);
715 			return false;
716 		}
717 
718 /* The 'no-fdpass' is forcing a manual readback as a way of probing if the
719  * rather fragile cross-process GPU sharing breaks. Normally this comes as
720  * an event that dynamically changes the same state_fl. This causes the
721  * signalling to revert to GPU-readback into SHM as a way of still getting
722  * the data across. */
723 		memset(con->privext->internal, '\0', sizeof(struct shmif_ext_hidden_int));
724 		con->privext->state_fl = STATE_NOACCEL * (getenv("ARCAN_VIDEO_NO_FDPASS") ? 1 : 0);
725 		con->privext->internal->device_path = strdup(nodestr);
726 
727 		if (NULL == (con->privext->internal->dev = gbm_create_device(dfd))){
728 			gbm_drop(con);
729 			return false;
730 		}
731 	}
732 
733 /* this needs the context to be initialized */
734 	map_eglext_functions(&agp_eglenv, lookup_fenv, tag);
735 	if (!agp_eglenv.destroy_image){
736 		gbm_drop(con);
737 		return false;
738 	}
739 
740 	*display = (void*) (con->privext->internal->dev);
741 	return true;
742 }
743 
744 bool arcan_shmifext_egl_meta(struct arcan_shmif_cont* con,
745 	uintptr_t* display, uintptr_t* surface, uintptr_t* context)
746 {
747 	if (!con || !con->privext || !con->privext->internal ||
748 		!con->privext->internal->display)
749 		return false;
750 
751 	struct shmif_ext_hidden_int* ctx = con->privext->internal;
752 	if (display)
753 		*display = (uintptr_t) ctx->display;
754 	if (surface)
755 		*surface = (uintptr_t) ctx->surface;
756 	if (context)
757 		*context = (uintptr_t) ctx->context;
758 
759 	return true;
760 }
761 
762 void arcan_shmifext_bind(struct arcan_shmif_cont* con)
763 {
764 /* need to resize both potential rendertarget destinations */
765 	if (!con || !con->privext || !con->privext->internal ||
766 		!con->privext->internal->display)
767 		return;
768 
769 	struct shmif_ext_hidden_int* ctx = con->privext->internal;
770 	if (active_context != con){
771 		arcan_shmifext_make_current(con);
772 	}
773 
774 /* with an internally managed rendertarget / swapchain, we try to resize on
775  * bind, this earlies out if the dimensions are already the same */
776 	if (ctx->rtgt){
777 		agp_resize_rendertarget(ctx->rtgt, con->w, con->h);
778 		agp_activate_rendertarget(ctx->rtgt);
779 	}
780 }
781 
782 void arcan_shmifext_free_color(
783 	struct arcan_shmif_cont* con, struct shmifext_color_buffer* in)
784 {
785 	if (!con || !in || !in->id.gl)
786 		return;
787 
788 	EGLDisplay* dpy = con->privext->internal->display;
789 
790 	struct agp_fenv* fenv = arcan_shmifext_getfenv(con);
791 	arcan_shmifext_make_current(con);
792 
793 	fenv->delete_textures(1, &in->id.gl);
794 	in->id.gl = 0;
795 
796 /* need to destroy the gbm-bo and egl image separately */
797 	agp_eglenv.destroy_image(dpy, in->alloc_tags[1]);
798 	gbm_bo_destroy(in->alloc_tags[0]);
799 	in->alloc_tags[0] = NULL;
800 	in->alloc_tags[1] = NULL;
801 }
802 
803 bool arcan_shmifext_alloc_color(
804 	struct arcan_shmif_cont* con, struct shmifext_color_buffer* out)
805 {
806 	struct gbm_bo* bo;
807 	EGLImage img;
808 	if (!con || !con->privext || !con->privext->internal)
809 		return false;
810 
811 	struct shmif_ext_hidden_int* ext = con->privext->internal;
812 	struct agp_fenv* fenv = arcan_shmifext_getfenv(con);
813 
814 /* and now EGL-image into out texture */
815 	int fmt = DRM_FORMAT_XRGB8888;
816 
817 	return
818 		helper_alloc_color(&agp_fenv, &agp_eglenv,
819 			ext->dev, ext->display, out, con->w, con->h,
820 			fmt, con->privext->state_fl,
821 			con->privext->n_modifiers, con->privext->modifiers
822 		);
823 }
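/*
 * rough pairing sketch (the same pattern scanout_alloc above relies on):
 * allocate a sharable color buffer, use out.id.gl as the GL texture / FBO
 * color attachment, then release it with arcan_shmifext_free_color:
 *
 *   struct shmifext_color_buffer buf;
 *   if (arcan_shmifext_alloc_color(con, &buf)){
 *     // attach buf.id.gl as the color target, render, export or scan out
 *     arcan_shmifext_free_color(con, &buf);
 *   }
 */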
824 
825 bool arcan_shmifext_make_current(struct arcan_shmif_cont* con)
826 {
827 	if (!con || !con->privext || !con->privext->internal ||
828 		!con->privext->internal->display)
829 		return false;
830 
831 	struct shmif_ext_hidden_int* ctx = con->privext->internal;
832 
833 	if (active_context != con){
834 		agp_eglenv.make_current(
835 			ctx->display, ctx->surface, ctx->surface, ctx->context);
836 		active_context = con;
837 	}
838 	arcan_shmifext_bind(con);
839 
840 	return true;
841 }
842 
843 size_t arcan_shmifext_export_image(
844 	struct arcan_shmif_cont* con,
845 	uintptr_t display, uintptr_t tex_id,
846 	size_t plane_limit, struct shmifext_buffer_plane* planes)
847 {
848 	if (!con || !con->addr || !con->privext || !con->privext->internal)
849 		return 0;
850 
851 	struct shmif_ext_hidden_int* ctx = con->privext->internal;
852 
853 /* built-in or provided egl-display? */
854 	EGLDisplay* dpy = display == 0 ?
855 		con->privext->internal->display : (EGLDisplay*) display;
856 
857 /* texture/FBO to egl image */
858 	EGLImage newimg = agp_eglenv.create_image(
859 		dpy,
860 		con->privext->internal->context,
861 		EGL_GL_TEXTURE_2D_KHR,
862 		(EGLClientBuffer)(tex_id), NULL
863 	);
864 
865 /* legacy - single plane / no-modifier version */
866 	if (!newimg)
867 		return 0;
868 
869 /* grab the metadata (number of planes, modifiers, ...) but also cap
870  * against both the caller limit (which might come from up high) and
871  * against a sanity check: 'more than 4 planes is suspicious' */
872 	int fourcc, nplanes;
873 	uint64_t modifiers;
874 	if (!agp_eglenv.query_image_format(dpy, newimg,
875 		&fourcc, &nplanes, &modifiers) || plane_limit < nplanes || nplanes > 4){
876 		agp_eglenv.destroy_image(dpy, newimg);
877 		return 0;
878 	}
879 
880 /* enough bugs have been experienced with the alpha- channel versions of
881  * these that a quick hack is better for the time being */
882 	if (fourcc == DRM_FORMAT_ARGB8888){
883 		fourcc = DRM_FORMAT_XRGB8888;
884 	}
885 
886 /* now grab the actual dma-buf, and repackage into our planes */
887 	EGLint strides[4] = {-1, -1, -1, -1};
888 	EGLint offsets[4] = {0, 0, 0, 0};
889 	EGLint fds[4] = {-1, -1, -1, -1};
890 
891 	if (!agp_eglenv.export_dmabuf(dpy, newimg,
892 		fds, strides, offsets) || strides[0] < 0){
893 		agp_eglenv.destroy_image(dpy, newimg);
894 		return 0;
895 	}
896 
897 	for (size_t i = 0; i < nplanes; i++){
898 		planes[i] = (struct shmifext_buffer_plane){
899 			.fd = fds[i],
900 			.fence = -1,
901 			.w = con->w,
902 			.h = con->h,
903 			.gbm.format = fourcc,
904 			.gbm.stride = strides[i],
905 			.gbm.offset = offsets[i],
906 			.gbm.mod_hi = (modifiers >> 32),
907 			.gbm.mod_lo = (modifiers & 0xffffffff)
908 		};
909 	}
910 
911 	agp_eglenv.destroy_image(dpy, newimg);
912 	return nplanes;
913 }
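/*
 * typical pairing, and roughly what arcan_shmifext_signal does internally:
 * export the current color buffer into planes and forward them; note that
 * arcan_shmifext_signal_planes closes the plane descriptors after sending.
 *
 *   struct shmifext_buffer_plane planes[4];
 *   size_t n = arcan_shmifext_export_image(con, 0, tex_id, 4, planes);
 *   if (n)
 *     arcan_shmifext_signal_planes(con, SHMIF_SIGVID, n, planes);
 */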
914 
915 /* legacy interface - only support one plane so wrap it around the new */
916 bool arcan_shmifext_gltex_handle(
917 	struct arcan_shmif_cont* cont,
918 	uintptr_t display, uintptr_t tex_id,
919  int* dhandle, size_t* dstride, int* dfmt)
920 {
921 	struct shmifext_buffer_plane plane = {0};
922 
923 	size_t n_planes =
924 		arcan_shmifext_export_image(cont, display, tex_id, 1, &plane);
925 
926 	*dfmt = plane.gbm.format;
927 	*dstride = plane.gbm.stride;
928 	*dhandle = plane.fd;
929 
930 	return n_planes == 1;
931 }
932 
933 int arcan_shmifext_isext(struct arcan_shmif_cont* con)
934 {
935 	if (!con || !con->privext || !con->privext->internal)
936 		return 0;
937 
938 	struct shmif_ext_hidden_int* ctx = con->privext->internal;
939 	if (!ctx->display)
940 		return 0;
941 
942 	if (con->privext->state_fl == STATE_NOACCEL)
943 		return 2;
944 	else
945 		return 1;
946 }
947 
948 size_t arcan_shmifext_signal_planes(
949 	struct arcan_shmif_cont* c,
950 	int mask,
951 	size_t n_planes,
952 	struct shmifext_buffer_plane* planes
953 )
954 {
955 	if (!c || !n_planes)
956 		return 0;
957 
958 /* missing - we need to track which vbuffers that are locked using
959  * the vmask and then on stepframe check when they are released so
960  * that we can release them back to the caller */
961 
962 	struct arcan_event ev = {
963 		.category = EVENT_EXTERNAL,
964 		.ext.kind = ARCAN_EVENT(BUFFERSTREAM)
965 	};
966 
967 	for (size_t i = 0; i < n_planes; i++){
968 /* missing - another edge case is that when we transfer one plane but run out
969  * of descriptor slots on the server-side, basically the sanest case then is to
970  * simply fake-inject an event with a buffer-fail so that the rest of the setup
971  * falls back to readback and wait for a reset or device-hint to rebuild */
972 		if (!arcan_pushhandle(planes[i].fd, c->epipe))
973 			return i;
974 		close(planes[i].fd);
975 
976 /* missing - the gpuid should be set based on what gpu the context is assigned
977  * to based on initial/device-hint - this is to make sure that we don't commit
978  * buffers to something that was not intended (particularly during hand-over)
979  * */
980 		ev.ext.bstream.stride = planes[i].gbm.stride;
981 		ev.ext.bstream.format = planes[i].gbm.format;
982 		ev.ext.bstream.mod_lo = planes[i].gbm.mod_lo;
983 		ev.ext.bstream.mod_hi = planes[i].gbm.mod_hi;
984 		ev.ext.bstream.offset = planes[i].gbm.offset;
985 		ev.ext.bstream.width  = planes[i].w;
986 		ev.ext.bstream.height = planes[i].h;
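/* 'left' tells the server how many planes remain after this one, so it knows
 * when the buffer stream for the current frame is complete */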
987 		ev.ext.bstream.left = n_planes - i - 1;
988 
989 		arcan_shmif_enqueue(c, &ev);
990 	}
991 
992 	arcan_shmif_signal(c, mask);
993 	return n_planes;
994 }
995 
996 int arcan_shmifext_signal(struct arcan_shmif_cont* con,
997 	uintptr_t display, int mask, uintptr_t tex_id, ...)
998 {
999 	if (!con || !con->addr || !con->privext || !con->privext->internal)
1000 		return -1;
1001 
1002 	struct shmif_ext_hidden_int* ctx = con->privext->internal;
1003 
1004 	EGLDisplay* dpy;
1005 	if (display){
1006 		dpy = (EGLDisplay)((void*) display);
1007 	}
1008 	else if ((uintptr_t)ctx->display == SHARED_DISPLAY && active_context){
1009 		dpy = active_context->privext->internal->display;
1010 	}
1011 	else
1012 		dpy = ctx->display;
1013 
1014 	if (!dpy)
1015 		return -1;
1016 
1017 /* swap and forward the state of the builtin- rendertarget or the latest
1018  * imported buffer depending on how the context was configured */
1019 	if (tex_id == SHMIFEXT_BUILTIN){
1020 		if (!ctx->rtgt)
1021 			return -1;
1022 
1023 		agp_activate_rendertarget(NULL);
1024 		glFinish();
1025 
1026 		bool swap;
1027 		struct agp_vstore* vs = agp_rendertarget_swap(ctx->rtgt, &swap);
1028 		if (!swap)
1029 			return INT_MAX;
1030 
1031 		tex_id = vs->vinf.text.glid;
1032 	}
1033 
1034 /* begin extraction of the currently rendered-to buffer */
1035 	size_t nplanes;
1036 	struct shmifext_buffer_plane planes[4];
1037 
1038 	if (con->privext->state_fl & STATE_NOACCEL ||
1039 			!(nplanes=arcan_shmifext_export_image(con, display, tex_id, 4, planes)))
1040 		goto fallback;
1041 
1042 	arcan_shmifext_signal_planes(con, mask, nplanes, planes);
1043 	return INT_MAX;
1044 
1045 
1046 /* handle-passing is disabled or broken, instead perform a manual readback into
1047  * the shared memory segment and signal like a normal buffer */
1048 fallback:
1049 	if (1){
1050 		struct agp_vstore vstore = {
1051 			.w = con->w,
1052 			.h = con->h,
1053 			.txmapped = TXSTATE_TEX2D,
1054 			.vinf.text = {
1055 				.glid = tex_id,
1056 				.raw = (void*) con->vidp
1057 			},
1058 		};
1059 		agp_readback_synchronous(&vstore);
1060 	}
1061 
1062 	unsigned res = arcan_shmif_signal(con, mask);
1063 	return res > INT_MAX ? INT_MAX : res;
1064 }
1065