1 /*
2  * Copyright 2014-2021, Björn Ståhl
3  * License: 3-Clause BSD, see COPYING file in arcan source repository.
4  * Reference: http://arcan-fe.com
5  * Description: Platform that draws to an arcan display server using the shmif.
6  * Multiple displays are simulated when we explicitly get a subsegment pushed
7  * to us although they only work with the agp readback approach currently.
8  *
9  * This is not a particularly good 'arcan-client' in the sense that some
10  * event mapping and other behaviors is still being plugged in.
11  *
12  * Some things of note:
13  *
14  *  1. a pushed subsegment is treated as a new 'display'
15  *  2. custom-requested subsegments (display.sub[]) are treated as recordtargets
16  *     which, in principle, is fine - but currently transfers using readback and
17  *     copy, not accelerated buffer transfers and rotating backing stores.
18  *  3. more considerations need to happen with events that gets forwarded into
19  *     the events that go to the scripting layer handler
20  *  4. dirty-regions don't propagate in signal
21  *  5. resizing the rendertarget bound to a subsegment is also painful
22  *
23  */
24 #include <stdint.h>
25 #include <stdbool.h>
26 #include <unistd.h>
27 #include <strings.h>
28 #include <stdio.h>
29 #include <sys/types.h>
30 #include <poll.h>
31 #include <setjmp.h>
32 #include <math.h>
33 #include <stdatomic.h>
34 
35 extern jmp_buf arcanmain_recover_state;
36 
37 #include "../video_platform.h"
38 #include "../agp/glfun.h"
39 
40 #define VIDEO_PLATFORM_IMPL
41 #include "../../engine/arcan_conductor.h"
42 
43 #define WANT_ARCAN_SHMIF_HELPER
44 #include "arcan_shmif.h"
45 #include "arcan_math.h"
46 #include "arcan_general.h"
47 #include "arcan_audio.h"
48 #include "arcan_video.h"
49 #include "arcan_event.h"
50 #include "arcan_videoint.h"
51 #include "arcan_renderfun.h"
52 #include "../../engine/arcan_lua.h"
53 
54 #include "../EGL/egl.h"
55 #include "../EGL/eglext.h"
56 
57 /* shmifext doesn't expose these - and to run correctly nested we need to be
58  * able to import buffers from ourselves. While there is no other platform to
59  * grab from, just use this. */
60 #ifdef EGL_DMA_BUF
61 #include <sys/types.h>
62 #include <sys/stat.h>
63 #include <fcntl.h>
64 #include <sys/ioctl.h>
65 #include <drm.h>
66 #include <drm_mode.h>
67 #include <drm_fourcc.h>
68 
69 #include <xf86drm.h>
70 #include <xf86drmMode.h>
71 #include <gbm.h>
72 
73 #include "../egl-dri/egl.h"
74 #include "../egl-dri/egl_gbm_helper.h"
75 
76 static struct egl_env agp_eglenv;
77 
78 #endif
79 
80 #ifdef _DEBUG
81 #define DEBUG 1
82 #else
83 #define DEBUG 0
84 #endif
85 
86 #define debug_print(fmt, ...) \
87             do { if (DEBUG) arcan_warning("%lld:%s:%d:%s(): " fmt "\n",\
88 						arcan_timemillis(), "platform-arcan:", __LINE__, __func__,##__VA_ARGS__); } while (0)
89 
90 #ifndef verbose_print
91 #define verbose_print
92 #endif
93 
/* no input-platform specific environment options for LWA */
static char* input_envopts[] = {
	NULL
};

/* key/description pairs exposed via platform_video_envopts() */
static char* video_envopts[] = {
	"ARCAN_RENDER_NODE=/path/to/dev", "(env only) override accelerated GPU device",
	"ARCAN_VIDEO_NO_FDPASS=1", "(env only) disable handle passing and force shm/readback",
	NULL
};

/* single 'dynamic' mode: actual dimensions track the shmif segment, the
 * width/height/refresh here only act as an initial placeholder */
static struct monitor_mode mmodes[] = {
	{
		.id = 0,
		.width = 640,
		.height = 480,
		.refresh = 60,
		.depth = sizeof(av_pixel) * 8,
		.dynamic = true
	},
};
114 
#define MAX_DISPLAYS 8

/* custom-requested subsegment acting as an output (see file header, note 2),
 * only tracked on the first display */
struct subseg_output {
	int id;
	uintptr_t cbtag;
	arcan_vobj_id vid;
	struct arcan_shmif_cont con;
	struct arcan_luactx* ctx;
};

/* one simulated display per shmif (sub)segment */
struct display {
	struct arcan_shmif_cont conn;
	size_t decay;
	bool pending; /* frame signalled, waiting for STEPFRAME ack */
	bool mapped, visible, focused, nopass;
	enum dpms_state dpms;
	struct agp_vstore* vstore; /* mapped source store, or NULL */
	float ppcm;
	int id;

/* only used for first display */
	uint8_t subseg_alloc;
	struct subseg_output sub[8];
} disp[MAX_DISPLAYS] = {0};

static struct arg_arr* shmarg;
static bool event_process_disp(arcan_evctx* ctx, struct display* d);

/* set as conn.user on the primary segment; the magic value lets us tell
 * our own display contexts apart from foreign shmif user data */
static struct {
	uint64_t magic;
	bool signal_pending;
	volatile uint8_t resize_pending;
} primary_udata = {
	.magic = 0xfeedface
};
150 
/* nothing to do before init; the shmif connection is deferred to _init */
void platform_video_preinit()
{
}
154 
/* reset/rebuild the device state for [id], optionally swapping device */
void platform_video_reset(int id, int swap)
{
/*
 * no-op on this platform as the DEVICEHINT argument is responsible
 * for swapping out the accelerated device target
 */
}
162 
scanout_alloc(struct agp_rendertarget * tgt,struct agp_vstore * vs,int action,void * tag)163 static bool scanout_alloc(
164 	struct agp_rendertarget* tgt, struct agp_vstore* vs, int action, void* tag)
165 {
166 	struct agp_fenv* env = agp_env();
167 	struct arcan_shmif_cont* conn = tag;
168 
169 	if (action == RTGT_ALLOC_FREE){
170 		struct shmifext_color_buffer* buf =
171 			(struct shmifext_color_buffer*) vs->vinf.text.handle;
172 
173 		if (buf)
174 			arcan_shmifext_free_color(conn, buf);
175 		else
176 			env->delete_textures(1, &vs->vinf.text.glid);
177 
178 		vs->vinf.text.handle = 0;
179 		vs->vinf.text.glid = 0;
180 
181 		return true;
182 	}
183 
184 /* this will give a color buffer that is suitable for FBO and sharing */
185 	struct shmifext_color_buffer* buf =
186 		malloc(sizeof(struct shmifext_color_buffer));
187 
188 	if (!arcan_shmifext_alloc_color(conn, buf)){
189 		agp_empty_vstore(vs, vs->w, vs->h);
190 		free(buf);
191 		return true;
192 	}
193 
194 /* remember the metadata for free later */
195 	vs->vinf.text.glid = buf->id.gl;
196 	vs->vinf.text.handle = (uintptr_t) buf;
197 
198 	return true;
199 }
200 
lookup_egl(void * tag,const char * sym,bool req)201 static void* lookup_egl(void* tag, const char* sym, bool req)
202 {
203 	void* res = arcan_shmifext_lookup((struct arcan_shmif_cont*) tag, sym);
204 	if (!res && req)
205 		arcan_fatal("agp lookup(%s) failed, missing req. symbol.\n", sym);
206 	return res;
207 }
208 
platform_video_init(uint16_t width,uint16_t height,uint8_t bpp,bool fs,bool frames,const char * title)209 bool platform_video_init(uint16_t width, uint16_t height, uint8_t bpp,
210 	bool fs, bool frames, const char* title)
211 {
212 	static bool first_init = true;
213 /* can happen in the context of suspend/resume etc. */
214 	if (!first_init)
215 		return true;
216 
217 	uintptr_t config_tag;
218 	cfg_lookup_fun in_config = platform_config_lookup(&config_tag);
219 
220 	for (size_t i = 0; i < MAX_DISPLAYS; i++){
221 		disp[i].id = i;
222 		disp[i].nopass = in_config("video_no_fdpass", 0, NULL, config_tag);
223 	}
224 
225 /* respect the command line provided dimensions if set, otherwise just
226  * go with whatever defaults we get from the activate- phase */
227 	int flags = 0;
228 	if (width > 32 && height > 32)
229 		flags |= SHMIF_NOACTIVATE_RESIZE;
230 
231 	struct arg_arr* shmarg;
232 	disp[0].conn = arcan_shmif_open_ext(flags, &shmarg, (struct shmif_open_ext){
233 		.type = SEGID_LWA, .title = title,}, sizeof(struct shmif_open_ext)
234 	);
235 
236 	if (!disp[0].conn.addr){
237 		arcan_warning("lwa_video_init(), couldn't connect. Check ARCAN_CONNPATH and"
238 			" make sure a normal arcan instance is running\n");
239 		return false;
240 	}
241 
242 	struct arcan_shmif_initial* init;
243 	if (sizeof(struct arcan_shmif_initial) != arcan_shmif_initial(
244 		&disp[0].conn, &init)){
245 		arcan_warning("lwa_video_init(), initial structure size mismatch, "
246 			"out-of-synch header/shmif lib\n");
247 		return NULL;
248 	}
249 
250 /*
251  * we want to do our own output texture handling since it depends on
252  * how the arcan-space display is actually mapped
253  */
254 	enum shmifext_setup_status status;
255 	struct arcan_shmifext_setup defs = arcan_shmifext_defaults(&disp[0].conn);
256 	defs.builtin_fbo = false;
257 
258 	if ((status =
259 		arcan_shmifext_setup(&disp[0].conn, defs)) != SHMIFEXT_OK){
260 		arcan_warning("lwa_video_init(), couldn't setup headless graphics\n"
261 			"\t error code: %d\n", status);
262 		arcan_shmif_drop(&disp[0].conn);
263 		return false;
264 	}
265 
266 	disp[0].conn.user = &primary_udata;
267 	arcan_shmif_setprimary(SHMIF_INPUT, &disp[0].conn);
268 	arcan_shmifext_make_current(&disp[0].conn);
269 
270 #ifdef EGL_DMA_BUF
271 	map_egl_functions(&agp_eglenv, lookup_egl, &disp[0].conn);
272 	map_eglext_functions(&agp_eglenv, lookup_egl, &disp[0].conn);
273 #endif
274 
275 /*
276  * switch rendering mode since our coordinate system differs
277  */
278 	disp[0].conn.hints = SHMIF_RHINT_ORIGO_LL | SHMIF_RHINT_VSIGNAL_EV;
279 	arcan_shmif_resize(&disp[0].conn,
280 		width > 0 ? width : disp[0].conn.w,
281 		height > 0 ? height : disp[0].conn.h
282 	);
283 
284 /*
285  * map the provided initial values to match width/height, density,
286  * font size and so on.
287  */
288 	if (init->fonts[0].fd != -1){
289 		size_t pt_sz = (init->fonts[0].size_mm * 2.8346456693f);
290 		arcan_video_defaultfont("arcan-default", init->fonts[0].fd,
291 			pt_sz, init->fonts[0].hinting, 0);
292 		init->fonts[0].fd = -1;
293 
294 		if (init->fonts[1].fd != -1){
295 		size_t pt_sz = (init->fonts[1].size_mm * 2.8346456693f);
296 			arcan_video_defaultfont("arcan-default", init->fonts[1].fd,
297 				pt_sz, init->fonts[1].hinting, 1
298 			);
299 			init->fonts[1].fd = -1;
300 		}
301 	}
302 	disp[0].mapped = true;
303 	disp[0].ppcm = init->density;
304 	disp[0].dpms = ADPMS_ON;
305 	disp[0].visible = true;
306 	disp[0].focused = true;
307 
308 /* we provide our own cursor that is blended in the output, this might change
309  * when we allow map_ as layers, then we treat those as subsegments and
310  * viewport */
311 	arcan_shmif_enqueue(&disp[0].conn, &(struct arcan_event){
312 		.category = EVENT_EXTERNAL,
313 		.ext.kind = ARCAN_EVENT(CURSORHINT),
314 		.ext.message = "hidden"
315 	});
316 
317 	first_init = false;
318 	return true;
319 }
320 
321 /*
322  * These are just direct maps that will be statically sucked in
323  */
platform_video_shutdown()324 void platform_video_shutdown()
325 {
326 	for (size_t i=0; i < MAX_DISPLAYS; i++)
327 		if (disp[i].conn.addr){
328 			arcan_shmif_drop(&disp[i].conn);
329 			disp[i] = (struct display){};
330 		}
331 }
332 
platform_video_decay()333 size_t platform_video_decay()
334 {
335 	size_t decay = 0;
336 	for (size_t i = 0; i < MAX_DISPLAYS; i++){
337 		if (disp[i].decay > decay)
338 			decay = disp[i].decay;
339 		disp[i].decay = 0;
340 	}
341 	return decay;
342 }
343 
platform_video_displays(platform_display_id * dids,size_t * lim)344 size_t platform_video_displays(platform_display_id* dids, size_t* lim)
345 {
346 	size_t rv = 0;
347 
348 	for (size_t i = 0; i < MAX_DISPLAYS; i++){
349 		if (!disp[i].conn.vidp)
350 			continue;
351 
352 		if (dids && lim && *lim < rv)
353 			dids[rv] = disp[i].id;
354 		rv++;
355 	}
356 
357 	if (lim)
358 		*lim = MAX_DISPLAYS;
359 
360 	return rv;
361 }
362 
platform_video_auth(int cardn,unsigned token)363 bool platform_video_auth(int cardn, unsigned token)
364 {
365 	TRACE_MARK_ONESHOT("video", "authenticate", TRACE_SYS_DEFAULT, 0, 0, "lwa");
366 
367 	if (cardn < MAX_DISPLAYS && disp[cardn].conn.addr){
368 		disp[cardn].conn.hints |= SHMIF_RHINT_AUTH_TOK;
369 		atomic_store(&disp[cardn].conn.addr->vpts, token);
370 		arcan_shmif_resize(&disp[cardn].conn,
371 			disp[cardn].conn.w, disp[cardn].conn.h);
372 		disp[cardn].conn.hints &= ~SHMIF_RHINT_AUTH_TOK;
373 		return true;
374 	}
375 	return false;
376 }
377 
/* gamma ramps are not forwarded to the outer arcan instance */
bool platform_video_set_display_gamma(platform_display_id did,
	size_t n_ramps, uint16_t* r, uint16_t* g, uint16_t* b)
{
	return false;
}
383 
/* gamma ramps are not retrievable when running nested */
bool platform_video_get_display_gamma(platform_display_id did,
	size_t* n_ramps, uint16_t** outb)
{
	return false;
}
389 
/* no EDID for simulated displays; always yields an empty result */
bool platform_video_display_edid(platform_display_id did,
	char** out, size_t* sz)
{
	*out = NULL;
	*sz = 0;
	return false;
}
397 
/* release GPU resources ahead of handing control to an external process */
void platform_video_prepare_external()
{
/* comes with switching in AGP, should give us card- switching
 * as well (and will be used as test case for that) */
	TRACE_MARK_ENTER("video", "external-handover", TRACE_SYS_DEFAULT, 0, 0, "");
}
404 
/* reclaim GPU resources after an external handover ends */
void platform_video_restore_external()
{
	TRACE_MARK_EXIT("video", "external-handover", TRACE_SYS_DEFAULT, 0, 0, "");
}
409 
/* resolve a GL/EGL symbol through the primary segment's GPU library */
void* platform_video_gfxsym(const char* sym)
{
	return arcan_shmifext_lookup(&disp[0].conn, sym);
}
414 
/* device/render-node handle for client forwarding - not provided yet */
int platform_video_cardhandle(int cardn, int* method, size_t* msz, uint8_t** dbuf)
{
/* this should be retrievable from the shmifext- connection so that we can
 * forward to any client we set up */
	return -1;
}
421 
/* keyboard layout/translation control is handled by the outer instance */
bool platform_event_translation(int devid,
	int action, const char** names, const char** err)
{
	*err = "Not Supported";
	return false;
}
428 
/* no direct device-node access when running nested */
int platform_event_device_request(int space, const char* path)
{
	return -1;
}
433 
/* analog sample rebasing is a no-op; samples arrive pre-processed */
void platform_event_samplebase(int devid, float xyz[3])
{
}
437 
/* expose the video_envopts key/description table (NULL terminated) */
const char** platform_video_envopts()
{
	return (const char**) video_envopts;
}
442 
/* expose the (empty) input_envopts table (NULL terminated) */
const char** platform_event_envopts()
{
	return (const char**) input_envopts;
}
447 
/* linear search for [mode] in the static mode table, NULL if absent */
static struct monitor_mode* get_platform_mode(platform_mode_id mode)
{
	size_t count = sizeof(mmodes) / sizeof(mmodes[0]);

	for (size_t ind = 0; ind < count; ind++)
		if (mmodes[ind].id == mode)
			return &mmodes[ind];

	return NULL;
}
457 
/*
 * Resize the shmif segment backing [id] to match [mode]. The segment is
 * locked for the duration since a resize can race against the audio side
 * and crash-triggered migration.
 */
bool platform_video_specify_mode(
	platform_display_id id, struct monitor_mode mode)
{
	if (!(id < MAX_DISPLAYS && disp[id].conn.addr)){
		verbose_print("rejected bad id/connection (%d)", (int) id);
		return false;
	}

	primary_udata.resize_pending = 1;

/* audio rejects */
	if (!arcan_shmif_lock(&disp[id].conn)){
/* previously the resize_pending flag leaked (stayed set) on this path */
		primary_udata.resize_pending = 0;
		return false;
	}

/* a crash during resize will trigger migration that might trigger _drop that might lock */
	bool rz = arcan_shmif_resize(&disp[id].conn, mode.width, mode.height);
	if (!rz){
		verbose_print("display "
			"id rejected resize (%d) => %zu*%zu",(int)id, mode.width, mode.height);
	}

	TRACE_MARK_ONESHOT("video", "resize-display", TRACE_SYS_DEFAULT, id, mode.width * mode.height, "");
	arcan_shmif_unlock(&disp[id].conn);
	primary_udata.resize_pending = 0;

	return rz;
}
486 
platform_video_dimensions()487 struct monitor_mode platform_video_dimensions()
488 {
489 	struct monitor_mode mode = {
490 		.width = disp[0].conn.w,
491 		.height = disp[0].conn.h,
492 	};
493 	mode.phy_width = (float)mode.width / disp[0].ppcm * 10.0;
494 	mode.phy_height = (float)mode.height / disp[0].ppcm * 10.0;
495 
496 	return mode;
497 }
498 
/* resolve [newmode] in the mode table and apply it to [id] */
bool platform_video_set_mode(
	platform_display_id id, platform_mode_id newmode, struct platform_mode_opts opts)
{
	struct monitor_mode* found = get_platform_mode(newmode);
	if (!found)
		return false;

	verbose_print("set mode on (%d) to %zu*%zu",
		(int) id, found->width, found->height);

	return platform_video_specify_mode(id, *found);
}
511 
/* make sure the segment for [id] matches the dimensions of its mapped
 * backing store (or the world store), resizing the output if needed */
static bool check_store(platform_display_id id)
{
	struct agp_vstore* vs =
		disp[id].vstore ? disp[id].vstore : arcan_vint_world();

	if (!vs)
		return false;

	if (vs->w == disp[id].conn.w && vs->h == disp[id].conn.h)
		return true;

	struct monitor_mode mm = {
		.width = vs->w,
		.height = vs->h
	};

	if (!platform_video_specify_mode(id, mm)){
		arcan_warning("platform_video_map_display(), attempt to switch "
			"display output mode to match backing store failed.\n");
		return false;
	}

	return true;
}
530 
/* legacy single-layer mapping entry point, forwards to the layer API */
bool platform_video_map_display(
	arcan_vobj_id vid, platform_display_id id, enum blitting_hint hint)
{
	struct display_layer_cfg layer = {
		.hint = hint,
		.opacity = 1.0
	};

	ssize_t ind = platform_video_map_display_layer(vid, id, 0, layer);

	return ind >= 0;
}
541 
/* dirty-region invalidation on a mapped store */
void platform_video_invalidate_map(
	struct agp_vstore* vstore, struct agp_region region)
{
/* NOP for the time being - might change for direct forwarding of client */
}
547 
548 /*
549  * Two things that are currently wrong with this approach to mapping:
550  * 1. hint is ignored entirely, mapping mode is just based on WORLDID
551  * 2. the texture coordinates of the source are not being ignored.
552  *
553  * For these to be solved, we need to extend the full path of shmif rhints
554  * to cover all possible mapping modes, and a on-gpu rtarget- style blit
555  * with extra buffer or partial synch and VIEWPORT events.
556  */
/*
 * Map [vid] (WORLDID, EID to unmap, or a texture-backed object) as the
 * source for display [id]. Returns the layer index (always 0 here) on
 * success, -1 on failure.
 */
ssize_t platform_video_map_display_layer(arcan_vobj_id vid,
	platform_display_id id, size_t layer_index, struct display_layer_cfg cfg)
{
/* id == MAX_DISPLAYS would index one past the end of disp[]; the
 * original test used > rather than >= */
	if (id >= MAX_DISPLAYS)
		return -1;

	if (!disp[id].conn.addr){
		arcan_warning("platform_video_map_display_layer(), "
			"attempt to map unconnected display.\n");
		return -1;
	}

/* release any previous mapping, but never drop the world store */
	if (disp[id].vstore){
		if (disp[id].vstore != arcan_vint_world()){
			arcan_vint_drop_vstore(disp[id].vstore);
			disp[id].vstore = NULL;
		}
	}

	disp[id].mapped = false;

	if (vid == ARCAN_VIDEO_WORLDID){
		if (!arcan_vint_world())
			return -1;

/* world is rendered lower-left origin, so hint accordingly */
		disp[id].conn.hints = SHMIF_RHINT_ORIGO_LL;
		disp[id].vstore = arcan_vint_world();
		disp[id].mapped = true;
		return 0;
	}
	else if (vid == ARCAN_EID)
		return 0;
	else{
		arcan_vobject* vobj = arcan_video_getobject(vid);
		if (vobj == NULL){
			arcan_warning("platform_video_map_display(), "
				"attempted to map a non-existing video object");
			return 0;
		}

		if (vobj->vstore->txmapped != TXSTATE_TEX2D){
			arcan_warning("platform_video_map_display(), "
				"attempted to map a video object with an invalid backing store");
			return 0;
		}

		disp[id].conn.hints = 0;
		disp[id].vstore = vobj->vstore;
	}

/*
 * enforce display size constraint, this wouldn't be necessary
 * when doing a buffer passing operation
 */
	if (!check_store(id))
		return -1;

	disp[id].vstore->refcount++;
	disp[id].mapped = true;

	return 0;
}
620 
/* all displays share the single static mode table */
struct monitor_mode* platform_video_query_modes(
	platform_display_id id, size_t* count)
{
	*count = sizeof(mmodes) / sizeof(mmodes[0]);

	return mmodes;
}
628 
/* no-op: displays appear/disappear via pushed subsegments, not probing */
void platform_video_query_displays()
{
}
632 
conn_egl_display(struct arcan_shmif_cont * con)633 static EGLDisplay conn_egl_display(struct arcan_shmif_cont* con)
634 {
635 	uintptr_t display;
636 	if (!arcan_shmifext_egl_meta(&disp[0].conn, &display, NULL, NULL))
637 		return NULL;
638 
639 	EGLDisplay disp = (EGLDisplay) display;
640 	return disp;
641 }
642 
/*
 * Import a set of dma-buf planes as the backing of [vs] by building an
 * EGLImage and binding it to the store's texture. Only available when
 * built with EGL_DMA_BUF; returns false otherwise or on import failure.
 */
bool platform_video_map_buffer(
	struct agp_vstore* vs, struct agp_buffer_plane* planes, size_t n)
{
#ifdef EGL_DMA_BUF
	struct agp_fenv* fenv = arcan_shmifext_getfenv(&disp[0].conn);
	EGLDisplay egldpy = conn_egl_display(&disp[0].conn);
	if (!egldpy)
		return false;

	EGLImage img = helper_dmabuf_eglimage(
		fenv, &agp_eglenv, egldpy, (struct shmifext_buffer_plane*) planes, n);

	if (!img){
		debug_print("buffer import failed");
		return false;
	}

/* might have an old eglImage around - destroy it against the EGLDisplay
 * it was created on (the original passed the global display array here) */
	if (0 != vs->vinf.text.tag){
		agp_eglenv.destroy_image(egldpy, (EGLImageKHR) vs->vinf.text.tag);
	}

	vs->w = planes[0].w;
	vs->h = planes[0].h;
	vs->bpp = sizeof(shmif_pixel);
	vs->txmapped = TXSTATE_TEX2D;

	agp_activate_vstore(vs);
		agp_eglenv.image_target_texture2D(GL_TEXTURE_2D, img);
	agp_deactivate_vstore(vs);

	vs->vinf.text.tag = (uintptr_t) img;
	return true;
#endif
	return false;
}
679 
680 /*
681  * Need to do this manually here so that when we run nested, we are still able
682  * to import data from clients that give us buffers. When/ if we implement the
683  * same mechanism on OSX, Windows and Android, the code should probably be
684  * moved to another shared platform path
685  */
platform_video_map_handle(struct agp_vstore * dst,int64_t handle)686 bool platform_video_map_handle(struct agp_vstore* dst, int64_t handle)
687 {
688 	uint64_t invalid =
689 #ifdef EGL_DMA_BUF
690 		DRM_FORMAT_MOD_INVALID;
691 #else
692 	-1;
693 #endif
694 	uint32_t hi = invalid >> 32;
695 	uint32_t lo = invalid & 0xffffffff;
696 
697 /* special case, destroy the backing image */
698 	if (-1 == handle){
699 #ifdef EGL_DMA_BUF
700 		agp_eglenv.destroy_image(
701 			conn_egl_display(&disp[0].conn), (EGLImage) dst->vinf.text.tag);
702 #endif
703 		dst->vinf.text.tag = 0;
704 		return true;
705 	}
706 
707 	struct agp_buffer_plane plane = {
708 		.fd = handle,
709 		.gbm = {
710 	#ifdef EGL_DMA_BUF
711 			.mod_hi = DRM_FORMAT_MOD_INVALID >> 32,
712 			.mod_lo = DRM_FORMAT_MOD_INVALID & 0xffffffff,
713 	#endif
714 			.offset = 0,
715 			.stride = dst->vinf.text.stride,
716 			.format = dst->vinf.text.format
717 		}
718 	};
719 
720 	return platform_video_map_buffer(dst, &plane, 1);
721 }
722 
723 /*
724  * we use a deferred stub here to avoid having the headless platform
725  * sync function generate bad statistics due to our two-stage synch
726  * process
727  */
/* intentionally empty, see comment above */
static void stub()
{
}
731 
732 /*
733  * This one is undoubtedly slow and only used when the other side
734  * is network-remote, or otherwise distrust our right to submit GPU
735  * buffers, two adjustments could be made for making it less painful:
736  *
737  *  1. switch context to n-buffered state
738  *  2. setup asynchronous readback and change to polling state for the
739  *     backing store
740  *
741  * It might also apply when the source contents is an external client
742  * (as we have no way of duplicating a dma-buf and takes a reblit pass)
743  *
744  * another valuable high-GL optimization would be to pin the object
745  * memory to the mapped base address along with 2 and have a futex-
746  * trigger thread that forwards the buffer then.
747  */
/* readback the backing store straight into the shared memory page of
 * [disp] and signal a video frame; the fallback when handle passing is
 * disabled or not possible (see comment block above) */
static void synch_copy(struct display* disp, struct agp_vstore* vs)
{
	check_store(disp->id);
/* shallow copy with raw retargeted so the readback lands in the vidp */
	struct agp_vstore store = *vs;
	store.vinf.text.raw = disp->conn.vidp;

	TRACE_MARK_ENTER("video", "copy-blit", TRACE_SYS_SLOW, 0, 0, "");
		agp_readback_synchronous(&store);
		arcan_shmif_signal(&disp->conn, SHMIF_SIGVID | SHMIF_SIGBLK_NONE);
/* block further submission until the STEPFRAME ack arrives */
		disp->pending = true;
		arcan_conductor_deadline(4);
	TRACE_MARK_EXIT("video", "copy-blit", TRACE_SYS_SLOW, 0, 0, "");
}
761 
762 /*
763  * The synch code here is rather rotten and should be reworked in its entirety
764  * in a bit. It is hinged on a few refactors however:
765  *
766  *  1. proper explicit fencing and pipeline semaphores.
767  *  2. screen deadline propagation.
768  *  3. per rendertarget invalidation and per rendertarget scanout.
769  *  4. rendertarget reblitter helper.
770  *
771  * This is to support both variable throughput, deadline throughput and
772  * presentation time accurate rendering where the latency in the pipeline is
773  * compensated for in animations and so on.
774  *
775  * The best configuration 'test' for this is to have two displays attached with
776  * each being on vastly different synch targets, e.g. 48hz and 140hz or so and
777  * both resizing.
778  */
/*
 * Main synch entry: wait out any pending frame acks, refresh the
 * pipeline, then either signal each mapped display through accelerated
 * buffer export or fall back to readback+copy. See the comment block
 * above for the planned rework.
 */
void platform_video_synch(
	uint64_t tick_count, float fract, video_synchevent pre, video_synchevent post)
{
/* Check back in a little bit, this is where the event_process, and vframe sig.
 * should be able to help. Setting the conductor display to the epipe and */
	while (primary_udata.signal_pending){
		struct conductor_display d = {
			.fd = disp[0].conn.epipe,
			.refresh = -1,
		};

		int ts = arcan_conductor_yield(&d, 1);
		platform_event_process(arcan_event_defaultctx());

/* the event processing while yielding / waiting for synch can reach EXIT and
 * then we should refuse to continue regardless */
		if (!disp[0].conn.vidp)
			return;

		if (primary_udata.signal_pending && ts > 0)
			arcan_timesleep(ts);
	}

	if (pre)
		pre();

/* first frame, fake rendertarget_swap so the first frame doesn't get lost into
 * the vstore that doesn't get hidden buffers, for the rest display mapped
 * rendertargets case we can do that on-map */
	static bool got_frame;
	if (!got_frame){
		bool swap;
		got_frame = true;
		verbose_print("first-frame swap");
		agp_rendertarget_allocator(
			arcan_vint_worldrt(), scanout_alloc, &disp[0].conn);
		agp_rendertarget_swap(arcan_vint_worldrt(), &swap);
	}

	static size_t last_nupd;
	size_t nupd;

/* run the refresh pass; nupd counts objects that actually changed */
	unsigned cost = arcan_vint_refresh(fract, &nupd);

/* nothing to do, yield with the timestep the conductor prefers - (fake 60hz
 * now until the shmif_deadline communication is more robust, then we can just
 * plug the next deadline and the conductor will wake us accordingly. */
	if (!nupd){
		TRACE_MARK_ONESHOT("video", "synch-stall", TRACE_SYS_SLOW, 0, 0, "nothing to do");
		verbose_print("skip frame");

/* mark it as safe to process events for each display and then allow stepframe
 * signal to break out of the block */
		arcan_conductor_deadline(4);
		goto pollout;
	}
/* so we have a buffered frame but this one didn't cause any updates,
 * force an update (could pretty much just reblit / swap but...) */
	arcan_bench_register_cost(cost);
	agp_activate_rendertarget(NULL);

/* needed here or handle content will be broken, though what we would actually
 * want is a fence on the last drawcall to each mapped rendercall and yield
 * until finished or at least send with the buffer */
	glFinish();

	for (size_t i = 0; i < MAX_DISPLAYS; i++){
/* server-side controlled visibility or script controlled visibility */
		if (!disp[i].visible || !disp[i].mapped || disp[i].dpms != ADPMS_ON)
			continue;

/*
 * Missing features / issues:
 *
 * 1. texture coordinates are not taken into account (would require RT
 *    indirection, manual resampling during synch-copy or shmif- rework
 *    to allow texture coordinates as part of signalling)
 *
 * 2. no post-processing shader, also needs blit stage
 *
 * 3. same rendertarget CAN NOT! be mapped to different displays as the
 *    frame-queueing would break
 *
 * solution would be extending the vstore- to have a 'repack/reblit/raster'
 * state, which would be needed for server-side text anyhow
 */
		struct rendertarget* rtgt = arcan_vint_findrt_vstore(disp[i].vstore);
		struct agp_rendertarget* art = arcan_vint_worldrt();

/* handle passing disallowed (config, permission) or non-rt store:
 * fall back to readback + copy */
		if (disp[i].nopass || (disp[i].vstore && !rtgt) ||
			!arcan_shmif_handle_permitted(&disp[i].conn)){
			verbose_print("force-disable readback pass");
			synch_copy(&disp[i],
				disp[i].vstore ? disp[i].vstore : arcan_vint_world());
			continue;
		}

		if (disp[i].vstore){
/* there are conditions where could go with synch- handle + passing, but
 * with streaming sources we have no reliable way of knowing if its safe */
			if (!rtgt){
				verbose_print("synch-copy non-rt source");
				synch_copy(&disp[i], disp[i].vstore);
				continue;
			}
			art = rtgt->art;
		}

/* got the rendertarget vstore, export it to planes */
		bool swap;
		struct agp_vstore* vs = agp_rendertarget_swap(art, &swap);
		if (swap){
			size_t n_pl = 4;
			struct shmifext_buffer_plane planes[n_pl];
			n_pl = arcan_shmifext_export_image(
				&disp[i].conn, 0, vs->vinf.text.glid, n_pl, planes);

			if (n_pl)
				arcan_shmifext_signal_planes(&disp[i].conn,
					SHMIF_SIGVID | SHMIF_SIGBLK_NONE, n_pl, planes);

/* wait for a stepframe before we continue with this rendertarget */
			disp[i].pending = true;
			arcan_conductor_deadline(4);
		}
	}

pollout:
	if (post)
		post();
}
910 
/* no event-layer setup needed before video init */
void platform_event_preinit()
{
}
914 
915 /*
916  * The regular event layer is just stubbed, when the filtering etc.
917  * is broken out of the platform layer, we can re-use that to have
918  * local filtering untop of the one the engine is doing.
919  */
/* no locally tracked analog devices; samples arrive pre-filtered */
arcan_errc platform_event_analogstate(int devid, int axisid,
	int* lower_bound, int* upper_bound, int* deadzone,
	int* kernel_size, enum ARCAN_ANALOGFILTER_KIND* mode)
{
	return ARCAN_ERRC_NO_SUCH_OBJECT;
}
926 
/* analog enable/disable toggle - nothing to toggle here */
void platform_event_analogall(bool enable, bool mouse)
{
}
930 
/* analog filter configuration - no-op, see analogstate above */
void platform_event_analogfilter(int devid,
	int axisid, int lower_bound, int upper_bound, int deadzone,
	int buffer_sz, enum ARCAN_ANALOGFILTER_KIND kind)
{
}
936 
937 /*
938  * For LWA simulated multidisplay, we still simulate disable by
939  * drawing an empty output display.
940  */
941 enum dpms_state
platform_video_dpms(platform_display_id did,enum dpms_state state)942 	platform_video_dpms(platform_display_id did, enum dpms_state state)
943 {
944 	if (!(did < MAX_DISPLAYS && did[disp].mapped))
945 		return ADPMS_IGNORE;
946 
947 	if (state == ADPMS_IGNORE)
948 		return disp[did].dpms;
949 
950 	disp[did].dpms = state;
951 
952 	return state;
953 }
954 
/* short human-readable platform capability description */
const char* platform_video_capstr()
{
	return "Video Platform (Arcan - in - Arcan)\n";
}
959 
/* no enumerable input devices in the nested setting */
const char* platform_event_devlabel(int devid)
{
	return "no device";
}
964 
/*
 * This handler takes care of pushed segments that don't have a
 * corresponding request, i.e. they are force-pushed from the server
 * side.
 *
 * Most types are best ignored for now (or until we can / want to
 * provide a special handler for them, primarily DEBUG where we could
 * expose conductor timing state).
 *
 * Special cases:
 *  SEGID_MEDIA - map as a low-level display that the scripts/engine
 *                can map to.
 *
 * The better option is to expose them as _adopt handlers, similar
 * to how we do stdin/stdout mapping.
 */
/*
 * Accept a force-pushed MEDIA subsegment on [seg] and expose it as a
 * new simulated display, announcing it to the scripting layer via an
 * EVENT_VIDEO_DISPLAY_ADDED on [ctx]. Other segment kinds are ignored.
 */
static void map_window(
	struct arcan_shmif_cont* seg, arcan_evctx* ctx, int kind, const char* key)
{
	if (kind != SEGID_MEDIA)
		return;

	TRACE_MARK_ONESHOT("video", "new-display", TRACE_SYS_DEFAULT, 0, 0, "lwa");

/*
 * we encode all our IDs (except clipboard) with the internal VID and
 * connected to a rendertarget slot, so re-use that fact.
 */

/* find the first free display slot, if any remain */
	struct display* base = NULL;
	size_t ind = 0;

	while (ind < MAX_DISPLAYS){
		if (!disp[ind].conn.addr){
			base = &disp[ind];
			break;
		}
		ind++;
	}

	if (!base){
		arcan_warning("Hard-coded display-limit reached (%d), "
			"ignoring new segment.\n", (int)MAX_DISPLAYS);
		return;
	}

/* bind the pushed segment and apply display defaults */
	base->conn = arcan_shmif_acquire(seg, key, SEGID_LWA, SHMIF_DISABLE_GUARD);
	base->ppcm = ARCAN_SHMPAGE_DEFAULT_PPCM;
	base->dpms = ADPMS_ON;
	base->visible = true;

/* let the scripting layer know a 'display' was hotplugged */
	arcan_event_enqueue(ctx, &(arcan_event){
		.category = EVENT_VIDEO,
		.vid.kind = EVENT_VIDEO_DISPLAY_ADDED,
		.vid.source = -1,
		.vid.displayid = ind,
		.vid.width = seg->w,
		.vid.height = seg->h
	});
}
1025 
/*
 * Feed function attached (via platform_lwa_allocbind_feed) to
 * rendertargets that output into a subsegment. state.ptr is the
 * subseg_output slot from the displays structure; the parameter list
 * (cmd, buf, width, state, ...) is supplied by the FFUNC_HEAD macro.
 */
enum arcan_ffunc_rv arcan_lwa_ffunc FFUNC_HEAD
{
	struct subseg_output* outptr = state.ptr;
/* we don't care about the guard part here since the data goes low->
 * high-priv and not the other way around */

	if (cmd == FFUNC_DESTROY){
/* drop the connection and give back the allocation bit claimed in
 * platform_lwa_allocbind_feed */
		arcan_shmif_drop(&outptr->con);
		outptr->id = 0;
		for (size_t i = 0; i < 8; i++)
			if (outptr == &disp[0].sub[i]){
				disp[0].subseg_alloc &= ~(1 << i);
				break;
			}
/* don't need to free outptr as it's from the displays- structure */
		return 0;
	}

	if (cmd == FFUNC_ADOPT){
/* we don't support adopt, so will be dropped */
		return 0;
	}

/* drain events to scripting layer, don't care about DMS here */
	if (cmd == FFUNC_TICK){
	}

/*
 * This should only be reached / possible / used when we don't have the
 * fast-path of simply rotating backing color buffer and forwarding the handle
 * to the rendertarget but lack of explicit synch wiring is the big thing here.
 */
	if (cmd == FFUNC_POLL){
		struct arcan_event inev;
		int ss = arcan_shmif_signalstatus(&outptr->con);
/* NOTE(review): -1 (signalstatus on a dead/invalid segment) also reports
 * GOTFRAME here so the engine will progress into READBACK - confirm this is
 * intentional given the comment below */
		if (-1 == ss || (ss & 1))
			return FRV_GOTFRAME;
		else
			return FRV_NOFRAME;
	}

/* the -1 == ss test above guarantees us that READBACK will only trigger on
 * a valid segment as the progression is always POLL immediately before any
 * READBACK is issued */
	if (cmd == FFUNC_READBACK){
		struct arcan_shmif_cont* c = &outptr->con;

/* readback buffer is always packed as it comes from a PBO (pixel packing op)
 * and both vidp and buf will be forced to shmif_pixel == av_pixel */
		for (size_t y = 0; y < c->h; y++){
			memcpy(&outptr->con.vidp[y * c->pitch], &buf[y * width], c->stride);
		}

/* any audio is transfered as part of (unfortunate) patches to openAL pending
 * a better audio layer separation */
		arcan_shmif_signal(&outptr->con, SHMIF_SIGVID | SHMIF_SIGBLK_NONE);
	}

/* special cases, how do we mark ourselves as invisible for popup,
 * or set our position relative to parent? */
	return FRV_NOFRAME;
}
1088 
1089 /*
1090  * Generate a subsegment request on the primary segment, bind and
1091  * add that as the feed-recipient to the recordtarget defined in [rtgt].
1092  * will fail immediately if we are out of free subsegments.
1093  *
1094  * To avoid callbacks or additional multiplex- copies, expect the
1095  * lwa_subseg_eventdrain(uintptr_t tag, arcan_event*) function to
1096  * exist.
1097  */
/*
 * Request a new subsegment on the primary connection and bind it as
 * the feed recipient of the rendertarget [rtgt]. The server reply is
 * matched by cookie in scan_subseg; events are later delivered through
 * arcan_lwa_subseg_ev with [cbtag]. Fails if [rtgt] is invalid or all
 * eight subsegment slots are taken.
 */
bool platform_lwa_allocbind_feed(struct arcan_luactx* ctx,
	arcan_vobj_id rtgt, enum ARCAN_SEGID type, uintptr_t cbtag)
{
	arcan_vobject* vobj = arcan_video_getobject(rtgt);
	if (!vobj || !vobj->vstore)
		return false;

/* limit to 8 possible subsegments / 'display' */
	if (disp[0].subseg_alloc == 255)
		return false;

/* claim the lowest free bit in the allocation mask */
	int slot = ffs(~disp[0].subseg_alloc) - 1;
	disp[0].subseg_alloc |= 1 << slot;

	struct subseg_output* sub = &disp[0].sub[slot];
	*sub = (struct subseg_output){
		.id = 0xcafe + slot,
		.ctx = ctx,
		.cbtag = cbtag,
		.vid = vobj->cellid
	};

/* the reply (NEWSEGMENT or REQFAIL) carries this id as its cookie */
	arcan_shmif_enqueue(&disp[0].conn, &(struct arcan_event){
		.category = EVENT_EXTERNAL,
		.ext.kind = ARCAN_EVENT(SEGREQ),
		.ext.segreq.width = vobj->vstore->w,
		.ext.segreq.height = vobj->vstore->h,
		.ext.segreq.kind = type,
		.ext.segreq.id = sub->id
	});

/* route rendertarget output through the lwa readback ffunc */
	arcan_video_alterfeed(rtgt,
		FFUNC_LWA, (vfunc_state){.tag = cbtag, .ptr = sub});

	return true;
}
1133 
/*
 * Forward [ev] to a subsegment [tgt], or to the primary segment when
 * [tgt] is NULL. Certain events (like target_displayhint indicating
 * visibility) are reused contextually rather than adding more
 * entry points to track.
 */
bool platform_lwa_targetevent(struct subseg_output* tgt, arcan_event* ev)
{
	if (tgt)
		return arcan_shmif_enqueue(&tgt->con, ev);

	arcan_shmif_enqueue(&disp[0].conn, ev);
	return true;
}
1146 
/*
 * Match a NEWSEGMENT/REQFAIL reply against a pending subsegment
 * request (by cookie in ioevs[3]) and either map the segment or tell
 * the scripting side the request was rejected. Returns true only when
 * a matching request was found and successfully mapped.
 *
 * ioev layout: 0: fd, 1: direction, 2: type, 3: cookie, ...
 */
static bool scan_subseg(arcan_tgtevent* ev, bool ok)
{
	if (ev->ioevs[1].iv != 0)
		return false;

/* find the subseg_output slot whose id matches the reply cookie */
	struct subseg_output* out = NULL;
	for (size_t i = 0; i < 8; i++){
		if (disp[0].sub[i].id == ev->ioevs[3].iv){
			out = &disp[0].sub[i];
			break;
		}
	}

	if (!out)
		return false;

/* on success, try to actually acquire and map the segment */
	if (ok){
		out->con = arcan_shmif_acquire(&disp[0].conn, NULL, out->id, 0);
		if (out->con.vidp)
			return true;
	}

/* rejected (or mapping failed): send EXIT so the scripting side can
 * terminate; the allocation bit is only released when TERMINATE hits
 * the ffunc on the vobj itself */
	arcan_lwa_subseg_ev(out->ctx, out->vid, out->cbtag, &(struct arcan_event){
		.category = EVENT_TARGET,
		.tgt.kind = TARGET_COMMAND_EXIT,
		.tgt.message = "rejected"
	});
	return false;
}
1188 
1189 /*
1190  * return true if the segment has expired
1191  */
/*
 * Drain the event queue of one simulated display [d]. Target commands
 * are translated into engine events on [ctx]; everything else is
 * forwarded untouched.
 *
 * Returns true if the segment has expired (never mapped, or EXIT seen).
 */
static bool event_process_disp(arcan_evctx* ctx, struct display* d)
{
	if (!d->conn.addr)
		return true;

	arcan_event ev;

	while (1 == arcan_shmif_poll(&d->conn, &ev))
		if (ev.category == EVENT_TARGET)
		switch(ev.tgt.kind){

/*
 * We use subsegments forced from the parent- side as an analog for hotplug
 * displays, giving developers a testbed for a rather hard feature and at the
 * same time get to evaluate the API. This is not ideal as the _adopt handler
 * is more apt at testing that the script code can handle an unannounced
 * lwa_segment coming in.
 *
 * Similarly, if an OUTPUT segment comes in such a way, that would be better
 * treated as an adopt on a media source. More things to reconsider in this
 * interface come ~0.7
 *
 * For subsegment IDs that match a pending request, with special treatment for
 * the DND/PASTE cases.
 */
		case TARGET_COMMAND_NEWSEGMENT:
/* only the primary display carries requested subsegments; if no pending
 * request matches the cookie, treat the push as a new simulated display */
			if (d == &disp[0]){
				if (!scan_subseg(&ev.tgt, true))
					map_window(&d->conn, ctx, ev.tgt.ioevs[2].iv, ev.tgt.message);
			}
		break;

/* a pending subsegment request was rejected by the server side */
		case TARGET_COMMAND_REQFAIL:
			scan_subseg(&ev.tgt, false);
		break;

/*
 * Depends on active synchronization strategy, could also be used with a
 * 'every tick' timer to synch clockrate to server or have a single-frame
 * stepping mode. This ought to be used with the ability to set RT clocking
 * mode
 */
		case TARGET_COMMAND_STEPFRAME:
			TRACE_MARK_ONESHOT("video", "signal-stepframe", TRACE_SYS_DEFAULT, d->id, 0, "");
			arcan_conductor_deadline(0);
			d->pending = false;
		break;

/*
 * We can't automatically resize as the layouting in the running appl may not
 * be able to handle relayouting in an event-driven manner, so we translate and
 * forward as a monitor event.
 */
		case TARGET_COMMAND_DISPLAYHINT:{
			bool update = false;
			size_t w = d->conn.w;
			size_t h = d->conn.h;

/* ioevs[0,1] hold the hinted dimensions; zero means 'no change' */
			if (ev.tgt.ioevs[0].iv && ev.tgt.ioevs[1].iv){
				update |= ev.tgt.ioevs[0].iv != d->conn.w;
				update |= ev.tgt.ioevs[1].iv != d->conn.h;
				w = ev.tgt.ioevs[0].iv;
				h = ev.tgt.ioevs[1].iv;
			}

/*
 * These properties are >currently< not forwarded - as the idea of mapping
 * windows as 'displays' is problematic with 'visibility and focus' not
 * making direct sense as such.
 *
 * This should probably be forwarded as the special _LWA events so that
 * any focus state or mouse cursor state can be updated accordingly.
 * The best option is 'probably' to use arcan_lwa_subseg_ev and some
 * _arcan event entry-point for the primary display.
 *
 * Currently the flags are forwarded raw so the reset event handler can
 * take them into account, but it is not pretty.
 */
			if (!(ev.tgt.ioevs[2].iv & 128)){
				d->visible = !((ev.tgt.ioevs[2].iv & 2) > 0);
				d->focused = !((ev.tgt.ioevs[2].iv & 4) > 0);
			}

/* a density change also warrants a RESET event */
			if (ev.tgt.ioevs[4].fv > 0 && ev.tgt.ioevs[4].fv != d->ppcm){
				update = true;
				d->ppcm = ev.tgt.ioevs[4].fv;
			}

/* denqueue: delivered with higher priority than the regular enqueue */
			if (update){
				arcan_event_denqueue(ctx, &(arcan_event){
					.category = EVENT_VIDEO,
					.vid.kind = EVENT_VIDEO_DISPLAY_RESET,
					.vid.source = -1,
					.vid.displayid = d->id,
					.vid.width = w,
					.vid.height = h,
					.vid.flags = ev.tgt.ioevs[2].iv,
					.vid.vppcm = d->ppcm
				});
			}
		}
		break;
/*
 * This behavior may be a bit strong, but we allow the display server
 * to override the default font (if provided)
 */
		case TARGET_COMMAND_FONTHINT:{
			int newfd = BADFD;
			int font_sz = 0;
			int hint = ev.tgt.ioevs[3].iv;

/* ioevs[1] == 1 marks a valid font descriptor transfer in ioevs[0] */
			if (ev.tgt.ioevs[1].iv == 1 && BADFD != ev.tgt.ioevs[0].iv){
				newfd = dup(ev.tgt.ioevs[0].iv);
			};

/* ioevs[2] carries the size in mm, scale by display density */
			if (ev.tgt.ioevs[2].fv > 0)
				font_sz = ceilf(d->ppcm * ev.tgt.ioevs[2].fv);

			arcan_video_defaultfont("arcan-default",
				newfd, font_sz, hint, ev.tgt.ioevs[4].iv);

/* source -2 distinguishes a font-driven reset from a displayhint one */
			arcan_event_enqueue(ctx, &(arcan_event){
				.category = EVENT_VIDEO,
				.vid.kind = EVENT_VIDEO_DISPLAY_RESET,
				.vid.source = -2,
				.vid.displayid = d->id,
				.vid.vppcm = ev.tgt.ioevs[2].fv,

			});
		}
		break;

/* non-local recovery: unwind back into the main loop to switch appl */
		case TARGET_COMMAND_RESET:
			if (ev.tgt.ioevs[0].iv == 0)
				longjmp(arcanmain_recover_state, ARCAN_LUA_SWITCH_APPL);
			else if (ev.tgt.ioevs[0].iv == 1)
				longjmp(arcanmain_recover_state, ARCAN_LUA_SWITCH_APPL_NOADOPT);
			else {
/* We are in migrate state, so force-mark frames as dirty */
				disp[0].decay = 4;
			}
		break;

/*
 * The nodes have already been unlinked, so all cleanup
 * can be made when the process dies.
 */
		case TARGET_COMMAND_EXIT:
/* primary display going away means the whole engine should exit */
			if (d == &disp[0]){
				ev.category = EVENT_SYSTEM;
				ev.sys.kind = EVENT_SYSTEM_EXIT;
				d->conn.vidp = NULL;
				arcan_event_enqueue(ctx, &ev);
			}
/* Need to explicitly drop single segment */
			else {
				arcan_event ev = {
					.category = EVENT_VIDEO,
					.vid.kind = EVENT_VIDEO_DISPLAY_REMOVED,
					.vid.displayid = d->id
				};
				arcan_event_enqueue(ctx, &ev);
				free(d->conn.user);
				arcan_shmif_drop(&d->conn);
				if (d->vstore){
					arcan_vint_drop_vstore(d->vstore);
					d->vstore = NULL;
				}

				memset(d, '\0', sizeof(struct display));
			}
			return true; /* it's not safe here */
		break;

		default:
		break;
		}
		else
			arcan_event_enqueue(ctx, &ev);

	return false;
}
1374 
/*
 * Report key-repeat configuration - repeats are not synthesised
 * locally, so both values are zeroed. In principle we could use the
 * tick implied in _process, track the latest translated keyboard
 * sample per devid and re-emit it periodically.
 */
void platform_event_keyrepeat(arcan_evctx* ctx, int* period, int* delay)
{
	*period = 0;
	*delay = 0;
}
1383 
/*
 * Pump all display connections and subsegment queues. Target commands
 * are handled per-display in event_process_disp; subsegment events go
 * straight to the scripting layer via arcan_lwa_subseg_ev.
 */
void platform_event_process(arcan_evctx* ctx)
{
	bool locked = primary_udata.signal_pending;
	primary_udata.signal_pending = false;

/*
 * Most events can just be added to the local queue, but some target
 * commands take a separate path (special hook into LUA).
 */
	for (size_t i = 0; i < MAX_DISPLAYS; i++){
		event_process_disp(ctx, &disp[i]);
		primary_udata.signal_pending |= disp[i].pending;
	}

/*
 * Only the first 'display' can have subsegments; sweep the allocated
 * ones lowest-bit first. This could also be done with monitoring
 * threads on the futex given better synch strategies. Their outputs
 * act as recordtargets with swapchains, not as proper 'displays'.
 */
	int pending = disp[0].subseg_alloc;
	for (int bit; (bit = ffs(pending)); ){
		pending &= ~(1 << (bit - 1));
		struct subseg_output* out = &disp[0].sub[bit - 1];

		struct arcan_event sev;
		while (arcan_shmif_poll(&out->con, &sev) > 0)
			arcan_lwa_subseg_ev(out->ctx, out->vid, out->cbtag, &sev);
	}
}
1418 
/* Input device rescan - no-op, devices belong to the outer instance. */
void platform_event_rescan_idev(arcan_evctx* ctx)
{
}
1422 
platform_event_capabilities(const char ** out)1423 enum PLATFORM_EVENT_CAPABILITIES platform_event_capabilities(const char** out)
1424 {
1425 	if (out)
1426 		*out = "lwa";
1427 
1428 	return ACAP_TRANSLATED | ACAP_MOUSE | ACAP_TOUCH |
1429 		ACAP_POSITION | ACAP_ORIENTATION;
1430 }
1431 
/* Set key-repeat rate - no-op, see platform_event_keyrepeat. */
void platform_key_repeat(arcan_evctx* ctx, unsigned int rate)
{
}
1435 
/* Tear down the event layer - only emits a trace marker here. */
void platform_event_deinit(arcan_evctx* ctx)
{
	TRACE_MARK_ONESHOT("event", "event-platform-deinit", TRACE_SYS_DEFAULT, 0, 0, "lwa");
}
1440 
platform_video_recovery()1441 void platform_video_recovery()
1442 {
1443 	arcan_event ev = {
1444 		.category = EVENT_VIDEO,
1445 		.vid.kind = EVENT_VIDEO_DISPLAY_ADDED
1446 	};
1447 
1448 	TRACE_MARK_ONESHOT("video", "video-platform-recovery", TRACE_SYS_DEFAULT, 0, 0, "lwa");
1449 	arcan_evctx* evctx = arcan_event_defaultctx();
1450 	arcan_event_enqueue(evctx, &ev);
1451 
1452 	for (size_t i = 0; i < MAX_DISPLAYS; i++){
1453 		disp[i].vstore = arcan_vint_world();
1454 		ev.vid.source = -1;
1455 		ev.vid.displayid = i;
1456 		arcan_event_enqueue(evctx, &ev);
1457 	}
1458 }
1459 
/* Reset the event layer by running the deinit path. */
void platform_event_reset(arcan_evctx* ctx)
{
	TRACE_MARK_ONESHOT("event", "event-platform-reset", TRACE_SYS_DEFAULT, 0, 0, "lwa");
	platform_event_deinit(ctx);
}
1465 
/* Device grab/lock - no-op, the outer instance owns the devices. */
void platform_device_lock(int devind, bool state)
{
}
1469 
/* Bring up the event layer - only emits a trace marker here. */
void platform_event_init(arcan_evctx* ctx)
{
	TRACE_MARK_ONESHOT("event", "event-platform-init", TRACE_SYS_DEFAULT, 0, 0, "lwa");
}
1474