/*
 * Copyright: Björn Ståhl
 * License: 3-Clause BSD, see COPYING file in arcan source repository.
 * Reference: http://arcan-fe.com
 * Description: The event-queue interface has gone through a lot of hacks
 * over the years and some of the decisions made make little to no sense
 * anymore. It is primarily used as an ordering and queueing mechanism to
 * avoid a ton of callbacks when mapping between Script<->Core engine, but
 * also for synchronizing transfers over the shared memory interface.
 */

#include <stdlib.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <fcntl.h>
#include <sys/types.h>
#include <poll.h>
#include <errno.h>
#include <math.h>
#include <assert.h>
#include <signal.h>

/*
 * fixed limit of allowed events in queue before we need to do something more
 * aggressive (flush queue to script, blacklist noisy sources, rate-limit
 * frameservers)
 */
#ifndef ARCAN_EVENT_QUEUE_LIM
#define ARCAN_EVENT_QUEUE_LIM 255
#endif

#include "arcan_math.h"
#include "arcan_general.h"
#include "arcan_video.h"
#include "arcan_audio.h"
#include "arcan_db.h"
#include "arcan_shmif.h"
#include "arcan_event.h"
#include "arcan_led.h"

#include "arcan_frameserver.h"

typedef struct queue_cell queue_cell;

static arcan_event eventbuf[ARCAN_EVENT_QUEUE_LIM];

static uint8_t eventfront = 0, eventback = 0;
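/* note: with uint8_t cursors the usable queue size is capped at 256;
 * raising ARCAN_EVENT_QUEUE_LIM beyond that would require widening these */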
static int64_t epoch;

/* basic context is just mapped on the static buffer, the reason for this
 * construct is to share code with the event ring buffers in shmif */
static struct arcan_evctx default_evctx = {
	.eventbuf = eventbuf,
	.eventbuf_sz = ARCAN_EVENT_QUEUE_LIM,
	.front = &eventfront,
	.back = &eventback,
	.local = true
};

#ifndef FORCE_SYNCH
	#define FORCE_SYNCH() {\
		asm volatile("": : :"memory");\
		__sync_synchronize();\
	}
#endif
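/* the barrier pairing above matters: the asm statement stops the compiler
 * from re-ordering loads/stores across this point, while __sync_synchronize()
 * emits a full hardware memory barrier - both are needed before trusting
 * ring indices that another process may be updating concurrently */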

/* set through an environment variable so we can shut down cleanly
 * based on a certain keybinding */
static int panic_keysym = -1, panic_keymod = -1;
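/* e.g. ARCAN_EVENT_SHUTDOWN=27:4 would (with hypothetical platform values,
 * 27 as keysym and 4 as modifier bitmap) arm the panic binding that
 * arcan_event_init parses further down */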

/* fixed-size (64 entries) pollset for dynamic event-source tracking,
 * with slot allocation state kept in a bitmap */
struct evsrc_meta {
	intptr_t tag;
	mode_t mode;
};

static struct pollfd evsrc_pollset[64];
static struct evsrc_meta evsrc_meta[64];
static uint64_t evsrc_bitmap;

arcan_evctx* arcan_event_defaultctx(){
	return &default_evctx;
}

/*
 * If the shmpage integrity is somehow compromised,
 * e.g. if semaphore use is out of order.
 */
static void pull_killswitch(arcan_evctx* ctx)
{
	arcan_frameserver* ks = (arcan_frameserver*) ctx->synch.killswitch;
	arcan_sem_post(ctx->synch.handle);
	arcan_warning("inconsistency while processing "
		"shmpage events, pulling killswitch.\n");
	arcan_frameserver_free(ks);
	ctx->synch.killswitch = NULL;
}

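/* single-reader / single-writer ring convention: one slot is always left
 * unused so that front == back unambiguously means empty and
 * (back + 1) % size == front means full */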
static bool queue_full(arcan_evctx* ctx)
{
	return (((*ctx->back + 1) % ctx->eventbuf_sz) == *ctx->front);
}

static bool queue_empty(arcan_evctx* ctx)
{
	return (*ctx->front == *ctx->back);
}

int arcan_event_poll(arcan_evctx* ctx, struct arcan_event* dst)
{
	assert(dst);
	if (queue_empty(ctx))
		return 0;

/* overflow in external connection? pull killswitch that will hopefully
 * wake the guard thread that will try to safely shut down */
	if (ctx->local == false){
		FORCE_SYNCH();
		if ( *(ctx->front) > PP_QUEUE_SZ ){
			pull_killswitch(ctx);
			return 0;
		}
		else {
			*dst = ctx->eventbuf[ *(ctx->front) ];
			memset(&ctx->eventbuf[ *(ctx->front) ], 0xff, sizeof(struct arcan_event));
			*(ctx->front) = (*(ctx->front) + 1) % PP_QUEUE_SZ;
		}
	}
	else {
		*dst = ctx->eventbuf[ *(ctx->front) ];
		*(ctx->front) = (*(ctx->front) + 1) % ctx->eventbuf_sz;
	}

	return 1;
}

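/* in-place patch of already queued events: for every event in the local
 * queue that matches 'cat' and whose r_b bytes at offset r_ofs equal cmpbuf,
 * overwrite w_b bytes at offset w_ofs with the contents of w_buf */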
void arcan_event_repl(struct arcan_evctx* ctx, enum ARCAN_EVENT_CATEGORY cat,
	size_t r_ofs, size_t r_b, void* cmpbuf, size_t w_ofs, size_t w_b, void* w_buf)
{
	if (!ctx->local)
		return;

	unsigned front = *ctx->front;

	while (front != *ctx->back){
		if (ctx->eventbuf[front].category == cat &&
			memcmp( (char*)(&ctx->eventbuf[front]) + r_ofs, cmpbuf, r_b) == 0){
				memcpy( (char*)(&ctx->eventbuf[front]) + w_ofs, w_buf, w_b );
		}

		front = (front + 1) % ctx->eventbuf_sz;
	}
}

void arcan_event_maskall(arcan_evctx* ctx)
{
	ctx->mask_cat_inp = 0xffffffff;
}

void arcan_event_clearmask(arcan_evctx* ctx)
{
	ctx->mask_cat_inp = 0;
}

void arcan_event_setmask(arcan_evctx* ctx, uint32_t mask)
{
	ctx->mask_cat_inp = mask;
}

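/* 'drain-enqueue': first try to hand the event directly to the registered
 * drain callback, bypassing the ring entirely; fall back to the ordinary
 * enqueue path if the drain is absent or declines the event */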
int arcan_event_denqueue(arcan_evctx* ctx, const struct arcan_event* const src)
{
	if (ctx->drain){
		arcan_event ev = *src;
		if (ctx->drain(&ev, 1))
			return ARCAN_OK;
	}

	return arcan_event_enqueue(ctx, src);
}

/*
 * enqueue to current context considering input-masking; unless a label is
 * set, assign one based on what kind of event it is. This function has a
 * similar prototype to the enqueue defined in interop.h, but a different
 * implementation since it needs to support waking up the child and since
 * blocking behavior in the main thread is always forbidden.
 */
int arcan_event_enqueue(arcan_evctx* ctx, const struct arcan_event* const src)
{
/* early-out mask-filter, these are only ever used to silently
 * discard input / output (only operate on head and tail of ringbuffer) */
	if (!src || (src->category & ctx->mask_cat_inp)
		|| (ctx->state_fl & EVSTATE_DEAD) > 0)
		return ARCAN_OK;

/* One big caveat with this approach is the possibility of a feedback loop
 * with magnification - forcing us to break ordering by directly feeding the
 * drain. Given that we have special treatment for _EXPIRE and similar calls,
 * there shouldn't be any functions that have this behavior. Still, broken
 * ordering is better than running out of space. */
	if (queue_full(ctx)){
		if (ctx->drain){
			TRACE_MARK_ONESHOT("event", "queue-drain", TRACE_SYS_SLOW, 0, 0, "drain");

/* very rare / impossible, but safe-guard against future bad code where the
 * force-feeding below would trigger new events that would bring us back */
			if ((ctx->state_fl & EVSTATE_IN_DRAIN) > 0){
				arcan_event ev = *src;
				if (ctx->drain(&ev, 1))
					return ARCAN_OK;
				return ARCAN_ERRC_OUT_OF_SPACE;
			}
/* tradeoff, can cascade to an embarrassing GC pause or video-stall but better
 * than data corruption and unpredictable states -- this can theoretically
 * have us return a broken 'custom' error code from some script */
			else {
				ctx->state_fl |= EVSTATE_IN_DRAIN;
				arcan_event_feed(ctx, ctx->drain, NULL);
				ctx->state_fl &= ~EVSTATE_IN_DRAIN;
			}
		}
		else {
			TRACE_MARK_ONESHOT("event", "queue-overflow", TRACE_SYS_WARN, 0, 0, "full");
			return ARCAN_ERRC_OUT_OF_SPACE;
		}
	}

/* This is problematic to keep here - it should either be part of the watchdog
 * process, when / if we can process input there (though then it will not
 * follow the keymap), or at least move to the platform stage so the event
 * doesn't get queued as input at all but rather gets sent to the watchdog,
 * which can clean up correctly */
	if (panic_keysym != -1 && panic_keymod != -1 &&
		src->category == EVENT_IO && src->io.kind == EVENT_IO_BUTTON &&
		src->io.devkind == EVENT_IDEVKIND_KEYBOARD &&
		src->io.input.translated.modifiers == panic_keymod &&
		src->io.input.translated.keysym == panic_keysym
	){
		arcan_event ev = {
			.category = EVENT_SYSTEM,
			.sys.kind = EVENT_SYSTEM_EXIT,
			.sys.errcode = EXIT_SUCCESS
		};

		TRACE_MARK_ONESHOT("event", "shutdown", TRACE_SYS_WARN, 0, 0, "panic key");
		return arcan_event_enqueue(ctx, &ev);
	}

	ctx->eventbuf[(*ctx->back) % ctx->eventbuf_sz] = *src;
	*ctx->back = (*ctx->back + 1) % ctx->eventbuf_sz;

	return ARCAN_OK;
}

static inline int queue_used(arcan_evctx* dq)
{
	int rv = *(dq->front) > *(dq->back) ? dq->eventbuf_sz -
		*(dq->front) + *(dq->back) : *(dq->back) - *(dq->front);
	return rv;
}
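/* worked example: with eventbuf_sz = 8, front = 6, back = 2 the ring has
 * wrapped, so 8 - 6 + 2 = 4 slots (6, 7, 0, 1) are in use */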

static bool append_bufferstream(struct arcan_frameserver* tgt, arcan_extevent* ev)
{
/* this assumes a certain ordering around fetching the handle and it being
 * available on the descriptor; there are no obvious hard guarantees on this
 * from the kernel, though the client libs do send the descriptor before
 * adding the event and the compiler can't safely re-order around that */
	int fd = arcan_fetchhandle(tgt->dpipe, false);
	if (-1 == fd){
		arcan_warning("fetchhandle-bstream mismatch\n");
		goto fail;
	}

/* if the client lies about the plane count, we arrive here: */
	if (tgt->vstream.incoming_used == 4){
		goto fail;
	}

	size_t i = tgt->vstream.incoming_used;
	tgt->vstream.incoming[i].fd = fd;
	tgt->vstream.incoming[i].gbm.stride = ev->bstream.stride;
	tgt->vstream.incoming[i].gbm.offset = ev->bstream.offset;
	tgt->vstream.incoming[i].gbm.mod_hi = ev->bstream.mod_hi;
	tgt->vstream.incoming[i].gbm.mod_lo = ev->bstream.mod_lo;
	tgt->vstream.incoming[i].gbm.format = ev->bstream.format;
	tgt->vstream.incoming[i].w = ev->bstream.width;
	tgt->vstream.incoming[i].h = ev->bstream.height;
	tgt->vstream.incoming_used++;

/* flush incoming to pending, but if there is already something pending, take
 * its place so that a newer frame gets precedence (mailbox mode) */
	if (!ev->bstream.left){
		arcan_frameserver_close_bufferqueues(tgt, false, true);
		size_t buf_sz = sizeof(struct agp_buffer_plane) * COUNT_OF(tgt->vstream.pending);
		memcpy(tgt->vstream.pending, tgt->vstream.incoming, buf_sz);
		tgt->vstream.pending_used = tgt->vstream.incoming_used;
		tgt->vstream.incoming_used = 0;
		memset(&tgt->vstream.incoming, '\0', buf_sz);
		return true;
	}

	return false;

fail:
	arcan_frameserver_close_bufferqueues(tgt, true, true);
	arcan_event_enqueue(&tgt->outqueue, &(struct arcan_event){
		.category = EVENT_TARGET,
		.tgt.kind = TARGET_COMMAND_BUFFER_FAIL
	});
	tgt->vstream.dead = true;
	return true;
}

int arcan_event_queuetransfer(arcan_evctx* dstqueue, arcan_evctx* srcqueue,
	enum ARCAN_EVENT_CATEGORY allowed, float sat, struct arcan_frameserver* tgt)
{
	if (!srcqueue || !dstqueue || !srcqueue->front || !srcqueue->back)
		return 0;

	bool wake = false;
	bool drain = false;

/* A negative saturation means that this source is permitted to send directly
 * to the drain (though mask and filters still apply). While this should not
 * be needed for the vast majority of clients, the exceptions are external
 * input device drivers and bridges, e.g. X11, where a client is expected to
 * break the ev.in / ev.out rate asymmetry. */
	if (sat < 0.0){
		drain = true;
		sat = 1.0;
/* see the comments further below where the drain variable is used */
	}

	size_t cap = floor((float)dstqueue->eventbuf_sz * sat);

	while (!queue_empty(srcqueue) && queue_used(dstqueue) < cap){
		arcan_event inev;
		if (arcan_event_poll(srcqueue, &inev) == 0)
			break;

/* IO events have special behavior as the routed path (via the frameserver
 * callback or the global event handler) can be decided here: if raw transfers
 * have been permitted we don't change the category, as those events can be
 * pushed out-of-loop. Otherwise we take the slow default path and forward the
 * events through the normal callback event handler. */
		if (inev.category == EVENT_IO && tgt){
			if (!(inev.category & allowed)){
				inev = (struct arcan_event){
					.category = EVENT_FSRV,
					.fsrv.kind = EVENT_FSRV_IONESTED,
					.fsrv.otag = tgt->tag,
					.fsrv.video = tgt->vid,
					.fsrv.input = inev.io
				};
			}
		}
/* a custom mask to allow certain events to be passed through or not */
		else if ((inev.category & allowed) == 0 )
			continue;

/*
 * update / translate to make sure the corresponding frameserver<->lua mapping
 * can be found and tracked, there are also a few events that should be handled
 * here rather than propagated (bufferstream for instance).
 */
		if (inev.category == EVENT_EXTERNAL && tgt){
			switch(inev.ext.kind){

/* to protect against scripts that would happily try to just allocate/respond
 * to what the event says, clamp this here */
				case EVENT_EXTERNAL_SEGREQ:
					if (inev.ext.segreq.width > PP_SHMPAGE_MAXW)
						inev.ext.segreq.width = PP_SHMPAGE_MAXW;

					if (inev.ext.segreq.height > PP_SHMPAGE_MAXH)
						inev.ext.segreq.height = PP_SHMPAGE_MAXH;
				break;

				case EVENT_EXTERNAL_BUFFERSTREAM:
/* this assumes that we are in non-blocking state and that a single CMSG on a
 * socket is sufficient for a non-blocking recvmsg */
					wake = append_bufferstream(tgt, &inev.ext);
					continue;
				break;

				case EVENT_EXTERNAL_PRIVDROP:
					tgt->flags.external |= inev.ext.privdrop.external;
					tgt->flags.networked = inev.ext.privdrop.networked;
					tgt->flags.sandboxed |= inev.ext.privdrop.sandboxed;
/* modify the event so that no illegal transitions are forwarded or applied */
					inev.ext.privdrop.external = tgt->flags.external;
					inev.ext.privdrop.networked = tgt->flags.networked;
					inev.ext.privdrop.sandboxed = tgt->flags.sandboxed;
				break;

				case EVENT_EXTERNAL_INPUTMASK:
					tgt->devicemask = inev.ext.inputmask.device;
					tgt->datamask   = inev.ext.inputmask.types;
				break;

/* for autoclocking, only one-fire events are forwarded if the flag has been set */
				case EVENT_EXTERNAL_CLOCKREQ:
					if (tgt->flags.autoclock && !inev.ext.clock.once){
						tgt->clock.frame = inev.ext.clock.dynamic;
						tgt->clock.left = tgt->clock.start = inev.ext.clock.rate;
						wake = true;
						continue;
					}
				break;

				case EVENT_EXTERNAL_REGISTER:
					if (tgt->segid == SEGID_UNKNOWN){
/* 0.6/CRYPTO - need actual signature authentication here */
						if (!inev.ext.registr.guid[0] && !inev.ext.registr.guid[1]){
							arcan_random((uint8_t*)tgt->guid, 16);
						}
						else {
							tgt->guid[0] = inev.ext.registr.guid[0];
							tgt->guid[1] = inev.ext.registr.guid[1];
						}
					}
					snprintf(tgt->title,
						COUNT_OF(tgt->title), "%s", inev.ext.registr.title);
				break;
/* note: one could manually enable EVENT_INPUT and use separate processes
 * as input sources (with all the risks that come with it security wise);
 * if that ever becomes a concern, here would be a good place to consider
 * filtering the panic_key* */

/* the client may need more fine-grained control for audio transfers when it
 * comes to synchronized A/V playback */
				case EVENT_EXTERNAL_FLUSHAUD:
					arcan_frameserver_flush(tgt);
					continue;
				break;

				default:
				break;
			}
			inev.ext.source = tgt->vid;
		}
		else if (inev.category == EVENT_IO && tgt){
			inev.io.subid = tgt->vid;
		}
		wake = true;

/* it might be slightly faster to drain multiple events here, but since the
 * source might be mapped to an untrusted in-memory queue it would still take a
 * full filter+copy path - so likely not worth pursuing.
 *
 * There is a complex and subtle danger here:
 *  0. Recall we are being called from the TRAMP_GUARD
 *     (against sigbus on the shared page).
 *
 *  1. In the drain function, we will jump into the callback handler in the
 *     Lua VM, still with the TRAMP_GUARD set. If that callback handler then
 *     queues an event into the frameserver, a new TRAMP_GUARD will be set
 *     for that scope, but when finished it will CLEAR the existing
 *     tramp-guard, causing the next event transfer to be performed without
 *     a guard in place. In a sense this is similar to the recursive mutex
 *     problem and atfork handling.
 *
 *  2. Another nasty possibility is that the frameserver structure might get
 *     killed off as a response to a transferred event. That's really bad.
 *
 * The mitigations are:
 *  1. Re-arm the guard based on counter changes, forward the failure.
 *  2. Add a fuse to _free, if that one blows, forward the failure.
 */
		if (drain && dstqueue->drain){
			tgt->fused = true;
			size_t last_stamp = platform_fsrv_clock();

			if (dstqueue->drain(&inev, 1)){
				if (last_stamp != platform_fsrv_clock()){
					TRAMP_GUARD(-1, tgt);
				}

				tgt->fused = false;
				if (tgt->fuse_blown){
					break;
				}
			}
			tgt->fused = false;
			continue;
		}

		arcan_event_enqueue(dstqueue, &inev);
	}

	if (wake)
		arcan_sem_post(srcqueue->synch.handle);

/* guard the deref, tgt is optional on this path */
	return (tgt && tgt->fuse_blown) ? -2 : 0;
}

void arcan_event_blacklist(const char* idstr)
{
/* idstr comes from a trusted context, won't exceed stack size */
	char buf[strlen(idstr) + sizeof("bl_")];
	snprintf(buf, COUNT_OF(buf), "bl_%s", idstr);
	const char* appl;
	struct arcan_dbh* dbh = arcan_db_get_shared(&appl);
	arcan_db_appl_kv(dbh, appl, buf, "block");
}

bool arcan_event_blacklisted(const char* idstr)
{
/* idstr comes from a trusted context, won't exceed stack size */
	char buf[strlen(idstr) + sizeof("bl_")];
	snprintf(buf, COUNT_OF(buf), "bl_%s", idstr);
	const char* appl;
	struct arcan_dbh* dbh = arcan_db_get_shared(&appl);
	char* res = arcan_db_appl_val(dbh, appl, buf);
	bool rv = res && strcmp(res, "block") == 0;
	arcan_mem_free(res);
	return rv;
}
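/* usage sketch (with a hypothetical identifier string):
 *
 *   arcan_event_blacklist("noisy_device_0badc0de");
 *   ...
 *   if (arcan_event_blacklisted("noisy_device_0badc0de"))
 *       return; (refuse to re-attach the event source)
 */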

int64_t arcan_frametime()
{
	int64_t now = arcan_timemillis();
	if (now < epoch)
		epoch = now - (epoch - now);

	return now - epoch;
}

float arcan_event_process(arcan_evctx* ctx, arcan_tick_cb cb)
{
	int64_t base = ctx->c_ticks * ARCAN_TIMER_TICK;
	int64_t delta = arcan_frametime() - base;

	platform_event_process(ctx);

	if (delta > ARCAN_TIMER_TICK){
		int nticks = delta / ARCAN_TIMER_TICK;
		if (nticks > ARCAN_TICK_THRESHOLD){
			epoch += (nticks - 1) * ARCAN_TIMER_TICK;
			nticks = 1;
		}

		ctx->c_ticks += nticks;
		cb(nticks);
		arcan_bench_register_tick(nticks);
		return arcan_event_process(ctx, cb);
	}

	return (float)delta / (float)ARCAN_TIMER_TICK;
}
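/* the return value is the fraction of the current logical tick that has
 * elapsed, e.g. assuming a 25ms ARCAN_TIMER_TICK, a delta of 12ms yields
 * 0.48 - callers can use this to interpolate between ticks */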

arcan_benchdata benchdata = {0};

/*
 * keep the time tracking separate from the other
 * timekeeping parts, discard non-monotonic values
 */
void arcan_bench_register_tick(unsigned nticks)
{
	static long long int lasttick = -1;
	if (benchdata.bench_enabled == false)
		return;

	while (nticks--){
		long long int ftime = arcan_timemillis();
		benchdata.tickcount++;

		if (lasttick > 0 && ftime > lasttick){
			unsigned delta = ftime - lasttick;
			benchdata.ticktime[(unsigned)benchdata.tickofs] = delta;
			benchdata.tickofs = (benchdata.tickofs + 1) %
				(sizeof(benchdata.ticktime) / sizeof(benchdata.ticktime[0]));
		}

		lasttick = ftime;
	}
}

void arcan_event_purge()
{
	eventfront = 0;
	eventback = 0;
	platform_event_reset(&default_evctx);
}

arcan_benchdata* arcan_bench_data()
{
	return &benchdata;
}

void arcan_bench_register_cost(unsigned cost)
{
	benchdata.framecost[(unsigned)benchdata.costofs] = cost;
	if (benchdata.bench_enabled == false)
		return;

	benchdata.costcount++;
	benchdata.costofs = (benchdata.costofs + 1) %
		(sizeof(benchdata.framecost) / sizeof(benchdata.framecost[0]));
}

void arcan_bench_register_frame()
{
	static long long int lastframe = -1;
	if (benchdata.bench_enabled == false)
		return;

	long long int ftime = arcan_timemillis();
	if (lastframe > 0 && ftime > lastframe){
		unsigned delta = ftime - lastframe;
		benchdata.frametime[(unsigned)benchdata.frameofs] = delta;
		benchdata.framecount++;
		benchdata.frameofs = (benchdata.frameofs + 1) %
			(sizeof(benchdata.frametime) / sizeof(benchdata.frametime[0]));
	}

	lastframe = ftime;
}

void arcan_event_deinit(arcan_evctx* ctx, bool flush)
{
	platform_event_deinit(ctx);

/* This separation is to avoid some edge cases like VT switching causing events
 * to be dropped even when there are dependencies such as key-down to key-up */
	if (!flush)
		return;

	eventfront = eventback = 0;
}

#ifdef _DEBUG
void arcan_event_dump(struct arcan_evctx* ctx)
{
	unsigned front = *ctx->front;
	size_t count = 0;

	while (front != *ctx->back){
		arcan_warning("slot: %zu, category: %d, kind: %d\n",
			count++, ctx->eventbuf[front].category, ctx->eventbuf[front].io.kind);
		front = (front + 1) % ctx->eventbuf_sz;
	}
}
#endif

#ifdef _CLOCK_FUZZ
/* jump back ~24 hours */
static void sig_rtfuzz_a(int v)
{
	epoch -= 3600 * 24 * 1000;
}
/* jump forward ~24 hours */
static void sig_rtfuzz_b(int v)
{
	epoch += 3600 * 24 * 1000;
}
#endif

bool arcan_event_feed(struct arcan_evctx* ctx,
	arcan_event_handler hnd, int* exit_code)
{
/* dead, but we weren't able to deal with it last time */
	if ((ctx->state_fl & EVSTATE_DEAD)){
		if (exit_code)
			*exit_code = ctx->exit_code;
		return false;
	}

	while (*ctx->front != *ctx->back){
/* slide, we forego _poll to cut down on one copy */
		arcan_event* ev = &ctx->eventbuf[ *(ctx->front) ];
		*(ctx->front) = (*(ctx->front) + 1) % ctx->eventbuf_sz;

		switch (ev->category){
			case EVENT_VIDEO:
				if (ev->vid.kind == EVENT_VIDEO_EXPIRE)
					arcan_video_deleteobject(ev->vid.source);
				else
					hnd(ev, 0);
			break;

			case EVENT_SYSTEM:
				if (ev->sys.kind == EVENT_SYSTEM_EXIT){
					ctx->state_fl |= EVSTATE_DEAD;
					ctx->exit_code = ev->sys.errcode;
					if (exit_code) *exit_code = ev->sys.errcode;
					break;
				}
			default:
				hnd(ev, 0);
			break;
		}
	}

	if (ctx->state_fl & EVSTATE_DEAD)
		return arcan_event_feed(ctx, hnd, exit_code);
	else
		return true;
}

bool arcan_event_add_source(
	struct arcan_evctx* ctx, int fd, mode_t mode, intptr_t otag)
{
	if (mode == O_RDWR)
		mode = POLLIN | POLLOUT;
	else if (mode == O_WRONLY)
		mode = POLLOUT;
	else if (mode == O_RDONLY)
		mode = POLLIN;

/* just update mode/tag? */
	for (size_t i = 0; i < 64; i++)
		if (evsrc_pollset[i].fd == fd){
			evsrc_meta[i].mode = mode;
			evsrc_meta[i].tag = otag;
			return true;
		}

/* allocate new, ffsll on the inverted bitmap gives the first free slot */
	uint64_t i = __builtin_ffsll(~evsrc_bitmap);
	if (!i)
		return false;

	i--;
	evsrc_pollset[i].fd = fd;
	evsrc_pollset[i].events = POLLERR | POLLHUP | mode;

	evsrc_meta[i].mode = mode;
	evsrc_meta[i].tag = otag;
	evsrc_bitmap |= (uint64_t)1 << i;

	return true;
}
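/* usage sketch, assuming 'infd' is a descriptor worth monitoring and 'tag'
 * maps back to some VM-side object:
 *
 *   arcan_event_add_source(arcan_event_defaultctx(), infd, O_RDONLY, tag);
 *   ...
 *   arcan_event_poll_sources(arcan_event_defaultctx(), 4);
 *
 * this would emit EVENT_SYSTEM_DATA_IN through denqueue when infd becomes
 * readable, waiting at most 4ms */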

void arcan_event_poll_sources(struct arcan_evctx* ctx, int timeout)
{
	ssize_t nelem = poll(evsrc_pollset, 64, timeout);
	if (nelem <= 0){
		if (timeout > 0)
			arcan_timesleep(timeout);
		return;
	}

	for (size_t i = 0; i < 64; i++){
		struct pollfd* ent = &evsrc_pollset[i];
		if (ent->fd <= 0 || !ent->revents)
			continue;

		struct arcan_event ev = (struct arcan_event){
			.category = EVENT_SYSTEM,
			.sys.data.fd = evsrc_pollset[i].fd,
			.sys.data.otag = evsrc_meta[i].tag
		};

/* Note that we send IN/OUT even in the case of failure. This is to force the
 * recipient to use normal error handling for read/write to react to a
 * monitored source failing. */
		if (ent->revents & POLLIN ||
			((ent->revents & (POLLERR | POLLHUP)) && (ent->events & POLLIN))){
			ev.sys.kind = EVENT_SYSTEM_DATA_IN;
			arcan_event_denqueue(ctx, &ev);
		}

/* This is subtle - the events here go direct to drain. That means that
 * arbitrarily many calls to add_source and del_source can happen between
 * these two, possibly changing the otag being used to map to VM objects.
 * Removing the source is safe though, as the pollset is cleared when the
 * source is removed, and this condition won't fire an extraneous event. */
		if (ent->revents & POLLOUT ||
			((ent->revents & (POLLERR | POLLHUP)) && (ent->events & POLLOUT))){
			ev.sys.kind = EVENT_SYSTEM_DATA_OUT;
			arcan_event_denqueue(ctx, &ev);
		}
	}
}
786 
787 /* Remove a source previously added through add_source. Will return true if
788  * the source existed and set the last known otag in *out if provided. */
arcan_event_del_source(struct arcan_evctx * ctx,int fd,intptr_t * out)789 bool arcan_event_del_source(struct arcan_evctx* ctx, int fd, intptr_t* out)
790 {
791 	for (uint64_t i = 0; i < 64; i++){
792 		if (evsrc_pollset[i].fd == fd){
793 			evsrc_pollset[i].fd = -1;
794 			evsrc_bitmap &= ~((uint64_t)1 << i);
795 			if (out)
796 				*out = evsrc_meta[i].tag;
797 			evsrc_meta[i] = (struct evsrc_meta){0};
798 			evsrc_pollset[i] = (struct pollfd){0};
799 			return true;
800 		}
801 	}
802 
803 	return false;
804 }

void arcan_event_setdrain(arcan_evctx* ctx, arcan_event_handler drain)
{
	if (!ctx->local)
		return;
	ctx->drain = drain;
}

void arcan_event_init(arcan_evctx* ctx)
{
/*
 * non-local (i.e. shmpage resident) event queues have a different
 * init approach (see frameserver_shmpage.c)
 */
	if (!ctx->local){
		return;
	}

/*
 * used for testing response to clock skew over time
 */
#if defined(_DEBUG) && defined(_CLOCK_FUZZ)
	sigaction(SIGRTMIN+0, &(struct sigaction) {.sa_handler = sig_rtfuzz_a}, NULL);
	sigaction(SIGRTMIN+1, &(struct sigaction) {.sa_handler = sig_rtfuzz_b}, NULL);
#endif

	const char* panicbutton = getenv("ARCAN_EVENT_SHUTDOWN");
	char* cp;

	if (panicbutton){
		cp = strchr(panicbutton, ':');
		if (cp){
			*cp = '\0';
			panic_keysym = strtol(panicbutton, NULL, 10);
			panic_keymod = strtol(cp+1, NULL, 10);
			*cp = ':';
		}
		else
			arcan_warning("ARCAN_EVENT_SHUTDOWN=%s, malformed value, "
				"expecting number:number (keysym:modifiers).\n", panicbutton);
	}

	epoch = arcan_timemillis() - ctx->c_ticks * ARCAN_TIMER_TICK;
	platform_event_init(ctx);
}

void arcan_led_removed(int devid)
{
	arcan_event_enqueue(arcan_event_defaultctx(),
		&(struct arcan_event){
		.category = EVENT_IO,
		.io.kind = EVENT_IO_STATUS,
		.io.devkind = EVENT_IDEVKIND_STATUS,
		.io.devid = devid,
		.io.input.status.domain = 1,
		.io.input.status.devkind = EVENT_IDEVKIND_LEDCTRL,
		.io.input.status.action = EVENT_IDEV_REMOVED
	});
}

void arcan_led_added(int devid, int refdev, const char* label)
{
	arcan_event ev = {
		.category = EVENT_IO,
		.io.kind = EVENT_IO_STATUS,
		.io.devkind = EVENT_IDEVKIND_STATUS,
		.io.devid = devid,
		.io.input.status.devref = refdev,
		.io.input.status.domain = 1,
		.io.input.status.devkind = EVENT_IDEVKIND_LEDCTRL,
		.io.input.status.action = EVENT_IDEV_ADDED
	};
	snprintf(ev.io.label, COUNT_OF(ev.io.label), "%s", label);
	arcan_event_enqueue(arcan_event_defaultctx(), &ev);
}

extern void platform_device_lock(int lockdev, bool lockstate);
void arcan_device_lock(int lockdev, bool lockstate)
{
	platform_device_lock(lockdev, lockstate);
}