1 #include "arcan_shmif.h"
2 #include "arcan_shmif_server.h"
3 #include <errno.h>
4 #include <stdatomic.h>
5 #include <math.h>
6 
7 /*
8  * This is needed in order to re-use some of the platform layer functions that
 * are rather heavy. This lib acts as a replacement for the things that are in
10  * engine/arcan_frameserver.c though.
11  *
12  * For that reason, we need to define some types that will actually never
13  * really be used here, pending refactoring of the whole thing. In that refact.
14  * we should share all the code between the engine- side and the server lib -
15  * no reason for the two implementations.
16  */
17 typedef int shm_handle;
18 struct arcan_aobj;
19 #include "arcan_math.h"
20 #include "arcan_general.h"
21 #include "arcan_frameserver.h"
22 #include "platform/shmif_platform.h"
23 
24 /*
 * temporary workaround, this symbol should really have its visibility lowered.
26  */
bool platform_video_auth(int cardn, unsigned token)
{
/* stub: this library never grants access through the auth token path */
	(void) cardn;
	(void) token;
	return false;
}
31 
/*
 * wrap the normal structure as we need to pass it to the platform frameserver
 * functions, but may need to have some tracking of our own.
 */
enum connstatus {
	DEAD = -1,          /* freed / unusable */
	BROKEN = 0,         /* unrecoverable failure, only free is meaningful */
	PENDING = 1,        /* listening connection point, no client attached yet */
	AUTHENTICATING = 2, /* socket paired, key exchange in progress */
	READY = 3           /* page mapped, events / buffers can be exchanged */
};

struct shmifsrv_client {
/* need a 'per client' eventqueue */
	struct arcan_frameserver* con; /* platform layer connection state */
	enum connstatus status;        /* lifecycle, see enum connstatus */
	size_t errors;                 /* bumped on queue corruption / failed enter */
	uint64_t cookie;               /* shmif ABI/version marker from arcan_shmif_cookie */
};
51 
alloc_client()52 static struct shmifsrv_client* alloc_client()
53 {
54 	struct shmifsrv_client* res = malloc(sizeof(struct shmifsrv_client));
55 	if (!res)
56 		return NULL;
57 
58 	*res = (struct shmifsrv_client){};
59 	res->status = BROKEN;
60 	res->cookie = arcan_shmif_cookie();
61 
62 	return res;
63 }
64 
shmifsrv_client_handle(struct shmifsrv_client * cl)65 int shmifsrv_client_handle(struct shmifsrv_client* cl)
66 {
67 	if (!cl || cl->status <= BROKEN)
68 		return -1;
69 
70 	return cl->con->dpipe;
71 }
72 
shmifsrv_client_type(struct shmifsrv_client * cl)73 enum ARCAN_SEGID shmifsrv_client_type(struct shmifsrv_client* cl)
74 {
75 	if (!cl || !cl->con)
76 		return SEGID_UNKNOWN;
77 	return cl->con->segid;
78 }
79 
80 struct shmifsrv_client*
shmifsrv_send_subsegment(struct shmifsrv_client * cl,int segid,int hints,size_t init_w,size_t init_h,int reqid,uint32_t idtok)81 	shmifsrv_send_subsegment(struct shmifsrv_client* cl,
82 	int segid, int hints, size_t init_w, size_t init_h, int reqid, uint32_t idtok)
83 {
84 	if (!cl || cl->status < READY)
85 		return NULL;
86 
87 	struct shmifsrv_client* res = alloc_client();
88 	if (!res)
89 		return NULL;
90 
91 	res->con = platform_fsrv_spawn_subsegment(
92 		cl->con, segid, hints, init_w, init_h, reqid, idtok);
93 	if (!res->con){
94 		free(res);
95 		return NULL;
96 	}
97 	res->cookie = arcan_shmif_cookie();
98 	res->status = READY;
99 
100 	return res;
101 }
102 
shmifsrv_allocate_connpoint(const char * name,const char * key,mode_t permission,int fd)103 struct shmifsrv_client* shmifsrv_allocate_connpoint(
104 	const char* name, const char* key, mode_t permission, int fd)
105 {
106 	shmifsrv_monotonic_tick(NULL);
107 	struct shmifsrv_client* res = alloc_client();
108 	if (!res)
109 		return NULL;
110 
111 	res->con =
112 		platform_fsrv_listen_external(name, key, fd, permission, 32, 32, 0);
113 
114 	if (!res->con){
115 		free(res);
116 		return NULL;
117 	}
118 
119 	res->cookie = arcan_shmif_cookie();
120 	res->status = PENDING;
121 
122 	if (key)
123 		strncpy(res->con->clientkey, key, PP_SHMPAGE_SHMKEYLIM-1);
124 
125 	return res;
126 }
127 
shmifsrv_inherit_connection(int sockin,int * statuscode)128 struct shmifsrv_client* shmifsrv_inherit_connection(int sockin, int* statuscode)
129 {
130 	if (-1 == sockin){
131 		if (statuscode)
132 			*statuscode = SHMIFSRV_INVALID_ARGUMENT;
133 		return NULL;
134 	}
135 
136 	struct shmifsrv_client* res = alloc_client();
137 	if (!res){
138 		close(sockin);
139 		if (statuscode)
140 			*statuscode = SHMIFSRV_OUT_OF_MEMORY;
141 		return NULL;
142 	}
143 
144 	res->con = platform_fsrv_preset_server(sockin, SEGID_UNKNOWN, 0, 0, 0);
145 
146 	if (statuscode)
147 		*statuscode = SHMIFSRV_OK;
148 
149 	res->cookie = arcan_shmif_cookie();
150 	res->status = AUTHENTICATING;
151 
152 	return res;
153 }
154 
shmifsrv_spawn_client(struct shmifsrv_envp env,int * clsocket,int * statuscode,uint32_t idtok)155 struct shmifsrv_client* shmifsrv_spawn_client(
156 	struct shmifsrv_envp env, int* clsocket, int* statuscode, uint32_t idtok)
157 {
158 	if (!clsocket){
159 		if (statuscode)
160 			*statuscode = SHMIFSRV_INVALID_ARGUMENT;
161 		return NULL;
162 	}
163 
164 	struct shmifsrv_client* res = alloc_client();
165 
166 	if (!res){
167 		if (statuscode)
168 			*statuscode = SHMIFSRV_OUT_OF_MEMORY;
169 		return NULL;
170 	}
171 
172 	int childend;
173 	res->con = platform_fsrv_spawn_server(
174 		SEGID_UNKNOWN, env.init_w, env.init_h, 0, &childend);
175 
176 	if (!res){
177 		if (statuscode)
178 			*statuscode = SHMIFSRV_OUT_OF_MEMORY;
179 		shmifsrv_free(res, SHMIFSRV_FREE_FULL);
180 		return NULL;
181 	}
182 
183 	*clsocket = res->con->dpipe;
184 	res->cookie = arcan_shmif_cookie();
185 	res->status = AUTHENTICATING;
186 
187 	if (statuscode)
188 		*statuscode = SHMIFSRV_OK;
189 
190 /* if path is provided we switch over to build/inherit mode */
191 	if (env.path){
192 		pid_t rpid = shmif_platform_execve(
193 			childend, res->con->shm.key,
194 			env.path, env.argv, env.envv, env.detach, NULL
195 		);
196 		close(childend);
197 
198 		if (-1 == rpid){
199 			if (statuscode)
200 				*statuscode = SHMIFSRV_EXEC_FAILED;
201 
202 			shmifsrv_free(res, SHMIFSRV_FREE_FULL);
203 			return NULL;
204 		}
205 
206 /* there is no API for returning / binding the pid here, the use for that seems
207  * rather fringe (possibly for kill like mechanics), if needed we should tie it
208  * to the context and add an accessor */
209 	}
210 
211 	return res;
212 }
213 
shmifsrv_dequeue_events(struct shmifsrv_client * cl,struct arcan_event * newev,size_t limit)214 size_t shmifsrv_dequeue_events(
215 	struct shmifsrv_client* cl, struct arcan_event* newev, size_t limit)
216 {
217 	if (!cl || cl->status < READY)
218 		return 0;
219 
220 	if (shmifsrv_enter(cl)){
221 		size_t count = 0;
222 		uint8_t front = cl->con->shm.ptr->parentevq.front;
223 		uint8_t back = cl->con->shm.ptr->parentevq.back;
224 		if (front > PP_QUEUE_SZ || back > PP_QUEUE_SZ){
225 			cl->errors++;
226 			shmifsrv_leave();
227 			return 0;
228 		}
229 
230 		while (count < limit && front != back){
231 			newev[count++] = cl->con->shm.ptr->parentevq.evqueue[front];
232 			front = (front + 1) % PP_QUEUE_SZ;
233 		}
234 		asm volatile("": : :"memory");
235 		__sync_synchronize();
236 		cl->con->shm.ptr->parentevq.front = front;
237 		arcan_sem_post(cl->con->esync);
238 		shmifsrv_leave();
239 		return count;
240 	}
241 	else{
242 		cl->errors++;
243 		return 0;
244 	}
245 }
246 
autoclock_frame(arcan_frameserver * tgt)247 static void autoclock_frame(arcan_frameserver* tgt)
248 {
249 	if (!tgt->clock.left)
250 		return;
251 
252 /*
253 	if (!tgt->clock.frametime)
254 		tgt->clock.frametime = arcan_frametime();
255 
256 	int64_t delta = arcan_frametime() - tgt->clock.frametime;
257 	if (delta < 0){
258 
259 	}
260 	else if (delta == 0)
261 		return;
262 
263 	if (tgt->clock.left <= delta){
264 		tgt->clock.left = tgt->clock.start;
265 		tgt->clock.frametime = arcan_frametime();
266 		arcan_event ev = {
267 			.category = EVENT_TARGET,
268 			.tgt.kind = TARGET_COMMAND_STEPFRAME,
269 			.tgt.ioevs[0].iv = delta / tgt->clock.start,
270 			.tgt.ioevs[1].iv = 1
271 		};
272 		platform_fsrv_pushevent(tgt, &ev);
273 	}
274 	else
275 		tgt->clock.left -= delta;
276 	*/
277 }
278 
shmifsrv_enqueue_event(struct shmifsrv_client * cl,struct arcan_event * ev,int fd)279 bool shmifsrv_enqueue_event(
280 	struct shmifsrv_client* cl, struct arcan_event* ev, int fd)
281 {
282 	if (!cl || cl->status < READY || !ev)
283 		return false;
284 
285 	if (fd != -1)
286 		return platform_fsrv_pushfd(cl->con, ev, fd) == ARCAN_OK;
287 	else
288 		return platform_fsrv_pushevent(cl->con, ev) == ARCAN_OK;
289 }
290 
shmifsrv_poll(struct shmifsrv_client * cl)291 int shmifsrv_poll(struct shmifsrv_client* cl)
292 {
293 	if (!cl || cl->status <= BROKEN){
294 		cl->status = BROKEN;
295 		return CLIENT_DEAD;
296 	}
297 
298 /* we go from PENDING -> BROKEN || AUTHENTICATING -> BROKEN || READY */
299 	switch (cl->status){
300 	case PENDING:{
301 		int sc = platform_fsrv_socketpoll(cl->con);
302 		if (-1 == sc){
303 			if (errno == EBADF){
304 				cl->status = BROKEN;
305 				return CLIENT_DEAD;
306 			}
307 			return CLIENT_NOT_READY;
308 		}
309 		cl->status = AUTHENTICATING;
310 	}
311 /* consumed one character at a time up to a fixed limit */
312 	case AUTHENTICATING:
313 		while (-1 == platform_fsrv_socketauth(cl->con)){
314 			if (errno == EBADF){
315 				cl->status = BROKEN;
316 				return CLIENT_DEAD;
317 			}
318 			else if (errno == EWOULDBLOCK){
319 				return CLIENT_NOT_READY;
320 			}
321 		}
322 		cl->status = READY;
323 	case READY:
324 /* check if resynch, else check if aready or vready */
325 		if (shmifsrv_enter(cl)){
326 			if (cl->con->shm.ptr->resized){
327 				if (-1 == platform_fsrv_resynch(cl->con)){
328 					cl->status = BROKEN;
329 					shmifsrv_leave();
330 					return CLIENT_DEAD;
331 				}
332 				return CLIENT_NOT_READY;
333 			}
334 			int a = !!(atomic_load(&cl->con->shm.ptr->aready));
335 			int v = !!(atomic_load(&cl->con->shm.ptr->vready));
336 			shmifsrv_leave();
337 			return
338 				(CLIENT_VBUFFER_READY * v) | (CLIENT_ABUFFER_READY * a);
339 		}
340 		else
341 			cl->status = BROKEN;
342 	break;
343 	default:
344 		return CLIENT_DEAD;
345 	}
346 	return CLIENT_NOT_READY;
347 }
348 
shmifsrv_free(struct shmifsrv_client * cl,int mode)349 void shmifsrv_free(struct shmifsrv_client* cl, int mode)
350 {
351 	if (!cl)
352 		return;
353 
354 	if (cl->status == PENDING)
355 		cl->con->dpipe = BADFD;
356 
357 	switch(mode){
358 	case SHMIFSRV_FREE_NO_DMS:
359 		cl->con->flags.no_dms_free = true;
360 	case SHMIFSRV_FREE_FULL:
361 		platform_fsrv_destroy(cl->con);
362 	break;
363 	case SHMIFSRV_FREE_LOCAL:
364 		platform_fsrv_destroy_local(cl->con);
365 	break;
366 	}
367 
368 	cl->status = DEAD;
369 	free(cl);
370 }
371 
shmifsrv_enter(struct shmifsrv_client * cl)372 bool shmifsrv_enter(struct shmifsrv_client* cl)
373 {
374 	jmp_buf tramp;
375 	if (0 != setjmp(tramp))
376 		return false;
377 
378 	platform_fsrv_enter(cl->con, tramp);
379 	return true;
380 }
381 
/* release the access guard installed by a successful shmifsrv_enter */
void shmifsrv_leave()
{
	platform_fsrv_leave();
}
386 
shmifsrv_client_protomask(struct shmifsrv_client * cl,unsigned mask)387 void shmifsrv_client_protomask(struct shmifsrv_client* cl, unsigned mask)
388 {
389 	if (!cl || !cl->con)
390 		return;
391 
392 	cl->con->metamask = mask;
393 }
394 
shmifsrv_video_step(struct shmifsrv_client * cl)395 void shmifsrv_video_step(struct shmifsrv_client* cl)
396 {
397 /* signal that we're done with the buffer */
398 	atomic_store_explicit(&cl->con->shm.ptr->vready, 0, memory_order_release);
399 	arcan_sem_post(cl->con->vsync);
400 
401 /* If the frameserver has indicated that it wants a frame callback every time
402  * we consume. This is primarily for cases where a client needs to I/O mplex
403  * and the semaphores doesn't provide that */
404 	if (cl->con->desc.hints & SHMIF_RHINT_VSIGNAL_EV){
405 		platform_fsrv_pushevent(cl->con, &(struct arcan_event){
406 			.category = EVENT_TARGET,
407 			.tgt.kind = TARGET_COMMAND_STEPFRAME,
408 			.tgt.ioevs[0].iv = 1
409 		});
410 	}
411 }
412 
413 /*
414  * The reference implementation for this is really in engine/arcan_frameserver
415  * with the vframe and push_buffer implementations in particular. Some of the
416  * changes is that we need to manage fewer states, like the rz_ack control.
417  */
shmifsrv_video(struct shmifsrv_client * cl)418 struct shmifsrv_vbuffer shmifsrv_video(struct shmifsrv_client* cl)
419 {
420 	struct shmifsrv_vbuffer res = {0};
421 	if (!cl || cl->status != READY)
422 		return res;
423 
424 	cl->con->desc.hints = cl->con->desc.pending_hints;
425 	res.flags.origo_ll = cl->con->desc.hints & SHMIF_RHINT_ORIGO_LL;
426 	res.flags.ignore_alpha = cl->con->desc.hints & SHMIF_RHINT_IGNORE_ALPHA;
427 	res.flags.subregion = cl->con->desc.hints & SHMIF_RHINT_SUBREGION;
428 	res.flags.srgb = cl->con->desc.hints & SHMIF_RHINT_CSPACE_SRGB;
429 	res.flags.tpack = cl->con->desc.hints & SHMIF_RHINT_TPACK;
430 	res.vpts = atomic_load(&cl->con->shm.ptr->vpts);
431 	res.w = cl->con->desc.width;
432 	res.h = cl->con->desc.height;
433 
434 /*
435  * should have a better way of calculating this taking all the possible fmts
436  * into account, becomes more relevant when we have different vchannel types.
437  */
438 	res.stride = res.w * ARCAN_SHMPAGE_VCHANNELS;
439 	res.pitch = res.w;
440 
441 /* vpending contains the latest region that was synched, so extract the ~vready
442  * mask to figure out which is the most recent buffer to work with in the case
443  * of 'n' buffering */
444 	int vready = atomic_load_explicit(
445 		&cl->con->shm.ptr->vready, memory_order_consume);
446 	vready = (vready <= 0 || vready > cl->con->vbuf_cnt) ? 0 : vready - 1;
447 
448 	int vmask = ~atomic_load_explicit(
449 		&cl->con->shm.ptr->vpending, memory_order_consume);
450 
451 	res.buffer = cl->con->vbufs[vready];
452 	res.region = atomic_load(&cl->con->shm.ptr->dirty);
453 
454 	return res;
455 }
456 
shmifsrv_process_event(struct shmifsrv_client * cl,struct arcan_event * ev)457 bool shmifsrv_process_event(struct shmifsrv_client* cl, struct arcan_event* ev)
458 {
459 	if (!cl || !ev || cl->status != READY)
460 		return false;
461 
462 	if (ev->category == EVENT_EXTERNAL){
463 		switch (ev->ext.kind){
464 
465 /* default behavior for bufferstream is to simply send the reject, we can look
466  * into other options later but for now the main client is the network setup
467  * and accelerated buffer management is far on the list there */
468 		case EVENT_EXTERNAL_BUFFERSTREAM:
469 			shmifsrv_enqueue_event(cl, &(struct arcan_event){
470 				.category = EVENT_TARGET,
471 				.tgt.kind = TARGET_COMMAND_BUFFER_FAIL
472 			}, -1);
473 
474 	/* just fetch and wipe */
475 			int handle = arcan_fetchhandle(cl->con->dpipe, false);
476 			close(handle);
477 
478 			return true;
479 		break;
480 /* need to track the type in order to be able to apply compression */
481 		case EVENT_EXTERNAL_REGISTER:
482 			if (cl->con->segid == SEGID_UNKNOWN){
483 				cl->con->segid = ev->ext.registr.kind;
484 				return false;
485 			}
486 		break;
487 		case EVENT_EXTERNAL_CLOCKREQ:
488 			if (cl->con->flags.autoclock && !ev->ext.clock.once){
489 				cl->con->clock.frame = ev->ext.clock.dynamic;
490 				cl->con->clock.left = cl->con->clock.start = ev->ext.clock.rate;
491 				return true;
492 			}
493 		break;
494 		default:
495 		break;
496 		}
497 	}
498 	return false;
499 }
500 
shmifsrv_audio(struct shmifsrv_client * cl,void (* on_buffer)(shmif_asample * buf,size_t n_samples,unsigned channels,unsigned rate,void * tag),void * tag)501 bool shmifsrv_audio(struct shmifsrv_client* cl,
502 	void (*on_buffer)(shmif_asample* buf,
503 		size_t n_samples, unsigned channels, unsigned rate, void* tag), void* tag)
504 {
505 	struct arcan_shmif_page* src = cl->con->shm.ptr;
506 	volatile int ind = atomic_load(&src->aready) - 1;
507 	volatile int amask = atomic_load(&src->apending);
508 
509 /* invalid indice, bad client */
510 	if (ind >= cl->con->abuf_cnt || ind < 0){
511 		return false;
512 	}
513 
514 /* not readyy but signaled */
515 	if (0 == amask || ((1 << ind) & amask) == 0){
516 		atomic_store_explicit(&src->aready, 0, memory_order_release);
517 		arcan_sem_post(cl->con->async);
518 		return true;
519 	}
520 
521 /* find oldest buffer */
522 	int i = ind, prev;
523 	do {
524 		prev = i;
525 		i--;
526 		if (i < 0)
527 			i = cl->con->abuf_cnt-1;
528 	} while (i != ind && ((1<<i)&amask) > 0);
529 
530 
531 /* forward to the callback */
532 	if (on_buffer && src->abufused[prev]){
533 		on_buffer(cl->con->abufs[prev], src->abufused[prev],
534 			cl->con->desc.channels, cl->con->desc.samplerate, tag);
535 	}
536 
537 /* mark as consumed */
538 	atomic_store(&src->abufused[prev], 0);
539 	int last = atomic_fetch_and_explicit(
540 		&src->apending, ~(1 << prev), memory_order_release);
541 
542 /* and release the client */
543 	atomic_store_explicit(&src->aready, 0, memory_order_release);
544 	arcan_sem_post(cl->con->async);
545 	return true;
546 }
547 
shmifsrv_tick(struct shmifsrv_client * cl)548 bool shmifsrv_tick(struct shmifsrv_client* cl)
549 {
550 /* want the event to be queued after resize so the possible reaction (i.e.
551 	bool alive = src->flags.alive && src->shm.ptr &&
552 		src->shm.ptr->cookie == arcan_shmif_cookie() &&
553 		platform_fsrv_validchild(src);
554 
555 	if (!fail && tick){
556 		if (0 >= --src->clock.left){
557 			src->clock.left = src->clock.start;
558 			platform_fsrv_pushevent(src, &(struct arcan_event){
559 				.category = EVENT_TARGET,
560 				.tgt.kind = TARGET_COMMAND_STEPFRAME,
561 				.tgt.ioevs[0].iv = 1,
562 				.tgt.ioevs[1].iv = 1
563 			});
564 		}
565 	}
566  */
567 	return true;
568 }
569 
570 static int64_t timebase, c_ticks;
shmifsrv_monotonic_tick(int * left)571 int shmifsrv_monotonic_tick(int* left)
572 {
573 	int64_t now = arcan_timemillis();
574 	int n_ticks = 0;
575 
576 	if (now < timebase)
577 		timebase = now - (timebase - now);
578 	int64_t frametime = now - timebase;
579 
580 	int64_t base = c_ticks * ARCAN_TIMER_TICK;
581 	int64_t delta = frametime - base;
582 
583 	if (delta > ARCAN_TIMER_TICK){
584 		n_ticks = delta / ARCAN_TIMER_TICK;
585 
586 /* safeguard against stalls or clock issues */
587 		if (n_ticks > ARCAN_TICK_THRESHOLD){
588 			shmifsrv_monotonic_rebase();
589 			return shmifsrv_monotonic_tick(left);
590 		}
591 
592 		c_ticks += n_ticks;
593 	}
594 
595 	if (left)
596 		*left = ARCAN_TIMER_TICK - delta;
597 
598 	return n_ticks;
599 }
600 
/* resynch the tick counter to 'now', e.g. after suspend or a clock jump */
void shmifsrv_monotonic_rebase()
{
	timebase = arcan_timemillis();
	c_ticks = 0;
}
606