1 /*
2  Arcan Shared Memory Interface, Interoperability definitions
3 
4  Copyright (c) 2014-2018, Bjorn Stahl
5  All rights reserved.
6 
7  Redistribution and use in source and binary forms,
8  with or without modification, are permitted provided that the
9  following conditions are met:
10 
11  1. Redistributions of source code must retain the above copyright notice,
12  this list of conditions and the following disclaimer.
13 
14  2. Redistributions in binary form must reproduce the above copyright notice,
15  this list of conditions and the following disclaimer in the documentation
16  and/or other materials provided with the distribution.
17 
18  3. Neither the name of the copyright holder nor the names of its contributors
19  may be used to endorse or promote products derived from this software without
20  specific prior written permission.
21 
22  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
26  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
27  OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  THE POSSIBILITY OF SUCH DAMAGE.
33 */
34 
35 #ifndef HAVE_ARCAN_SHMIF_INTEROP
36 #define HAVE_ARCAN_SHMIF_INTEROP
37 
/*
 * The version number works as tag and guard- bytes in the shared memory
 * page. It is set by arcan upon creation and verified along with the
 * offset- cookie during _integrity_check
 */
43 #define ASHMIF_VERSION_MAJOR 0
44 #define ASHMIF_VERSION_MINOR 14
45 
46 #ifndef LOG
47 #define LOG(X, ...) (fprintf(stderr, "[%lld]" X, arcan_timemillis(), ## __VA_ARGS__))
48 #endif
49 
/*
 * For porting the shmpage interface, these functions need to be implemented
 * and pulled in; it shouldn't be more complicated than mapping to the
 * corresponding platform/ functions. In the longer scope, these should be
 * factored out and replaced as well.
 */
56 #ifndef PLATFORM_HEADER
57 
58 #define BADFD -1
59 #include <sys/types.h>
60 #include <sys/stat.h>
61 #include <semaphore.h>
62 typedef int file_handle;
63 typedef pid_t process_handle;
64 typedef sem_t* sem_handle;
65 
66 long long int arcan_timemillis(void);
67 int arcan_sem_post(sem_handle sem);
68 file_handle arcan_fetchhandle(int insock, bool block);
69 bool arcan_pushhandle(int fd, int channel);
70 int arcan_sem_wait(sem_handle sem);
71 int arcan_sem_trywait(sem_handle sem);
72 int arcan_fdscan(int** listout);
73 #endif
74 
75 struct arcan_shmif_cont;
76 struct arcan_event;
77 
78 /*
79  * Note the different semantics in return- values for _poll versus _wait
80  */
81 
82 /*
83  * _poll will return as soon as possible with one of the following values:
84  *  > 0 when there are incoming events available,
85  *  = 0 when there are no incoming events available,
86  *  < 0 when the shmif_cont is unable to process events (terminal state)
87  */
88 int arcan_shmif_poll(struct arcan_shmif_cont*, struct arcan_event* dst);
89 
90 /*
91  * _wait will block an unspecified time and return:
92  * !0 when an event was successfully dequeued and placed in *dst
93  *  0 when the shmif_cont is unable to process events (terminal state)
94  */
95 int arcan_shmif_wait(struct arcan_shmif_cont*, struct arcan_event* dst);
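
/*
 * A minimal dispatch-loop sketch combining _wait and _poll according to the
 * return value semantics above ('handle_event' is a hypothetical application
 * handler, not part of this API):
 *
 *  struct arcan_event ev;
 *  while (arcan_shmif_wait(cont, &ev)){
 *   handle_event(cont, &ev);
 *   while (arcan_shmif_poll(cont, &ev) > 0)
 *    handle_event(cont, &ev);
 *  }
 */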
96 
/*
 * Wait for an incoming event for a maximum of roughly the requested time,
 * and write the amount of time left (if any) on the timer back into the
 * time argument.
 *
 * This is a convenience wrapper combining the behavior of some low precision
 * OS wait primitive and that of arcan_shmif_wait.
 */
105 int arcan_shmif_wait_timed(
106 	struct arcan_shmif_cont*, unsigned* time_us, struct arcan_event* dst);
107 
108 /*
109  * When integrating with libraries assuming that a window can be created
110  * synchronously, there is a problem with what to do with events that are
111  * incoming while waiting for the accept- or reject to our request.
112  *
 * The easiest approach is to simply skip forwarding events until we receive
 * the proper reply, since window allocations typically come during some init/
 * setup phase or as a low-frequency event response. The problem with this is
 * that any events in between will be dropped.
117  *
118  * The other option is to buffer events, and then flush them out,
119  * essentially creating an additional event-queue. This works EXCEPT for the
120  * cases where there are events that require file descriptor transfers.
121  *
122  * This function implements this buffering indefinitely (or until OOM),
123  * dup:ing/saving descriptors while waiting and forcing the caller to cleanup.
124  *
125  * The correct use of this function is as follows:
126  * (send SEGREQ event)
127  *
 * struct arcan_event acq_event;
 * struct arcan_event* evpool = NULL;
 * ssize_t evpool_sz;
 *
 * if (arcan_shmif_acquireloop(cont, &acq_event, &evpool, &evpool_sz)){
 * 	we have a valid segment
 * 	acq_event is valid, arcan_shmif_acquire(...);
 * }
 * else {
 * 	if (!evpool){
 * 		OOM
 * 	}
 * 	if (evpool_sz < 0){
 * 		shmif-state broken, only option is to terminate the connection
 * 		arcan_shmif_drop(cont);
 * 		return;
 * 	}
 * 	the segment request failed
 * }
 *
 * cleanup
 * for (size_t i = 0; i < evpool_sz; i++){
 *  forward_event(&evpool[i]);
 *  if (arcan_shmif_descrevent(&evpool[i]) &&
 *  	evpool[i].tgt.ioevs[0].iv != -1)
 *  		close(evpool[i].tgt.ioevs[0].iv);
 * }
154  *
155  * free(evpool);
156  *
157  * Be sure to check the cookie of the acq_event in the case of a
158  * TARGET_COMMAND_NEWSEGMENT as the server might have tried to preemptively
159  * push a new subsegment (clipboard management, output, ...)
160  */
161 bool arcan_shmif_acquireloop(struct arcan_shmif_cont*,
162 	struct arcan_event*, struct arcan_event**, ssize_t*);
163 
164 /*
165  * returns true if the provided event carries a file descriptor
166  */
167 bool arcan_shmif_descrevent(struct arcan_event*);
168 
169 /*
170  * retrieve the currently saved- context GUID
171  */
172 void arcan_shmif_guid(struct arcan_shmif_cont*, uint64_t[2]);
173 
/*
 * Take a subsegment carrying event and forward it to a possible default
 * implementation. This is an extreme corner case intended primarily for the
 * TUI implementation, where a user may request a subsegment that it doesn't
 * want to handle and we want to fall back to the default implementation that
 * is dormant inside shmif and would be activated when a subsegment request
 * isn't mapped.
 */
182 void arcan_shmif_defimpl(
183 	struct arcan_shmif_cont* newchild, int type, void* pref);
184 
/*
 * Try to enqueue the event into the outgoing queue.
 * If the context is set to lossless, enqueue may block, sleep (or spinlock).
 *
 * Returns the number of FREE slots left on success or a negative value on
 * failure. The purpose of the try- approach is to let the user distinguish
 * between necessary and merely "helpful" events (e.g. frame numbers, net
 * ping-pongs etc.)
 *
 * These are THREAD_UNSAFE, lock the context before using from multiple threads.
 */
196 int arcan_shmif_enqueue(
197 	struct arcan_shmif_cont*, const struct arcan_event* const);
198 
199 int arcan_shmif_tryenqueue(
200 	struct arcan_shmif_cont*, const struct arcan_event* const);
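
/*
 * A minimal sketch of the intended split between _enqueue and _tryenqueue
 * (the event variables are illustrative only):
 *
 *  arcan_shmif_enqueue(cont, &important_ev);
 *  if (arcan_shmif_tryenqueue(cont, &tick_ev) < 0){
 *   queue saturated or dead, fine to drop the merely 'helpful' event
 *  }
 */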
201 
202 /*
203  * Provide a text representation useful for logging, tracing and debugging
204  * purposes. If dbuf is NULL, a static buffer will be used (so for
 * thread safety, provide your own).
206  */
207 const char* arcan_shmif_eventstr(
208 	struct arcan_event* aev, char* dbuf, size_t dsz);
209 
210 /*
 * Pack the contents of the event into an implementation-specific byte
212  * buffer. Returns the amount of bytes consumed or -1 if the supplied buffer
213  * is too small.
214  */
215 ssize_t arcan_shmif_eventpack(
216 	const struct arcan_event* const aev, uint8_t* dbuf, size_t dbuf_sz);
217 
218 /*
 * Unpack an event from a byte buffer, returns the number of bytes consumed
220  * or -1 if the buffer did not contain a valid event.
221  */
222 ssize_t arcan_shmif_eventunpack(
223 	const uint8_t* const buf, size_t buf_sz, struct arcan_event* out);
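
/*
 * A pack/unpack round-trip sketch (the 256 byte buffer is an assumption,
 * size it to your transport):
 *
 *  uint8_t buf[256];
 *  ssize_t nb = arcan_shmif_eventpack(&ev, buf, sizeof(buf));
 *  if (-1 != nb){
 *   struct arcan_event dst;
 *   if (-1 != arcan_shmif_eventunpack(buf, nb, &dst)){
 *    dst now mirrors ev
 *   }
 *  }
 */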
224 
/*
 * Resolve the implementation- defined connection path based on a suggested
 * key. Returns -num if the resolved path couldn't fit in dsz (with abs(num)
 * indicating the number of truncated bytes), otherwise the number of
 * characters (excluding the terminating NUL) written to dst.
 */
231 int arcan_shmif_resolve_connpath(
232 	const char* key, char* dst, size_t dsz);
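
/*
 * Sketch of handling the truncation case (using the ARCAN_CONNPATH
 * environment as the key and PATH_MAX as the buffer size are assumptions):
 *
 *  char cpath[PATH_MAX];
 *  int rv = arcan_shmif_resolve_connpath(
 *   getenv("ARCAN_CONNPATH"), cpath, sizeof(cpath));
 *  if (rv < 0){
 *   buffer too small, -rv more bytes are needed
 *  }
 *  else if (rv > 0){
 *   cpath holds the resolved path
 *  }
 */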
233 
234 /*
235  * get the segment kind identifier from an existing connection
236  */
237 int arcan_shmif_segkind(struct arcan_shmif_cont* con);
238 
239 /*
240  * calculates a hash of the layout of the shmpage in order to detect subtle
241  * compiler mismatches etc.
242  */
243 uint64_t arcan_shmif_cookie(void);
244 
245 /*
246  * The following functions are simple lookup/unpack support functions for
247  * argument strings usually passed on the command-line to a newly spawned
248  * frameserver in a simple (utf-8) key=value\tkey=value type format.
249  */
250 struct arg_arr {
251 	char* key;
252 	char* value;
253 };
254 
255 /* take the input string and unpack it into an array of key-value pairs */
256 struct arg_arr* arg_unpack(const char*);
257 
258 /*
 * Look up the [ind]th (starting at 0) argument matching [key].
260  * Returns true if there was a matching key at the desired position.
261  *
262  * If [found] is provided, the corresponding value will be stored.
263  * If no key could be found OR the lookup failed, NULL will be stored instead.
264  *
265  * Example:
266  * ARCAN_ARG=test:test=1
267  * if (arg_lookup(myarg, "test", 1, &val)){
268  *    if (val){
269  *        val will be "1" here
270  *    }
271  * }
272  */
273 bool arg_lookup(struct arg_arr* arr,
274 	const char* key, unsigned short ind, const char** found);
275 
276 /*
 * Deallocate/free the resources bound to an arg_arr struct. Don't use this
 * on an arg_arr that comes from a shmif_open or shmif_args call, as the
 * normal context management functions will clean up after that one.
280  */
281 void arg_cleanup(struct arg_arr*);
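
/*
 * Sketch of the full unpack/lookup/cleanup flow for a manually supplied
 * argument string (i.e. one not owned by a shmif context):
 *
 *  struct arg_arr* args = arg_unpack("key=value\tflag");
 *  const char* val;
 *  if (args && arg_lookup(args, "key", 0, &val)){
 *   val is "value" here
 *  }
 *  arg_cleanup(args);
 */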
282 
/*
 * Ideally both rpath and wpath could be dropped when the semaphores become
 * futex- only, as the set now is too permissive to be comfortable
 */
287 #define SHMIF_PLEDGE_PREFIX \
288 	"stdio unix sendfd recvfd proc ps rpath wpath cpath tmppath unveil video"
289 
290 /*
291  * Attempt to reduce the exposed set of privileges and whitelist accessible
292  * filesystem paths. This is a best-effort that might result in a no-op
293  * or a lesser set of restrictions depending on the platform and context.
294  *
 * If a context is provided, the function may enqueue an event to indicate
296  * sandbox status to the server.
297  *
298  * [pledge] this argument matches either special preset strings of higher
299  *          roles, or the set of OpenBSD-pledge(2) syscall whitelists, with
300  *          the necessary set for opening and maintaining shmif context
301  *          changes being SHMIF_PLEDGE_PREFIX.
302  *
303  * [paths] is a NULL- terminated list of file-system paths and their
304  *         intended mode of operations
305  *
306  * [flag]  reserved for future use.
307  *
308  * alternate pledge templates:
309  *         shmif    - same as running the SHMIF_PLEDGE_PREFIX
310  *         minimal  - shared memory page
311  *         decode   - decode frameserver archetype
312  *         encode   - encode frameserver archetype
313  *         a12-srv  - a local shmif-server to network proxy
314  *         a12-cl   - a local shmif-client to network proxy
315  */
316 struct shmif_privsep_node {
317 	const char* path;
318 	const char* perm; /*r, w, x, c */
319 };
320 void arcan_shmif_privsep(struct arcan_shmif_cont* C,
321 	const char* pledge, struct shmif_privsep_node**, int opts);
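
/*
 * A minimal sketch of dropping privileges after setup, whitelisting a single
 * read-only path (the path and mode are illustrative only):
 *
 *  struct shmif_privsep_node node = {.path = "/usr/share/fonts", .perm = "r"};
 *  struct shmif_privsep_node* nodes[] = {&node, NULL};
 *  arcan_shmif_privsep(C, SHMIF_PLEDGE_PREFIX, nodes, 0);
 */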
322 
/*
 * Duplicates a descriptor and sets safe flags (e.g. CLOEXEC).
 * If [dstnum] is >= 0, it will ATTEMPT to duplicate to that specific number
 * (though this is NOT GUARANTEED).
 *
 * Returns a valid descriptor or -1 on error (with errno set according to
 * the dup() call family).
 */
331 int arcan_shmif_dupfd(int fd, int dstnum, bool blocking);
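
/*
 * Sketch: retain a descriptor received through a descriptor-carrying event
 * beyond the lifetime of the event itself ('recv_fd' stands in for the
 * descriptor extracted from such an event):
 *
 *  int fd = arcan_shmif_dupfd(recv_fd, -1, true);
 *  if (-1 != fd){
 *   fd is safe to keep and use
 *  }
 */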
332 
333 /*
 * Update the short (~32b) message that the connection will try to forward
 * should the client crash or be terminated in some other abnormal way.
336  */
337 void arcan_shmif_last_words(struct arcan_shmif_cont* cont, const char* msg);
338 
339 /*
340  * Take a pending HANDOVER segment allocation and inherit into a new
341  * process.
342  *
 * If env is empty, the ONLY environment variables that will propagate are
 * the handover relevant ones.
345  *
346  * [detach] is treated as a bitmap, where the bits set:
347  *   1: detach process (double-fork)
348  *   2: stdin (becomes /dev/null or similar)
349  *   3: stdout (becomes /dev/null or similar)
350  *   4: stderr (becomes /dev/null or similar)
351  * Other descriptors follow normal system specific inheritance semantics.
352  *
353  * The function returns the pid of the new process, or -1 in the event
354  * of a failure (e.g. invalid arguments, empty path or argv).
355  *
356  * NOTE:
357  * call from event dispatch immediately upon receiving a NEWSEGMENT with
358  * a HANDOVER type, the function will assume allocation responsibility.
359  *
360  * If handover_exec is called WITHOUT the corresponding handover event in
361  * ev, it is the context in [cont] that will be forwarded.
362  */
363 pid_t arcan_shmif_handover_exec(
364 	struct arcan_shmif_cont* cont, struct arcan_event ev,
365 	const char* path, char* const argv[], char* const env[],
366 	int detach);
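
/*
 * A minimal sketch of the dispatch-time use described above. The
 * 'ev_is_handover' check is a hypothetical stand-in for verifying that the
 * NEWSEGMENT event carries a HANDOVER type, and path/argv are illustrative:
 *
 *  if (ev.tgt.kind == TARGET_COMMAND_NEWSEGMENT && ev_is_handover(&ev)){
 *   char* const argv[] = {"/path/to/child", NULL};
 *   char* const env[] = {NULL};
 *   pid_t pid = arcan_shmif_handover_exec(cont, ev, argv[0], argv, env, 0);
 *   if (-1 == pid){
 *    spawn failed
 *   }
 *  }
 */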
367 
368 /*
 * Mark, for the current frame (this is reset on each sigvid signal), a region
 * of the buffer contents as updated. Note that this does not guarantee that
 * only the dirty regions will be synched to the next receiver in the chain;
 * the buffer contents are required to be fully intact.
 *
 * This requires that the segment has been resized with the flag
 * SHMIF_RHINT_SUBREGION (or _CHAIN) set, or it will have no effect.
376  *
377  * Depending on if the segment is in SHMIF_RHINT_SUBREGION or
378  * SHMIF_RHINT_SUBREGION_CHAIN, the behavior will be different.
379  *
380  * For SHMIF_RHINT_SUBREGION, the function returns 0 on success or -1 if the
381  * context is dead / broken. You are still required to use shmif_signal calls
382  * to synchronize the contents. Only the set of damaged regions will grow.
383  *
384  * [ Not yet implemented ]
 * This interface combines a number of latency and performance sensitive
 * usecases, with the ideal being to re-add the possibility of running ahead
 * of, or behind, the beam on a single buffered output.
388  *
389  * For SHMIF_RHINT_SUBREGION_CHAIN, the options to the flags function are:
390  * SHMIF_DIRTY_NONBLOCK, SHMIF_DIRTY_PARTIAL and SHMIF_DIRTY_SIGNAL.
391  * Bitmask behavior is: NONBLOCK | (PARTIAL ^ SIGNAL).
392  *
393  * If NONBLOCK is set, the function returns SHMIF_DIRTY_EWOULDBLOCK if the
394  * operation would block and in that case, the dirty region won't register.
395  *
 * If PARTIAL is set, the update will be synched to whatever output the
397  * connection may be mapped to but no other notification about the update will
398  * be triggered. Instead, if SIGNAL is set, when the region has been synched,
399  * other subsystems will be alerted as to a logical 'frame' update.
400  *
401  * This is used in order to allow 'per scanline' like updates but without
402  * storming subsystems that reason on a "logical buffer" update where the
403  * cost per frame might be too high to be invoked in smaller chunks.
404  *
405  * If the dirty region provides invalid constraints (x1 >= x2, y1 >= y2,
406  * x2 > cont->w, y2 > cont->h) the values will be clamped to the size of
407  * the segment.
408  */
409 int arcan_shmif_dirty(struct arcan_shmif_cont*,
410 	size_t x1, size_t y1, size_t x2, size_t y2, int fl);
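
/*
 * A minimal sketch for the SHMIF_RHINT_SUBREGION case, marking a single
 * updated row before the regular signal ('render_row' is a hypothetical
 * drawing step):
 *
 *  render_row(cont->vidp, cont->pitch, y);
 *  arcan_shmif_dirty(cont, 0, y, cont->w, y + 1, 0);
 *  arcan_shmif_signal(cont, SHMIF_SIGVID);
 */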
411 
412 /*
413  * This is primarily intended for clients with special timing needs due to
414  * latency concerns, typically games and multimedia.
415  *
 * Get an estimate of how many MICROSECONDS are left until the next ideal time
417  * to synch. These are relative to the current time, thus will tick down
418  * on multiple calls until a deadline has passed.
419  *
420  * The [cost_estimate] argument is an estimate on how much processing
421  * time you would need to prepare the next frame and it can simply be
422  * the value of how much was spent rendering the last one.
423  *
424  * Possible [errc] values:
425  *
426  *  -1, invalid / dead context
427  *  -2, context in a blocked state
428  *  -3, deadline information inaccurate, values returned are defaults.
429  *
430  * The optional [tolerance] argument provides an estimate as to how large
431  * margin for error that is reasonable (in MICROSECONDS). This MAY be
432  * derived from frame delivery timings or be adjusted by the consumer
433  * to balance latency, precision and accuracy.
434  *
435  * Thus, deadline - jitter = time left until synch should be called for
436  * a chance to have your contents be updated in time. This time can thus
437  * be used to delay synching and used as a timeout (pseudo-code):
438  *
439  * int left = arcan_shmif_deadline(cont, last_cost, &jout, &errc);
 * if (last_cost - left > jout){
441  *     while(poll_timeout(cont, left)){
442  *         process_event();
443  *     }
444  * }
445  */
446 int arcan_shmif_deadline(
447 	struct arcan_shmif_cont*, unsigned last_cost, int* jitter, int* errc);
448 
449 /*
450  * Asynchronously transfer the contents of [fdin] to [fdout]. This is
451  * mainly to encourage non-blocking implementation of the bchunk handler.
452  * The descriptors will be closed when the transfer is completed or if
453  * it fails.
454  *
 * If [sigfd] is provided (> 0), the result of the operation will be written
 * to it on finish as:
 *   -1 (read error)
 *   -2 (write error)
 *   -3 (alloc/arg error)
 *    0 (ok)
 */
463 void arcan_shmif_bgcopy(
464 	struct arcan_shmif_cont*, int fdin, int fdout, int sigfd, int flags);
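
/*
 * Sketch: copy an incoming descriptor to a local file and pick up the
 * completion status through a pipe (filename and open flags are
 * illustrative):
 *
 *  int sig[2];
 *  if (0 == pipe(sig)){
 *   int dst = open("out.bin", O_WRONLY | O_CREAT | O_TRUNC, 0600);
 *   arcan_shmif_bgcopy(cont, src_fd, dst, sig[1], 0);
 *   read the status code listed above from sig[0] when it becomes readable
 *  }
 */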
465 
466 /*
 * Used as a helper to avoid dealing with all of the permutations of
 * devkind == EVENT_IDEVKIND_MOUSE for datatype == EVENT_IDATATYPE_ANALOG.
 * If >true< is returned, the mouse state has changed since the last call.
 *
 * uint8_t mstate[ASHMIF_MSTATE_SZ];
 * arcan_shmif_mousestate_setup(acon, false, mstate);
 * ... in event loop ...
 * if (arcan_shmif_mousestate(acon, mstate, &inev, &out_x, &out_y)){
 *  react on mouse event
 * }
 *
 * If [inev] isn't provided and the state is set to absolute, the last known
 * values will be returned.
480  *
481  * Mouse button tracking, gestures, and splitting on .devid are not included
482  * in this helper function.
483  */
484 #define ASHMIF_MSTATE_SZ 32
485 void arcan_shmif_mousestate_setup(
486 	struct arcan_shmif_cont* con, bool relative, uint8_t* state);
487 
488 bool arcan_shmif_mousestate(
489 	struct arcan_shmif_cont*, uint8_t* state,
490 	struct arcan_event* inev, int* out_x, int* out_y);
491 
492 /*
493  * Part of auxiliary library, pulls in more dependencies and boiler-plate
494  * for setting up accelerated graphics
495  */
496 #ifdef WANT_ARCAN_SHMIF_HELPER
497 
498 /*
499  * Maintain both context and display setup. This is for cases where you don't
500  * want to set up EGL or similar support yourself. For cases where you want to
501  * do the EGL setup except for the NativeDisplay part, use _egl.
502  *
 * [Warning] stick to either _setup OR (_egl, _vk), don't mix
505  */
506 enum shmifext_setup_status {
507 	SHHIFEXT_UNKNOWN = 0,
508 	SHMIFEXT_NO_API,
509 	SHMIFEXT_NO_DISPLAY,
510 	SHMIFEXT_NO_EGL,
511 	SHMIFEXT_NO_CONFIG,
512 	SHMIFEXT_NO_CONTEXT,
513 	SHMIFEXT_ALREADY_SETUP,
514 	SHMIFEXT_OUT_OF_MEMORY,
515 	SHMIFEXT_OK
516 };
517 
518 enum shmifext_api {
519 	API_OPENGL = 0,
520 	API_GLES,
521 	API_VHK
522 };
523 
524 struct arcan_shmifext_setup {
525 	uint8_t red, green, blue, alpha, depth;
526 	uint8_t api, major, minor;
527 	uint64_t flags;
528 	uint64_t mask;
529 
530 /* 0 for self-managed fbo or imported buffers
531  * >0 for internal rendertarget that swaps out */
532 	uint8_t builtin_fbo;
533 	uint8_t supersample;
534 	uint8_t stencil;
535 
536 /* don't allocate a context or display at all - whatever context is
537  * active in the current thread will be used for allocations */
538 	uint8_t no_context;
539 	uint64_t shared_context;
540 
541 /* deprecated members, but don't want to break abi, while still
542  * generating compiler visible errors for api break */
543 	uint8_t deprecated_1;
544 	uint32_t deprecated_2;
545 
546 /* workaround for versioning snafu with _setup not taking sizeof(...) */
547 	uint8_t uintfl_reserve[6];
548 	uint64_t reserved[4];
549 };
550 
551 struct arcan_shmifext_setup arcan_shmifext_defaults(
552 	struct arcan_shmif_cont* con);
553 
554 enum shmifext_setup_status arcan_shmifext_setup(
555 	struct arcan_shmif_cont* con,
556 	struct arcan_shmifext_setup arg);
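
/*
 * A minimal sketch of bringing up an accelerated context with the defaults
 * and binding it before issuing any GL calls:
 *
 *  struct arcan_shmifext_setup cfg = arcan_shmifext_defaults(con);
 *  if (SHMIFEXT_OK != arcan_shmifext_setup(con, cfg)){
 *   fall back to regular software rendering through the vidp
 *  }
 *  else
 *   arcan_shmifext_make_current(con);
 */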
557 
558 /*
559  * Check if the connection is in an extended state or not.
560  * return values:
561  *-1 - not extended, handle passing disabled
562  * 0 - not extended
563  * 1 - extended, handle passing
564  * 2 - extended, readback fallback
565  */
566 int arcan_shmifext_isext(struct arcan_shmif_cont* con);
567 
568 /*
569  * for use with the shmifext_setup approach, try and find the
570  * requested symbol within the context of the accelerated graphics backend
571  */
572 void* arcan_shmifext_lookup(
573 	struct arcan_shmif_cont* con, const char*);
574 
575 /*
576  * Sometimes, multiple contexts, possibly bound to different threads, are
 * needed. _setup creates one context, and this function can be used to
578  * create additional ones.
579  *
580  * Returns 0 or a reference to use for shmifext_swap_context calls
581  */
582 unsigned arcan_shmifext_add_context(
583 	struct arcan_shmif_cont* con, struct arcan_shmifext_setup arg);
584 
585 /*
586  * Swap the current underlying context to use for _make_current calls.
 * The [context] argument comes from _add_context, though the first (_setup)
588  * one will always be 1 */
589 void arcan_shmifext_swap_context(
590 	struct arcan_shmif_cont* con, unsigned context);
591 
592 /*
593  * Allocate a buffer that is valid for passing output and that can be bound as
594  * a rendertarget color output. Sets *out to point to the buffer based on the
595  * context type provided on shmifext_setup. This needs to be freed and
596  * re-allocated on GPU switch (DEVICE_HINT) and after resizing.
597  *
598  * arcan_shmifext_setup with the built-in framebuffer will already allocate
599  * such a buffer if necessary. This call should only be needed if the client
600  * itself needs to allocate FBOs that should be passed on.
601  *
602  * It will fail if there is not enough memory or the OS platform has no such
603  * mechanism. In those cases, normal GL/VK allocations should be used.
604  */
605 struct shmifext_color_buffer {
606 	union {
607 		unsigned int gl;
608 	} id;
609 
610 	void* alloc_tags[4];
611 	int type;
612 };
613 
614 bool arcan_shmifext_alloc_color(
615 	struct arcan_shmif_cont* con, struct shmifext_color_buffer* out);
616 
617 /*
618  * Release resources tied to an allocation from arcan_shmifext_alloc_color.
619  */
620 void arcan_shmifext_free_color(
621 	struct arcan_shmif_cont* con, struct shmifext_color_buffer* in);
622 
623 /*
624  * Uses lookupfun to get the function pointers needed, writes back matching
625  * EGLNativeDisplayType into *display and tags *con as accelerated.
 * Can be called multiple times as a response to DEVICE_NODE calls or to
 * retrieve the display associated with con.
628  */
629 bool arcan_shmifext_egl(struct arcan_shmif_cont* con,
630 	void** display, void*(*lookupfun)(void*, const char*), void* tag);
631 
632 /*
633  * For the corner cases where you need access to the display/surface/context
634  * but don't want to detract from the _setup
635  */
636 bool arcan_shmifext_egl_meta(struct arcan_shmif_cont* con,
637 	uintptr_t* display, uintptr_t* surface, uintptr_t* context);
638 
639 /*
640  * Similar to extracting the display, surface, context and manually
641  * making it the current eglContext. If the setup has been called with
642  * builtin- FBO, it will also manage allocating and resizing FBO.
643  */
644 bool arcan_shmifext_make_current(struct arcan_shmif_cont* con);
645 
646 /*
647  * Free and destroy an associated context, display and internal buffers
648  * in order to stop using the connection for accelerated drawing.
649  */
650 bool arcan_shmifext_drop(struct arcan_shmif_cont* con);
651 
652 /*
653  * Similar to arcan_shmifext_drop, except the display and device connection
 * are kept alive, intended for building a new context with _setup later
655  */
656 bool arcan_shmifext_drop_context(struct arcan_shmif_cont* con);
657 
658 /*
659  * If headless setup uses a built-in FBO configuration, this function can be
660  * used to extract the opaque handles from it. These are only valid when the
661  * context is active (_make_current).
662  */
663 bool arcan_shmifext_gl_handles(struct arcan_shmif_cont* con,
664 	uintptr_t* frame, uintptr_t* color, uintptr_t* depth);
665 
666 /*
667  * Placeholder awaiting VK support
668  */
669 bool arcan_shmifext_vk(struct arcan_shmif_cont* con,
670 	void** display, void*(*lookupfun)(void*, const char*), void* tag);
671 
672 /*
673  * Set the rendertarget contained in the extended context as active.
674  */
675 void arcan_shmifext_bind(struct arcan_shmif_cont* con);
676 
677 /*
 * Switch the internal buffer passing state to the slow readback fallback
 * after a failed attempt to pass an accelerated buffer. This will be called
680  * automatically on an incoming BUFFER_FAIL event.
681  */
682 void arcan_shmifext_bufferfail(struct arcan_shmif_cont*, bool);
683 
684 /*
685  * Run the platform specific dance to convert a gl texture ID to a passable
686  * descriptor (shmif_signalhandle), note that only one texture should be 'in
687  * flight' (on both sides) at any one time, and calling this a second time
688  * invalidates the resources used by the last passed one.
689  *
690  * This is slated to be changed to use the same buffer-plane format as in
691  * import buffer.
692  */
693 bool arcan_shmifext_gltex_handle(
694 	struct arcan_shmif_cont* con,
695 	uintptr_t display, uintptr_t tex_id,
696 	int* dhandle, size_t* dstride, int* dfmt);
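
/*
 * Sketch of the conversion/passing step (forwarding through
 * arcan_shmif_signalhandle is one option, see also the managed
 * arcan_shmifext_signal further below):
 *
 *  int dhandle, dfmt;
 *  size_t dstride;
 *  if (arcan_shmifext_gltex_handle(con, 0, tex_id, &dhandle, &dstride, &dfmt))
 *   arcan_shmif_signalhandle(con, SHMIF_SIGVID, dhandle, dstride, dfmt);
 */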
697 
698 /*
699  * Retrieve a file-handle to the device that is currently used for the
 * acceleration, or -1 if it is unavailable. If [outdev] is !NULL, it will
701  * be set to point to a platform specific device structure. Outside very
702  * specialized uses (Xarcan), this should be ignored.
703  */
704 int arcan_shmifext_dev(
705 	struct arcan_shmif_cont* con, uintptr_t* outdev, bool clone);
706 
707 /*
708  * Retrieve the agp function environment from the currently active context.
709  * This should only really be useful for project-coupled tools like waybridge
710  * where access to the agp_ set of functions is also guaranteed.
711  */
712 struct agp_fenv* arcan_shmifext_getfenv(struct arcan_shmif_cont*);
713 
714 /*
 * Take an external buffer, e.g. from gbm or egl-streams, and import it into
 * the context as a substitution for its current backing store; this disables
 * rendering using the context.
718  *
719  * If the import is successful, any descriptors pointed to by the fd and fence
720  * fields will be owned by the implementation of the import function and will
721  * be closed when safe to do so.
722  *
723  * If [dst_store] is set, the default buffer of the context will not be used
724  * as the target store. Instead, [dst_store] will be updated to contain the
725  * imported buffer.
726  */
727 struct shmifext_buffer_plane {
728 	int fd;
729 	int fence;
730 	size_t w;
731 	size_t h;
732 
733 	union {
734 		struct {
735 			uint32_t format;
736 			uint64_t stride;
737 			uint64_t offset;
738 			uint32_t mod_hi;
739 			uint32_t mod_lo;
740 		} gbm;
741 	};
742 };
743 
744 /*
 * Convert a previously allocated color buffer (shmifext_alloc_color) into
746  * an exportable set of planes that can be used with 'signal_planes'
747  */
748 size_t arcan_shmifext_export_image(
749 	struct arcan_shmif_cont* con,
750 	uintptr_t display, uintptr_t tex_id,
751 	size_t plane_limit, struct shmifext_buffer_plane* planes);
752 
753 enum shmifext_buffer_format {
754 	SHMIFEXT_BUFFER_GBM = 0,
755 };
756 
757 bool arcan_shmifext_import_buffer(
758 	struct arcan_shmif_cont*,
759 	int format,
760 	struct shmifext_buffer_plane* planes,
761 	size_t n_planes,
762 	size_t buffer_plane_sz
763 );
764 
765 /* internal or advanced use (proxying planes) */
766 size_t arcan_shmifext_signal_planes(
767 	struct arcan_shmif_cont* c,
768 	int mask,
769 	size_t n_planes,
770 	struct shmifext_buffer_plane* planes
771 );
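
/*
 * Sketch combining alloc_color, export_image and signal_planes ('buf' is a
 * struct shmifext_color_buffer from arcan_shmifext_alloc_color, and using
 * SHMIF_SIGVID as the mask is an assumption):
 *
 *  struct shmifext_buffer_plane planes[4];
 *  size_t n = arcan_shmifext_export_image(c, 0, buf.id.gl, 4, planes);
 *  if (n)
 *   arcan_shmifext_signal_planes(c, SHMIF_SIGVID, n, planes);
 */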
772 
773 /*
774  * Similar behavior to signalhandle, but any conversion from the texture id
775  * in [tex_id] is handled internally in accordance with the last _egl
776  * call on [con]. The context where tex_id is valid should already be
777  * active.
778  *
779  * Display corresponds to the EGLDisplay where tex_id is valid, or
780  * 0 if the shmif_cont is managing the context.
781  *
 * If tex_id is SHMIFEXT_BUILTIN and the context was set up with FBO management
 * OR with vidp- texture streaming, the color attachment of the active FBO OR
 * the latest imported buffer will be used.
 *
 * Returns -1 on handle- generation/passing failure, otherwise the number
 * of milliseconds (clamped to INT_MAX) that elapsed from signal to ack.
789  */
790 #define SHMIFEXT_BUILTIN (~(uintptr_t)0)
791 int arcan_shmifext_signal(struct arcan_shmif_cont*,
792 	uintptr_t display, int mask, uintptr_t tex_id, ...);
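
/*
 * A minimal sketch with a builtin_fbo setup, after rendering the frame:
 *
 *  arcan_shmifext_signal(con, 0, SHMIF_SIGVID, SHMIFEXT_BUILTIN);
 */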
793 #endif
794 
795 #endif
796