1 /*
2 * Copyright: Björn Ståhl
3 * Description: A12 protocol state machine, main translation unit.
4 * Maintains connection state, multiplex and demultiplex then routes
5 * to the corresponding decoding/encode stages.
6 * License: 3-Clause BSD, see COPYING file in arcan source repository.
7 * Reference: https://arcan-fe.com
8 */
9 /* shared state machine structure */
10 #include <arcan_shmif.h>
11 #include <arcan_shmif_server.h>
12
13 #include <inttypes.h>
14 #include <string.h>
15 #include <math.h>
16 #include <assert.h>
17
18 #include "a12.h"
19 #include "a12_int.h"
20
21 #include "a12_decode.h"
22 #include "a12_encode.h"
23 #include "arcan_mem.h"
24 #include "external/chacha.c"
25 #include "external/x25519.h"
26
27 #include <sys/mman.h>
28 #include <sys/types.h>
29 #include <sys/types.h>
30 #include <sys/stat.h>
31 #include <unistd.h>
32 #include <errno.h>
33
34 int a12_trace_targets = 0;
35 FILE* a12_trace_dst = NULL;
36
/* Fixed header sizes (in bytes) for each packet/state type, indexed by the
 * type byte. Variable entries (EVENT) are patched at runtime in a12_init(). */
static int header_sizes[] = {
	MAC_BLOCK_SZ + 8 + 1, /* The outer frame: MAC + sequence number + type byte */
	CONTROL_PACKET_SIZE,
	0, /* EVENT size is filled in at first open */
	1 + 4 + 2, /* VIDEO partial: ch, stream, len */
	1 + 4 + 2, /* AUDIO partial: ch, stream, len */
	1 + 4 + 2, /* BINARY partial: ch, stream, len */
	MAC_BLOCK_SZ + 8 + 1, /* First packet server side */
	0
};
47
48 extern void arcan_random(uint8_t* dst, size_t);
49
a12int_header_size(int kind)50 size_t a12int_header_size(int kind)
51 {
52 assert(kind < COUNT_OF(header_sizes));
53 return header_sizes[kind];
54 }
55
56 static void unlink_node(struct a12_state*, struct blob_out*);
57
grow_array(uint8_t * dst,size_t * cur_sz,size_t new_sz,int ind)58 static uint8_t* grow_array(uint8_t* dst, size_t* cur_sz, size_t new_sz, int ind)
59 {
60 if (new_sz < *cur_sz)
61 return dst;
62
63 /* pick the nearest higher power of 2 for good measure */
64 size_t pow = 1;
65 while (pow < new_sz)
66 pow *= 2;
67
68 /* wrap around? give up */
69 if (pow < new_sz){
70 a12int_trace(A12_TRACE_SYSTEM, "error=grow_array:reason=limit");
71 DYNAMIC_FREE(dst);
72 *cur_sz = 0;
73 return NULL;
74 }
75
76 new_sz = pow;
77
78 a12int_trace(A12_TRACE_ALLOC,
79 "grow=queue:%d:from=%zu:to=%zu", ind, *cur_sz, new_sz);
80 uint8_t* res = DYNAMIC_REALLOC(dst, new_sz);
81 if (!res){
82 a12int_trace(A12_TRACE_SYSTEM, "error=grow_array:reason=malloc_fail");
83 DYNAMIC_FREE(dst);
84 *cur_sz = 0;
85 return NULL;
86 }
87
88 /* init the new region */
89 memset(&res[*cur_sz], '\0', new_sz - *cur_sz);
90
91 *cur_sz = new_sz;
92 return res;
93 }
94
/* never permit this to be traced in a normal build */
/* Hex-dump [sz] bytes of key material in [buf] under the trace label
 * [domain], prefixed with the local role ("server:"/"client:"). The whole
 * body is compiled out unless _DEBUG is defined. */
static void trace_crypto_key(bool srv, const char* domain, uint8_t* buf, size_t sz)
{
#ifdef _DEBUG
/* 3 chars per byte ("XX-"), +2 leaves room for the final NUL */
	char conv[sz * 3 + 2];
	for (size_t i = 0; i < sz; i++){
		sprintf(&conv[i * 3], "%02X%s", buf[i], i == sz - 1 ? "" : "-");
	}
	a12int_trace(A12_TRACE_CRYPTO, "%s%s:key=%s", srv ? "server:" : "client:", domain, conv);
#endif
}
106
/* set the LAST SEEN sequence number in a CONTROL message */
static void step_sequence(struct a12_state* S, uint8_t* outb)
{
/* bytes [0..7] of every control packet carry the last seqnr we processed */
	pack_u64(S->last_seen_seqnr, outb);
}
112
/* Zero-fill [outb] and populate the common CONTROL packet prefix:
 * last-seen sequence number [0..7], 8 bytes of entropy [8..15], current
 * output channel [16] and command byte [17]. The caller fills in any
 * command-specific payload after byte 17. */
static void build_control_header(struct a12_state* S, uint8_t* outb, uint8_t cmd)
{
	memset(outb, '\0', CONTROL_PACKET_SIZE);
	step_sequence(S, outb);
	arcan_random(&outb[8], 8);
	outb[16] = S->out_channel;
	outb[17] = cmd;
}
121
/* Mark the state machine as unrecoverably broken; append/unpack paths check
 * for STATE_BROKEN and become no-ops afterwards. */
static void fail_state(struct a12_state* S)
{
#ifndef _DEBUG
/* overwrite all relevant state, dealloc mac/chacha
 * NOTE(review): not implemented yet - key material stays in memory */
#endif
	S->state = STATE_BROKEN;
}
129
/* Build and queue a HELLO control packet carrying the x25519 public key
 * [pubk], the protocol version, the key-exchange [mode] (ephemeral vs
 * direct), with [entropy] filling the random-bytes slot of the header. */
static void send_hello_packet(struct a12_state* S,
	int mode, uint8_t pubk[static 32], uint8_t entropy[static 8])
{
/* construct the reply with the proper public key */
	uint8_t outb[CONTROL_PACKET_SIZE] = {0};
	step_sequence(S, outb);
	memcpy(&outb[8], entropy, 8);
	memcpy(&outb[21], pubk, 32);

/* channel-id is empty */
	outb[17] = COMMAND_HELLO;
	outb[18] = ASHMIF_VERSION_MAJOR;
	outb[19] = ASHMIF_VERSION_MINOR;

/* mode indicates if we have an ephemeral-Kp exchange first or if we go
 * straight to the real one, it is also reserved as a migration path if
 * a new ciphersuite will need to be added */
	outb[20] = mode;

/* send it back to client */
	a12int_append_out(S,
		STATE_CONTROL_PACKET, outb, CONTROL_PACKET_SIZE, NULL, 0);
}
153
/*
 * Used when a full byte buffer for a packet has been prepared, important
 * since it will also encrypt, generate MAC and add to buffer prestate.
 *
 * This is where more advanced and fair queueing should be made in order
 * to not have the bandwidth hungry channels (i.e. video) consume everything.
 * The rough idea is to have bins for a/v/b streams, with a priority on a/v
 * unless it is getting too 'old'. There are some complications:
 *
 * 1. stream cancellation, can only be done on non-delta/non-compressed
 *    so mostly usable for binary then
 * 2. control packets that are tied to an a/v/b frame
 *
 * Another issue is that the raw vframes are big and ugly, and here is the
 * place where we perform an unavoidable copy unless we want interleaving (and
 * then it becomes expensive to perform). Practically speaking it is not that
 * bad to encrypt accordingly, it is a stream cipher afterall, BUT having a
 * continous MAC screws with that. Now since we have a few bytes entropy and a
 * counter as part of the message, replay attacks won't work BUT any
 * reordering would then still need to account for rekeying.
 */
void a12int_append_out(struct a12_state* S, uint8_t type,
	uint8_t* out, size_t out_sz, uint8_t* prepend, size_t prepend_sz)
{
	if (S->state == STATE_BROKEN)
		return;

/*
 * QUEUE-slot here,
 * should also have the ability to probe the size of the queue slots
 * so that encoders can react on backpressure
 */
	a12int_trace(A12_TRACE_CRYPTO,
		"type=%d:size=%zu:prepend_size=%zu:ofs=%zu", type, out_sz, prepend_sz, S->buf_ofs);

/* grow write buffer if the block doesn't fit */
	size_t required = S->buf_ofs +
		header_sizes[STATE_NOPACKET] + out_sz + prepend_sz + 1;

	S->bufs[S->buf_ind] = grow_array(
		S->bufs[S->buf_ind],
		&S->buf_sz[S->buf_ind],
		required,
		S->buf_ind
	);

/* and if that didn't work, fatal */
	if (S->buf_sz[S->buf_ind] < required){
		a12int_trace(A12_TRACE_SYSTEM,
			"realloc failed: size (%zu) vs required (%zu)", S->buf_sz[S->buf_ind], required);
		S->state = STATE_BROKEN;
		return;
	}
	uint8_t* dst = S->bufs[S->buf_ind];

/* reserve space for the MAC and remember where it starts and ends */
	size_t mac_pos = S->buf_ofs;
	S->buf_ofs += MAC_BLOCK_SZ;
	size_t data_pos = S->buf_ofs;

/* MISSING OPTIMIZATION, extract n bytes of the cipherstream and apply copy
 * operation rather than in-place modification, align with MAC block size and
 * continuously update as we fetch as well */

/* 8 byte sequence number */
	pack_u64(S->current_seqnr++, &dst[S->buf_ofs]);
	S->buf_ofs += 8;

/* 1 byte command data */
	dst[S->buf_ofs++] = type;

/* any possible prepend-to-data block */
	if (prepend_sz){
		memcpy(&dst[S->buf_ofs], prepend, prepend_sz);
		S->buf_ofs += prepend_sz;
	}

/* and our data block */
	memcpy(&dst[S->buf_ofs], out, out_sz);
	S->buf_ofs += out_sz;

	size_t used = S->buf_ofs - data_pos;

/*
 * If we are the client and haven't sent the first authentication request
 * yet, setup the nonce part of the cipher to random and shorten the MAC.
 *
 * Thus the first packet has a half-length MAC and use the other half to
 * provide the nonce. This is mainly to reduce code complexity somewhat
 * and not have a different pre-header length for the first packet.
 */
	size_t mac_sz = MAC_BLOCK_SZ;
	if (S->authentic != AUTH_FULL_PK && !S->server && !S->cl_firstout){
		mac_sz >>= 1;
/* NOTE(review): &dst[mac_sz] is relative to the buffer start, not mac_pos -
 * this assumes buf_ofs was 0 when the first client packet is built (so that
 * mac_pos == 0); confirm nothing can be queued before it */
		arcan_random(&dst[mac_sz], mac_sz);
		S->cl_firstout = true;

		chacha_set_nonce(S->dec_state, &dst[mac_sz]);
		chacha_set_nonce(S->enc_state, &dst[mac_sz]);
		a12int_trace(A12_TRACE_CRYPTO, "kind=cipher:status=init_nonce");

		trace_crypto_key(S->server, "nonce", &dst[mac_sz], mac_sz);
/* don't forget to add the nonce to the first message MAC */
		blake3_hasher_update(&S->out_mac, &dst[mac_sz], mac_sz);
	}

/* apply stream-cipher to buffer contents - ETM */
	chacha_apply(S->enc_state, &dst[data_pos], used);

/* update MAC with encrypted contents */
	blake3_hasher_update(&S->out_mac, &dst[data_pos], used);

/* sample MAC and write to buffer pos, remember it for debugging - no need to
 * chain separately as 'finalize' is not really finalized */
	blake3_hasher_finalize(&S->out_mac, &dst[mac_pos], mac_sz);
	a12int_trace(A12_TRACE_CRYPTO, "kind=mac_enc:position=%zu", S->out_mac.counter);
	trace_crypto_key(S->server, "mac_enc", &dst[mac_pos], mac_sz);

	S->stats.b_out += out_sz + prepend_sz;

/* if we have set a function that will get the buffer immediately then we set
 * the internal buffering state, this is a short-path that can be used
 * immediately and then we reset it. */
	if (S->opts->sink){
		if (!S->opts->sink(dst, S->buf_ofs, S->opts->sink_tag)){
			fail_state(S);
		}
		S->buf_ofs = 0;
	}
}
284
reset_state(struct a12_state * S)285 static void reset_state(struct a12_state* S)
286 {
287 /* the 'reset' from an erroneous state is basically disconnect, just right
288 * now it finishes and let validation failures etc. handle that particular
289 * scenario */
290 S->left = header_sizes[STATE_NOPACKET];
291 if (S->state != STATE_1STSRV_PACKET)
292 S->state = STATE_NOPACKET;
293 S->decode_pos = 0;
294 S->in_channel = -1;
295 }
296
/*
 * Expand the shared secret [ssecret] (first [secret_len] bytes), plus an
 * optional [nonce], into the three symmetric keys used by the protocol via
 * a chained BLAKE3 KDF: mac = H(secret), client = H(mac), server = H(client).
 */
static void derive_encdec_key(
	const char* ssecret, size_t secret_len,
	uint8_t out_mac[static BLAKE3_KEY_LEN],
	uint8_t out_srv[static BLAKE3_KEY_LEN],
	uint8_t out_cl[static BLAKE3_KEY_LEN],
	uint8_t* nonce)
{
	blake3_hasher temp;
	blake3_hasher_init_derive_key(&temp, "arcan-a12 init-packet");
	blake3_hasher_update(&temp, ssecret, secret_len);

/* mix in the nonce when one is provided (not present for the first setup) */
	if (nonce){
		blake3_hasher_update(&temp, nonce, NONCE_SIZE);
	}

/* mac = H(ssecret_kdf) */
	blake3_hasher_finalize(&temp, out_mac, BLAKE3_KEY_LEN);

/* client = H(H(ssecret_kdf)) */
	blake3_hasher_update(&temp, out_mac, BLAKE3_KEY_LEN);
	blake3_hasher_finalize(&temp, out_cl, BLAKE3_KEY_LEN);

/* server = H(H(H(ssecret_kdf))) */
	blake3_hasher_update(&temp, out_cl, BLAKE3_KEY_LEN);
	blake3_hasher_finalize(&temp, out_srv, BLAKE3_KEY_LEN);
}
323
update_keymaterial(struct a12_state * S,char * secret,size_t len,uint8_t * nonce)324 static void update_keymaterial(
325 struct a12_state* S, char* secret, size_t len, uint8_t* nonce)
326 {
327 /* KDF mode for building the initial keys */
328 uint8_t mac_key[BLAKE3_KEY_LEN];
329 uint8_t srv_key[BLAKE3_KEY_LEN];
330 uint8_t cl_key[BLAKE3_KEY_LEN];
331 _Static_assert(BLAKE3_KEY_LEN >= MAC_BLOCK_SZ, "misconfigured blake3 size");
332 _Static_assert(BLAKE3_KEY_LEN == 16 || BLAKE3_KEY_LEN == 32, "misconfigured blake3 size");
333
334 /* the secret is only used for the first packet */
335 derive_encdec_key(secret, len, mac_key, srv_key, cl_key, nonce);
336
337 blake3_hasher_init_keyed(&S->out_mac, mac_key);
338 blake3_hasher_init_keyed(&S->in_mac, mac_key);
339
340 if (!S->dec_state){
341 S->dec_state = DYNAMIC_MALLOC(sizeof(struct chacha_ctx));
342 if (!S->dec_state){
343 fail_state(S);
344 return;
345 }
346 }
347
348 S->enc_state = DYNAMIC_MALLOC(sizeof(struct chacha_ctx));
349 if (!S->enc_state){
350 DYNAMIC_FREE(S->dec_state);
351 fail_state(S);
352 return;
353 }
354
355 /* depending on who initates the connection, the cipher key will be different,
356 *
357 * server encodes with srv_key and decodes with cl_key
358 * client encodes with cl_key and decodes with srv_key
359 *
360 * two keys derived from the same shared secret is preferred over different
361 * positions in the cipherstream to prevent bugs that could affect position
362 * stepping to accidentally cause multiple ciphertexts being produced from the
363 * same key at the same position.
364 *
365 * the cipher-state is incomplete as we still need to apply the nonce from the
366 * helo packet before the setup is complete. */
367 if (S->server){
368 trace_crypto_key(S->server, "enc_key", srv_key, BLAKE3_KEY_LEN);
369 trace_crypto_key(S->server, "dec_key", cl_key, BLAKE3_KEY_LEN);
370
371 chacha_setup(S->dec_state, cl_key, BLAKE3_KEY_LEN, 0, CIPHER_ROUNDS);
372 chacha_setup(S->enc_state, srv_key, BLAKE3_KEY_LEN, 0, CIPHER_ROUNDS);
373 }
374 else {
375 trace_crypto_key(S->server, "dec_key", srv_key, BLAKE3_KEY_LEN);
376 trace_crypto_key(S->server, "enc_key", cl_key, BLAKE3_KEY_LEN);
377
378 chacha_setup(S->enc_state, cl_key, BLAKE3_KEY_LEN, 0, CIPHER_ROUNDS);
379 chacha_setup(S->dec_state, srv_key, BLAKE3_KEY_LEN, 0, CIPHER_ROUNDS);
380 }
381
382 /* First setup won't have a nonce until one has been received. For that case,
383 * the key-dance is only to setup MAC - just reusing the same codepath for all
384 * keymanagement */
385 if (nonce){
386 trace_crypto_key(S->server, "state=set_nonce", nonce, NONCE_SIZE);
387 chacha_set_nonce(S->enc_state, nonce);
388 chacha_set_nonce(S->dec_state, nonce);
389 }
390 }
391
/* Allocate and initialize the shared client/server state machine from the
 * caller-supplied [opt] (copied, caller keeps ownership), with [srv]
 * selecting the server-side key direction. Returns NULL on OOM. */
static struct a12_state* a12_setup(struct a12_context_options* opt, bool srv)
{
	struct a12_state* res = DYNAMIC_MALLOC(sizeof(struct a12_state));
	if (!res)
		return NULL;
	*res = (struct a12_state){
		.server = srv
	};

	size_t len = 0;
	res->opts = DYNAMIC_MALLOC(sizeof(struct a12_context_options));
	if (!res->opts){
		DYNAMIC_FREE(res);
		return NULL;
	}
	memcpy(res->opts, opt, sizeof(struct a12_context_options));

/* fall back to a fixed, well-known PSK when no secret was provided */
	if (!res->opts->secret[0]){
		sprintf(res->opts->secret, "SETECASTRONOMY");
	}

	len = strlen(res->opts->secret);
	update_keymaterial(res, res->opts->secret, len, NULL);

/* easy-dump for quick debugging (i.e. cmp side vs side to find offset,
 * open/init/replay to step mac construction */
/* #define LOG_MAC_DATA */
#ifdef LOG_MAC_DATA
/* NOTE(review): mac_key is not in scope in this function (it lives inside
 * update_keymaterial), so enabling LOG_MAC_DATA will not compile as-is */
	FILE* keys = fopen("macraw.key", "w");
	fwrite(mac_key, BLAKE3_KEY_LEN, 1, keys);
	fclose(keys);

	if (srv){
		res->out_mac.log = fopen("srv.macraw.out", "w");
		res->in_mac.log = fopen("srv.macraw.in", "w");
	}
	else{
		res->out_mac.log = fopen("cl.macraw.out", "w");
		res->in_mac.log = fopen("cl.macraw.in", "w");
	}
#endif

/* magic cookie lets the public API entry points sanity-check the pointer */
	res->cookie = 0xfeedface;
	res->out_stream = 1;

	return res;
}
439
/* One-time global init: probe the packed size of an arcan event so the
 * EVENT entry in header_sizes[] can be filled in. Idempotent. */
static void a12_init()
{
	static bool init;
	if (init)
		return;

/* make one nonsense- call first to figure out the current packing size */
	uint8_t outb[512];
	ssize_t evsz = arcan_shmif_eventpack(
		&(struct arcan_event){.category = EVENT_IO}, outb, 512);

/* NOTE(review): evsz is not checked for a negative (failure) return -
 * confirm eventpack cannot fail for this fixed input */
	header_sizes[STATE_EVENT_PACKET] = evsz + SEQUENCE_NUMBER_SIZE + 1;
	init = true;
}
454
a12_server(struct a12_context_options * opt)455 struct a12_state* a12_server(struct a12_context_options* opt)
456 {
457 if (!opt)
458 return NULL;
459
460 a12_init();
461
462 struct a12_state* res = a12_setup(opt, true);
463 if (!res)
464 return NULL;
465
466 res->state = STATE_1STSRV_PACKET;
467 res->left = header_sizes[res->state];
468
469 return res;
470 }
471
/* Allocate a client-side state machine from [opt], generate or reuse the
 * x25519 keypair and queue the initial HELLO packet (ephemeral or direct
 * key-exchange depending on opt->disable_ephemeral_k). NULL on failure. */
struct a12_state* a12_client(struct a12_context_options* opt)
{
	if (!opt)
		return NULL;

	a12_init();
	int mode = 0;

	struct a12_state* S = a12_setup(opt, false);
	if (!S)
		return NULL;

/* use x25519? - pull out the public key from the supplied private and set
 * one or two rounds of exchange state, otherwise just mark the connection
 * as preauthenticated should the server reply use the correct PSK */
	uint8_t empty[32] = {0};
	uint8_t outpk[32];

/* client didn't provide an outbound key, generate one at random */
	if (memcmp(empty, opt->priv_key, 32) == 0){
		a12int_trace(A12_TRACE_SECURITY, "no_private_key:generating");
		x25519_private_key(opt->priv_key);
	}

	memcpy(S->keys.real_priv, opt->priv_key, 32);

/* single round, use correct key immediately - drop the priv-key from the input
 * arguments, keep it temporarily here until the shared secret can be
 * calculated */
	if (opt->disable_ephemeral_k){
		mode = HELLO_MODE_REALPK;
		S->authentic = AUTH_REAL_HELLO_SENT;
		memset(opt->priv_key, '\0', 32);
		x25519_public_key(S->keys.real_priv, outpk);
		trace_crypto_key(S->server, "cl-priv", S->keys.real_priv, 32);
	}

/* double-round, start by generating ephemeral key */
	else {
		mode = HELLO_MODE_EPHEMPK;
		x25519_private_key(S->keys.ephem_priv);
		x25519_public_key(S->keys.ephem_priv, outpk);
		S->authentic = AUTH_POLITE_HELLO_SENT;
	}

/* the nonce in the outbound won't be used, but it should look random still */
	uint8_t nonce[8];
	arcan_random(nonce, 8);
	trace_crypto_key(S->server, "hello-pub", outpk, 32);
	send_hello_packet(S, mode, outpk, nonce);

	return S;
}
525
/* Queue a SHUTDOWN control packet on the current output channel with
 * [last_words] as a truncated, NUL-terminated human-readable reason.
 * No-op on a NULL or already-freed state machine. */
void
a12_channel_shutdown(struct a12_state* S, const char* last_words)
{
	if (!S || S->cookie != 0xfeedface){
		return;
	}

	uint8_t outb[CONTROL_PACKET_SIZE] = {0};
	step_sequence(S, outb);
	outb[16] = S->out_channel;
	outb[17] = COMMAND_SHUTDOWN;
/* payload starts at byte 18; -19 keeps the final packet byte zero */
	snprintf((char*)(&outb[18]), CONTROL_PACKET_SIZE - 19, "%s", last_words);

	a12int_trace(A12_TRACE_SYSTEM, "channel open, add control packet");
	a12int_append_out(S, STATE_CONTROL_PACKET, outb, CONTROL_PACKET_SIZE, NULL, 0);
}
542
543 void
a12_channel_close(struct a12_state * S)544 a12_channel_close(struct a12_state* S)
545 {
546 if (!S || S->cookie != 0xfeedface){
547 return;
548 }
549
550 a12int_encode_drop(S, S->out_channel, false);
551 a12int_decode_drop(S, S->out_channel, false);
552
553 if (S->channels[S->out_channel].active){
554 S->channels[S->out_channel].cont = NULL;
555 S->channels[S->out_channel].active = false;
556 }
557
558 a12int_trace(A12_TRACE_SYSTEM, "closing channel (%d)", S->out_channel);
559 }
560
561 bool
a12_free(struct a12_state * S)562 a12_free(struct a12_state* S)
563 {
564 if (!S || S->cookie != 0xfeedface){
565 return false;
566 }
567
568 for (size_t i = 0; i < 256; i++){
569 if (S->channels[S->out_channel].active){
570 a12int_trace(A12_TRACE_SYSTEM, "free with channel (%zu) active", i);
571 return false;
572 }
573 }
574
575 if (S->prepend_unpack){
576 DYNAMIC_FREE(S->prepend_unpack);
577 S->prepend_unpack = NULL;
578 S->prepend_unpack_sz = 0;
579 }
580
581 a12int_trace(A12_TRACE_ALLOC, "a12-state machine freed");
582 DYNAMIC_FREE(S->bufs[0]);
583 DYNAMIC_FREE(S->bufs[1]);
584 DYNAMIC_FREE(S->opts);
585
586 *S = (struct a12_state){};
587 S->cookie = 0xdeadbeef;
588
589 DYNAMIC_FREE(S);
590 return true;
591 }
592
/* MAC-then-decrypt helper: mix [sz] bytes of ciphertext in [buf] into the
 * incoming MAC [hash], then decrypt in place when a cipher [ctx] is set.
 * [source] tags the trace output with the calling stage. */
static void update_mac_and_decrypt(const char* source,
	blake3_hasher* hash, struct chacha_ctx* ctx, uint8_t* buf, size_t sz)
{
	a12int_trace(A12_TRACE_CRYPTO, "src=%s:mac_update=%zu", source, sz);
	blake3_hasher_update(hash, buf, sz);

/* no cipher context yet means the data is still plaintext */
	if (!ctx)
		return;

	chacha_apply(ctx, buf, sz);
}
601
602 /*
603 * NOPACKET:
604 * MAC
605 * command byte
606 */
process_nopacket(struct a12_state * S)607 static void process_nopacket(struct a12_state* S)
608 {
609 /* save MAC tag for later comparison when we have the final packet */
610 memcpy(S->last_mac_in, S->decode, MAC_BLOCK_SZ);
611 trace_crypto_key(S->server, "ref_mac", S->last_mac_in, MAC_BLOCK_SZ);
612 update_mac_and_decrypt(__func__, &S->in_mac, S->dec_state, &S->decode[MAC_BLOCK_SZ], 9);
613
614 /* remember the last sequence number of the packet we processed */
615 unpack_u64(&S->last_seen_seqnr, &S->decode[MAC_BLOCK_SZ]);
616 if (S->last_seen_seqnr <= S->current_seqnr)
617 S->stats.packets_pending = S->last_seen_seqnr - S->current_seqnr;
618
619 /* and finally the actual type in the inner block */
620 int state_id = S->decode[MAC_BLOCK_SZ + 8];
621
622 if (state_id >= STATE_BROKEN){
623 a12int_trace(A12_TRACE_SYSTEM, "state=broken:unknown_command=%"PRIu8, S->state);
624 S->state = STATE_BROKEN;
625 return;
626 }
627
628 /* transition to state based on type, from there we know how many bytes more
629 * we need to process before the MAC can be checked */
630 S->state = state_id;
631 a12int_trace(A12_TRACE_TRANSFER, "seq=%"PRIu64
632 ":left=%"PRIu16":state=%"PRIu8, S->last_seen_seqnr, S->left, S->state);
633 S->left = header_sizes[S->state];
634 S->decode_pos = 0;
635 }
636
/*
 * FIRST:
 * MAC[8]
 * Nonce[8]
 * command byte (always control)
 *
 * Server-side handling of the very first client packet: recover the cipher
 * nonce from the second half of the (shortened) MAC slot, prime the cipher
 * states and verify the decrypted type byte matches a control packet.
 */
static void process_srvfirst(struct a12_state* S)
{
/* only foul play could bring us here */
	if (S->authentic > AUTH_REAL_HELLO_SENT){
		fail_state(S);
		return;
	}

/* first client packet carries a half-length MAC; the freed half is the nonce */
	size_t mac_sz = MAC_BLOCK_SZ >> 1;
	size_t nonce_sz = 8;
	uint8_t nonce[nonce_sz];

	a12int_trace(A12_TRACE_CRYPTO, "kind=mac:status=half_block");
	memcpy(S->last_mac_in, S->decode, mac_sz);
	memcpy(nonce, &S->decode[mac_sz], nonce_sz);

/* read the rest of the control packet */
	S->authentic = AUTH_SERVER_HBLOCK;
	S->left = CONTROL_PACKET_SIZE;
	S->state = STATE_CONTROL_PACKET;
	S->decode_pos = 0;

/* update MAC calculation with nonce and seqn+command byte */
	blake3_hasher_update(&S->in_mac, nonce, nonce_sz);
	blake3_hasher_update(&S->in_mac, &S->decode[mac_sz+nonce_sz], 8 + 1);

/* missing decode state means the PSK key setup never completed */
	if (!S->dec_state){
		a12int_trace(A12_TRACE_SECURITY, "srvfirst:no_decode");
		fail_state(S);
		return;
	}

	chacha_set_nonce(S->dec_state, nonce);
	chacha_set_nonce(S->enc_state, nonce);

	a12int_trace(A12_TRACE_CRYPTO, "kind=cipher:status=init_nonce");
	trace_crypto_key(S->server, "nonce", nonce, nonce_sz);

/* decrypt command byte and seqn - a wrong type byte after decryption means
 * the two sides derived different keys (mismatched secret) or bad nonce */
	size_t base = mac_sz + nonce_sz;
	chacha_apply(S->dec_state, &S->decode[base], 9);
	if (S->decode[base + 8] != STATE_CONTROL_PACKET){
		a12int_trace(A12_TRACE_CRYPTO, "kind=error:status=bad_key_or_nonce");
		fail_state(S);
		return;
	}
}
690
/* Handle an incoming CANCELSTREAM for [streamid] with [reason], where
 * [stype] selects the stream class: 0 = video, 1 = audio, 2+ = binary. */
static void command_cancelstream(
	struct a12_state* S, uint32_t streamid, uint8_t reason, uint8_t stype)
{
	struct blob_out* node = S->pending;
	a12int_trace(A12_TRACE_SYSTEM, "stream_cancel:%"PRIu32":%"PRIu8, streamid, reason);

/* the other end indicated that the current codec or data source is broken,
 * propagate the error to the client (if in direct passing mode) otherwise just
 * switch the encoder for next frame - any recoverable decode error should
 * really be in h264 et al. now so assume that. There might also be a point
 * in force-inserting a RESET/STEPFRAME */
	if (stype == 0){
		if (reason == STREAM_CANCEL_DECODE_ERROR){
			a12int_trace(A12_TRACE_VIDEO,
				"kind=decode_degrade:codec=h264:reason=sink rejected format");
			S->advenc_broken = true;
		}

/* other reasons means that the image contents is already known or too dated,
 * currently just ignore that - when we implement proper image hashing and can
 * use that for known types (cursor, ...) then reconsider */
		return;
	}
/* audio stream cancellation is currently ignored */
	else if (stype == 1){
		return;
	}

/* try the blobs first - unlink the matching pending outbound transfer */
	while (node){
		if (node->streamid == streamid){
			a12int_trace(A12_TRACE_BTRANSFER,
				"kind=cancelled:stream=%"PRIu32":source=remote", streamid);
			unlink_node(S, node);
			return;
		}
		node = node->next;
	}

}
730
/* Handle a BINARYSTREAM announce on the control channel: register the
 * incoming binary transfer, forward an INITIALIZE event to the registered
 * binary handler and cancel the stream if the handler rejects it or
 * already has the contents cached. */
static void command_binarystream(struct a12_state* S)
{
/*
 * unpack / validate header
 */
	uint8_t channel = S->decode[16];
	struct binary_frame* bframe = &S->channels[channel].unpack_state.bframe;

/*
 * sign of a very broken client (or state tracking), starting a new binary
 * stream on a channel where one already exists.
 */
	if (bframe->active){
		a12int_trace(A12_TRACE_SYSTEM,
			"kind=error:source=binarystream:kind=EEXIST:ch=%d", (int) channel);
		a12_stream_cancel(S, channel);
		bframe->active = false;
/* NOTE(review): descriptor dropped without close() - presumably
 * a12_stream_cancel already handed it to the binary handler; verify */
		if (bframe->tmp_fd > 0)
			bframe->tmp_fd = -1;
		return;
	}

/* layout: [18..21] stream-id, [22..29] size, [30] type, [35..50] checksum */
	uint32_t streamid;
	unpack_u32(&streamid, &S->decode[18]);
	bframe->streamid = streamid;
	unpack_u64(&bframe->size, &S->decode[22]);
	bframe->type = S->decode[30];
	memcpy(bframe->checksum, &S->decode[35], 16);
	bframe->tmp_fd = -1;

	bframe->active = true;
	a12int_trace(A12_TRACE_BTRANSFER,
		"kind=header:stream=%"PRId64":left=%"PRIu64":ch=%d",
		bframe->streamid, bframe->size, channel
	);

/*
 * If there is a checksum, ask the local cache process and see if we know about
 * it already, if so, cancel the binary stream and substitute in the copy we
 * get from the cache, but still need to process and discard incoming packages
 * in the meanwhile.
 *
 * This is the point where, for non-streams, provide a table of hashes first
 * and support resume at the first missing or broken hash
 */
	int sc = A12_BHANDLER_DONTWANT;
	struct a12_bhandler_meta bm = {
		.state = A12_BHANDLER_INITIALIZE,
		.known_size = bframe->size,
		.streamid = bframe->streamid,
		.fd = -1
	};

/* let the consumer decide: it may return a staging descriptor to write into */
	if (S->binary_handler){
		struct a12_bhandler_res res = S->binary_handler(S, bm, S->binary_handler_tag);
		bframe->tmp_fd = res.fd;
		sc = res.flag;
	}

	if (sc == A12_BHANDLER_DONTWANT || sc == A12_BHANDLER_CACHED){
		a12_stream_cancel(S, channel);
		a12int_trace(A12_TRACE_BTRANSFER,
			"kind=reject:stream=%"PRId64":ch=%d", bframe->streamid, channel);
	}
}
796
/* Mark the in-progress video stream on [channel] as discarded and queue a
 * CANCELSTREAM control packet carrying [reason] back to the sender. */
void a12_vstream_cancel(struct a12_state* S, uint8_t channel, int reason)
{
	uint8_t outb[CONTROL_PACKET_SIZE] = {0};
	step_sequence(S, outb);
	struct video_frame* vframe = &S->channels[channel].unpack_state.vframe;
/* commit == 255 marks the frame so remaining packets get discarded */
	vframe->commit = 255;

/* note that the cancel itself does not affect the congestion stats, it is when
 * we get a frame ack that is within the frame_window that the pending set is
 * decreased */
	outb[16] = channel;
	outb[17] = COMMAND_CANCELSTREAM;
/* NOTE(review): stream-id is hardcoded to 1 here rather than taken from the
 * vframe - confirm against the video stream-id allocation scheme */
	pack_u32(1, &outb[18]);
	outb[22] = reason;
	outb[23] = STREAM_TYPE_VIDEO;

	a12int_append_out(S, STATE_CONTROL_PACKET, outb, CONTROL_PACKET_SIZE, NULL, 0);
}
815
/* Cancel the active binary stream (if any) on [channel]: queue a
 * CANCELSTREAM control packet, clear the local unpack state and notify the
 * registered binary handler so it can dispose of any staging descriptor. */
void a12_stream_cancel(struct a12_state* S, uint8_t channel)
{
	uint8_t outb[CONTROL_PACKET_SIZE] = {0};
	step_sequence(S, outb);
	struct binary_frame* bframe = &S->channels[channel].unpack_state.bframe;

/* API misuse, trying to cancel a stream that is not active */
	if (!bframe->active)
		return;

	outb[16] = channel;
	outb[17] = COMMAND_CANCELSTREAM;
	pack_u32(bframe->streamid, &outb[18]); /* [18 .. 21] stream-id */
	outb[23] = STREAM_TYPE_BINARY;
	bframe->active = false;
/* NOTE(review): streamid is cleared before the handler meta below is built,
 * so the handler (and the trace) observe streamid == -1 rather than the
 * cancelled id - confirm that is intended */
	bframe->streamid = -1;
	a12int_append_out(S, STATE_CONTROL_PACKET, outb, CONTROL_PACKET_SIZE, NULL, 0);

/* forward the cancellation request to the eventhandler, due to the active tracking
 * we are protected against bad use (handler -> cancel -> handler) */
	if (S->binary_handler){
		struct a12_bhandler_meta bm = {
			.fd = bframe->tmp_fd,
			.state = A12_BHANDLER_CANCELLED,
			.streamid = bframe->streamid,
			.channel = channel
		};
		S->binary_handler(S, bm, S->binary_handler_tag);
	}

	a12int_trace(A12_TRACE_BTRANSFER,
		"kind=cancelled:ch=%"PRIu8":stream=%"PRId64, channel, bframe->streamid);
	bframe->tmp_fd = -1;
}
850
/* Unpack an AUDIOFRAME header arriving on the control channel and prime
 * the per-channel decode state for the raw samples that follow. */
static void command_audioframe(struct a12_state* S)
{
	uint8_t channel = S->decode[16];
	struct audio_frame* aframe = &S->channels[channel].unpack_state.aframe;

	unpack_u32(&aframe->id, &S->decode[18]);
	aframe->format = S->decode[22];
	aframe->encoding = S->decode[23];
/* NOTE(review): channels re-reads byte [22] - the same byte consumed as
 * 'format' above - verify against the audioframe packet layout */
	aframe->channels = S->decode[22];
	unpack_u16(&aframe->nsamples, &S->decode[24]);
	unpack_u32(&aframe->rate, &S->decode[26]);
	S->in_channel = -1;

/* developer error (or malicious client), set to skip decode/playback */
	if (!S->channels[channel].active){
		a12int_trace(A12_TRACE_SYSTEM,
			"no segment mapped on channel %d", (int)channel);
		aframe->commit = 255;
		return;
	}

/* this requires an extended resize (theoretically, practically not),
 * and in those cases we want to copy-out the vbuffer state if set and
	if (cont->samplerate != aframe->rate){
		a12int_trace(A12_TRACE_MISSING,
			"rate mismatch: %zu <-> %"PRIu32, cont->samplerate, aframe->rate);
	}
 */

/* format should be paired against AUDIO_SAMPLE_TYPE
	if (aframe->channels != ARCAN_SHMIF_ACHANNELS){
		a12int_trace(A12_TRACE_MISSING, "channel format repack");
	}
 */
/* just plug the samples into the normal abuf- as part of the shmif-cont */
/* normal dynamic rate adjustment compensating for clock drift etc. go here */
}
888
/*
 * used if a12_set_destination_raw has been set for the channel, some of the
 * other metadata (typically cont->flags) have already been set - only the parts
 * that take a resize request is considered
 */
static void update_proxy_vcont(
	struct a12_channel* channel, struct video_frame* vframe)
{
/* NOTE(review): the fail path below dereferences cont unconditionally -
 * assumes channel->cont is always valid in raw mode; confirm with callers */
	struct arcan_shmif_cont* cont = channel->cont;
	if (!channel->raw.request_raw_buffer)
		goto fail;

/* ask the consumer for a destination buffer matching the new frame size */
	cont->vidp = channel->raw.request_raw_buffer(vframe->sw, vframe->sh,
		&channel->cont->stride, channel->cont->hints, channel->raw.tag);

	cont->pitch = channel->cont->stride / sizeof(shmif_pixel);
	cont->w = vframe->sw;
	cont->h = vframe->sh;

	if (!cont->vidp)
		goto fail;

	return;

/* zero the context dimensions and mark the frame as discarded */
fail:
	cont->w = 0;
	cont->h = 0;
	vframe->commit = 255;
}
918
/* Raw-destination counterpart for audio: request an audio buffer from the
 * consumer callback sized for the incoming frame. Returns false (and marks
 * the frame as discarded via commit = 255) when no buffer is available. */
static bool update_proxy_acont(
	struct a12_channel* channel, struct audio_frame* aframe)
{
	if (!channel->raw.request_audio_buffer)
		goto fail;

	channel->cont->audp =
		channel->raw.request_audio_buffer(
			aframe->channels, aframe->rate,
			sizeof(AUDIO_SAMPLE_TYPE) * aframe->channels,
			channel->raw.tag
		);

	if (!channel->cont->audp)
		goto fail;

	return true;

fail:
	aframe->commit = 255;
	return false;
}
941
/* Handle a NEWCHANNEL command: translate it into a NEWSEGMENT event and
 * forward to the [on_event] callback on the originating channel, with the
 * new channel id taking the place of the usual descriptor slot. */
static void command_newchannel(
	struct a12_state* S, void (*on_event)
	(struct arcan_shmif_cont*, int chid, struct arcan_event*, void*), void* tag)
{
	uint8_t channel = S->decode[16];
	uint8_t new_channel = S->decode[18];
	uint8_t type = S->decode[19];
	uint8_t direction = S->decode[20];
	uint32_t cookie;
	unpack_u32(&cookie, &S->decode[21]);

	a12int_trace(A12_TRACE_ALLOC, "new channel: %"PRIu8" => %"PRIu8""
		", kind: %"PRIu8", cookie: %"PRIu32"", channel, new_channel, type, cookie);

/* helper srv need to perform the additional segment push here,
 * so our 'file descriptor' in slot 0 is actually the new channel id */
	struct arcan_event ev = {
		.category = EVENT_TARGET,
		.tgt.kind = TARGET_COMMAND_NEWSEGMENT
	};
	ev.tgt.ioevs[0].iv = new_channel;
	ev.tgt.ioevs[1].iv = direction != 0;
	ev.tgt.ioevs[2].iv = type;
	ev.tgt.ioevs[3].uiv = cookie;

	on_event(S->channels[channel].cont, channel, &ev, tag);
}
969
void a12int_stream_fail(struct a12_state* S, uint8_t ch, uint32_t id, int fail)
{
/* queue a CANCELSTREAM for [id] on [ch], with the reason code in slot 22
 * (user-cancel, can't handle, already cached) */
	uint8_t pkt[CONTROL_PACKET_SIZE];

	build_control_header(S, pkt, COMMAND_CANCELSTREAM);
	pkt[16] = ch;
	pack_u32(id, &pkt[18]);
	pkt[22] = (uint8_t) fail;

	a12int_append_out(S, STATE_CONTROL_PACKET, pkt, CONTROL_PACKET_SIZE, NULL, 0);
}
982
983 /*
984 static void dump_window(struct a12_state* S)
985 {
986 printf("[%zu] -", S->congestion_stats.pending);
987 for (size_t i = 0; i < VIDEO_FRAME_DRIFT_WINDOW; i++){
988 printf(" %"PRIu32, S->congestion_stats.frame_window[i]);
989 }
990 }
991 */
992
void a12int_stream_ack(struct a12_state* S, uint8_t ch, uint32_t id)
{
/* acknowledge stream [id] on [ch], piggybacking on the PING command */
	uint8_t pkt[CONTROL_PACKET_SIZE];

	build_control_header(S, pkt, COMMAND_PING);
	pkt[16] = ch;
	pack_u32(id, &pkt[18]);

	a12int_trace(A12_TRACE_DEBUG, "ack=%"PRIu32, id);

	a12int_append_out(S, STATE_CONTROL_PACKET, pkt, CONTROL_PACKET_SIZE, NULL, 0);
}
1006
/*
 * Unpack a VIDEOFRAME control command for the channel in decode[16]:
 * read the codec method and frame geometry, prepare the channel
 * unpack_state.vframe and apply any needed resize / hint change on the
 * destination segment before the actual video data packets arrive.
 * Any validation failure sets vframe->commit = 255, which marks the
 * incoming stream as 'discard' for process_video.
 */
static void command_videoframe(struct a12_state* S)
{
	uint8_t ch = S->decode[16];
	int method = S->decode[22];

	struct a12_channel* channel = &S->channels[ch];
	struct video_frame* vframe = &S->channels[ch].unpack_state.vframe;

/*
 * allocation and tracking for this one is subtle! nderef- etc. has been here
 * in the past. The reason is that codec can be swapped at the encoder side
 * and that some codecs need to retain state between frames.
 */
	if (!a12int_vframe_setup(channel, vframe, method)){
		vframe->commit = 255;
		a12int_stream_fail(S, ch, 1, STREAM_FAIL_UNKNOWN);
		return;
	}

/* new vstream, from README.md:
 * currently unused
 * [36 ] : dataflags: uint8
 */
/* [18..21] : stream-id: uint32 */
	unpack_u32(&vframe->id, &S->decode[18]);
	vframe->postprocess = method; /* [22] : format : uint8 */
/* [23..24] : surfacew: uint16
 * [25..26] : surfaceh: uint16 */
	unpack_u16(&vframe->sw, &S->decode[23]);
	unpack_u16(&vframe->sh, &S->decode[25]);
/* [27..28] : startx: uint16 (0..outw-1)
 * [29..30] : starty: uint16 (0..outh-1) */
	unpack_u16(&vframe->x, &S->decode[27]);
	unpack_u16(&vframe->y, &S->decode[29]);
/* [31..32] : framew: uint16 (outw-startx + framew < outw)
 * [33..34] : frameh: uint16 (outh-starty + frameh < outh) */
	unpack_u16(&vframe->w, &S->decode[31]);
	unpack_u16(&vframe->h, &S->decode[33]);
/* [35] : dataflags */
	unpack_u32(&vframe->inbuf_sz, &S->decode[36]);

/* [41] : commit: uint8 */
	unpack_u32(&vframe->expanded_sz, &S->decode[40]);
	vframe->commit = S->decode[44];

/* reset so the next packet on the wire is treated as a new header */
	S->in_channel = -1;

/* If channel set, apply resize immediately - synch cost should be offset with
 * the buffering being performed at lower layers. Right now the rejection of a
 * resize is not being forwarded, which can cause problems in some edge cases
 * where the WM have artificially restricted the size of a client window etc. */
	struct arcan_shmif_cont* cont = channel->cont;
	if (!cont){
		a12int_trace(A12_TRACE_SYSTEM,
			"kind=videoframe_header:status=EINVAL:channel=%d", (int) ch);
		vframe->commit = 255;
		return;
	}

/* set the possible consumer presentation / repacking options, or resize
 * if the source / destination dimensions no longer match */
	bool hints_changed = false;

/* decode[35] carries the origo_ll flag, toggle the hint if it disagrees
 * with the current segment state */
	if (S->decode[35] ^ (!!(cont->hints & SHMIF_RHINT_ORIGO_LL))){
		if (S->decode[35])
			cont->hints = cont->hints | SHMIF_RHINT_ORIGO_LL;
		else
			cont->hints = cont->hints & (~SHMIF_RHINT_ORIGO_LL);
		hints_changed = true;
	}

/* TPACK-encoded frames require the matching segment hint, and the hint
 * must be dropped again if the encoder switches away from TPACK */
	if (vframe->postprocess == POSTPROCESS_VIDEO_TZSTD &&
		!(cont->hints & SHMIF_RHINT_TPACK)){
		cont->hints |= SHMIF_RHINT_TPACK;
		hints_changed = true;
	}
	else if ((cont->hints & SHMIF_RHINT_TPACK) &&
		vframe->postprocess != POSTPROCESS_VIDEO_TZSTD){
		cont->hints = cont->hints & (~SHMIF_RHINT_TPACK);
		hints_changed = true;
	}

/* always request a new video buffer between frames for raw mode so the
 * caller has the option of mapping each to different destinations */
	if (channel->active == CHANNEL_RAW)
		update_proxy_vcont(channel, vframe);

/* shmif itself only needs the one though */
	else if (hints_changed || vframe->sw != cont->w || vframe->sh != cont->h){
		arcan_shmif_resize(cont, vframe->sw, vframe->sh);

		if (vframe->sw != cont->w || vframe->sh != cont->h){
			a12int_trace(A12_TRACE_SYSTEM, "parent size rejected");
			vframe->commit = 255;
		}
		else
			a12int_trace(A12_TRACE_VIDEO, "kind=resized:channel=%d:hints=%d:"
				"new_w=%zu:new_h=%zu", (int) ch, (int) cont->hints,
				(size_t) vframe->sw, (size_t) vframe->sh
			);
	}

	a12int_trace(A12_TRACE_VIDEO, "kind=frame_header:method=%d:"
		"source_w=%zu:source_h=%zu:w=%zu:h=%zu:x=%zu,y=%zu:"
		"bytes_in=%zu:bytes_out=%zu",
		(int) vframe->postprocess, (size_t) vframe->sw, (size_t) vframe->sh,
		(size_t) vframe->w, (size_t) vframe->h, (size_t) vframe->x, (size_t) vframe->y,
		(size_t) vframe->inbuf_sz, (size_t) vframe->expanded_sz
	);

/* Validation is done just above, making sure the sub- region doesn't extend
 * the specified source surface. Remaining length- field is verified before
 * the write in process_video.
 * NOTE(review): only the start offset is range-checked here; x+w / y+h
 * against sw / sh is presumably covered by the decode stage - confirm. */
	if (vframe->x >= vframe->sw || vframe->y >= vframe->sh){
		vframe->commit = 255;
		a12int_trace(A12_TRACE_SYSTEM,
			"kind=error:status=EINVAL:x=%zu:y=%zu:w=%zu:h=%zu",
			(size_t) vframe->x, (size_t) vframe->y, (size_t) vframe->w, (size_t) vframe->h
		);
		return;
	}

/* For RAW pixels, note that we count row, pos, etc. in the native
 * shmif_pixel and thus use pitch instead of stride */
	if (vframe->postprocess == POSTPROCESS_VIDEO_RGBA ||
		vframe->postprocess == POSTPROCESS_VIDEO_RGB565 ||
		vframe->postprocess == POSTPROCESS_VIDEO_RGB){
		vframe->row_left = vframe->w - vframe->x;
		vframe->out_pos = vframe->y * cont->pitch + vframe->x;
		a12int_trace(A12_TRACE_TRANSFER,
			"row-length: %zu at buffer pos %"PRIu32, vframe->row_left, vframe->inbuf_pos);
	}
/* this includes TPACK */
	else {
/* reject an announced expanded size larger than the target surface */
		size_t ulim = vframe->w * vframe->h * sizeof(shmif_pixel);
		if (vframe->expanded_sz > ulim){
			vframe->commit = 255;
			a12int_trace(A12_TRACE_SYSTEM,
				"kind=error:status=EINVAL:expanded=%zu:limit=%zu",
				(size_t) vframe->expanded_sz, (size_t) ulim
			);
			return;
		}
/* rather arbitrary, but if this condition occurs, the producer should have
 * simply sent the data raw - the odd case is possibly miniz/tpack where the
 * can be a header and a non-compressible buffer. */
		if (vframe->inbuf_sz > vframe->expanded_sz + 24){
			vframe->commit = 255;
			a12int_trace(A12_TRACE_SYSTEM, "incoming buffer (%"
				PRIu32") expands to less than target (%"PRIu32")",
				vframe->inbuf_sz, vframe->expanded_sz
			);
			vframe->inbuf_pos = 0;
			return;
		}

/* out_pos gets validated in the decode stage, so no OOB ->y ->x */
		vframe->out_pos = vframe->y * cont->pitch + vframe->x;
		vframe->inbuf_pos = 0;
		vframe->inbuf = DYNAMIC_MALLOC(vframe->inbuf_sz);
		if (!vframe->inbuf){
			a12int_trace(A12_TRACE_ALLOC,
				"couldn't allocate intermediate buffer store");
			return;
		}
		vframe->row_left = vframe->w;
		a12int_trace(A12_TRACE_VIDEO, "compressed buffer in (%"
			PRIu32") to offset (%zu)", vframe->inbuf_sz, vframe->out_pos);
	}
}
1176
1177 /*
 * Binary transfers come in different shapes:
1179 *
1180 * - [bchunk-in / bchunk-out] non-critical blob transfer, typically on
1181 * clipboard, can be queued and interleaved
1182 * when no-other relevant transfer going on.
1183 *
1184 * - [store] priority state serialization, may be overridden by future reqs.
1185 *
1186 * - [restore] block / priority - changes event interpretation context after.
1187 *
1188 * - [fonthint] affects visual output, likely to be cacheable, higher
1189 * priority during the preroll stage
1190 *
1191 * [out and store] are easier to send on a non-output segment over an
1192 * asymmetric connection as they won't fight with other transfers.
1193 *
1194 */
a12_enqueue_bstream(struct a12_state * S,int fd,int type,bool streaming,size_t sz)1195 void a12_enqueue_bstream(
1196 struct a12_state* S, int fd, int type, bool streaming, size_t sz)
1197 {
1198 struct blob_out* next = DYNAMIC_MALLOC(sizeof(struct blob_out));
1199 if (!next){
1200 a12int_trace(A12_TRACE_SYSTEM, "kind=error:status=ENOMEM");
1201 return;
1202 }
1203
1204 *next = (struct blob_out){
1205 .type = type,
1206 .streaming = streaming,
1207 .fd = -1,
1208 .chid = S->out_channel
1209 };
1210
1211 struct blob_out** parent = &S->pending;
1212 size_t n_streaming = 0;
1213 size_t n_known = 0;
1214
1215 while(*parent){
1216 if ((*parent)->streaming)
1217 n_streaming++;
1218 else
1219 n_known += (*parent)->left;
1220 parent = &(*parent)->next;
1221
1222 /* [MISSING]
1223 * Insertion priority sort goes here, be wary of channel-id in heuristic
1224 * though, and try to prioritize channel that has focus over those that
1225 * don't - and if not appending, the unlink at the fail-state need to
1226 * preserve forward integrity
1227 **/
1228 }
1229 *parent = next;
1230
1231 /* note, next->fd will be non-blocking */
1232 next->fd = arcan_shmif_dupfd(fd, -1, false);
1233 if (-1 == next->fd){
1234 a12int_trace(A12_TRACE_SYSTEM, "kind=error:status=EBADFD");
1235 goto fail;
1236 }
1237
1238 /* For the streaming descriptor, we can only work with the reported size
1239 * and that one can still be unknown (0), i.e. the stream continues to work
1240 * until cancelled.
1241 *
1242 * For the file-backed one, we are expecting seek operations to work and
1243 * will use that size indicator along with a checksum calculation to allow
1244 * the other side to cancel the transfer if it is already known */
1245 if (streaming){
1246 next->streaming = true;
1247 return;
1248 }
1249
1250 off_t fend = lseek(fd, 0, SEEK_END);
1251 if (fend == 0){
1252 a12int_trace(A12_TRACE_SYSTEM, "kind=error:status=EEMPTY");
1253 *parent = NULL;
1254 free(next);
1255 return;
1256 }
1257
1258 if (-1 == fend){
1259 /* force streaming if the host system can't manage */
1260 switch(errno){
1261 case ESPIPE:
1262 case EOVERFLOW:
1263 next->streaming = true;
1264 a12int_trace(A12_TRACE_BTRANSFER,
1265 "kind=added:type=%d:stream=yes:size=%zu:queue=%zu:total=%zu\n",
1266 type, next->left, n_streaming, n_known
1267 );
1268 return;
1269 break;
1270 case EINVAL:
1271 case EBADF:
1272 default:
1273 a12int_trace(A12_TRACE_SYSTEM, "kind=error:status=EBADFD");
1274 break;
1275 }
1276 }
1277
1278 if (-1 == lseek(fd, 0, SEEK_SET)){
1279 a12int_trace(A12_TRACE_SYSTEM, "kind=error:status=ESEEK");
1280 goto fail;
1281 }
1282
1283 /* this has the normal sigbus problem, though we don't care about that much now
1284 * being in the same sort of privilege domain - we can also defer the entire
1285 * thing and simply thread- process it, which is probably the better solution */
1286 void* map = mmap(NULL, fend, PROT_READ, MAP_FILE | MAP_PRIVATE, fd, 0);
1287
1288 if (!map){
1289 a12int_trace(A12_TRACE_SYSTEM, "kind=error:status=EMMAP");
1290 goto fail;
1291 }
1292
1293 /* the crypted packages are MACed together, but we always use the primitive
1294 * for btransfer- checksums so the other side can compare against a cache and
1295 * cancel the stream */
1296 blake3_hasher hash;
1297 blake3_hasher_init(&hash);
1298 blake3_hasher_update(&hash, map, fend);
1299 blake3_hasher_finalize(&hash, next->checksum, 16);
1300 munmap(map, fend);
1301 next->left = fend;
1302 a12int_trace(A12_TRACE_BTRANSFER,
1303 "kind=added:type=%d:stream=no:size=%zu", type, next->left);
1304 S->active_blobs++;
1305 return;
1306
1307 fail:
1308 if (-1 != next->fd)
1309 close(next->fd);
1310
1311 *parent = NULL;
1312 free(next);
1313 }
1314
authdec_buffer(const char * src,struct a12_state * S,size_t block_sz)1315 static bool authdec_buffer(const char* src, struct a12_state* S, size_t block_sz)
1316 {
1317 size_t mac_size = MAC_BLOCK_SZ;
1318
1319 if (S->authentic == AUTH_SERVER_HBLOCK){
1320 mac_size = 8;
1321 trace_crypto_key(S->server, "auth_mac_in", S->last_mac_in, mac_size);
1322 }
1323
1324 update_mac_and_decrypt(__func__, &S->in_mac, S->dec_state, S->decode, block_sz);
1325
1326 uint8_t ref_mac[MAC_BLOCK_SZ];
1327 blake3_hasher_finalize(&S->in_mac, ref_mac, mac_size);
1328
1329 a12int_trace(A12_TRACE_CRYPTO,
1330 "kind=mac_dec:src=%s:pos=%zu", src, S->in_mac.counter);
1331 trace_crypto_key(S->server, "auth_mac_rf", ref_mac, mac_size);
1332 bool res = memcmp(ref_mac, S->last_mac_in, mac_size) == 0;
1333
1334 if (!res){
1335 trace_crypto_key(S->server, "bad_mac", S->last_mac_in, mac_size);
1336 }
1337
1338 return res;
1339 }
1340
/*
 * Server-side handling of a client HELLO. Two accepted modes: the client
 * either sent an ephemeral public key (first step of the double exchange
 * that hides the real Pk from passive observers) or its real public key,
 * which is authenticated through the user-supplied pk_lookup callback.
 * On success with a real key the connection is marked fully authenticated
 * and latched (a12_unpack saves buffer and returns to the caller).
 */
static void hello_auth_server_hello(struct a12_state* S)
{
	uint8_t pubk[32];
	uint8_t nonce[8];
	int cfl = S->decode[20];
	a12int_trace(A12_TRACE_CRYPTO, "state=complete:method=%d", cfl);

/* here is a spot for having more authentication modes if needed (version bump) */
	if (cfl != HELLO_MODE_EPHEMPK && cfl != HELLO_MODE_REALPK){
		a12int_trace(A12_TRACE_SECURITY, "unknown_helo");
		fail_state(S);
		return;
	}

/* public key is ephemeral, generate new pair, send a hello out with the new
 * one THEN derive new keys for authentication and so on. After this the
 * connection flows just like if the ephem mode wasn't used - the client
 * will send its real Pk and we respond in kind. The protection this affords us
 * is that you need an active MiM to gather Pks for tracking. */
	if (cfl == HELLO_MODE_EPHEMPK){
		uint8_t ek[32];
		x25519_private_key(ek);
		x25519_public_key(ek, pubk);
		arcan_random(nonce, 8);
		send_hello_packet(S, HELLO_MODE_EPHEMPK, pubk, nonce);

/* client ephemeral Pk sits at decode[21..52] */
		x25519_shared_secret((uint8_t*)S->opts->secret, ek, &S->decode[21]);
		trace_crypto_key(S->server, "ephem_pub", pubk, 32);
		update_keymaterial(S, S->opts->secret, 32, nonce);
		S->authentic = AUTH_EPHEMERAL_PK;
		return;
	}

/* public key is the real one, use an external lookup function for mapping
 * to keystores defined by the api user */
	if (!S->opts->pk_lookup){
		a12int_trace(A12_TRACE_CRYPTO, "state=eimpl:kind=x25519-no-lookup");
		fail_state(S);
		return;
	}

/* the lookup function returns the key that should be used in the reply
 * and to calculate the shared secret */
	trace_crypto_key(S->server, "state=client_pk", &S->decode[21], 32);
	struct pk_response res = S->opts->pk_lookup(&S->decode[21]);
	if (!res.authentic){
		a12int_trace(A12_TRACE_CRYPTO, "state=eperm:kind=x25519-pk-fail");
		fail_state(S);
		return;
	}

/* hello packet here will still use the keystate from the process_srvfirst
 * which will use the client provided nonce, KDF on preshare-pw */
	x25519_public_key(res.key, pubk);
	arcan_random(nonce, 8);
	send_hello_packet(S, HELLO_MODE_REALPK, pubk, nonce);
	trace_crypto_key(S->server, "state=client_pk_ok:respond_pk", pubk, 32);

/* now we can switch keys, note that the new nonce applies for both enc and dec
 * states regardless of the nonce the client provided in the first message */
	x25519_shared_secret((uint8_t*)S->opts->secret, res.key, &S->decode[21]);
	trace_crypto_key(S->server, "state=server_ssecret", (uint8_t*)S->opts->secret, 32);
	update_keymaterial(S, S->opts->secret, 32, nonce);

/* and done, mark latched so a12_unpack saves buffer and returns */
	S->authentic = AUTH_FULL_PK;
	S->auth_latched = true;

	if (S->on_auth)
		S->on_auth(S, S->auth_tag);
}
1412
/*
 * Client-side handling of the server HELLO that carries the server's real
 * public key: authenticate it through the user-supplied pk_lookup
 * callback, derive the session keys from the x25519 shared secret and
 * mark the connection as fully authenticated / latched.
 */
static void hello_auth_client_hello(struct a12_state* S)
{
	if (!S->opts->pk_lookup){
		a12int_trace(A12_TRACE_CRYPTO, "state=eimpl:kind=x25519-no-lookup");
		fail_state(S);
		return;
	}

/* server Pk sits at decode[21..52] */
	trace_crypto_key(S->server, "server_pk", &S->decode[21], 32);
	struct pk_response res = S->opts->pk_lookup(&S->decode[21]);
	if (!res.authentic){
		a12int_trace(A12_TRACE_CRYPTO, "state=eperm:kind=25519-pk-fail");
		fail_state(S);
		return;
	}

/* now we can calculate the x25519 shared secret, overwrite the preshared
 * secret slot in the initial configuration and repeat the key derivation
 * process to get new enc/dec/mac keys. decode[8] is the nonce/entropy
 * field of the received hello packet. */
	x25519_shared_secret((uint8_t*)S->opts->secret, S->keys.real_priv, &S->decode[21]);
	trace_crypto_key(S->server, "state=client_ssecret", (uint8_t*)S->opts->secret, 32);
	update_keymaterial(S, S->opts->secret, 32, &S->decode[8]);

	S->authentic = AUTH_FULL_PK;
	S->auth_latched = true;

	if (S->on_auth)
		S->on_auth(S, S->auth_tag);
}
1442
process_hello_auth(struct a12_state * S)1443 static void process_hello_auth(struct a12_state* S)
1444 {
1445 /*
1446 - [18] Version major : uint8 (shmif-version until 1.0)
1447 - [19] Version minor : uint8 (shmif-version until 1.0)
1448 - [20] Flags : uint8
1449 - [21+ 32] x25519 Pk : blob
1450 */
1451 if (S->authentic == AUTH_SERVER_HBLOCK){
1452 hello_auth_server_hello(S);
1453 return;
1454 }
1455 /* the client has received the server ephemeral pubk,
1456 * now we can send the real one after a keyswitch to a shared secret */
1457 else if (S->authentic == AUTH_POLITE_HELLO_SENT){
1458 uint8_t nonce[8];
1459
1460 trace_crypto_key(S->server, "ephem-pub-in", &S->decode[21], 32);
1461 x25519_shared_secret((uint8_t*)S->opts->secret, S->keys.ephem_priv, &S->decode[21]);
1462 update_keymaterial(S, S->opts->secret, 32, &S->decode[8]);
1463
1464 uint8_t realpk[32];
1465 x25519_public_key(S->keys.real_priv, realpk);
1466
1467 S->authentic = AUTH_REAL_HELLO_SENT;
1468 arcan_random(nonce, 8);
1469 send_hello_packet(S, 1, realpk, nonce);
1470 }
1471 /* the server and client are both using a shared secret from the ephemeral key
1472 * now, and this message contains the real public key of the client, treat it
1473 * the same as AUTH_SERVER_HBLOCK though we know the mode */
1474 else if (S->authentic == AUTH_EPHEMERAL_PK){
1475 hello_auth_server_hello(S);
1476 }
1477 /* client side, authenticate public key, keyswitch to session */
1478 else if (S->authentic == AUTH_REAL_HELLO_SENT){
1479 hello_auth_client_hello(S);
1480 }
1481 else {
1482 a12int_trace(A12_TRACE_CRYPTO,
1483 "HELLO after completed authxchg (%d)", S->authentic);
1484 fail_state(S);
1485 return;
1486 }
1487 }
1488
/*
 * Process the stream-id acknowledgement carried by a PING. The congestion
 * window (congestion_stats.frame_window) holds the ids of outstanding
 * video frames in send order; locate the acked id, slide the remaining
 * ids to the front of the window and update the pending / backpressure
 * counters.
 */
static void command_pingpacket(struct a12_state* S, uint32_t sid)
{
/* might get empty pings, ignore those as they only update last_seen_seq */
	if (!sid)
		return;

/* scan for the acked id; a zero entry marks the end of the used window */
	size_t i;
	size_t wnd_sz = VIDEO_FRAME_DRIFT_WINDOW;
	for (i = 0; i < wnd_sz; i++){
		uint32_t cid = S->congestion_stats.frame_window[i];

		if (!cid){
			a12int_trace(A12_TRACE_DEBUG, "ack-sid %"PRIu32" not in wnd", sid);
			return;
		}

		if (cid == sid)
			break;
	}

/* the ID might be bad (after the last sent) or truncated or outdated.
 * Truncation / outdating can happen if the source continues to push frames
 * while fully congested. It is the user of the implementation that needs to
 * determine actions on congestion/backpressure. */
	if (i >= wnd_sz - 1){
		uint32_t latest =
			S->congestion_stats.frame_window[wnd_sz-1];

/* everything outstanding has been acked, clear the whole window */
		if (sid >= latest){
			a12int_trace(A12_TRACE_DEBUG, "ack-sid %"PRIu32" after wnd", sid);
			S->congestion_stats.pending = 0;
			for (size_t i = 0; i < wnd_sz; i++){
				S->congestion_stats.frame_window[i] = 0;
			}
			return;
		}

/* in the truncation case, we only retain the last 'sent' and slide
 * the rest of the window.
 * NOTE(review): this compares the window index [i] against the stream-id
 * [latest] - presumably [sid] was intended; confirm against encoder side. */
		if (i < latest)
			i = wnd_sz - 2;
	}

/* slide the still-outstanding (non-zero) ids after the ack to the front */
	size_t i_start = i + 1;
	size_t to_move = 0;

/* shrink the moveset to only cover non-0 */
	while (i_start + to_move < wnd_sz &&
		S->congestion_stats.frame_window[i_start+to_move]){
		to_move++;
	}
	S->congestion_stats.pending = to_move;

	memmove(
		S->congestion_stats.frame_window,
		&S->congestion_stats.frame_window[i_start],
		to_move * sizeof(uint32_t)
	);

/* backpressure estimate: frames still in flight plus the id-distance
 * between the oldest outstanding frame and the acked one */
	S->stats.vframe_backpressure = to_move +
		(S->congestion_stats.frame_window[0] - sid);

/* zero out the now-vacated tail of the window */
	for (i = to_move; i < wnd_sz; i++)
		S->congestion_stats.frame_window[i] = 0;
}
1554
1555 /*
1556 * Control command,
1557 * current MAC calculation in s->mac_dec
1558 */
process_control(struct a12_state * S,void (* on_event)(struct arcan_shmif_cont *,int chid,struct arcan_event *,void *),void * tag)1559 static void process_control(struct a12_state* S, void (*on_event)
1560 (struct arcan_shmif_cont*, int chid, struct arcan_event*, void*), void* tag)
1561 {
1562 if (!authdec_buffer(__func__, S, header_sizes[S->state])){
1563 fail_state(S);
1564 return;
1565 }
1566
1567 /* ignore these for now
1568 uint64_t last_seen = S->decode[0];
1569 uint8_t entropy[8] = S->decode[8];
1570 uint8_t channel = S->decode[16];
1571 */
1572
1573 uint8_t command = S->decode[17];
1574 if (S->authentic < AUTH_FULL_PK && command != COMMAND_HELLO){
1575 a12int_trace(A12_TRACE_CRYPTO,
1576 "illegal command (%d) on non-auth connection", (int) command);
1577 fail_state(S);
1578 return;
1579 }
1580
1581 a12int_trace(A12_TRACE_DEBUG, "cmd=%"PRIu8, command);
1582
1583 switch(command){
1584 case COMMAND_HELLO:
1585 process_hello_auth(S);
1586 break;
1587 case COMMAND_SHUTDOWN:
1588 /* terminate specific channel */
1589 break;
1590 case COMMAND_NEWCH:
1591 command_newchannel(S, on_event, tag);
1592 break;
1593 case COMMAND_CANCELSTREAM:{
1594 uint32_t streamid;
1595 unpack_u32(&streamid, &S->decode[18]);
1596 command_cancelstream(S, streamid, S->decode[22], S->decode[23]);
1597 }
1598 break;
1599 case COMMAND_PING:{
1600 uint32_t streamid;
1601 unpack_u32(&streamid, &S->decode[18]);
1602 a12int_trace(A12_TRACE_DEBUG, "ping=%"PRIu32, streamid);
1603 command_pingpacket(S, streamid);
1604 }
1605 break;
1606 case COMMAND_VIDEOFRAME:
1607 command_videoframe(S);
1608 break;
1609 case COMMAND_AUDIOFRAME:
1610 command_audioframe(S);
1611 break;
1612 case COMMAND_BINARYSTREAM:
1613 command_binarystream(S);
1614 break;
1615 case COMMAND_REKEY:
1616 /* 1. note the sequence number that we are supposed to switch.
1617 * 2. generate new keypair and add to the rekey slot.
1618 * 3. if this is not a rekey response package, send the new pubk in response */
1619 break;
1620 default:
1621 a12int_trace(A12_TRACE_SYSTEM, "Unknown message type: %d", (int)command);
1622 break;
1623 }
1624
1625 reset_state(S);
1626 }
1627
process_event(struct a12_state * S,void * tag,void (* on_event)(struct arcan_shmif_cont * wnd,int chid,struct arcan_event *,void *))1628 static void process_event(struct a12_state* S, void* tag,
1629 void (*on_event)(
1630 struct arcan_shmif_cont* wnd, int chid, struct arcan_event*, void*))
1631 {
1632 if (!authdec_buffer(__func__, S, header_sizes[S->state])){
1633 a12int_trace(A12_TRACE_CRYPTO, "MAC mismatch on event packet");
1634 fail_state(S);
1635 return;
1636 }
1637
1638 uint8_t channel = S->decode[8];
1639
1640 struct arcan_event aev;
1641 unpack_u64(&S->last_seen_seqnr, S->decode);
1642
1643 if (-1 == arcan_shmif_eventunpack(
1644 &S->decode[SEQUENCE_NUMBER_SIZE+1],
1645 S->decode_pos-SEQUENCE_NUMBER_SIZE-1, &aev))
1646 {
1647 a12int_trace(A12_TRACE_SYSTEM, "broken event packet received");
1648 }
1649 else if (on_event){
1650 a12int_trace(A12_TRACE_EVENT, "unpack event to %d", channel);
1651 on_event(S->channels[channel].cont, channel, &aev, tag);
1652 }
1653
1654 reset_state(S);
1655 }
1656
/*
 * Unpack a binary-stream packet. The first packet after reset carries the
 * [channel, stream-id, length] header; subsequent packets carry payload
 * which is flushed to the descriptor (tmp_fd) registered for the channel.
 * When the last chunk of a known-size transfer arrives, the registered
 * binary handler is invoked with the completion metadata.
 */
static void process_blob(struct a12_state* S)
{
/* do we have the header bytes or not? the actual callback is triggered
 * inside of the binarystream rather than of the individual blobs */
	if (S->in_channel == -1){
		update_mac_and_decrypt(__func__, &S->in_mac,
			S->dec_state, S->decode, header_sizes[S->state]);

		S->in_channel = S->decode[0];
		unpack_u32(&S->in_stream, &S->decode[1]);
		unpack_u16(&S->left, &S->decode[5]);
		S->decode_pos = 0;
		a12int_trace(A12_TRACE_BTRANSFER,
			"kind=header:channel=%d:size=%"PRIu16, S->in_channel, S->left);

		return;
	}

/* did we receive a message on a dead channel? */
	struct binary_frame* cbf = &S->channels[S->in_channel].unpack_state.bframe;
	if (!authdec_buffer(__func__, S, S->decode_pos)){
		fail_state(S);
		return;
	}

	struct arcan_shmif_cont* cont = S->channels[S->in_channel].cont;
	if (!cont){
		a12int_trace(A12_TRACE_SYSTEM, "kind=error:status=EINVAL:"
			"ch=%d:message=no segment mapped", S->in_channel);
		reset_state(S);
		return;
	}

/* we can't stop the other side from sending data to a dead stream, so just
 * discard the data and hope that a previously sent cancelstream will take */
	if (cbf->streamid != S->in_stream || -1 == cbf->streamid){
		a12int_trace(A12_TRACE_BTRANSFER, "kind=notice:ch=%d:src_stream=%"PRIu32
			":dst_stream=%"PRId64":message=data on cancelled stream",
			S->in_channel, S->in_stream, cbf->streamid);
		reset_state(S);
		return;
	}

/* or worse, a data block referencing a transfer that has not been set up? */
	if (!cbf->active){
		a12int_trace(A12_TRACE_SYSTEM, "kind=error:status=EINVAL:"
			"ch=%d:message=blob on inactive channel", S->in_channel);
		reset_state(S);
		return;
	}

/* Flush it out to the assigned descriptor, this is currently likely to be
 * blocking and can cascade quite far down the chain, consider a drag and drop
 * that routes via a pipe onwards to another client. Normal splice etc.
 * operations won't work so we are left with this. To not block video/audio
 * processing we would have to buffer / flush this separately, with a big
 * complexity leap. */
	if (-1 != cbf->tmp_fd){
		size_t pos = 0;

/* short/interrupted writes are retried, hard failures cancel the stream */
		while(pos < S->decode_pos){
			ssize_t status = write(cbf->tmp_fd, &S->decode[pos], S->decode_pos - pos);
			if (-1 == status){
				if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
					continue;

/* so there was a problem writing (dead pipe, out of space etc). send a cancel
 * on the stream,this will also forward the status change to the event handler
 * itself who is responsible for closing the tmp_fd */
				a12_stream_cancel(S, S->in_channel);
				reset_state(S);
				return;
			}
			else
				pos += status;
		}
	}

/* size == 0 means a transfer of unknown length (streaming), which never
 * takes the completion path below */
	if (cbf->size > 0){
		cbf->size -= S->decode_pos;

		if (!cbf->size){
			a12int_trace(A12_TRACE_BTRANSFER,
				"kind=completed:ch=%d:stream=%"PRId64, S->in_channel, cbf->streamid);
			cbf->active = false;
			if (!S->binary_handler)
				return;

/* finally forward all the metadata to the handler and let the recipient
 * pack it into the proper event structure and so on. */
			struct a12_bhandler_meta bm = {
				.type = cbf->type,
				.streamid = cbf->streamid,
				.channel = S->in_channel,
				.fd = cbf->tmp_fd,
				.dcont = cont,
				.state = A12_BHANDLER_COMPLETED
			};

/* note that we do trust the provided checksum here, to actually re-verify that
 * a possibly cache- stored checksum matches the transfer is up to the callback
 * handler. otherwise a possible scenario is to have a client taint a binary
 * cache, but such trust compartmentation should be handled by real separation
 * between clients. */
			memcpy(&bm.checksum, cbf->checksum, 16);

/* ownership of tmp_fd passes to the handler along with [bm] */
			cbf->tmp_fd = -1;
			S->binary_handler(S, bm, S->binary_handler_tag);

			return;
		}
	}

/*
 * More data to come on the channel, just reset and wait for the next packet
 */
	a12int_trace(A12_TRACE_BTRANSFER,
		"kind=data:ch=%d:left:%"PRIu64, S->in_channel, cbf->size);

	reset_state(S);
}
1777
1778 /*
1779 * We have an incoming video packet, first we need to match it to the channel
1780 * that it represents (as we might get interleaved updates) and match the state
1781 * we are building.
1782 */
static void process_video(struct a12_state* S)
{
/* in_channel == -1 means we are waiting for the packet header: unpack
 * channel, stream-id and payload length, then wait for the data packet */
	if (S->in_channel == -1){
		uint32_t stream;

/* note that the data is still unauthenticated, we need to know how much
 * left to expect and buffer that before we can authenticate */
		update_mac_and_decrypt(__func__,
			&S->in_mac, S->dec_state, S->decode, S->decode_pos);
		S->in_channel = S->decode[0];
/* NOTE(review): the unpacked stream-id is only traced here, frame pairing
 * appears to go via the vframe->id from the control packet - confirm */
		unpack_u32(&stream, &S->decode[1]);
		unpack_u16(&S->left, &S->decode[5]);
		S->decode_pos = 0;

		a12int_trace(A12_TRACE_VIDEO,
			"kind=header:channel=%d:size=%"PRIu16, S->in_channel, S->left);
		return;
	}

/* the 'video_frame' structure for the current channel (segment) tracks
 * decode buffer etc. for the current stream */
	struct a12_channel* ch = &S->channels[S->in_channel];
	struct video_frame* cvf = &ch->unpack_state.vframe;

	if (!authdec_buffer(__func__, S, S->decode_pos)){
		fail_state(S);
		return;
	}
	else {
		a12int_trace(A12_TRACE_CRYPTO, "kind=frame_auth");
	}

/* if we are in discard state, just continue */
	if (cvf->commit == 255){
		a12int_trace(A12_TRACE_VIDEO, "kind=discard");
		reset_state(S);
		return;
	}

	struct arcan_shmif_cont* cont = ch->cont;
	if (!cont){
		a12int_trace(A12_TRACE_SYSTEM,
			"kind=error:source=video:type=EINVALCH:val=%d", (int) S->in_channel);
		reset_state(S);
		return;
	}

/* postprocessing that requires an intermediate decode buffer before pushing */
	if (a12int_buffer_format(cvf->postprocess)){
		size_t left = cvf->inbuf_sz - cvf->inbuf_pos;
		a12int_trace(A12_TRACE_VIDEO,
			"kind=decbuf:channel=%d:size=%"PRIu16":left=%zu",
			S->in_channel, S->decode_pos, left
		);

/* buffer and slide? */
		if (left >= S->decode_pos){
			memcpy(&cvf->inbuf[cvf->inbuf_pos], S->decode, S->decode_pos);
			cvf->inbuf_pos += S->decode_pos;
			left -= S->decode_pos;
		}
/* other option is to terminate here as the client is misbehaving */
		else if (left != 0){
			a12int_trace(A12_TRACE_SYSTEM,
				"kind=error:source=video:channel=%d:type=EOVERFLOW", S->in_channel);
			reset_state(S);
		}

/* buffer is finished, decode and commit to designated channel context
 * unless it has already been marked as something to ignore and discard */
		if (left == 0 && cvf->commit != 255){
			a12int_trace(
				A12_TRACE_VIDEO, "kind=decbuf:channel=%d:commit", (int)S->in_channel);
			a12int_decode_vbuffer(S, ch, cvf, cont);
		}

/* acknowledge the stream so the sender's congestion window can advance */
		a12int_stream_ack(S, S->in_channel, cvf->id);
		reset_state(S);
		return;
	}

/* we use a length field that match the width*height so any
 * overflow / wrap tricks won't work */
	if (cvf->inbuf_sz < S->decode_pos){
		a12int_trace(A12_TRACE_SYSTEM,
			"kind=error:source=video:channel=%d:type=EOVERFLOW", S->in_channel);
		cvf->commit = 255;
		reset_state(S);
		return;
	}

/* finally unpack the raw video buffer */
	a12int_unpack_vbuffer(S, cvf, cont);
	reset_state(S);
}
1878
drain_audio(struct a12_channel * ch)1879 static void drain_audio(struct a12_channel* ch)
1880 {
1881 struct arcan_shmif_cont* cont = ch->cont;
1882 if (ch->active == CHANNEL_RAW){
1883 if (ch->raw.signal_audio)
1884 ch->raw.signal_audio(cont->abufused, ch->raw.tag);
1885 cont->abufused = 0;
1886 return;
1887 }
1888
1889 arcan_shmif_signal(cont, SHMIF_SIGAUD);
1890 }
1891
process_audio(struct a12_state * S)1892 static void process_audio(struct a12_state* S)
1893 {
1894 /* in_channel is used to track if we are waiting for the header or not */
1895 if (S->in_channel == -1){
1896 uint32_t stream;
1897 S->in_channel = S->decode[0];
1898 unpack_u32(&stream, &S->decode[1]);
1899 unpack_u16(&S->left, &S->decode[5]);
1900 S->decode_pos = 0;
1901 update_mac_and_decrypt(__func__,
1902 &S->in_mac, S->dec_state, S->decode, header_sizes[S->state]);
1903 a12int_trace(A12_TRACE_AUDIO,
1904 "audio[%d:%"PRIx32"], left: %"PRIu16, S->in_channel, stream, S->left);
1905 return;
1906 }
1907
1908 /* the 'audio_frame' structure for the current channel (segment) only
1909 * contains the metadata, we use the mapped shmif- segment to actually
1910 * buffer, assuming we have no compression */
1911 struct a12_channel* channel = &S->channels[S->in_channel];
1912 struct audio_frame* caf = &channel->unpack_state.aframe;
1913 struct arcan_shmif_cont* cont = channel->cont;
1914
1915 if (!authdec_buffer(__func__, S, S->decode_pos)){
1916 fail_state(S);
1917 return;
1918 }
1919 else {
1920 a12int_trace(A12_TRACE_CRYPTO, "kind=frame_auth");
1921 }
1922
1923 if (!cont){
1924 a12int_trace(A12_TRACE_SYSTEM,
1925 "audio data on unmapped channel (%d)", (int) S->in_channel);
1926 reset_state(S);
1927 return;
1928 }
1929
1930 if (channel->active == CHANNEL_RAW){
1931 if (!update_proxy_acont(channel, caf))
1932 return;
1933 }
1934
1935 /* passed the header stage, now it's the data block,
1936 * make sure the segment has registered that it can provide audio */
1937 if (!cont->audp){
1938 a12int_trace(A12_TRACE_AUDIO,
1939 "frame-resize, rate: %"PRIu32", channels: %"PRIu8,
1940 caf->rate, caf->channels
1941 );
1942
1943 /* a note with the extended resize here is that we always request a single
1944 * video buffer, which means the video part will be locked until we get an
1945 * ack from the consumer - this might need to be tunable to increase if we
1946 * detect that we stall on signalling video */
1947 if (!arcan_shmif_resize_ext(cont,
1948 cont->w, cont->h, (struct shmif_resize_ext){
1949 .abuf_sz = 1024, .samplerate = caf->rate,
1950 .abuf_cnt = 16, .vbuf_cnt = 1
1951 })){
1952 a12int_trace(A12_TRACE_ALLOC, "frame-resize failed");
1953 caf->commit = 255;
1954 return;
1955 }
1956 }
1957
1958 /* Flush out into abuffer, assuming that the context has been set to match the
1959 * defined source format in a previous stage. Resampling might be needed here,
1960 * both for rate and for drift/buffer */
1961 size_t samples_in = S->decode_pos >> 1;
1962 size_t pos = 0;
1963
1964 /* assumed s16, stereo for now, if the sender didn't align properly, shame */
1965 while (samples_in > 1){
1966 int16_t l, r;
1967 unpack_s16(&l, &S->decode[pos]);
1968 pos += 2;
1969 unpack_s16(&r, &S->decode[pos]);
1970 pos += 2;
1971 cont->audp[cont->abufpos++] = SHMIF_AINT16(l);
1972 cont->audp[cont->abufpos++] = SHMIF_AINT16(r);
1973 samples_in -= 2;
1974
1975 if (cont->abufcount - cont->abufpos <= 1){
1976 a12int_trace(A12_TRACE_AUDIO,
1977 "forward %zu samples", (size_t) cont->abufpos);
1978 drain_audio(channel);
1979 }
1980 }
1981
1982 /* now we can subtract the number of SAMPLES from the audio stream packet, when
1983 * that reaches zero we reset state, this incorrectly assumes 2 channels. */
1984 caf->nsamples -= S->decode_pos >> 1;
1985
1986 /* drain if there is data left in the buffer, but no samples left */
1987 if (!caf->nsamples && cont->abufused){
1988 drain_audio(channel);
1989 }
1990
1991 a12int_trace(A12_TRACE_TRANSFER,
1992 "audio packet over, samples left: %zu", (size_t) caf->nsamples);
1993 reset_state(S);
1994 }
1995
/* bind / replace the shmif context acting as destination for a channel */
a12_set_destination(struct a12_state * S,struct arcan_shmif_cont * wnd,uint8_t chid)1997 void a12_set_destination(
1998 struct a12_state* S, struct arcan_shmif_cont* wnd, uint8_t chid)
1999 {
2000 if (!S){
2001 a12int_trace(A12_TRACE_DEBUG, "invalid set_destination call");
2002 return;
2003 }
2004
2005 if (S->channels[chid].active == CHANNEL_RAW){
2006 free(S->channels[chid].cont);
2007 S->channels[chid].cont = NULL;
2008 }
2009
2010 S->channels[chid].cont = wnd;
2011 S->channels[chid].active = wnd ? CHANNEL_SHMIF : CHANNEL_INACTIVE;
2012 }
2013
a12_set_destination_raw(struct a12_state * S,uint8_t chid,struct a12_unpack_cfg cfg,size_t cfg_sz)2014 void a12_set_destination_raw(struct a12_state* S,
2015 uint8_t chid, struct a12_unpack_cfg cfg, size_t cfg_sz)
2016 {
2017 /* the reason for this rather odd design is that non-shmif based receiver was
2018 * an afterthought, and it was a much larger task refactoring it out versus
2019 * adding a proxy and tagging */
2020 size_t ct_sz = sizeof(struct arcan_shmif_cont);
2021 struct arcan_shmif_cont* fake = DYNAMIC_MALLOC(ct_sz);
2022 if (!fake)
2023 return;
2024
2025 *fake = (struct arcan_shmif_cont){};
2026 S->channels[chid].cont = fake;
2027 S->channels[chid].raw = cfg;
2028 S->channels[chid].active = CHANNEL_RAW;
2029 }
2030
/*
 * Feed [buf_sz] bytes of raw inbound network data into the state machine.
 * Data is demultiplexed into the control / event / audio / video / binary
 * decode stages; [on_event] is invoked (with [tag]) for every translated
 * event on its mapped channel. Tail-recurses while the buffer contains
 * more than one complete packet.
 */
void
a12_unpack(struct a12_state* S, const uint8_t* buf,
	size_t buf_sz, void* tag, void (*on_event)
	(struct arcan_shmif_cont*, int chid, struct arcan_event*, void*))
{
	if (S->state == STATE_BROKEN){
		a12int_trace(A12_TRACE_SYSTEM,
			"kind=error:status=EINVAL:message=state machine broken");
		fail_state(S);
		return;
	}

/* flush any prequeued buffer, see comment at the end of the function */
	if (S->prepend_unpack){
		uint8_t* tmp_buf = S->prepend_unpack;
		size_t tmp_sz = S->prepend_unpack_sz;
		a12int_trace(A12_TRACE_SYSTEM, "kind=prebuf:size=%zu", tmp_sz);
/* clear the latch before recursing so the nested call can't re-enter here */
		S->prepend_unpack_sz = 0;
		S->prepend_unpack = NULL;
		a12_unpack(S, tmp_buf, tmp_sz, tag, on_event);
		DYNAMIC_FREE(tmp_buf);
	}

/* Unknown state? then we're back waiting for a command packet */
	if (S->left == 0)
		reset_state(S);

/* iteratively flush, we tail- recurse should the need arise, optimization
 * here would be to forward buf immediately if it fits - saves a copy */
	size_t ntr = buf_sz > S->left ? S->left : buf_sz;

	memcpy(&S->decode[S->decode_pos], buf, ntr);

	S->left -= ntr;
	S->decode_pos += ntr;
	buf_sz -= ntr;
	S->stats.b_in += ntr;

/* do we need to buffer more? */
	if (S->left)
		return;

/* otherwise dispatch based on state */
	switch(S->state){
	case STATE_1STSRV_PACKET:
		process_srvfirst(S);
	break;
	case STATE_NOPACKET:
		process_nopacket(S);
	break;
	case STATE_CONTROL_PACKET:
		process_control(S, on_event, tag);
	break;
	case STATE_EVENT_PACKET:
		process_event(S, tag, on_event);
	break;
/* worth noting is that these (a,v,b) have different buffer sizes for their
 * respective packets, so the authentication and decryption steps are somewhat
 * different */
	case STATE_VIDEO_PACKET:
		process_video(S);
	break;
	case STATE_AUDIO_PACKET:
		process_audio(S);
	break;
	case STATE_BLOB_PACKET:
		process_blob(S);
	break;
	default:
		a12int_trace(A12_TRACE_SYSTEM, "kind=error:status=EINVAL:message=bad command");
		fail_state(S);
		return;
	break;
	}

/* This is an ugly thing - so a buffer can contain a completed authentication
 * sequence and subsequent data. An API consumer may well need to do additional
 * work in between completed authentication and proper use or data might get
 * lost. The easiest way to solve this after the fact was to add a latched
 * buffer stage.
 *
 * on_authentication_completed:
 *  - set latched state
 *  - if data remaining: save a copy and return
 *
 * next unpack:
 *  - check if buffered data exist
 *  - process first, then move on to submitted buffer
 */
	if (buf_sz){
		if (!S->auth_latched){
			a12_unpack(S, &buf[ntr], buf_sz, tag, on_event);
			return;
		}

		S->auth_latched = false;
		S->prepend_unpack = DYNAMIC_MALLOC(buf_sz);

		if (!S->prepend_unpack){
			a12int_trace(A12_TRACE_ALLOC, "kind=error:latch_buffer_sz=%zu", buf_sz);
			fail_state(S);
			return;
		}

		a12int_trace(A12_TRACE_SYSTEM, "kind=auth_latch:size=%zu", buf_sz);
		memcpy(S->prepend_unpack, &buf[ntr], buf_sz);
		S->prepend_unpack_sz = buf_sz;
		return;
	}
/* nothing left to save, the latch has served its purpose */
	if (S->auth_latched)
		S->auth_latched = false;
}
2143
2144 /*
2145 * Several small issues that should be looked at here, one is that we don't
2146 * multiplex well with the source, risking a non-block 100% spin. Second is
2147 * that we don't have an intermediate buffer as part of the queue-node, meaning
2148 * that we risk sending very small blocks of data as part of the stream,
2149 * wasting bandwidth.
2150 */
read_data(int fd,size_t cap,uint16_t * nts,bool * die)2151 static void* read_data(int fd, size_t cap, uint16_t* nts, bool* die)
2152 {
2153 void* buf = DYNAMIC_MALLOC(65536);
2154 if (!buf){
2155 a12int_trace(A12_TRACE_SYSTEM, "kind=error:status=ENOMEM");
2156 *die = true;
2157 return NULL;
2158 }
2159
2160 ssize_t nr = read(fd, buf, cap);
2161
2162 /* possibly non-fatal or no data present yet, keep stream alive - a bad stream
2163 * source with no timeout will block / preempt other binary transfers though so
2164 * might need to consider reordering in that case. */
2165 if (-1 == nr){
2166 if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
2167 *die = false;
2168 else
2169 *die = true;
2170
2171 free(buf);
2172 return NULL;
2173 }
2174
2175 *die = false;
2176 if (nr == 0){
2177 free(buf);
2178 return NULL;
2179 }
2180
2181 *nts = nr;
2182 return buf;
2183 }
2184
unlink_node(struct a12_state * S,struct blob_out * node)2185 static void unlink_node(struct a12_state* S, struct blob_out* node)
2186 {
2187 /* find the owner of the node, redirect next */
2188 /* close the socket and other resources */
2189 struct blob_out* next = node->next;
2190 struct blob_out** dst = &S->pending;
2191 while (*dst != node && *dst){
2192 dst = &((*dst)->next);
2193 }
2194
2195 if (*dst != node){
2196 a12int_trace(A12_TRACE_SYSTEM, "couldn't not unlink node");
2197 return;
2198 }
2199
2200 a12int_trace(A12_TRACE_ALLOC, "unlinked:stream=%"PRIu64, node->streamid);
2201 S->active_blobs--;
2202 *dst = next;
2203 close(node->fd);
2204 DYNAMIC_FREE(node);
2205 }
2206
queue_node(struct a12_state * S,struct blob_out * node)2207 static size_t queue_node(struct a12_state* S, struct blob_out* node)
2208 {
2209 uint16_t nts;
2210 size_t cap = node->left;
2211 if (cap == 0 || cap > 64096)
2212 cap = 64096;
2213
2214 bool die;
2215 void* buf = read_data(node->fd, cap, &nts, &die);
2216 if (!buf){
2217 /* MISSING: SEND STREAM CANCEL */
2218 if (die){
2219 unlink_node(S, node);
2220 }
2221 return 0;
2222 }
2223
2224 /* not activated, so build a header first */
2225 if (!node->active){
2226 uint8_t outb[CONTROL_PACKET_SIZE];
2227 build_control_header(S, outb, COMMAND_BINARYSTREAM);
2228 outb[16] = node->chid;
2229
2230 S->out_stream++;
2231 pack_u32(S->out_stream, &outb[18]); /* [18 .. 21] stream-id */
2232 pack_u64(node->left, &outb[22]); /* [22 .. 29] total-size */
2233 outb[30] = node->type;
2234 /* 31..34 : id-token, ignored for now */
2235 memcpy(&outb[35], node->checksum, 16);
2236 a12int_append_out(S, STATE_CONTROL_PACKET, outb, CONTROL_PACKET_SIZE, NULL, 0);
2237 node->active = true;
2238 node->streamid = S->out_stream;
2239 a12int_trace(A12_TRACE_BTRANSFER,
2240 "kind=created:size=%zu:stream:%"PRIu64":ch=%d",
2241 node->left, node->streamid, node->chid
2242 );
2243 }
2244
2245 /* prepend the bstream header */
2246 uint8_t outb[1 + 4 + 2];
2247 outb[0] = node->chid;
2248 pack_u32(node->streamid, &outb[1]);
2249 pack_u16(nts, &outb[5]);
2250
2251 a12int_append_out(S, STATE_BLOB_PACKET, buf, nts, outb, sizeof(outb));
2252
2253 if (node->left){
2254 node->left -= nts;
2255 if (!node->left){
2256
2257 unlink_node(S, node);
2258 }
2259 else {
2260 a12int_trace(A12_TRACE_BTRANSFER,
2261 "kind=block:stream=%"PRIu64":ch=%d:size=%"PRIu16":left=%zu",
2262 node->streamid, (int)node->chid, nts, node->left
2263 );
2264 }
2265 }
2266 else {
2267 a12int_trace(A12_TRACE_BTRANSFER,
2268 "kind=block:stream=%zu:ch=%d:streaming:size=%"PRIu16,
2269 (size_t) node->streamid, (int)node->chid, nts
2270 );
2271 }
2272
2273 DYNAMIC_FREE(buf);
2274 return nts;
2275 }
2276
append_blob(struct a12_state * S,int mode)2277 static size_t append_blob(struct a12_state* S, int mode)
2278 {
2279 /* find suitable blob */
2280 if (mode == A12_FLUSH_NOBLOB || !S->pending)
2281 return 0;
2282 /* only current channel? */
2283 else if (mode == A12_FLUSH_CHONLY){
2284 struct blob_out* parent = S->pending;
2285 while (parent){
2286 if (parent->chid == S->out_channel)
2287 return queue_node(S, parent);
2288 parent = parent->next;
2289 }
2290 return 0;
2291 }
2292 return queue_node(S, S->pending);
2293 }
2294
2295 size_t
a12_flush(struct a12_state * S,uint8_t ** buf,int allow_blob)2296 a12_flush(struct a12_state* S, uint8_t** buf, int allow_blob)
2297 {
2298 if (S->state == STATE_BROKEN || S->cookie != 0xfeedface)
2299 return 0;
2300
2301 /* nothing in the outgoing buffer? then we can pull in whatever data transfer
2302 * is pending, if there are any queued */
2303 if (S->buf_ofs == 0){
2304 if (allow_blob > A12_FLUSH_NOBLOB && append_blob(S, allow_blob)){}
2305 else
2306 return 0;
2307 }
2308
2309 size_t rv = S->buf_ofs;
2310 int old_ind = S->buf_ind;
2311
2312 /* switch out "output buffer" and return how much there is to send, it is
2313 * expected that by the next non-0 returning channel_flush, its contents have
2314 * been pushed to the other side */
2315 *buf = S->bufs[S->buf_ind];
2316 S->buf_ofs = 0;
2317 S->buf_ind = (S->buf_ind + 1) % 2;
2318 a12int_trace(A12_TRACE_ALLOC, "locked %d, new buffer: %d", old_ind, S->buf_ind);
2319
2320 return rv;
2321 }
2322
2323 int
a12_poll(struct a12_state * S)2324 a12_poll(struct a12_state* S)
2325 {
2326 if (!S || S->state == STATE_BROKEN || S->cookie != 0xfeedface)
2327 return -1;
2328
2329 return S->buf_ofs || S->pending ? 1 : 0;
2330 }
2331
/*
 * Accessor for the connection authentication phase (value of S->authentic).
 * No validation of [S] is performed; the caller must pass a live state.
 */
int
a12_auth_state(struct a12_state* S)
{
	return S->authentic;
}
2337
/*
 * Queue a NEWCH control packet announcing channel [chid] carrying a
 * segment of type [segkind], paired to the request via [cookie].
 */
void
a12_channel_new(struct a12_state* S,
	uint8_t chid, uint8_t segkind, uint32_t cookie)
{
	uint8_t outb[CONTROL_PACKET_SIZE];
	build_control_header(S, outb, COMMAND_NEWCH);

	outb[18] = chid;    /* [18] channel id */
	outb[19] = segkind; /* [19] segment kind */
	outb[20] = 0;       /* [20] presumably reserved/flags - TODO confirm */
	pack_u32(cookie, &outb[21]); /* [21 .. 24] pairing cookie */

	a12int_append_out(S, STATE_CONTROL_PACKET, outb, CONTROL_PACKET_SIZE, NULL, 0);
}
2352
2353 void
a12_set_channel(struct a12_state * S,uint8_t chid)2354 a12_set_channel(struct a12_state* S, uint8_t chid)
2355 {
2356 a12int_trace(A12_TRACE_SYSTEM, "channel_out=%"PRIu8, chid);
2357 S->out_channel = chid;
2358 }
2359
/*
 * Encode and queue [n_samples] raw audio samples from [buf] on the current
 * output channel, using format description [cfg] and encoder tuning [opts].
 */
void
a12_channel_aframe(struct a12_state* S,
	shmif_asample* buf,
	size_t n_samples,
	struct a12_aframe_cfg cfg,
	struct a12_aframe_opts opts)
{
/* silently ignore calls on dead or invalid state machines */
	if (!S || S->cookie != 0xfeedface || S->state == STATE_BROKEN)
		return;

/* use a fix size now as the outb- writer lacks queueing and interleaving */
	size_t chunk_sz = 16428;

	a12int_trace(A12_TRACE_AUDIO,
		"encode %zu samples @ %"PRIu32" Hz /%"PRIu8" ch",
		n_samples, cfg.samplerate, cfg.channels
	);
/* n_samples/2: presumably interleaved stereo frames - TODO confirm against
 * the araw encoder's expectations */
	a12int_encode_araw(S, S->out_channel, buf, n_samples/2, cfg, opts, chunk_sz);
}
2379
2380 /*
2381 * This function merely performs basic sanity checks of the input sources
2382 * then forwards to the corresponding _encode method that match the set opts.
2383 */
2384 void
a12_channel_vframe(struct a12_state * S,struct shmifsrv_vbuffer * vb,struct a12_vframe_opts opts)2385 a12_channel_vframe(struct a12_state* S,
2386 struct shmifsrv_vbuffer* vb, struct a12_vframe_opts opts)
2387 {
2388 if (!S || S->cookie != 0xfeedface || S->state == STATE_BROKEN)
2389 return;
2390
2391 /* use a fix size now as the outb- writer lacks queueing and interleaving */
2392 size_t chunk_sz = 32768;
2393
2394 /* avoid dumb updates */
2395 size_t x = 0, y = 0, w = vb->w, h = vb->h;
2396 if (vb->flags.subregion){
2397 x = vb->region.x1;
2398 y = vb->region.y1;
2399 w = vb->region.x2 - x;
2400 h = vb->region.y2 - y;
2401 }
2402
2403 /* sanity check against a dumb client here as well */
2404 if (!w || !h){
2405 a12int_trace(A12_TRACE_SYSTEM, "kind=einval:status=bad dimensions");
2406 return;
2407 }
2408
2409 if (x + w > vb->w || y + h > vb->h){
2410 a12int_trace(A12_TRACE_SYSTEM,
2411 "client provided bad/broken subregion (%zu+%zu > %zu)"
2412 "(%zu+%zu > %zu)", x, w, vb->w, y, h, vb->h
2413 );
2414 x = 0;
2415 y = 0;
2416 w = vb->w;
2417 h = vb->h;
2418 }
2419
2420 /* option: quadtree delta- buffer and only distribute the updated
2421 * cells? should cut down on memory bandwidth on decode side and
2422 * on rle/compressing */
2423
2424 /* dealing with each flag:
2425 * origo_ll - do the coversion in our own encode- stage
2426 * ignore_alpha - set pxfmt to 3
2427 * subregion - feed as information to the delta encoder
2428 * srgb - info to encoder, other leave be
2429 * vpts - tag into the system as it is used for other things
2430 *
2431 * then we have the problem of the meta- area that should take
2432 * other package types when we get there
2433 */
2434 uint32_t sid = S->out_stream;
2435
2436 a12int_trace(A12_TRACE_VIDEO,
2437 "out vframe: %zu*%zu @%zu,%zu+%zu,%zu", vb->w, vb->h, w, h, x, y);
2438 #define argstr S, vb, opts, sid, x, y, w, h, chunk_sz, S->out_channel
2439
2440 size_t now = arcan_timemillis();
2441 switch(opts.method){
2442 case VFRAME_METHOD_RAW_RGB565:
2443 a12int_encode_rgb565(argstr);
2444 break;
2445 case VFRAME_METHOD_NORMAL:
2446 if (vb->flags.ignore_alpha)
2447 a12int_encode_rgb(argstr);
2448 else
2449 a12int_encode_rgba(argstr);
2450 break;
2451 case VFRAME_METHOD_RAW_NOALPHA:
2452 a12int_encode_rgb(argstr);
2453 break;
2454 /* these are the same, the encoder will pick which based on ref. frame */
2455 case VFRAME_METHOD_ZSTD:
2456 case VFRAME_METHOD_DZSTD:
2457 a12int_encode_dzstd(argstr);
2458 break;
2459 case VFRAME_METHOD_H264:
2460 if (S->advenc_broken)
2461 a12int_encode_dzstd(argstr);
2462 else
2463 a12int_encode_h264(argstr);
2464 break;
2465 case VFRAME_METHOD_TPACK_ZSTD:
2466 a12int_encode_ztz(argstr);
2467 break;
2468 default:
2469 a12int_trace(A12_TRACE_SYSTEM, "unknown format: %d\n", opts.method);
2470 return;
2471 break;
2472 }
2473 size_t then = arcan_timemillis();
2474 if (then > now){
2475 S->stats.ms_vframe = then - now;
2476 S->stats.ms_vframe_px = (float)(then - now) / (float)(w * h);
2477 }
2478 }
2479
/*
 * Pack [ev] into an event packet on the current output channel, translating
 * descriptor-carrying events into binary stream transfers where applicable.
 * Returns false only on invalid arguments; events that cannot be forwarded
 * are logged and reported as consumed (true).
 */
bool
a12_channel_enqueue(struct a12_state* S, struct arcan_event* ev)
{
	if (!S || S->cookie != 0xfeedface || !ev)
		return false;

/* descriptor passing events are another complex affair, those that require
 * the caller to provide data outwards should already have been handled at
 * this stage, so it is basically STORE and BCHUNK_OUT that are allowed to
 * be forwarded in order for the other side to a12_queue_bstream */
	if (arcan_shmif_descrevent(ev)){
		switch(ev->tgt.kind){
		case TARGET_COMMAND_STORE:
		case TARGET_COMMAND_BCHUNK_OUT:
/* we need to register a local store that tracks the descriptor here
 * and just replaces the [0].iv field with that key, the other side
 * will forward a bstream correctly then pair */
		break;

/* these events have a descriptor, just map them to the right type of
 * binary transfer event and the other side will synthesize and push
 * the rest */
		case TARGET_COMMAND_RESTORE:
			a12_enqueue_bstream(S,
				ev->tgt.ioevs[0].iv, A12_BTYPE_STATE, false, 0);
			return true;
		break;

/* let the bstream- side determine if the source is streaming or not */
		case TARGET_COMMAND_BCHUNK_IN:
			a12_enqueue_bstream(S,
				ev->tgt.ioevs[0].iv, A12_BTYPE_BLOB, false, 0);
			return true;
		break;

/* weird little detail with the fonthint is that the real fonthint event
 * sans descriptor will be transferred first, the other side will catch
 * it and merge - hence the fallthrough to the packing stage below */
		case TARGET_COMMAND_FONTHINT:
			a12_enqueue_bstream(S,
				ev->tgt.ioevs[0].iv, ev->tgt.ioevs[4].iv == 1 ?
				A12_BTYPE_FONT_SUPPL : A12_BTYPE_FONT, false, 0
			);
		break;
		default:
			a12int_trace(A12_TRACE_SYSTEM,
				"kind=error:status=EINVAL:message=%s", arcan_shmif_eventstr(ev, NULL, 0));
			return true;
		}
	}

/*
 * MAC and cipher state is managed in the append-outb stage
 */
	uint8_t outb[header_sizes[STATE_EVENT_PACKET]];
	size_t hdr = SEQUENCE_NUMBER_SIZE + 1;
	outb[SEQUENCE_NUMBER_SIZE] = S->out_channel;
	step_sequence(S, outb);

/* silently drop events that do not serialize into the packet */
	ssize_t step = arcan_shmif_eventpack(ev, &outb[hdr], sizeof(outb) - hdr);
	if (-1 == step)
		return true;

	a12int_append_out(S, STATE_EVENT_PACKET, outb, step + hdr, NULL, 0);

	a12int_trace(A12_TRACE_EVENT,
		"kind=enqueue:eventstr=%s", arcan_shmif_eventstr(ev, NULL, 0));
	return true;
}
2549
2550 void
a12_set_trace_level(int mask,FILE * dst)2551 a12_set_trace_level(int mask, FILE* dst)
2552 {
2553 a12_trace_targets = mask;
2554 a12_trace_dst = dst;
2555 }
2556
/* Human readable names for the A12_TRACE_ group bits, indexed by the log2
 * of the bit value (see a12int_group_tostr / i_log2) */
static const char* groups[] = {
	"video",
	"audio",
	"system",
	"event",
	"transfer",
	"debug",
	"missing",
	"alloc",
	"crypto",
	"vdetail",
	"btransfer",
	"security"
};
2571
i_log2(uint32_t n)2572 static unsigned i_log2(uint32_t n)
2573 {
2574 unsigned res = 0;
2575 while (n >>= 1) res++;
2576 return res;
2577 }
2578
a12int_group_tostr(int group)2579 const char* a12int_group_tostr(int group)
2580 {
2581 unsigned ind = i_log2(group);
2582 if (ind >= sizeof(groups)/sizeof(groups[0]))
2583 return "bad";
2584 else
2585 return groups[ind];
2586 }
2587
/*
 * Install (or clear, by passing NULL) the callback that receives binary
 * transfer related events, along with an opaque [tag] handed back on each
 * invocation. A NULL [S] is ignored.
 */
void
a12_set_bhandler(struct a12_state* S,
	struct a12_bhandler_res (*on_bevent)(
		struct a12_state* S, struct a12_bhandler_meta, void* tag),
	void* tag)
{
	if (!S)
		return;

	S->binary_handler = on_bevent;
	S->binary_handler_tag = tag;
}
2600
a12_state_iostat(struct a12_state * S)2601 struct a12_iostat a12_state_iostat(struct a12_state* S)
2602 {
2603 /* just an accessor, values are updated continously */
2604 return S->stats;
2605 }
2606
a12_sensitive_alloc(size_t nb)2607 void* a12_sensitive_alloc(size_t nb)
2608 {
2609 return arcan_alloc_mem(nb,
2610 ARCAN_MEM_EXTSTRUCT, ARCAN_MEM_SENSITIVE | ARCAN_MEM_BZERO, ARCAN_MEMALIGN_PAGE);
2611 }
2612
a12int_step_vstream(struct a12_state * S,uint32_t id)2613 void a12int_step_vstream(struct a12_state* S, uint32_t id)
2614 {
2615 size_t slot = S->congestion_stats.pending;
2616
2617 /* clamp so that sz-1 compared to sz-2 can indicate how reckless the api
2618 * user is with regards to backpressure */
2619 if (S->congestion_stats.pending < VIDEO_FRAME_DRIFT_WINDOW - 1)
2620 S->congestion_stats.pending++;
2621
2622 S->congestion_stats.frame_window[slot] = S->out_stream++;
2623 }
2624
a12_ok(struct a12_state * S)2625 bool a12_ok(struct a12_state* S)
2626 {
2627 return S->state != STATE_BROKEN;
2628 }
2629
a12_sensitive_free(void * ptr,size_t buf)2630 void a12_sensitive_free(void* ptr, size_t buf)
2631 {
2632 volatile unsigned char* pos = ptr;
2633 for (size_t i = 0; i < buf; i++){
2634 pos[i] = 0;
2635 }
2636 arcan_mem_free(ptr);
2637 }
2638