1 /*
2  * Runtime in-process Debug Interface
3  * Copyright 2018-2019, Björn Ståhl
4  * License: 3-Clause BSD, see COPYING file in arcan source repository.
5  * Reference: http://arcan-fe.com
6  * Description: This provides a collection of useful user-initiated
7  * debug control and process exploration tools.
8  * Notes:
9  *  - simple:
10  *    - MiM window controls (buffer size, streaming mode, view mode)
11  *    - inject latency
12  *    - non-visual MiM
13  *    - hash-table based FD tracking and tagging debugif FDs in FD view
14  *
15  *  - interesting / unexplored avenues:
16  *    - seccomp- renderer
17  *    - sanitizer
18  *    - statistic profiler
19  *    - detach intercept / redirect window
20  *    - dynamic descriptor list refresh
21  *    - runtime symbol hijack
22  *    - memory pages to sense_mem deployment
23  *      - use llvm-symbolizer to resolve addresses in memory to symbols
24  *        and show as labels/overlays on the bytes themselves
25  *    - short-write commit (randomize commit-sizes)
26  *    - enumerate modules and their symbols? i.e. dl_iterate_phdr
27  *      and trampoline? (a hedged sketch follows after the includes below)
28  *    - 'special' tactics, e.g. malloc- intercept + backtrace on write
29  *      to pair buffers and transformations, fetch from trap page pool
30  *      and mprotect- juggle to find crypto and compression functions
31  *    - detach-thread and detach- process for FD intercept
32  *    - browse- filesystem based on cwd
33  *    - message path for the debugif to have a separate log queue
34  *    - ksy loading
35  *    - special syscall triggers (though this requires a tracer process)
36  *      - descriptor modifications
37  *      - mmap
39  *    - key structure interpretation:
40  *      - malloc stats
41  *      - shmif-mempage inspection
42  *
43  * It is likely that some of these and other avenues should be written as
44  * separate tools that jack into the menu (see src/tools/adbginject) rather
45  * than making the code here too extensive.
46  *
47  * Many of these moves are quite risky without other good primitives in
48  * place first; the most pressing one is probably 'suspend all other
49  * threads'.
50  *
51  * An interesting source of problems: while the debugif is active, no real
52  * output can be allowed from this thread, tui or shmif, as any file
53  * descriptor might be redirected and cause locking.
54  */
55 #include "arcan_shmif.h"
56 #include "arcan_shmif_interop.h"
57 #include "arcan_shmif_debugif.h"
58 #include <pthread.h>
59 #include <dlfcn.h>
60 #include <errno.h>
61 #include <ctype.h>
62 #include <unistd.h>
63 #include <stdatomic.h>
64 #include <fcntl.h>
65 #include <sys/time.h>
66 #include <sys/resource.h>
67 #include <sys/stat.h>
68 #include <sys/types.h>
69 #include <sys/mman.h>
70 #include <sys/wait.h>
71 #include <limits.h>
72 #include <poll.h>
73 #include <signal.h>
74 
75 #ifdef __LINUX
76 #include <sys/prctl.h>
77 #endif
78 
79 /*
80  * Ideally all of this would be fork- and async-signal safe, but that is a
81  * tall order to fill; it would need much more work and control to pool and
82  * preallocate heap memory etc.
83  */
84 
85 #define ARCAN_TUI_DYNAMIC
86 #include "arcan_tui.h"
87 #include "arcan_tuisym.h"
88 #include "arcan_tui_listwnd.h"
89 #include "arcan_tui_bufferwnd.h"
90 
91 #ifndef COUNT_OF
92 #define COUNT_OF(x) \
93 	((sizeof(x)/sizeof(0[x])) / ((size_t)(!(sizeof(x) % sizeof(0[x])))))
94 #endif
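/*
 * Illustrative sketch only (not wired into any menu): the 'enumerate modules
 * and their symbols' note in the header could start from dl_iterate_phdr.
 * Assumes a glibc/musl style <link.h>; the FILE* sink is a placeholder.
 */
#if 0
#include <link.h>

static int dump_module(struct dl_phdr_info* info, size_t size, void* tag)
{
	FILE* out = tag;
	fprintf(out, "%s @ %p (%d segments)\n",
		info->dlpi_name && info->dlpi_name[0] ? info->dlpi_name : "(self)",
		(void*) info->dlpi_addr, (int) info->dlpi_phnum);
	return 0; /* 0 == continue iterating */
}

static void enumerate_modules(FILE* out)
{
	dl_iterate_phdr(dump_module, out);
}
#endif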
95 
96 enum debug_cmd_id {
97 	TAG_CMD_SPAWN = 0,
98 	TAG_CMD_ENVIRONMENT = 1,
99 	TAG_CMD_DESCRIPTOR = 2,
100 	TAG_CMD_PROCESS = 3,
101 	TAG_CMD_EXTERNAL = 4
102 };
103 
104 static volatile _Atomic int beancounter;
105 
106 /*
107  * the menu code here is generic enough that it should be moved elsewhere,
108  * possibly to frameserver/util or a single-header TUI suppl file
109  */
110 struct debug_ctx {
111 	struct arcan_shmif_cont cont;
112 	struct tui_context* tui;
113 /* track if we are in store/restore mode */
114 	int last_fd;
115 
116 /* track if we are in interception state or not */
117 	int infd;
118 	int outfd;
119 
120 /* when the UI thread has been shut down */
121 	bool dead;
122 
123 /* hook for custom menu entries */
124 	struct debugint_ext_resolver resolver;
125 };
126 
127 enum mim_buffer_mode {
128 	MIM_MODE_CHUNK,
129 	MIM_MODE_STREAM,
130 };
131 
132 struct mim_buffer_opts {
133 	size_t size;
134 	enum mim_buffer_mode mode;
135 };
136 
137 /* basic TUI convenience loop / setups */
138 static struct tui_list_entry* run_listwnd(struct debug_ctx* dctx,
139 	struct tui_list_entry* list, size_t n_elem, const char* ident,
140 	size_t* pos);
141 
142 static int run_buffer(struct tui_context* tui, uint8_t* buffer,
143 	size_t buf_sz, struct tui_bufferwnd_opts opts, const char* ident);
144 
145 static void run_mitm(struct tui_context* tui, struct mim_buffer_opts opts,
146 	int fd, bool thdwnd, bool mitm, bool mask, const char* label);
147 
148 static void show_error_message(struct tui_context* tui, const char* msg)
149 {
150 	if (!msg)
151 		return;
152 
153 	run_buffer(tui,
154 		(uint8_t*) msg, strlen(msg), (struct tui_bufferwnd_opts){
155 		.read_only = true,
156 		.view_mode = BUFFERWND_VIEW_ASCII
157 	}, "error");
158 }
159 
160 static const char* stat_to_str(struct stat* s)
161 {
162 	const char* ret = "unknown";
163 	switch(s->st_mode & S_IFMT){
164 	case S_IFIFO:
165 		ret = "fifo";
166 	break;
167 	case S_IFCHR:
168 		ret = "char";
169 	break;
170 	case S_IFDIR:
171 		ret = " dir";
172 	break;
173 	case S_IFREG:
174 		ret = "file";
175 	break;
176 	case S_IFBLK:
177 		ret = "block";
178 	break;
179 	case S_IFSOCK:
180 		ret = "sock";
181 	break;
182 	default:
183 	break;
184 	}
185 	return ret;
186 }
187 
188 enum intercept_type {
189 	INTERCEPT_MITM_PIPE,
190 	INTERCEPT_MITM_SOCKET,
191 	INTERCEPT_MAP
192 };
193 
194 static int can_intercept(struct stat* s)
195 {
196 	int mode = s->st_mode & S_IFMT;
197 	if (mode == S_IFIFO)
198 		return INTERCEPT_MITM_PIPE;
199 	else if (mode == S_IFREG)
200 		return INTERCEPT_MAP;
201 	else if (mode == S_IFSOCK)
202 		return INTERCEPT_MITM_SOCKET;
203 	return -1;
204 }
205 
206 static void fd_to_flags(char buf[static 8], int fd)
207 {
208 	buf[7] = '\0';
209 
210 /* first, cloexec */
211 	buf[0] = '_';
212 	int rv = fcntl(fd, F_GETFD);
213 	if (-1 == rv){
214 		buf[0] = '?';
215 	}
216 	else if (!(rv & FD_CLOEXEC)){
217 		buf[0] = 'x';
218 	}
219 
220 /* now flags */
221 	buf[1] = '_';
222 	rv = fcntl(fd, F_GETFL);
223 	if (-1 == rv){
224 		buf[1] = '?';
225 	}
226 	else if ((rv & O_NONBLOCK))
227 		;
228 	else
229 		buf[1] = 'B';
230 
231 	if (rv & O_RDWR){
232 		buf[2] = 'r';
233 		buf[3] = 'w';
234 	}
235 	else if (rv & O_WRONLY){
236 		buf[2] = '_';
237 		buf[3] = 'w';
238 	}
239 	else {
240 		buf[2] = 'r';
241 		buf[3] = '_';
242 	}
243 
244 /* Other options worth exposing here (see the hedged sketch after this
245  * function): seals (F_GET_SEALS) and advisory locks (F_GETLK) */
247 }
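/*
 * Illustrative sketch of the seal/lock probes mentioned at the end of
 * fd_to_flags (not part of the flag string yet). F_GET_SEALS is Linux-
 * specific (memfd/tmpfs), F_GETLK is POSIX; the character codes are made up.
 */
#if 0
static void fd_to_extflags(char buf[static 3], int fd)
{
	buf[0] = buf[1] = '_';
	buf[2] = '\0';

#ifdef F_GET_SEALS
/* any active seal gets flagged as 's' */
	int seals = fcntl(fd, F_GET_SEALS);
	if (-1 != seals && seals != 0)
		buf[0] = 's';
#endif

/* probe for a conflicting advisory lock on the whole file */
	struct flock fl = {.l_type = F_WRLCK, .l_whence = SEEK_SET};
	if (-1 != fcntl(fd, F_GETLK, &fl) && fl.l_type != F_UNLCK)
		buf[1] = 'l';
}
#endif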
248 
249 static bool mim_flush(
250 	struct tui_context* tui, uint8_t* buf, size_t buf_sz, int fdout)
251 {
252 	if (-1 == fdout)
253 		return true;
254 
255 	struct tui_bufferwnd_opts opts = {
256 		.read_only = true,
257 		.view_mode = BUFFERWND_VIEW_ASCII,
258 		.allow_exit = false
259 	};
260 
261 	char msg[32];
262 	size_t buf_pos = 0;
263 	snprintf(msg, 32, "Flushing, %zu / %zu bytes", buf_pos, buf_sz);
264 
265 /* it's not that we really can do anything in the context of errors here */
266 	int rfl = fcntl(fdout, F_GETFL);
267 	fcntl(fdout, F_SETFL, rfl | O_NONBLOCK);
268 
269 	struct pollfd pfd[2] = {
270 		{
271 			.fd = fdout,
272 			.events = POLLOUT | POLLERR | POLLNVAL | POLLHUP
273 		},
274 		{
275 			.events = POLLIN | POLLERR | POLLNVAL | POLLHUP
276 		}
277 	};
278 
279 /* keep going until the buffer is sent or something happens */
280 	int status;
281 	while(buf_pos < buf_sz &&
282 		1 == (status = arcan_tui_bufferwnd_status(tui))){
283 		arcan_tui_get_handles(&tui, 1, &pfd[1].fd, 1);
284 		poll(pfd, 2, -1);
285 		if (pfd[0].revents){
286 /* error */
287 			if (!(pfd[0].revents & POLLOUT)){
288 				break;
289 			}
290 
291 /* write and update output window */
292 			ssize_t nw = write(fdout, &buf[buf_pos], buf_sz - buf_pos);
293 			if (nw > 0){
294 				buf_pos += nw;
295 				snprintf(msg, 32, "Flushing, %zu / %zu bytes", buf_pos, buf_sz);
296 				arcan_tui_bufferwnd_synch(tui, (uint8_t*)msg, strlen(msg), 0);
297 			}
298 		}
299 
300 /* and always update the window */
301 		arcan_tui_process(&tui, 1, NULL, 0, 0);
302 		arcan_tui_refresh(tui);
303 	}
304 
305 /* if the context has died and we have data left to flush, try one final big
306  * write before calling it a day or we may leave the client in a bad state */
307 	if (buf_pos < buf_sz){
308 		fcntl(fdout, F_SETFL, rfl & (~O_NONBLOCK));
309 		while (buf_pos < buf_sz){
310 			ssize_t nw = write(fdout, &buf[buf_pos], buf_sz - buf_pos);
311 			if (-1 == nw){
312 				if (errno == EAGAIN || errno == EINTR)
313 					continue;
314 				break;
315 			}
316 			buf_pos += nw;
317 		}
318 	}
319 
320 /* restore initial flag state */
321 	fcntl(fdout, F_SETFL, rfl);
322 
323 /* there is no real recovery should this be terminated prematurely */
324 	return buf_pos == buf_sz;
325 }
326 
327 static void mim_window(
328 	struct tui_context* tui, int fdin, int fdout, struct mim_buffer_opts bopts)
329 {
330 /*
331  * other options:
332  * streaming or windowed,
333  * window size,
334  * read/write
335  */
336 	struct tui_bufferwnd_opts opts = {
337 		.read_only = false,
338 		.view_mode = BUFFERWND_VIEW_HEX_DETAIL,
339 		.allow_exit = true
340 	};
341 
342 /* switch window, wait for buffer */
343 	size_t buf_sz = bopts.size;
344 	size_t buf_pos = 0;
345 	uint8_t* buf = malloc(buf_sz);
346 	if (!buf)
347 		return;
348 
349 	memset(buf, '\0', buf_sz);
350 
351 /* it would be convenient to add a message area here; there's also the
352  * titlebar and buffer control - ideally this would even be a re-usable helper
353  * shipped with bufferwnd rather than living here */
354 refill:
355 	arcan_tui_bufferwnd_setup(tui,
356 		buf, 0, &opts, sizeof(struct tui_bufferwnd_opts));
357 
358 	bool read_data = true;
359 	int status;
360 
361 	while(1 == (status = arcan_tui_bufferwnd_status(tui))){
362 		struct tui_process_res res;
363 		if (read_data){
364 			res = arcan_tui_process(&tui, 1, &fdin, 1, -1);
365 		}
366 		else
367 			res = arcan_tui_process(&tui, 1, NULL, 0, -1);
368 
369 /* fill buffer if needed */
370 		if (res.ok){
371 			if (buf_sz - buf_pos > 0){
372 				ssize_t nr = read(fdin, &buf[buf_pos], buf_sz - buf_pos);
373 				if (nr > 0){
374 					buf_pos += nr;
375 					arcan_tui_bufferwnd_synch(tui,
376 						buf, buf_pos, arcan_tui_bufferwnd_tell(tui, NULL));
377 
378 					if (buf_sz == buf_pos)
379 						read_data = false;
380 				}
381 			}
382 		}
383 
384 /* buffer updated? grow it */
385 		if (-1 == arcan_tui_refresh(tui) && errno == EINVAL)
386 			break;
387 	}
388 
389 /* commit- flush and reset; if the connection is dead there is no real recourse
390  * until we implement a global 'lock-all-threads' and then continue this one, as
391  * writes may continue to come in on our fdin at a higher rate than they drain
392  * to fdout, which in turn would block the dup2 swapback */
393 	if (mim_flush(tui, buf, buf_pos, fdout) && status == 0){
394 		buf_pos = 0;
395 		arcan_tui_bufferwnd_tell(tui, &opts);
396 		read_data = true;
397 		arcan_tui_bufferwnd_release(tui);
398 		goto refill;
399 	}
400 
401 /* caller will clean up descriptors */
402 	arcan_tui_bufferwnd_release(tui);
403 	arcan_tui_update_handlers(tui,
404 		&(struct tui_cbcfg){}, NULL, sizeof(struct tui_cbcfg));
405 	free(buf);
406 }
407 
408 static void buf_window(struct tui_context* tui, int source, const char* lbl)
409 {
410 /* just read-only / mmap for now */
411 	struct stat fs;
412 	if (-1 == fstat(source, &fs) || !fs.st_size)
413 		return;
414 
415 	void* buf = mmap(NULL, fs.st_size, PROT_READ, MAP_PRIVATE, source, 0);
416 	if (buf == MAP_FAILED)
417 		return;
418 
419 	struct tui_bufferwnd_opts opts = {
420 		.read_only = true,
421 		.view_mode = BUFFERWND_VIEW_HEX_DETAIL,
422 		.allow_exit = true
423 	};
424 
425 	run_buffer(tui, buf, fs.st_size, opts, lbl);
426 	munmap(buf, fs.st_size);
427 }
428 
429 static void setup_mitm(
430 	struct tui_context* tui, int source, bool mask, struct mim_buffer_opts opts)
431 {
432 	int fdin = source;
433 	int fdout = -1;
434 
435 /* need a cloexec pair of pipes */
436 	int pair[2];
437 	if (-1 == pipe(pair))
438 		return;
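/* a hedged aside: plain pipe() does not set FD_CLOEXEC, so to match the
 * intent of the comment above one would either use pipe2(pair, O_CLOEXEC)
 * where available, or mark both ends manually:
 *
 *   for (size_t i = 0; i < 2; i++){
 *     int fl = fcntl(pair[i], F_GETFD);
 *     if (-1 != fl)
 *       fcntl(pair[i], F_SETFD, fl | FD_CLOEXEC);
 *   }
 */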
439 
440 /* set numbers and behavior based on direction */
441 	int orig = dup(source);
442 	int fd_in, fd_out, scratch;
443 
444 	int fl = fcntl(source, F_GETFL);
445 	if ((fl & O_WRONLY) || source == STDOUT_FILENO){
446 		char ident[32];
447 		snprintf(ident, 32, "outgoing: %d", source);
448 		arcan_tui_ident(tui, ident);
449 		dup2(pair[1], source);
450 		close(pair[1]);
451 		fd_in = pair[0];
452 		scratch = fd_in;
453 		fd_out = mask ? -1 : orig;
454 	}
455 	else {
456 		char ident[32];
457 		snprintf(ident, 32, "incoming: %d", source);
458 		arcan_tui_ident(tui, ident);
459 		dup2(pair[0], source);
460 		close(pair[0]);
461 		fd_in = orig;
462 		fd_out = mask ? -1 : pair[1];
463 		scratch = pair[1];
464 	}
465 
466 /* blocking / non-blocking is not handled correctly */
467 	mim_window(tui, fd_in, fd_out, opts);
468 	dup2(orig, source);
469 	close(orig);
470 	close(scratch);
471 }
472 
473 /*
474  * intermediate menu for possible descriptor actions
475  */
476 enum {
477 	DESC_COPY = 0,
478 	DESC_VIEW,
479 	DESC_MITM_PIPE,
480 	DESC_MITM_REDIR,
481 	DESC_MITM_BIDI,
482 	DESC_MITM_RO,
483 	DESC_MITM_WO,
484 	WINDOW_METHOD
485 };
486 static void run_descriptor(
487 	struct debug_ctx* dctx, int fdin, int type, const char* label)
488 {
489 	if (fdin <= 2){
490 		type = INTERCEPT_MITM_PIPE;
491 	}
492 
493 	const int buffer_sizes[] = {512, 1024, 2048, 4096, 8192, 16384};
494 	struct mim_buffer_opts bopts = {
495 		.size = 4096,
496 	};
497 	struct tui_list_entry lents[6];
498 	char* strpool[6] = {NULL};
499 	size_t nents = 0;
500 	bool spawn_new = false;
501 	struct tui_list_entry* ent;
502 	size_t pos = 0;
503 
504 /* mappables are typically files or shared memory */
505 	if (type == INTERCEPT_MAP){
506 		lents[nents++] = (struct tui_list_entry){
507 			.label = "Copy",
508 			.tag = DESC_COPY
509 		};
510 		lents[nents++] = (struct tui_list_entry){
511 			.label = "View",
512 			.tag = DESC_VIEW
513 		};
514 	}
515 /* on solaris/some BSDs we have these as bidirectional / go with socket */
516 	else if (type == INTERCEPT_MITM_PIPE){
517 		lents[nents++] = (struct tui_list_entry){
518 			.label = "Intercept",
519 			.tag = DESC_MITM_PIPE
520 		};
521 		lents[nents++] = (struct tui_list_entry){
522 			.label = "Redirect",
523 			.tag = DESC_MITM_REDIR
524 		};
525 
526 	/* display more metadata:
527  * possibly: F_GETPIPE_SZ */
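/* a hedged illustration of that probe: on Linux the pipe capacity can be
 * read (and resized) through fcntl, e.g.
 *
 *   #ifdef F_GETPIPE_SZ
 *   int cap = fcntl(fdin, F_GETPIPE_SZ);
 *   #endif
 *
 * where a cap > 0 could be appended to the entry label, or drive a
 * hypothetical "Resize" action through F_SETPIPE_SZ. */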
528 	}
529 	else if (type == INTERCEPT_MITM_SOCKET){
530 		lents[nents++] = (struct tui_list_entry){
531 			.label = "Intercept (BiDi)",
532 			.tag = DESC_MITM_BIDI
533 		};
534 		lents[nents++] = (struct tui_list_entry){
535 			.label = "Intercept (Read)",
536 			.tag = DESC_MITM_RO
537 		};
538 		lents[nents++] = (struct tui_list_entry){
539 			.label = "Intercept (Write)",
540 			.tag = DESC_MITM_WO
541 		};
542 /* other tasty options:
543  * - fdswap (on pending descriptor for OOB)
544  */
545 	}
546 /* F_GETLEASE, F_GET_SEALS */
547 	lents[nents++] = (struct tui_list_entry){
548 		.label = "Window: Current",
549 		.tag = WINDOW_METHOD
550 	};
551 
552 rerun:
553 	ent = run_listwnd(dctx, lents, nents, label, &pos);
554 	if (!ent){
555 		return;
556 	}
557 
558 	switch(ent->tag){
559 	case DESC_COPY:{
560 		arcan_tui_announce_io(dctx->tui, true, NULL, "*");
561 		return run_descriptor(dctx, fdin, type, label);
562 	}
563 	break;
564 	case WINDOW_METHOD:
565 		spawn_new = !spawn_new;
566 		ent->label = spawn_new ? "Window: New" : "Window: Current";
567 		goto rerun;
568 	break;
569 	case DESC_VIEW:
570 		run_mitm(dctx->tui, bopts, fdin, spawn_new, false, true, label);
571 	break;
572 	case DESC_MITM_PIPE:
573 		run_mitm(dctx->tui, bopts, fdin, spawn_new, true, false, label);
574 	break;
575 	case DESC_MITM_REDIR:
576 		run_mitm(dctx->tui, bopts, fdin, spawn_new, true, true, label);
577 	break;
578 	case DESC_MITM_BIDI:
579 /*
580  * we need another window for this one to work or a toggle to
581  * swap in/out (perhaps better actually)
582  */
583 	break;
584 	case DESC_MITM_RO:
585 /*
586  * the nastiness here is that we need to proxy oob stuff,
587  * socket API is such a pile of garbage.
588  */
589 	break;
590 	case DESC_MITM_WO:
591 	break;
592 	}
593 }
594 
595 static void bchunk(struct tui_context* T,
596 	bool input, uint64_t size, int fd, const char* type, void* tag)
597 {
598 	struct debug_ctx* dctx = tag;
599 
600 	if (dctx->last_fd == -1)
601 		return;
602 
603 	struct arcan_shmif_cont* c = arcan_tui_acon(T);
604 
605 	int copy_last = arcan_shmif_dupfd(dctx->last_fd, -1, true);
606 	if (-1 == copy_last){
607 		return;
608 	}
609 
610 	int copy_new = arcan_shmif_dupfd(fd, -1, true);
611 	if (-1 == copy_new){
612 		close(copy_last);
613 		return;
614 	}
615 
616 /* bgcopy takes care of closing */
617 	if (input){
618 		arcan_shmif_bgcopy(c, copy_new, copy_last, -1, 0);
619 	}
620 	else {
621 		arcan_shmif_bgcopy(c, copy_last, copy_new, -1, 0);
622 	}
623 }
624 
625 static struct tui_list_entry* run_listwnd(struct debug_ctx* dctx,
626 	struct tui_list_entry* list, size_t n_elem, const char* ident, size_t* pos)
627 {
628 	struct tui_list_entry* ent = NULL;
629 	arcan_tui_update_handlers(dctx->tui,
630 		&(struct tui_cbcfg){
631 			.bchunk = bchunk,
632 			.tag = dctx
633 		}, NULL, sizeof(struct tui_cbcfg));
634 	arcan_tui_listwnd_setup(dctx->tui, list, n_elem);
635 	arcan_tui_ident(dctx->tui, ident);
636 	if (pos)
637 		arcan_tui_listwnd_setpos(dctx->tui, *pos);
638 
639 	for(;;){
640 		struct tui_process_res res = arcan_tui_process(&dctx->tui, 1, NULL, 0, -1);
641 		if (res.errc == TUI_ERRC_OK){
642 			if (-1 == arcan_tui_refresh(dctx->tui) && errno == EINVAL){
643 				break;
644 			}
645 		}
646 
647 		if (arcan_tui_listwnd_status(dctx->tui, &ent)){
648 			break;
649 		}
650 	}
651 
652 	if (pos)
653 		*pos = arcan_tui_listwnd_tell(dctx->tui);
654 	arcan_tui_listwnd_release(dctx->tui);
655 	return ent;
656 }
657 
658 struct wnd_mitm_opts {
659 	struct tui_context* tui;
660 	struct mim_buffer_opts mim_opts;
661 	int fd;
662 	bool mitm;
663 	bool mask;
664 	bool shutdown;
665 	char* label;
666 };
667 
668 /* this pattern/wrapper is used so the code works both as a new thread and in
669  * the current one; the threaded case is slightly racy (though this applies in
670  * general, as we don't have control over what other threads are doing when
671  * the descriptor menu is generated) */
672 static void* wnd_runner(void* opt)
673 {
674 	struct wnd_mitm_opts* mitm = opt;
675 
676 	if (mitm->mitm){
677 		setup_mitm(mitm->tui, mitm->fd, mitm->mask, mitm->mim_opts);
678 	}
679 	else {
680 		buf_window(mitm->tui, mitm->fd, mitm->label);
681 	}
682 
683 	free(mitm->label);
684 	if (mitm->shutdown){
685 		arcan_tui_destroy(mitm->tui, NULL);
686 	}
687 	free(mitm);
688 	return NULL;
689 }
690 
691 static void run_mitm(struct tui_context* tui, struct mim_buffer_opts bopts,
692 	int fd, bool thdwnd, bool mitm, bool mask, const char* label)
693 {
694 /* package in a dynamic 'wnd runner' struct */
695 	struct wnd_mitm_opts* opts = malloc(sizeof(struct wnd_mitm_opts));
696 	struct tui_context* dtui = tui;
697 
698 	if (!opts)
699 		return;
700 
701 /* enter a request-loop for a new tui wnd, just sidestep the normal tui
702  * processing though as we want to be able to switch into an error message
703  * buffer window */
704 	if (thdwnd){
705 		struct arcan_shmif_cont* c = arcan_tui_acon(tui);
706 		arcan_shmif_enqueue(c, &(struct arcan_event){
707 			.ext.kind = ARCAN_EVENT(SEGREQ),
708 			.ext.segreq.kind = SEGID_TUI
709 		});
710 		struct arcan_event ev;
711 		while(arcan_shmif_wait(c, &ev)){
712 			if (ev.category != EVENT_TARGET)
713 				continue;
714 /* could be slightly more careful and pair this to a request token, but
715  * since we don't use clipboard etc. this is fine */
716 			if (ev.tgt.kind == TARGET_COMMAND_NEWSEGMENT){
717 				struct arcan_shmif_cont new =
718 					arcan_shmif_acquire(c, NULL, SEGID_TUI, 0);
719 
720 /* note that tui setup copies, it doesn't alias directly so stack here is ok */
721 				dtui = arcan_tui_setup(&new,
722 					tui, &(struct tui_cbcfg){}, sizeof(struct tui_cbcfg));
723 
724 				if (!dtui){
725 					show_error_message(tui, "Couldn't bind text window");
726 					return;
727 				}
728 				break;
729 			}
730 			else if (ev.tgt.kind == TARGET_COMMAND_REQFAIL){
731 				show_error_message(tui, "Server rejected window request");
732 				return;
733 			}
734 		}
735 	}
736 
737 	*opts = (struct wnd_mitm_opts){
738 		.tui = dtui,
739 		.fd = fd,
740 		.mitm = mitm,
741 		.mask = mask,
742 		.shutdown = thdwnd,
743 		.label = strdup(label)
744 	};
745 
746 	if (thdwnd){
747 		pthread_t pth;
748 		pthread_attr_t pthattr;
749 		pthread_attr_init(&pthattr);
750 		pthread_attr_setdetachstate(&pthattr, PTHREAD_CREATE_DETACHED);
751 
752 		if (0 != pthread_create(&pth, &pthattr, wnd_runner, opts)){
753 			arcan_tui_destroy(opts->tui, NULL);
754 			free(opts->label);
755 			free(opts);
756 			show_error_message(tui, "Couldn't spawn new thread for window");
757 		}
758 	}
759 	else
760 		wnd_runner((void*)opts);
761 }
762 
763 static void get_fd_fn(char* buf, size_t lim, int fd)
764 {
765 #ifdef __LINUX
766 	snprintf(buf, 256, "/proc/self/fd/%d", fd);
767 /* using buf on both arguments should be safe here due to the whole 'need the
768  * full path before able to generate output' criteria, but explicitly terminate
769  * on truncation */
770 	char buf2[256];
771 	int rv = readlink(buf, buf2, 255);
772 	if (rv <= 0){
773 		snprintf(buf, 256, "error: %s", strerror(errno));
774 	}
775 	else{
776 		buf2[rv] = '\0';
777 		snprintf(buf, 256, "%s", buf2);
778 	}
779 #else
780 	snprintf(buf, 256, "Couldn't Resolve");
781 /* BSD: resolve to pathname if possible F_GETPATH */
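/* a hedged sketch of what that could look like, assuming the platform
 * actually exposes F_GETPATH (macOS does, not all BSDs do):
 *
 *   #ifdef F_GETPATH
 *   char path[PATH_MAX];
 *   if (-1 != fcntl(fd, F_GETPATH, path))
 *     snprintf(buf, 256, "%s", path);
 *   #endif
 */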
782 #endif
783 }
784 
785 extern int arcan_fdscan(int** listout);
786 static void gen_descriptor_menu(struct debug_ctx* dctx)
787 {
788 /* grab a list of current descriptors */
789 	int* fds;
790 	ssize_t count = arcan_fdscan(&fds);
791 	if (-1 == count)
792 		return;
793 
794 /* convert this set to list entry values */
795 	struct tui_list_entry* lents = malloc(sizeof(struct tui_list_entry) * count);
796 	if (!lents){
797 		free(fds);
798 		return;
799 	}
800 
801 /* stat it and continue */
802 	struct dent {
803 		struct stat stat;
804 		int fd;
805 	}* dents = malloc(sizeof(struct dent) * count);
806 	if (!dents){
807 		free(lents);
808 		free(fds);
809 		return;
810 	}
811 
812 /* generate an entry even if the stat failed, as the info is indicative
813  * of an FD being detected and then becoming inaccessible or gone */
814 	size_t used = 0;
815 	char buf[256];
816 
817 	for (size_t i = 0; i < count; i++){
818 		struct tui_list_entry* lent = &lents[count];
819 		lents[used] = (struct tui_list_entry){
820 			.tag = i
821 /*		.label = "set later" */
822 /*		.attributes =
823  *        CHECKED : already used?
824  *        PASSIVE : couldn't be stat:ed
825  */
826 		};
827 
828 		size_t lbl_len = 256;
829 		char* lbl_prefix = malloc(lbl_len);
830 		if (!lbl_prefix)
831 			continue;
832 
833 		if (-1 == fstat(fds[i], &dents[used].stat)){
834 /* mark the stat as failed but remember the descriptor and write down */
835 			if (fds[i] > 2){
836 				lents[used].attributes |= LIST_PASSIVE;
837 				snprintf(lbl_prefix, lbl_len,
838 					"%4d[](fail): %s", fds[i], strerror(errno));
839 			}
840 			else {
841 				snprintf(lbl_prefix, lbl_len, "[    ](fail): %s", strerror(errno));
842 			}
843 		}
844 /* resolve more data */
845 		else {
846 			char scratch[8] = {0, 0, 0, 0, 0, 0, 0, 0};
847 			fd_to_flags(scratch, fds[i]);
848 			if (-1 == can_intercept(&dents[used].stat) && fds[i] > 2)
849 				lents[used].attributes |= LIST_PASSIVE;
850 			get_fd_fn(buf, 256, fds[i]);
851 			if (fds[i] > 2){
852 				snprintf(lbl_prefix, lbl_len, "%4d[%s](%s)\t: %s",
853 					fds[i], scratch, stat_to_str(&dents[used].stat), buf);
854 			}
855 			else
856 				snprintf(lbl_prefix, lbl_len, "[%s](%s)\t: %s",
857 					scratch, stat_to_str(&dents[used].stat), buf);
858 		}
859 
860 /* prefix STDIO */
861 		if (fds[i] <= 2){
862 			char buf2[256];
863 			snprintf(buf2, 256, "%s", lbl_prefix);
864 			switch(fds[i]){
865 			case STDIN_FILENO:
866 				snprintf(lbl_prefix, lbl_len, "<DIN%s", buf2);
867 			break;
868 			case STDOUT_FILENO:
869 				snprintf(lbl_prefix, lbl_len, "OUT>%s", buf2);
870 			break;
871 			case STDERR_FILENO:
872 				snprintf(lbl_prefix, lbl_len, "ERR>%s", buf2);
873 			break;
874 			}
875 		}
876 
877 /* stat:able, good start, extract flags and state */
878 		dents[used].fd = fds[i];
879 		lents[used].label = lbl_prefix;
880 		used++;
881 	}
882 
883 /* switch to new menu */
884 
885 /* special treatment for STDIN, STDOUT, STDERR as well, since those can go to a
886  * tty/pty, meaning that our normal 'check if pipe' won't just work by default */
887 
888 /* Pipes are 'easy', we can check if the end is read or write and setup the
889  * interception accordingly. Sockets have types and are bidirectional, so
890  * either we request a new window and use one for the read and one for the
891  * write end - as well as getsockopt on type etc. to figure out if the socket
892  * can actually be intercepted or not. */
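/* a hedged illustration of the getsockopt() probe mentioned above, using only
 * standard calls (<sys/socket.h>), with fd being the descriptor under
 * inspection and 'can_stream' a hypothetical local:
 *
 *   int type;
 *   socklen_t sz = sizeof(type);
 *   bool can_stream = false;
 *   if (0 == getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &sz))
 *     can_stream = (type == SOCK_STREAM);
 */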
893 	struct tui_list_entry* ent =
894 		run_listwnd(dctx, lents, count, "open descriptors", NULL);
895 
896 	if (ent){
897 		int icept = can_intercept(&dents[ent->tag].stat);
898 		dctx->last_fd = dents[ent->tag].fd;
899 		get_fd_fn(buf, 256, dents[ent->tag].fd);
900 		run_descriptor(dctx, dents[ent->tag].fd, icept, buf);
901 		dctx->last_fd = -1;
902 	}
903 
904 	for (size_t i = 0; i < count; i++){
905 		free(lents[i].label);
906 	}
907 
908 	free(fds);
909 	free(lents);
910 
911 /* finished with the buffer window, rebuild the list */
912 	if (ent){
913 		gen_descriptor_menu(dctx);
914 	}
915 }
916 
917 /* we don't want full execvpe kind of behavior here as path might be
918  * messed with, so just use a primitive list */
919 static char* find_exec(const char* fname)
920 {
921 	static const char* const prefix[] = {"/usr/local/bin", "/usr/bin", ".", NULL};
922 	size_t ind = 0;
923 
924 	while(prefix[ind]){
925 		char* buf	= NULL;
926 		if (-1 == asprintf(&buf, "%s/%s", prefix[ind++], fname))
927 			continue;
928 
929 		struct stat fs;
930 		if (-1 == stat(buf, &fs)){
931 			free(buf);
932 			continue;
933 		}
934 
935 		return buf;
936 	}
937 
938 	return NULL;
939 }
940 
941 enum spawn_action {
942 	SPAWN_SHELL = 0,
943 	SPAWN_DEBUG_GDB = 1,
944 	SPAWN_DEBUG_LLDB = 2
945 };
946 
947 static const char* spawn_action(struct debug_ctx* dctx,
948 	char* action, struct arcan_shmif_cont* c, struct arcan_event ev)
949 {
950 	static const char* err_not_found = "Couldn't find executable";
951 	static const char* err_couldnt_spawn = "Couldn't spawn child process";
952 	static const char* err_read_pid = "Child didn't return a pid";
953 	static const char* err_build_env = "Out of memory on building child env";
954 	static const char* err_build_pipe = "Couldn't allocate control pipes";
955 	const char* err = NULL;
956 
957 /* the attach foreplay here requires that:
958  *
959  * 1. the pipes get inherited (both read and write ends)
960  * 2. afsrv_terminal does the tty foreplay (until the day we have gdb/lldb FEs
961  *    that use tui entirely; in those cases the handshake can be done here)
962  *    and writes the child pid into its 'PIDFD_OUT'.
963  * 3. we run prctl and set this child as the tracer.
964  *
965  * The idea of racing the pid to grab the tracer role after the debugger has
966  * detached shouldn't be possible (see LSM_HOOK on task_free, which runs
967  * yama_ptracer_del in the kernel source).
968  *
969  * The other tactic, which is a bit more precise and does not require the
970  * terminal as a middle man, is to ptrace() through each new process, stop on
971  * entry into ptrace and do the same read/write dance.
972  */
973 	char* exec_path = find_exec("afsrv_terminal");
974 	char* argv[] = {"afsrv_terminal", NULL};
975 	if (!exec_path)
976 		return err_not_found;
977 
978 	if (!action){
979 /* spawn detached that'll ensure a double-fork like condition,
980  * meaning that the pid should be safe to block-wait on */
981 
982 		struct sigaction oldsig;
983 		sigaction(SIGCHLD, &(struct sigaction){}, &oldsig);
984 
985 		pid_t pid =
986 			arcan_shmif_handover_exec(c, ev, exec_path, argv, NULL, true);
987 
988 		while(pid != -1 && -1 == waitpid(pid, NULL, 0)){
989 			if (errno != EINTR)
990 				break;
991 		}
992 		sigaction(SIGCHLD, &oldsig, NULL);
993 
994 		free(exec_path);
995 		if (-1 == pid)
996 			return err_couldnt_spawn;
997 
998 		return NULL;
999 	}
1000 
1001 /* remove any existing tracer */
1002 #ifdef __LINUX
1003 #ifdef PR_SET_PTRACER
1004 	prctl(PR_SET_PTRACER, 0, 0, 0, 0);
1005 #endif
1006 #endif
1007 
1008 /* the rest is much more involved: start with some communication pipes
1009  * and a handover environment - normal signalling etc. doesn't work for
1010  * error detection given the fork detach from handover_exec */
1011 	int fdarg_out[2];
1012 	int fdarg_in[2];
1013 
1014 /* grab the pipe pairs that will be inherited into the child */
1015 	if (-1 == pipe(fdarg_out)){
1016 		return err_build_pipe;
1017 	}
1018 
1019 	if (-1 == pipe(fdarg_in)){
1020 		close(fdarg_out[0]);
1021 		close(fdarg_out[1]);
1022 		return err_build_pipe;
1023 	}
1024 
1025 /* set cloexec on our end of the descriptors */
1026 	int flags = fcntl(fdarg_out[1], F_GETFD);
1027 	if (-1 != flags)
1028 		fcntl(fdarg_out[1], F_SETFD, flags | FD_CLOEXEC);
1029 	flags = fcntl(fdarg_in[0], F_GETFD);
1030 	if (-1 != flags)
1031 		fcntl(fdarg_in[0], F_SETFD, flags | FD_CLOEXEC);
1032 
1033 /* could've done this less messily on the stack .. */
1034 	char* envv[5] = {0};
1035 	err = err_build_env;
1036 	if (-1 == asprintf(&envv[0], "ARCAN_TERMINAL_EXEC=%s", action)){
1037 		envv[0] = NULL;
1038 		goto out;
1039 	}
1040 
1041 	if (-1 == asprintf(&envv[1], "ARCAN_TERMINAL_ARGV=-p %d", (int)getpid())){
1042 		envv[1] = NULL;
1043 		goto out;
1044 	}
1045 
1046 	if (-1 == asprintf(&envv[2], "ARCAN_TERMINAL_PIDFD_OUT=%d", fdarg_in[1])){
1047 		envv[2] = NULL;
1048 		goto out;
1049 	}
1050 
1051 	if (-1 == asprintf(&envv[3], "ARCAN_TERMINAL_PIDFD_IN=%d", fdarg_out[0])){
1052 		envv[3] = NULL;
1053 		goto out;
1054 	}
1055 
1056 /* handover-execute the terminal */
1057 	struct sigaction oldsig;
1058 	sigaction(SIGCHLD, &(struct sigaction){}, &oldsig);
1059 
1060 	pid_t pid = arcan_shmif_handover_exec(c, ev, exec_path, argv, envv, true);
1061 	while(pid != -1 && -1 == waitpid(pid, NULL, 0)){
1062 		if (errno != EINTR)
1063 			break;
1064 	}
1065 	sigaction(SIGCHLD, &oldsig, NULL);
1066 	free(exec_path);
1067 
1068 	close(fdarg_out[0]);
1069 	close(fdarg_in[1]);
1070 
1071 /* wait for the pid argument */
1072 	pid_t inpid = -1;
1073 	char inbuf[8] = {0};
1074 	ssize_t nr;
1075 	while (-1 == (nr = read(fdarg_in[0], &inpid, sizeof(pid_t)))){
1076 		if (errno != EAGAIN && errno != EINTR)
1077 			break;
1078 	}
1079 
1080 	if (-1 == nr){
1081 		err = err_read_pid;
1082 		goto out;
1083 	}
1084 
1085 /* enable the tracer, doesn't look like we can do this for the BSDs atm.
1086  * but use the same setup / synch path anyhow - for testing purposes,
1087  * disable the protection right now */
1088 
1089 #ifdef __LINUX
1090 #ifdef PR_SET_PTRACER
1091 	prctl(PR_SET_PTRACER, inpid, 0, 0, 0);
1092 #endif
1093 #endif
1094 
1095 /* send the continue trigger */
1096 	uint8_t outc = '\n';
1097 	write(fdarg_out[1], &outc, 1);
1098 	err = NULL;
1099 
1100 /* another option here is to have a monitor thread for the descriptor, waiting
1101  * for it to fail and using that to release a singleton 'being traced' flag, or
1102  * an oracle for 'isDebuggerPresent'-like behavior (hedged sketch below) */
1103 out:
1104 	for (size_t i = 0; i < 5; i++){
1105 		free(envv[i]);
1106 	}
1107 	close(fdarg_in[0]);
1108 	close(fdarg_out[1]);
1109 
1110 	return err;
1111 }
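/*
 * Hedged sketch of the 'isDebuggerPresent'-like oracle mentioned above: on
 * Linux, /proc/self/status carries a TracerPid field that is non-zero while
 * a tracer is attached. Not wired into anything here.
 */
#if 0
static pid_t get_tracer_pid(void)
{
	FILE* fin = fopen("/proc/self/status", "r");
	if (!fin)
		return -1;

	long pid = 0;
	char line[256];
	while (fgets(line, sizeof(line), fin)){
		if (1 == sscanf(line, "TracerPid: %ld", &pid))
			break;
	}

	fclose(fin);
	return (pid_t) pid;
}
#endif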
1112 
1113 static int run_buffer(struct tui_context* tui, uint8_t* buffer,
1114 	size_t buf_sz, struct tui_bufferwnd_opts opts, const char* title)
1115 {
1116 	int status = 1;
1117 	opts.allow_exit = true;
1118 	arcan_tui_ident(tui, title);
1119 	arcan_tui_bufferwnd_setup(tui, buffer, buf_sz, &opts, sizeof(opts));
1120 
1121 	while(1 == (status = arcan_tui_bufferwnd_status(tui))){
1122 		struct tui_process_res res = arcan_tui_process(&tui, 1, NULL, 0, -1);
1123 		if (res.errc == TUI_ERRC_OK){
1124 			if (-1 == arcan_tui_refresh(tui) && errno == EINVAL){
1125 				break;
1126 			}
1127 		}
1128 	}
1129 
1130 /* return the context to normal, dead-flag will propagate and free if set */
1131 	arcan_tui_bufferwnd_release(tui);
1132 	arcan_tui_update_handlers(tui,
1133 		&(struct tui_cbcfg){}, NULL, sizeof(struct tui_cbcfg));
1134 
1135 	return status;
1136 }
1137 
1138 static void gen_spawn_menu(struct debug_ctx* dctx)
1139 {
1140 	struct tui_list_entry lents[] = {
1141 		{
1142 			.label = "Shell",
1143 			.tag = 0,
1144 /*			.attributes = LIST_PASSIVE, */
1145 		},
1146 		{
1147 			.label = "GNU Debugger (gdb)",
1148 			.attributes = LIST_PASSIVE,
1149 			.tag = 1
1150 		},
1151 		{
1152 			.label = "LLVM Debugger (lldb)",
1153 			.attributes = LIST_PASSIVE,
1154 			.tag = 2
1155 		}
1156 	};
1157 
1158 /* sanity check whether the binaries are available and whether we can actually
1159  * fork(), exec() etc. based on current sandbox settings, then unmask the items
1160  * that can be used */
1161 	char* gdb = find_exec("gdb");
1162 	if (gdb){
1163 		lents[1].attributes = 0;
1164 		free(gdb);
1165 	}
1166 
1167 	char* lldb = find_exec("lldb");
1168 	if (lldb){
1169 		lents[2].attributes = 0;
1170 		free(lldb);
1171 	}
1172 
1173 	struct tui_list_entry* ent =
1174 		run_listwnd(dctx, lents, COUNT_OF(lents), "debuggers", NULL);
1175 
1176 /* for all of these we need a handover segment as we can't just give the tui
1177  * context away like this, even though once the debugger connection is set up
1178  * we can't really 'survive' anymore as we will just get locked */
1179 	if (!ent)
1180 		return;
1181 
1182 /* this will leave us hanging until we get a response from the server side,
1183  * and other events will be dropped, so this is a very special edge case */
1184 	struct arcan_shmif_cont* c = arcan_tui_acon(dctx->tui);
1185 	arcan_shmif_enqueue(c, &(struct arcan_event){
1186 		.ext.kind = ARCAN_EVENT(SEGREQ),
1187 		.ext.segreq.kind = SEGID_HANDOVER
1188 	});
1189 
1190 	struct arcan_event ev;
1191 	pid_t child;
1192 	const char* err = NULL;
1193 
1194 	while(arcan_shmif_wait(c, &ev)){
1195 		if (ev.category != EVENT_TARGET)
1196 			continue;
1197 
1198 		if (ev.tgt.kind == TARGET_COMMAND_NEWSEGMENT){
1199 			char* fn = NULL;
1200 			if (ent->tag == SPAWN_DEBUG_GDB){
1201 				fn = find_exec("gdb");
1202 				if (!fn)
1203 					break;
1204 			}
1205 			else if (ent->tag == SPAWN_DEBUG_LLDB){
1206 				fn = find_exec("lldb");
1207 				if (!fn)
1208 					break;
1209 			}
1210 			err = spawn_action(dctx, fn, c, ev);
1211 			break;
1212 		}
1213 /* notify that the window request failed and revert */
1214 		else if (ev.tgt.kind == TARGET_COMMAND_REQFAIL){
1215 			err = "Server rejected window request";
1216 			break;
1217 		}
1218 	}
1219 
1220 	show_error_message(dctx->tui, err);
1221 	gen_spawn_menu(dctx);
1222 }
1223 
1224 /*
1225  * For this feature we would like to provide an editable view of the process
1226  * environment. This is anything but trivial as basically all other threads
1227  * would need to be suspended while we are doing this.
1228  *
1229  * Normally this could be done with some convoluted ptrace+fork dance, and
1230  * implemented for each platform. At the moment we settle for unsafe probing,
1231  * and revisit later when other features are in place.
1232  *
1233  * Another option is the linux proc/env dance.
1234  *
1235  * Things to look out for:
1236  *  client setting keys to a corrupted value (modify environ and add extra)
1237  */
1238 extern char** environ;
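/*
 * Hedged sketch of the 'linux proc/env dance' mentioned above: the kernel's
 * view of the initial environment block can be read back from
 * /proc/self/environ as NUL-separated strings. Not used by the list builder.
 */
#if 0
static char* read_proc_environ(size_t* out_sz)
{
	FILE* fin = fopen("/proc/self/environ", "r");
	if (!fin)
		return NULL;

/* procfs files don't report a useful size, so grow incrementally */
	size_t cap = 4096, used = 0;
	char* buf = malloc(cap);

	int ch;
	while (buf && (ch = fgetc(fin)) != EOF){
		if (used == cap){
			char* nbuf = realloc(buf, cap *= 2);
			if (!nbuf){
				free(buf);
				buf = NULL;
				break;
			}
			buf = nbuf;
		}
		buf[used++] = ch;
	}

	fclose(fin);
	if (buf)
		*out_sz = used;
	return buf;
}
#endif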
1239 static struct tui_list_entry* build_env_list(size_t* outc)
1240 {
1241 	size_t nelem = 0;
1242 	while(environ[nelem++]){}
1243 	if (!nelem)
1244 		return NULL;
1245 
1246 	*outc = 0;
1247 	struct tui_list_entry* nents = malloc(sizeof(struct tui_list_entry) * nelem);
1248 	if (!nents)
1249 		return NULL;
1250 
1251 	for (size_t i = 0; i < nelem; i++){
1252 		size_t len = 0;
1253 		for (; environ[i] && environ[i][len] && environ[i][len] != '='; len++){}
1254 		if (len == 0)
1255 			continue;
1256 
1257 		char* label = malloc(len+1);
1258 		if (!label)
1259 			continue;
1260 
1261 		memcpy(label, environ[i], len);
1262 		label[len] = '\0';
1263 
1264 		nents[*outc] = (struct tui_list_entry){
1265 			.attributes = LIST_HAS_SUB,
1266 			.tag = i,
1267 			.label = label
1268 		};
1269 		(*outc)++;
1270 	}
1271 
1272 	return nents;
1273 }
1274 
1275 static void free_list(struct tui_list_entry* list, size_t nc)
1276 {
1277 	for (size_t i = 0; i < nc; i++)
1278 		free(list[i].label);
1279 	free(list);
1280 }
1281 
1282 static void gen_environment_menu(struct debug_ctx* dctx)
1283 {
1284 	size_t nelem = 0;
1285 
1286 	struct tui_list_entry* list = build_env_list(&nelem);
1287 	if (!list)
1288 		return;
1289 
1290 	if (!nelem){
1291 		free(list);
1292 		return;
1293 	}
1294 
1295 	struct tui_list_entry* ent =
1296 		run_listwnd(dctx, list, nelem, "environment", NULL);
1297 	if (!ent){
1298 		free_list(list, nelem);
1299 		return;
1300 	}
1301 
1302 	char* env = getenv(ent->label);
1303 	if (!env || !(env = strdup(env)))
1304 		return gen_environment_menu(dctx);
1305 
1306 	run_buffer(dctx->tui, (uint8_t*) env, strlen(env), (struct tui_bufferwnd_opts){
1307 		.read_only = false,
1308 		.view_mode = BUFFERWND_VIEW_ASCII
1309 	}, ent->label);
1310 
1311 	return gen_environment_menu(dctx);
1312 }
1313 
1314 int arcan_shmif_debugint_alive()
1315 {
1316 	return atomic_load(&beancounter);
1317 }
1318 
1319 #ifdef __LINUX
1320 static int get_yama()
1321 {
1322 	FILE* pf = fopen("/proc/sys/kernel/yama/ptrace_scope", "r");
1323 	int rc = -1;
1324 
1325 	if (!pf)
1326 		return -1;
1327 
1328 	char inbuf[8];
1329 	if (fgets(inbuf, sizeof(inbuf), pf)){
1330 		rc = strtoul(inbuf, NULL, 10);
1331 	}
1332 
1333 	fclose(pf);
1334 	return rc;
1335 }
1336 #endif
1337 
1338 static void build_process_str(FILE* fout)
1339 {
1340 /* bufferwnd currently lacks a way of taking an in-line formatted string and
1341  * resolving it; the intention was to add that as a tack-on layer and use the
1342  * offset- lookup coloring to perform that resolution, so simple strings for now */
1343 	pid_t cpid = getpid();
1344 	pid_t ppid = getppid();
1345 	if (!fout)
1346 		return;
1347 
1348 #ifdef __LINUX
1349 	fprintf(fout, "PID: %zd Parent: %zd\n", (ssize_t) cpid, (ssize_t) ppid);
1350 
1351 /* digging around in memory for command-line will hurt too much,
1352  * fgets also doesn't work due to the many 0 terminated strings */
1353 	char inbuf[4096];
1354 	fprintf(fout, "Cmdline:\n");
1355 	FILE* pf = fopen("/proc/self/cmdline", "r");
1356 	int ind = 0, ofs = 0;
1357 	if (pf){
1358 		while (!feof(pf)){
1359 			int ch = fgetc(pf);
1360 			if (ch == 0){
1361 				inbuf[ofs] = '\0';
1362 				fprintf(fout, "\t%d : %s\n", ind++, inbuf);
1363 				ofs = 0;
1364 			}
1365 			else if (ch > 0){
1366 				if (ofs < sizeof(inbuf)-1){
1367 					inbuf[ofs] = ch;
1368 					ofs++;
1369 				}
1370 			}
1371 		}
1372 		fclose(pf);
1373 	}
1374 
1375 /* ptrace status is nice for figuring out debug status, even if it
1376  * isn't really a per-process attribute as much as a system-wide one */
1377 	int yn = get_yama();
1378 	switch(yn){
1379 	case -1:
1380 		fprintf(fout, "Ptrace: Couldn't Read\n");
1381 	break;
1382 	case 0:
1383 		fprintf(fout, "Ptrace: Unrestricted\n");
1384 	break;
1385 	case 1:
1386 		fprintf(fout, "Ptrace: Restricted\n");
1387 	break;
1388 	case 2:
1389 		fprintf(fout, "Ptrace: Admin-Only\n");
1390 	break;
1391 	case 3:
1392 		fprintf(fout, "Ptrace: None\n");
1393 	break;
1394 	}
1395 
1396 /* PR_GET_CHILD_SUBREAPER
1397  * PR_GET_DUMPABLE
1398  * PR_GET_SECCOM
1399  * mountinfo?
1400  * oom_score
1401  * -- not all are cheap enough for synch
1402  * Status File:
1403  *  - TracerPid
1404  *  - Seccomp
1405  * limits
1406  */
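/* hedged illustration of how a couple of these could be folded into the
 * report (the prctl getters at least are cheap enough for synchronous use):
 *
 *   #ifdef PR_GET_DUMPABLE
 *   fprintf(fout, "Dumpable: %d\n", (int) prctl(PR_GET_DUMPABLE, 0, 0, 0, 0));
 *   #endif
 *   #ifdef PR_GET_CHILD_SUBREAPER
 *   int reap = 0;
 *   if (0 == prctl(PR_GET_CHILD_SUBREAPER, &reap, 0, 0, 0))
 *     fprintf(fout, "Subreaper: %d\n", reap);
 *   #endif
 */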
1407 #else
1408 	fprintf(fout, "PID: %zd Parent: %zd", (ssize_t) cpid, (ssize_t) ppid);
1409 #endif
1410 /* hardening analysis,
1411  * aslr, nxstack, canaries (also extract canary)
1412  */
1413 }
1414 
1415 static void set_process_window(struct debug_ctx* dctx)
1416 {
1417 /* build a process description string that we periodically update */
1418 	char* buf = NULL;
1419 	size_t buf_sz = 0;
1420 	FILE* outf = open_memstream(&buf, &buf_sz);
1421 	if (!outf)
1422 		return;
1423 
1424 /* some options, like nice-level etc. should perhaps also be exposed
1425  * here in an editable way */
1426 
1427 	build_process_str(outf);
1428 	fflush(outf);
1429 	struct tui_bufferwnd_opts opts = {
1430 		.read_only = true,
1431 		.view_mode = BUFFERWND_VIEW_ASCII,
1432 		.wrap_mode = BUFFERWND_WRAP_ACCEPT_LF,
1433 		.allow_exit = true
1434 	};
1435 
1436 	run_buffer(dctx->tui, (uint8_t*) buf, buf_sz, opts, "process");
1437 /* check return code and update if commit */
1438 
1439 	if (outf)
1440 		fclose(outf);
1441 	free(buf);
1442 }
1443 
1444 static void root_menu(struct debug_ctx* dctx)
1445 {
1446 	struct tui_list_entry menu_root[] = {
1447 		{
1448 			.label = "File Descriptors",
1449 			.attributes = LIST_HAS_SUB,
1450 			.tag = TAG_CMD_DESCRIPTOR
1451 		},
1452 		{
1453 			.label = "Spawn",
1454 			.attributes = LIST_HAS_SUB,
1455 			.tag = TAG_CMD_SPAWN
1456 		},
1457 /*
1458  * browse based on current-dir, openat(".") and navigate like that
1459  *  {
1460  *  	.label = "Browse",
1461  *  	.attributes = LIST_HAS_SUB,
1462  *  	.tag = TAG_CMD_BROWSEFS
1463  *  },
1464  */
1465 		{
1466 			.label = "Environment",
1467 			.attributes = LIST_HAS_SUB,
1468 			.tag = TAG_CMD_ENVIRONMENT
1469 		},
1470 		{
1471 			.label = "Process Information",
1472 			.attributes = LIST_HAS_SUB,
1473 			.tag = TAG_CMD_PROCESS
1474 		},
1475 /*
1476  * this little thing is to allow other tools to attach more entries here;
1477  * see, for instance, src/tools/adbginject.so that keeps the process locked
1478  * before continuing. (a hedged usage sketch sits at the end of this file)
1479  */
1480 		{
1481 			.attributes = LIST_HAS_SUB,
1482 			.tag = TAG_CMD_EXTERNAL
1483 		}
1484 	};
1485 
1486 	size_t nent = 4;
1487 	struct tui_list_entry* cent = &menu_root[COUNT_OF(menu_root)-1];
1488 	if (dctx->resolver.label){
1489 		cent->label = dctx->resolver.label;
1490 		nent++;
1491 	}
1492 
1493 	while(!dctx->dead){
1494 /* update the handlers so there's no dangling handlertbl+cfg */
1495 		if (cent->label){
1496 			cent->attributes = cent->label[0] ? LIST_HAS_SUB : LIST_PASSIVE;
1497 		}
1498 
1499 		arcan_tui_update_handlers(dctx->tui,
1500 			&(struct tui_cbcfg){}, NULL, sizeof(struct tui_cbcfg));
1501 
1502 		arcan_tui_listwnd_setup(dctx->tui, menu_root, nent);
1503 		arcan_tui_ident(dctx->tui, "root");
1504 
1505 		while(!dctx->dead){
1506 			struct tui_process_res res =
1507 				arcan_tui_process(&dctx->tui, 1, NULL, 0, -1);
1508 
1509 			if (-1 == arcan_tui_refresh(dctx->tui) && errno == EINVAL){
1510 				dctx->dead = true;
1511 				return;
1512 			}
1513 
1514 			struct tui_list_entry* ent;
1515 			if (arcan_tui_listwnd_status(dctx->tui, &ent)){
1516 				arcan_tui_listwnd_release(dctx->tui);
1517 				arcan_tui_update_handlers(dctx->tui,
1518 					&(struct tui_cbcfg){}, NULL, sizeof(struct tui_cbcfg));
1519 
1520 /* this will just chain into a new listwnd setup, and if they cancel
1521  * we can just repeat the setup - until the dead state has been set */
1522 				if (ent){
1523 					switch(ent->tag){
1524 						case TAG_CMD_DESCRIPTOR :
1525 							gen_descriptor_menu(dctx);
1526 						break;
1527 						case TAG_CMD_SPAWN :
1528 							gen_spawn_menu(dctx);
1529 						break;
1530 						case TAG_CMD_ENVIRONMENT :
1531 							gen_environment_menu(dctx);
1532 						break;
1533 						case TAG_CMD_PROCESS :
1534 							set_process_window(dctx);
1535 						break;
1536 						case TAG_CMD_EXTERNAL :
1537 							dctx->resolver.handler(dctx->tui, dctx->resolver.tag);
1538 						break;
1539 					}
1540 				}
1541 /* switch to out-loop that resets the menu */
1542 				break;
1543 			}
1544 
1545 		}
1546 	}
1547 }
1548 
1549 static void* debug_thread(void* thr)
1550 {
1551 	struct debug_ctx* dctx = thr;
1552 
1553 	if (!dctx->tui){
1554 		arcan_shmif_drop(&dctx->cont);
1555 		atomic_fetch_add(&beancounter, -1);
1556 		free(thr);
1557 		return NULL;
1558 	}
1559 
1560 	struct arcan_event ev = {
1561 		.category = EVENT_EXTERNAL,
1562 		.ext.kind = ARCAN_EVENT(REGISTER),
1563 		.ext.registr = {
1564 			.kind = SEGID_DEBUG
1565 		}
1566 	};
1567 
1568 	snprintf(ev.ext.registr.title, 32, "debugif(%d)", (int)getpid());
1569 
1570 	arcan_shmif_enqueue(arcan_tui_acon(dctx->tui), &ev);
1571 	root_menu(dctx);
1572 
1573 	arcan_tui_destroy(dctx->tui, NULL);
1574 	atomic_fetch_add(&beancounter, -1);
1575 	free(thr);
1576 	return NULL;
1577 }
1578 
1579 bool arcan_shmif_debugint_spawn(
1580 	struct arcan_shmif_cont* c, void* tuitag, struct debugint_ext_resolver* res)
1581 {
1582 /* make sure we have the TUI functions for the debug thread along with
1583  * the respective widgets, dynamically load the symbols */
1584 	if (!arcan_tui_setup ||
1585 			!arcan_tui_listwnd_setup ||
1586 			!arcan_tui_bufferwnd_setup
1587 	){
1588 		void* openh = dlopen(
1589 "libarcan_tui."
1590 #ifndef __APPLE__
1591 	"so"
1592 #else
1593 	"dylib"
1594 #endif
1595 		, RTLD_LAZY);
1596 		if (!arcan_tui_dynload(dlsym, openh))
1597 			return false;
1598 
1599 		if (!arcan_tui_listwnd_dynload(dlsym, openh))
1600 			return false;
1601 
1602 		if (!arcan_tui_bufferwnd_dynload(dlsym, openh))
1603 			return false;
1604 	}
1605 
1606 	pthread_t pth;
1607 	pthread_attr_t pthattr;
1608 	pthread_attr_init(&pthattr);
1609 	pthread_attr_setdetachstate(&pthattr, PTHREAD_CREATE_DETACHED);
1610 	struct debug_ctx* hgs = malloc(sizeof(struct debug_ctx));
1611 	if (!hgs)
1612 		return false;
1613 
1614 	*hgs = (struct debug_ctx){
1615 		.infd = -1,
1616 		.outfd = -1,
1617 		.cont = *c,
1618 		.tui = arcan_tui_setup(c,
1619 			tuitag, &(struct tui_cbcfg){}, sizeof(struct tui_cbcfg))
1620 	};
1621 
1622 	if (res)
1623 		hgs->resolver = *res;
1624 
1625 	if (!hgs->tui){
1626 		free(hgs);
1627 		return false;
1628 	}
1629 
1630 	arcan_tui_set_flags(hgs->tui, TUI_HIDE_CURSOR);
1631 
1632 	if (0 != pthread_create(&pth, &pthattr, debug_thread, hgs)){
1633 		free(hgs);
1634 		return false;
1635 	}
1636 
1637 	atomic_fetch_add(&beancounter, 1);
1638 
1639 	return true;
1640 }
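/*
 * Hedged usage sketch (not part of this translation unit): a client that
 * wants to expose the debug interface with one custom root menu entry might
 * call the public entry points above like this; the handler name and body
 * are hypothetical.
 */
#if 0
static void my_entry(struct tui_context* tui, void* tag)
{
/* runs on the debug thread, with the debug tui context current */
}

static void on_debug_request(struct arcan_shmif_cont* cont)
{
	if (arcan_shmif_debugint_alive())
		return;

	arcan_shmif_debugint_spawn(cont, NULL,
		&(struct debugint_ext_resolver){
			.label = "My Tool",
			.handler = my_entry,
			.tag = NULL
		});
}
#endif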
1641