xref: /linux/arch/um/os-Linux/sigio.c (revision 52338415)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2002 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pty.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include <kern_util.h>
#include <init.h>
#include <os.h>
#include <sigio.h>
#include <um_malloc.h>

/*
 * Protected by sigio_lock(), also used by sigio_cleanup, which is an
 * exitcall.
 */
static int write_sigio_pid = -1;
static unsigned long write_sigio_stack;

/*
 * These arrays are initialized before the sigio thread is started, and
 * the descriptors closed after it is killed.  So, it can't see them change.
 * On the UML side, they are changed under the sigio_lock.
 */
#define SIGIO_FDS_INIT {-1, -1}

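/*
 * write_sigio_fds carries "a watched descriptor is ready" notifications
 * from the helper thread ([1]) to the UML kernel; the read side ([0]) is
 * handed to write_sigio_irq() so the notification arrives as an interrupt.
 * sigio_private is the control channel used by update_thread() ([0], UML
 * side) to make the helper ([1]) switch over to a new poll set.
 */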
static int write_sigio_fds[2] = SIGIO_FDS_INIT;
static int sigio_private[2] = SIGIO_FDS_INIT;

struct pollfds {
	struct pollfd *poll;
	int size;
	int used;
};

/*
 * Protected by sigio_lock().  Used by the sigio thread, but the UML thread
 * synchronizes with it.
 */
static struct pollfds current_poll;
static struct pollfds next_poll;
static struct pollfds all_sigio_fds;

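/*
 * Helper thread started by write_sigio_workaround().  It polls the
 * descriptors in current_poll.  A byte on sigio_private[1] is a request
 * from update_thread() to switch poll sets: swap current_poll and
 * next_poll and acknowledge on the same descriptor.  Activity on any
 * other descriptor is reported by writing a byte to write_sigio_fds[1];
 * that descriptor is then dropped from the set so it doesn't fire again
 * until it is re-added with add_sigio_fd().
 */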
static int write_sigio_thread(void *unused)
{
	struct pollfds *fds, tmp;
	struct pollfd *p;
	int i, n, respond_fd;
	char c;

	os_fix_helper_signals();
	fds = &current_poll;
	while (1) {
		n = poll(fds->poll, fds->used, -1);
		if (n < 0) {
			if (errno == EINTR)
				continue;
			printk(UM_KERN_ERR "write_sigio_thread : poll returned "
			       "%d, errno = %d\n", n, errno);
		}
		for (i = 0; i < fds->used; i++) {
			p = &fds->poll[i];
			if (p->revents == 0)
				continue;
			if (p->fd == sigio_private[1]) {
				CATCH_EINTR(n = read(sigio_private[1], &c,
						     sizeof(c)));
				if (n != sizeof(c))
					printk(UM_KERN_ERR
					       "write_sigio_thread : "
					       "read on socket failed, "
					       "err = %d\n", errno);
				tmp = current_poll;
				current_poll = next_poll;
				next_poll = tmp;
				respond_fd = sigio_private[1];
			} else {
				respond_fd = write_sigio_fds[1];
				fds->used--;
				memmove(&fds->poll[i], &fds->poll[i + 1],
					(fds->used - i) * sizeof(*fds->poll));
			}

			CATCH_EINTR(n = write(respond_fd, &c, sizeof(c)));
			if (n != sizeof(c))
				printk(UM_KERN_ERR "write_sigio_thread : "
				       "write on socket failed, err = %d\n",
				       errno);
		}
	}

	return 0;
}

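/*
 * Make sure *polls has room for n entries, reallocating (and preserving
 * the entries already in use) if it doesn't.  Called under sigio_lock().
 */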
static int need_poll(struct pollfds *polls, int n)
{
	struct pollfd *new;

	if (n <= polls->size)
		return 0;

	new = uml_kmalloc(n * sizeof(struct pollfd), UM_GFP_ATOMIC);
	if (new == NULL) {
		printk(UM_KERN_ERR "need_poll : failed to allocate new "
		       "pollfds\n");
		return -ENOMEM;
	}

	memcpy(new, polls->poll, polls->used * sizeof(struct pollfd));
	kfree(polls->poll);

	polls->poll = new;
	polls->size = n;
	return 0;
}

/*
 * Must be called with sigio_lock held, because it's needed by the marked
 * critical section.
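 *
 * A byte written to sigio_private[0] wakes the helper thread, which swaps
 * in next_poll and answers with a byte of its own.  If either side of
 * that handshake fails, the helper is killed and all the descriptors are
 * closed.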
 */
static void update_thread(void)
{
	unsigned long flags;
	int n;
	char c;

	flags = set_signals_trace(0);
	CATCH_EINTR(n = write(sigio_private[0], &c, sizeof(c)));
	if (n != sizeof(c)) {
		printk(UM_KERN_ERR "update_thread : write failed, err = %d\n",
		       errno);
		goto fail;
	}

	CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c)));
	if (n != sizeof(c)) {
		printk(UM_KERN_ERR "update_thread : read failed, err = %d\n",
		       errno);
		goto fail;
	}

	set_signals_trace(flags);
	return;
 fail:
	/* Critical section start */
	if (write_sigio_pid != -1) {
		os_kill_process(write_sigio_pid, 1);
		free_stack(write_sigio_stack, 0);
	}
	write_sigio_pid = -1;
	close(sigio_private[0]);
	close(sigio_private[1]);
	close(write_sigio_fds[0]);
	close(write_sigio_fds[1]);
	/* Critical section end */
	set_signals_trace(flags);
}

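/*
 * Start watching a descriptor that was previously registered with
 * sigio_broken().  Builds next_poll as current_poll plus the new entry
 * and asks the helper thread to switch over.
 */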
int add_sigio_fd(int fd)
{
	struct pollfd *p;
	int err = 0, i, n;

	sigio_lock();
	for (i = 0; i < all_sigio_fds.used; i++) {
		if (all_sigio_fds.poll[i].fd == fd)
			break;
	}
	if (i == all_sigio_fds.used)
		goto out;

	p = &all_sigio_fds.poll[i];

	for (i = 0; i < current_poll.used; i++) {
		if (current_poll.poll[i].fd == fd)
			goto out;
	}

	n = current_poll.used;
	err = need_poll(&next_poll, n + 1);
	if (err)
		goto out;

	memcpy(next_poll.poll, current_poll.poll,
	       current_poll.used * sizeof(struct pollfd));
	next_poll.poll[n] = *p;
	next_poll.used = n + 1;
	update_thread();
 out:
	sigio_unlock();
	return err;
}

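/*
 * Stop watching a descriptor: build next_poll as current_poll minus the
 * entry for fd and ask the helper thread to switch over.
 */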
int ignore_sigio_fd(int fd)
{
	struct pollfd *p;
	int err = 0, i, n = 0;

	/*
	 * This is called from exitcalls elsewhere in UML - if
	 * sigio_cleanup has already run, then update_thread will hang
	 * or fail because the thread is no longer running.
	 */
	if (write_sigio_pid == -1)
		return -EIO;

	sigio_lock();
	for (i = 0; i < current_poll.used; i++) {
		if (current_poll.poll[i].fd == fd)
			break;
	}
	if (i == current_poll.used)
		goto out;

	err = need_poll(&next_poll, current_poll.used - 1);
	if (err)
		goto out;

	for (i = 0; i < current_poll.used; i++) {
		p = &current_poll.poll[i];
		if (p->fd != fd)
			next_poll.poll[n++] = *p;
	}
	next_poll.used = current_poll.used - 1;

	update_thread();
 out:
	sigio_unlock();
	return err;
}

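/*
 * The initial poll set contains only the helper's end of the private
 * channel, so the thread always has something to wait on.
 */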
static struct pollfd *setup_initial_poll(int fd)
{
	struct pollfd *p;

	p = uml_kmalloc(sizeof(struct pollfd), UM_GFP_KERNEL);
	if (p == NULL) {
		printk(UM_KERN_ERR "setup_initial_poll : failed to allocate "
		       "poll\n");
		return NULL;
	}
	*p = ((struct pollfd) { .fd		= fd,
				.events 	= POLLIN,
				.revents 	= 0 });
	return p;
}

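/*
 * Set up the SIGIO workaround on first use: create the notification and
 * control channels, register the notification read side as an interrupt
 * source with write_sigio_irq(), and start the helper thread with a poll
 * set that initially watches only the control channel.
 */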
static void write_sigio_workaround(void)
{
	struct pollfd *p;
	int err;
	int l_write_sigio_fds[2];
	int l_sigio_private[2];
	int l_write_sigio_pid;

	/*
	 * We call this *tons* of times - and most of the time we just have
	 * to bail out early because the thread is already running.
	 */
	sigio_lock();
	l_write_sigio_pid = write_sigio_pid;
	sigio_unlock();

	if (l_write_sigio_pid != -1)
		return;

	err = os_pipe(l_write_sigio_fds, 1, 1);
	if (err < 0) {
		printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 1 failed, "
		       "err = %d\n", -err);
		return;
	}
	err = os_pipe(l_sigio_private, 1, 1);
	if (err < 0) {
		printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 2 failed, "
		       "err = %d\n", -err);
		goto out_close1;
	}

	p = setup_initial_poll(l_sigio_private[1]);
	if (!p)
		goto out_close2;

	sigio_lock();

	/*
	 * Did we race? Don't try to optimize this, please - it's not likely
	 * to happen, and it can occur at most once during boot.
	 */
	if (write_sigio_pid != -1)
		goto out_free;

	current_poll = ((struct pollfds) { .poll 	= p,
					   .used 	= 1,
					   .size 	= 1 });

	if (write_sigio_irq(l_write_sigio_fds[0]))
		goto out_clear_poll;

	memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
	memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));

	write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
					    CLONE_FILES | CLONE_VM,
					    &write_sigio_stack);

	if (write_sigio_pid < 0)
		goto out_clear;

	sigio_unlock();
	return;

out_clear:
	write_sigio_pid = -1;
	write_sigio_fds[0] = -1;
	write_sigio_fds[1] = -1;
	sigio_private[0] = -1;
	sigio_private[1] = -1;
out_clear_poll:
	current_poll = ((struct pollfds) { .poll	= NULL,
					   .size	= 0,
					   .used	= 0 });
out_free:
	sigio_unlock();
	kfree(p);
out_close2:
	close(l_sigio_private[0]);
	close(l_sigio_private[1]);
out_close1:
	close(l_write_sigio_fds[0]);
	close(l_write_sigio_fds[1]);
}

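/*
 * Record fd in all_sigio_fds (POLLIN for reads, POLLOUT for writes) so
 * that add_sigio_fd() can hand it to the helper thread later; the helper
 * is started first if it isn't running yet.
 */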
void sigio_broken(int fd, int read)
{
	int err;

	write_sigio_workaround();

	sigio_lock();
	err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
	if (err) {
		printk(UM_KERN_ERR "sigio_broken - failed to add pollfd "
		       "for descriptor %d\n", fd);
		goto out;
	}

	all_sigio_fds.poll[all_sigio_fds.used++] =
		((struct pollfd) { .fd  	= fd,
				   .events 	= read ? POLLIN : POLLOUT,
				   .revents 	= 0 });
out:
	sigio_unlock();
}

/* Changed during early boot */
static int pty_output_sigio;
static int pty_close_sigio;

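/*
 * Only host ptys need the workaround, and only for the cases the boot-time
 * checks found broken: SIGIO on output becoming possible (writes) and
 * SIGIO on close (reads).
 */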
void maybe_sigio_broken(int fd, int read)
{
	if (!isatty(fd))
		return;

	if ((read || pty_output_sigio) && (!read || pty_close_sigio))
		return;

	sigio_broken(fd, read);
}

static void sigio_cleanup(void)
{
	if (write_sigio_pid == -1)
		return;

	os_kill_process(write_sigio_pid, 1);
	free_stack(write_sigio_stack, 0);
	write_sigio_pid = -1;
}

__uml_exitcall(sigio_cleanup);

/* Used as a flag during SIGIO testing early in boot */
static int got_sigio;

static void __init handler(int sig)
{
	got_sigio = 1;
}

struct openpty_arg {
	int master;
	int slave;
	int err;
};

static void openpty_cb(void *arg)
{
	struct openpty_arg *info = arg;

	info->err = 0;
	if (openpty(&info->master, &info->slave, NULL, NULL, NULL))
		info->err = -errno;
}

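/*
 * Make the master non-blocking and SIGIO-generating, owned by this
 * process, and the slave non-blocking.
 */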
static int async_pty(int master, int slave)
{
	int flags;

	flags = fcntl(master, F_GETFL);
	if (flags < 0)
		return -errno;

	if ((fcntl(master, F_SETFL, flags | O_NONBLOCK | O_ASYNC) < 0) ||
	    (fcntl(master, F_SETOWN, os_getpid()) < 0))
		return -errno;

	if ((fcntl(slave, F_SETFL, flags | O_NONBLOCK) < 0))
		return -errno;

	return 0;
}

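/*
 * Allocate a pty, put it in raw, async mode, install a temporary SIGIO
 * handler and run one of the tests below to see whether the host
 * delivers SIGIO for that case.
 */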
static void __init check_one_sigio(void (*proc)(int, int))
{
	struct sigaction old, new;
	struct openpty_arg pty = { .master = -1, .slave = -1 };
	int master, slave, err;

	initial_thread_cb(openpty_cb, &pty);
	if (pty.err) {
		printk(UM_KERN_ERR "check_one_sigio failed, errno = %d\n",
		       -pty.err);
		return;
	}

	master = pty.master;
	slave = pty.slave;

	if ((master == -1) || (slave == -1)) {
		printk(UM_KERN_ERR "check_one_sigio failed to allocate a "
		       "pty\n");
		return;
	}

	/* Not fatal, but complain so we know where we failed. */
	err = raw(master);
	if (err < 0) {
		printk(UM_KERN_ERR "check_one_sigio : raw failed, errno = %d\n",
		      -err);
		return;
	}

	err = async_pty(master, slave);
	if (err < 0) {
		printk(UM_KERN_ERR "check_one_sigio : async_pty failed, "
		       "err = %d\n", -err);
		return;
	}

	if (sigaction(SIGIO, NULL, &old) < 0) {
		printk(UM_KERN_ERR "check_one_sigio : sigaction 1 failed, "
		       "errno = %d\n", errno);
		return;
	}

	new = old;
	new.sa_handler = handler;
	if (sigaction(SIGIO, &new, NULL) < 0) {
		printk(UM_KERN_ERR "check_one_sigio : sigaction 2 failed, "
		       "errno = %d\n", errno);
		return;
	}

	got_sigio = 0;
	(*proc)(master, slave);

	close(master);
	close(slave);

	if (sigaction(SIGIO, &old, NULL) < 0)
		printk(UM_KERN_ERR "check_one_sigio : sigaction 3 failed, "
		       "errno = %d\n", errno);
}

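/*
 * Fill the master until writes would block, then drain the slave; if the
 * host sends SIGIO once the master becomes writable again, output SIGIO
 * works and no workaround is needed for writes.
 */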
static void tty_output(int master, int slave)
{
	int n;
	char buf[512];

	printk(UM_KERN_INFO "Checking that host ptys support output SIGIO...");

	memset(buf, 0, sizeof(buf));

	while (write(master, buf, sizeof(buf)) > 0)
		;
	if (errno != EAGAIN)
		printk(UM_KERN_ERR "tty_output : write failed, errno = %d\n",
		       errno);
	while (((n = read(slave, buf, sizeof(buf))) > 0) &&
	       !({ barrier(); got_sigio; }))
		;

	if (got_sigio) {
		printk(UM_KERN_CONT "Yes\n");
		pty_output_sigio = 1;
	} else if ((n < 0) && (errno == EAGAIN))
		printk(UM_KERN_CONT "No, enabling workaround\n");
	else
		printk(UM_KERN_CONT "tty_output : read failed, errno = %d\n",
		       errno);
}

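/*
 * Closing the slave should raise SIGIO on the async master if the host
 * supports SIGIO on close.
 */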
static void tty_close(int master, int slave)
{
	printk(UM_KERN_INFO "Checking that host ptys support SIGIO on "
	       "close...");

	close(slave);
	if (got_sigio) {
		printk(UM_KERN_CONT "Yes\n");
		pty_close_sigio = 1;
	} else
		printk(UM_KERN_CONT "No, enabling workaround\n");
}

static void __init check_sigio(void)
{
	if ((access("/dev/ptmx", R_OK) < 0) &&
	    (access("/dev/ptyp0", R_OK) < 0)) {
		printk(UM_KERN_WARNING "No pseudo-terminals available - "
		       "skipping pty SIGIO check\n");
		return;
	}
	check_one_sigio(tty_output);
	check_one_sigio(tty_close);
}

/* Here because it only does the SIGIO testing for now */
void __init os_check_bugs(void)
{
	check_sigio();
}