/*	$OpenBSD: ioev.c,v 1.45 2021/04/05 15:50:11 eric Exp $	*/
/*
 * Copyright (c) 2012 Eric Faurot <eric@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/socket.h>

#include <err.h>
#include <errno.h>
#include <event.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#ifdef IO_TLS
#include <tls.h>
#endif
#include <unistd.h>

#include "ioev.h"
#include "iobuf.h"

enum {
	IO_STATE_NONE,
	IO_STATE_CONNECT,
	IO_STATE_CONNECT_TLS,
	IO_STATE_ACCEPT_TLS,
	IO_STATE_UP,

	IO_STATE_MAX,
};
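
/*
 * Rough map of how these states are assigned below (IO_STATE_CONNECT
 * is declared but never entered in this file):
 *
 *	io_connect_tls()		-> IO_STATE_CONNECT_TLS
 *	io_accept_tls()			-> IO_STATE_ACCEPT_TLS
 *	io_dispatch_connect()		-> IO_STATE_UP	(connect(2) completed)
 *	io_dispatch_handshake_tls()	-> IO_STATE_UP	(TLS handshake done)
 */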

#define IO_PAUSE_IN 		IO_IN
#define IO_PAUSE_OUT		IO_OUT
#define IO_READ			0x04
#define IO_WRITE		0x08
#define IO_RW			(IO_READ | IO_WRITE)
#define IO_RESET		0x10  /* internal */
#define IO_HELD			0x20  /* internal */

struct io {
	int		 sock;
	void		*arg;
	void		(*cb)(struct io*, int, void *);
	struct iobuf	 iobuf;
	size_t		 lowat;
	int		 timeout;
	int		 flags;
	int		 state;
	struct event	 ev;
	struct tls	*tls;
	char		*name;

	const char	*error; /* only valid immediately on callback */
};

const char* io_strflags(int);
const char* io_evstr(short);

void	_io_init(void);
void	io_hold(struct io *);
void	io_release(struct io *);
void	io_callback(struct io*, int);
void	io_dispatch(int, short, void *);
void	io_dispatch_connect(int, short, void *);
size_t	io_pending(struct io *);
size_t	io_queued(struct io*);
void	io_reset(struct io *, short, void (*)(int, short, void*));
void	io_frame_enter(const char *, struct io *, int);
void	io_frame_leave(struct io *);

#ifdef IO_TLS
void	io_dispatch_handshake_tls(int, short, void *);
void	io_dispatch_accept_tls(int, short, void *);
void	io_dispatch_connect_tls(int, short, void *);
void	io_dispatch_read_tls(int, short, void *);
void	io_dispatch_write_tls(int, short, void *);
void	io_reload_tls(struct io *io);
#endif

static struct io	*current = NULL;
static uint64_t		 frame = 0;
static int		_io_debug = 0;

#define io_debug(args...) do { if (_io_debug) printf(args); } while(0)

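/*
 * Minimal usage sketch (illustrative only, not part of the original
 * file).  The callback name on_event() and the filled-in sockaddr "sa"
 * are assumptions:
 *
 *	static void
 *	on_event(struct io *io, int evt, void *arg)
 *	{
 *		switch (evt) {
 *		case IO_CONNECTED:
 *			io_set_write(io);
 *			io_print(io, "hello\r\n");
 *			break;
 *		case IO_DATAIN:
 *			... consume io_data()/io_datalen(), then io_drop() ...
 *			break;
 *		case IO_DISCONNECTED:
 *		case IO_TIMEOUT:
 *		case IO_ERROR:
 *			io_free(io);
 *			break;
 *		}
 *	}
 *
 *	struct io *io = io_new();
 *	io_set_callback(io, on_event, NULL);
 *	io_set_timeout(io, 30 * 1000);
 *	io_connect(io, sa, NULL);
 *	event_dispatch();
 */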

const char*
io_strio(struct io *io)
{
	static char	buf[128];
	char		ssl[128];

	ssl[0] = '\0';
#ifdef IO_TLS
	if (io->tls) {
		(void)snprintf(ssl, sizeof ssl, " tls=%s:%s",
		    tls_conn_version(io->tls),
		    tls_conn_cipher(io->tls));
	}
#endif

	(void)snprintf(buf, sizeof buf,
	    "<io:%p fd=%d to=%d fl=%s%s ib=%zu ob=%zu>",
	    io, io->sock, io->timeout, io_strflags(io->flags), ssl,
	    io_pending(io), io_queued(io));

	return (buf);
}

#define CASE(x) case x : return #x

const char*
io_strevent(int evt)
{
	static char buf[32];

	switch (evt) {
	CASE(IO_CONNECTED);
	CASE(IO_TLSREADY);
	CASE(IO_DATAIN);
	CASE(IO_LOWAT);
	CASE(IO_DISCONNECTED);
	CASE(IO_TIMEOUT);
	CASE(IO_ERROR);
	default:
		(void)snprintf(buf, sizeof(buf), "IO_? %d", evt);
		return buf;
	}
}

void
io_set_nonblocking(int fd)
{
	int	flags;

	if ((flags = fcntl(fd, F_GETFL)) == -1)
		err(1, "io_set_nonblocking:fcntl(F_GETFL)");

	flags |= O_NONBLOCK;

	if (fcntl(fd, F_SETFL, flags) == -1)
		err(1, "io_set_nonblocking:fcntl(F_SETFL)");
}

void
io_set_nolinger(int fd)
{
	struct linger    l;

	memset(&l, 0, sizeof(l));
	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) == -1)
		err(1, "io_set_nolinger:setsockopt");
}

/*
 * Event framing must not rely on an io pointer to refer to the "same" io
 * throughout the frame, because this is not always the case:
 *
 * 1) enter(addr0) -> free(addr0) -> leave(addr0) = SEGV
 * 2) enter(addr0) -> free(addr0) -> malloc == addr0 -> leave(addr0) = BAD!
 *
 * In both cases, the problem is that the io is freed in the callback, so
 * the pointer becomes invalid. If that happens, the user is required to
 * call io_free(), so we can adapt the frame state there.
 */
void
io_frame_enter(const char *where, struct io *io, int ev)
{
	io_debug("\n=== %" PRIu64 " ===\n"
	    "io_frame_enter(%s, %s, %s)\n",
	    frame, where, io_evstr(ev), io_strio(io));

	if (current)
		errx(1, "io_frame_enter: interleaved frames");

	current = io;

	io_hold(io);
}

void
io_frame_leave(struct io *io)
{
	io_debug("io_frame_leave(%" PRIu64 ")\n", frame);

	if (current && current != io)
		errx(1, "io_frame_leave: io mismatch");

	/* io has been cleared */
	if (current == NULL)
		goto done;

	/* TODO: There is a possible optimization here:
	 * In a typical half-duplex request/response scenario,
	 * the io is waiting to read a request, and when done, it queues
	 * the response in the output buffer and goes to write mode.
	 * There, the write event is set and will be triggered in the next
	 * event frame.  In most cases, the write call could be done
	 * immediately as part of the last read frame, thus avoiding a pass
	 * through the event loop machinery. So, as an optimization, we
	 * could detect that case here and force an event dispatching.
	 */

	/* Reload the io if it has not been reset already. */
	io_release(io);
	current = NULL;
    done:
	io_debug("=== /%" PRIu64 "\n", frame);

	frame += 1;
}
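
/*
 * For reference, a typical dispatch frame looks like this (the user
 * callback may call io_free(), which clears "current" so that
 * io_frame_leave() does not touch freed memory):
 *
 *	io_dispatch()		libevent fires an event
 *	  io_frame_enter()	current = io, io_hold()
 *	  io_callback()		run user code
 *	  io_frame_leave()	io_release() -> io_reload(), unless the
 *				io was reset or freed in the callback
 */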

void
_io_init(void)
{
	static int init = 0;

	if (init)
		return;

	init = 1;
	_io_debug = getenv("IO_DEBUG") != NULL;
}

struct io *
io_new(void)
{
	struct io *io;

	_io_init();

	if ((io = calloc(1, sizeof(*io))) == NULL)
		return NULL;

	io->sock = -1;
	io->timeout = -1;

	if (iobuf_init(&io->iobuf, 0, 0) == -1) {
		free(io);
		return NULL;
	}

	return io;
}

void
io_free(struct io *io)
{
	io_debug("io_free(%p)\n", io);

	/* the current io is virtually dead */
	if (io == current)
		current = NULL;

#ifdef IO_TLS
	tls_free(io->tls);
	io->tls = NULL;
#endif

	if (event_initialized(&io->ev))
		event_del(&io->ev);
	if (io->sock != -1) {
		close(io->sock);
		io->sock = -1;
	}

	free(io->name);
	iobuf_clear(&io->iobuf);
	free(io);
}

void
io_hold(struct io *io)
{
	io_debug("io_hold(%p)\n", io);

	if (io->flags & IO_HELD)
		errx(1, "io_hold: io is already held");

	io->flags &= ~IO_RESET;
	io->flags |= IO_HELD;
}

void
io_release(struct io *io)
{
	if (!(io->flags & IO_HELD))
		errx(1, "io_release: io is not held");

	io->flags &= ~IO_HELD;
	if (!(io->flags & IO_RESET))
		io_reload(io);
}

void
io_set_fd(struct io *io, int fd)
{
	io->sock = fd;
	if (fd != -1)
		io_reload(io);
}

void
io_set_callback(struct io *io, void(*cb)(struct io *, int, void *), void *arg)
{
	io->cb = cb;
	io->arg = arg;
}

void
io_set_timeout(struct io *io, int msec)
{
	io_debug("io_set_timeout(%p, %d)\n", io, msec);

	io->timeout = msec;
}

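/*
 * IO_LOWAT is reported by the write paths below when the output queue
 * goes from more than "lowat" queued bytes down to at most "lowat".
 */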
void
io_set_lowat(struct io *io, size_t lowat)
{
	io_debug("io_set_lowat(%p, %zu)\n", io, lowat);

	io->lowat = lowat;
}

void
io_pause(struct io *io, int dir)
{
	io_debug("io_pause(%p, %x)\n", io, dir);

	io->flags |= dir & (IO_PAUSE_IN | IO_PAUSE_OUT);
	io_reload(io);
}

void
io_resume(struct io *io, int dir)
{
	io_debug("io_resume(%p, %x)\n", io, dir);

	io->flags &= ~(dir & (IO_PAUSE_IN | IO_PAUSE_OUT));
	io_reload(io);
}

void
io_set_read(struct io *io)
{
	int	mode;

	io_debug("io_set_read(%p)\n", io);

	mode = io->flags & IO_RW;
	if (!(mode == 0 || mode == IO_WRITE))
		errx(1, "io_set_read: full-duplex or reading");

	io->flags &= ~IO_RW;
	io->flags |= IO_READ;
	io_reload(io);
}

void
io_set_write(struct io *io)
{
	int	mode;

	io_debug("io_set_write(%p)\n", io);

	mode = io->flags & IO_RW;
	if (!(mode == 0 || mode == IO_READ))
		errx(1, "io_set_write: full-duplex or writing");

	io->flags &= ~IO_RW;
	io->flags |= IO_WRITE;
	io_reload(io);
}

const char *
io_error(struct io *io)
{
	return io->error;
}

struct tls *
io_tls(struct io *io)
{
	return io->tls;
}

int
io_fileno(struct io *io)
{
	return io->sock;
}

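/* Note: this tests for an exact match of the pause flags, not a subset. */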
int
io_paused(struct io *io, int what)
{
	return (io->flags & (IO_PAUSE_IN | IO_PAUSE_OUT)) == what;
}

/*
 * Buffered output functions
 */

int
io_write(struct io *io, const void *buf, size_t len)
{
	int r;

	r = iobuf_queue(&io->iobuf, buf, len);

	io_reload(io);

	return r;
}

int
io_writev(struct io *io, const struct iovec *iov, int iovcount)
{
	int r;

	r = iobuf_queuev(&io->iobuf, iov, iovcount);

	io_reload(io);

	return r;
}

int
io_print(struct io *io, const char *s)
{
	return io_write(io, s, strlen(s));
}

int
io_printf(struct io *io, const char *fmt, ...)
{
	va_list ap;
	int r;

	va_start(ap, fmt);
	r = io_vprintf(io, fmt, ap);
	va_end(ap);

	return r;
}

int
io_vprintf(struct io *io, const char *fmt, va_list ap)
{
	char *buf;
	int len;

	len = vasprintf(&buf, fmt, ap);
	if (len == -1)
		return -1;
	len = io_write(io, buf, len);
	free(buf);

	return len;
}

size_t
io_queued(struct io *io)
{
	return iobuf_queued(&io->iobuf);
}

/*
 * Buffered input functions
 */

void *
io_data(struct io *io)
{
	return iobuf_data(&io->iobuf);
}

size_t
io_datalen(struct io *io)
{
	return iobuf_len(&io->iobuf);
}

char *
io_getline(struct io *io, size_t *sz)
{
	return iobuf_getline(&io->iobuf, sz);
}

void
io_drop(struct io *io, size_t sz)
{
	iobuf_drop(&io->iobuf, sz);
}


#define IO_READING(io) (((io)->flags & IO_RW) != IO_WRITE)
#define IO_WRITING(io) (((io)->flags & IO_RW) != IO_READ)
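
/*
 * When no direction flag is set the io is full-duplex: IO_READING()
 * and IO_WRITING() are then both true.
 */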

/*
 * Set up the necessary events as required by the current io state,
 * honouring duplex mode and i/o pauses.
 */
void
io_reload(struct io *io)
{
	short	events;

	/* io will be reloaded at release time */
	if (io->flags & IO_HELD)
		return;

	iobuf_normalize(&io->iobuf);

#ifdef IO_TLS
	if (io->tls) {
		io_reload_tls(io);
		return;
	}
#endif

	io_debug("io_reload(%p)\n", io);

	events = 0;
	if (IO_READING(io) && !(io->flags & IO_PAUSE_IN))
		events = EV_READ;
	if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) && io_queued(io))
		events |= EV_WRITE;

	io_reset(io, events, io_dispatch);
}

/* Set the requested event. */
void
io_reset(struct io *io, short events, void (*dispatch)(int, short, void*))
{
	struct timeval	tv, *ptv;

	io_debug("io_reset(%p, %s, %p) -> %s\n",
	    io, io_evstr(events), dispatch, io_strio(io));

	/*
	 * Indicate that the event has already been reset so that reload
	 * is not called on frame_leave.
	 */
	io->flags |= IO_RESET;

	if (event_initialized(&io->ev))
		event_del(&io->ev);

	/*
	 * The io is paused by the user, so we don't want the timeout to be
	 * effective.
	 */
	if (events == 0)
		return;

	event_set(&io->ev, io->sock, events, dispatch, io);
	if (io->timeout >= 0) {
		tv.tv_sec = io->timeout / 1000;
		tv.tv_usec = (io->timeout % 1000) * 1000;
		ptv = &tv;
	} else
		ptv = NULL;

	event_add(&io->ev, ptv);
}

size_t
io_pending(struct io *io)
{
	return iobuf_len(&io->iobuf);
}

const char*
io_strflags(int flags)
{
	static char	buf[64];

	buf[0] = '\0';

	switch (flags & IO_RW) {
	case 0:
		(void)strlcat(buf, "rw", sizeof buf);
		break;
	case IO_READ:
		(void)strlcat(buf, "R", sizeof buf);
		break;
	case IO_WRITE:
		(void)strlcat(buf, "W", sizeof buf);
		break;
	case IO_RW:
		(void)strlcat(buf, "RW", sizeof buf);
		break;
	}

	if (flags & IO_PAUSE_IN)
		(void)strlcat(buf, ",F_PI", sizeof buf);
	if (flags & IO_PAUSE_OUT)
		(void)strlcat(buf, ",F_PO", sizeof buf);

	return buf;
}

const char*
io_evstr(short ev)
{
	static char	buf[64];
	char		buf2[16];
	int		n;

	n = 0;
	buf[0] = '\0';

	if (ev == 0) {
		(void)strlcat(buf, "<NONE>", sizeof(buf));
		return buf;
	}

	if (ev & EV_TIMEOUT) {
		(void)strlcat(buf, "EV_TIMEOUT", sizeof(buf));
		ev &= ~EV_TIMEOUT;
		n++;
	}

	if (ev & EV_READ) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_READ", sizeof(buf));
		ev &= ~EV_READ;
		n++;
	}

	if (ev & EV_WRITE) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_WRITE", sizeof(buf));
		ev &= ~EV_WRITE;
		n++;
	}

	if (ev & EV_SIGNAL) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_SIGNAL", sizeof(buf));
		ev &= ~EV_SIGNAL;
		n++;
	}

	if (ev) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_?=0x", sizeof(buf));
		(void)snprintf(buf2, sizeof(buf2), "%hx", ev);
		(void)strlcat(buf, buf2, sizeof(buf));
	}

	return buf;
}

void
io_dispatch(int fd, short ev, void *humppa)
{
	struct io	*io = humppa;
	size_t		 w;
	ssize_t		 n;
	int		 saved_errno;

	io_frame_enter("io_dispatch", io, ev);

	if (ev == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if (ev & EV_WRITE && (w = io_queued(io))) {
		if ((n = iobuf_write(&io->iobuf, io->sock)) < 0) {
			if (n == IOBUF_WANT_WRITE) /* kqueue bug? */
				goto read;
			if (n == IOBUF_CLOSED)
				io_callback(io, IO_DISCONNECTED);
			else {
				saved_errno = errno;
				io->error = strerror(errno);
				errno = saved_errno;
				io_callback(io, IO_ERROR);
			}
			goto leave;
		}
		if (w > io->lowat && w - n <= io->lowat)
			io_callback(io, IO_LOWAT);
	}
    read:

	if (ev & EV_READ) {
		iobuf_normalize(&io->iobuf);
		if ((n = iobuf_read(&io->iobuf, io->sock)) < 0) {
			if (n == IOBUF_CLOSED)
				io_callback(io, IO_DISCONNECTED);
			else {
				saved_errno = errno;
				io->error = strerror(errno);
				errno = saved_errno;
				io_callback(io, IO_ERROR);
			}
			goto leave;
		}
		if (n)
			io_callback(io, IO_DATAIN);
	}

leave:
	io_frame_leave(io);
}

void
io_callback(struct io *io, int evt)
{
	io->cb(io, evt, io->arg);
}

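/*
 * Start a non-blocking connect(2); the result is reported later from
 * io_dispatch_connect() as IO_CONNECTED, IO_TIMEOUT or IO_ERROR.  An
 * optional source address to bind(2) may be passed in "bsa".
 * Hypothetical call, assuming a filled-in "struct sockaddr_in sin":
 *
 *	if (io_connect(io, (struct sockaddr *)&sin, NULL) == -1)
 *		... handle immediate failure, see io_error(io) ...
 */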
int
io_connect(struct io *io, const struct sockaddr *sa, const struct sockaddr *bsa)
{
	int	sock, errno_save;

	if ((sock = socket(sa->sa_family, SOCK_STREAM, 0)) == -1)
		goto fail;

	io_set_nonblocking(sock);
	io_set_nolinger(sock);

	if (bsa && bind(sock, bsa, bsa->sa_len) == -1)
		goto fail;

	if (connect(sock, sa, sa->sa_len) == -1)
		if (errno != EINPROGRESS)
			goto fail;

	io->sock = sock;
	io_reset(io, EV_WRITE, io_dispatch_connect);

	return (sock);

    fail:
	if (sock != -1) {
		errno_save = errno;
		close(sock);
		errno = errno_save;
		io->error = strerror(errno);
	}
	return (-1);
}

void
io_dispatch_connect(int fd, short ev, void *humppa)
{
	struct io	*io = humppa;
	int		 r, e;
	socklen_t	 sl;

	io_frame_enter("io_dispatch_connect", io, ev);

	if (ev == EV_TIMEOUT) {
		close(fd);
		io->sock = -1;
		io_callback(io, IO_TIMEOUT);
	} else {
		sl = sizeof(e);
		r = getsockopt(fd, SOL_SOCKET, SO_ERROR, &e, &sl);
		if (r == -1)  {
			warn("io_dispatch_connect: getsockopt");
			e = errno;
		}
		if (e) {
			close(fd);
			io->sock = -1;
			io->error = strerror(e);
			io_callback(io, e == ETIMEDOUT ? IO_TIMEOUT : IO_ERROR);
		}
		else {
			io->state = IO_STATE_UP;
			io_callback(io, IO_CONNECTED);
		}
	}

	io_frame_leave(io);
}

#ifdef IO_TLS
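
/*
 * TLS support.  Assumed typical client flow: io_connect(), wait for
 * IO_CONNECTED, switch to write mode with io_set_write(), then call
 * io_connect_tls() with a configured libtls context; IO_TLSREADY is
 * reported once the handshake completes.  A server calls
 * io_accept_tls() from read mode instead.
 */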
int
io_connect_tls(struct io *io, struct tls *tls, const char *hostname)
{
	int	mode;

	mode = io->flags & IO_RW;
	if (mode != IO_WRITE)
		errx(1, "io_connect_tls: expect IO_WRITE mode");

	if (io->tls)
		errx(1, "io_connect_tls: TLS already started");

	if (hostname) {
		if ((io->name = strdup(hostname)) == NULL)
			err(1, "io_connect_tls");
	}

	io->tls = tls;
	io->state = IO_STATE_CONNECT_TLS;
	io_reset(io, EV_WRITE, io_dispatch_connect_tls);

	return (0);
}

int
io_accept_tls(struct io *io, struct tls *tls)
{
	int	mode;

	mode = io->flags & IO_RW;
	if (mode != IO_READ)
		errx(1, "io_accept_tls: expect IO_READ mode");

	if (io->tls)
		errx(1, "io_accept_tls: TLS already started");
	io->tls = tls;
	io->state = IO_STATE_ACCEPT_TLS;
	io_reset(io, EV_READ, io_dispatch_accept_tls);

	return (0);
}

void
io_dispatch_handshake_tls(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		ret;

	io_frame_enter("io_dispatch_handshake_tls", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if ((ret = tls_handshake(io->tls)) == 0) {
		io->state = IO_STATE_UP;
		io_callback(io, IO_TLSREADY);
		goto leave;
	}
	if (ret == TLS_WANT_POLLIN)
		io_reset(io, EV_READ, io_dispatch_handshake_tls);
	else if (ret == TLS_WANT_POLLOUT)
		io_reset(io, EV_WRITE, io_dispatch_handshake_tls);
	else {
		io->error = tls_error(io->tls);
		io_callback(io, IO_ERROR);
	}

 leave:
	io_frame_leave(io);
	return;
}

void
io_dispatch_accept_tls(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	struct tls      *tls = io->tls;
	int		 ret;

	io_frame_enter("io_dispatch_accept_tls", io, event);

	/* Replaced by TLS context for accepted socket on success. */
	io->tls = NULL;

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if ((ret = tls_accept_socket(tls, &io->tls, io->sock)) == 0) {
		io_reset(io, EV_READ|EV_WRITE, io_dispatch_handshake_tls);
		goto leave;
	}
	io->error = tls_error(tls);
	io_callback(io, IO_ERROR);

 leave:
	io_frame_leave(io);
	return;
}

void
io_dispatch_connect_tls(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 ret;

	io_frame_enter("io_dispatch_connect_tls", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if ((ret = tls_connect_socket(io->tls, io->sock, io->name)) == 0) {
		io_reset(io, EV_READ|EV_WRITE, io_dispatch_handshake_tls);
		goto leave;
	}

	io->error = tls_error(io->tls);
	io_callback(io, IO_ERROR);

 leave:
	io_frame_leave(io);
}

void
io_dispatch_read_tls(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 n;

	io_frame_enter("io_dispatch_read_tls", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

again:
	iobuf_normalize(&io->iobuf);
	switch ((n = iobuf_read_tls(&io->iobuf, io->tls))) {
	case IOBUF_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_read_tls);
		break;
	case IOBUF_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_read_tls);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		io->error = tls_error(io->tls);
		io_callback(io, IO_ERROR);
		break;
	default:
		io_debug("io_dispatch_read_tls(...) -> r=%d\n", n);
		io_callback(io, IO_DATAIN);
		if (current == io && IO_READING(io))
			goto again;
	}

    leave:
	io_frame_leave(io);
}

void
io_dispatch_write_tls(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 n;
	size_t		 w2, w;

	io_frame_enter("io_dispatch_write_tls", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	w = io_queued(io);
	switch ((n = iobuf_write_tls(&io->iobuf, io->tls))) {
	case IOBUF_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_write_tls);
		break;
	case IOBUF_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_write_tls);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		io->error = tls_error(io->tls);
		io_callback(io, IO_ERROR);
		break;
	default:
		io_debug("io_dispatch_write_tls(...) -> w=%d\n", n);
		w2 = io_queued(io);
		if (w > io->lowat && w2 <= io->lowat)
			io_callback(io, IO_LOWAT);
		break;
	}

    leave:
	io_frame_leave(io);
}

void
io_reload_tls(struct io *io)
{
	short	ev = 0;
	void	(*dispatch)(int, short, void*) = NULL;

	switch (io->state) {
	case IO_STATE_CONNECT_TLS:
		ev = EV_WRITE;
		dispatch = io_dispatch_connect_tls;
		break;
	case IO_STATE_ACCEPT_TLS:
		ev = EV_READ;
		dispatch = io_dispatch_accept_tls;
		break;
	case IO_STATE_UP:
		ev = 0;
		if (IO_READING(io) && !(io->flags & IO_PAUSE_IN)) {
			ev = EV_READ;
			dispatch = io_dispatch_read_tls;
		}
		else if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) &&
		    io_queued(io)) {
			ev = EV_WRITE;
			dispatch = io_dispatch_write_tls;
		}
		if (!ev)
			return; /* paused */
		break;
	default:
		errx(1, "io_reload_tls: bad state");
	}

	io_reset(io, ev, dispatch);
}

#endif /* IO_TLS */