/*	$OpenBSD: ioev.c,v 1.42 2019/06/12 17:42:53 eric Exp $	*/
/*
 * Copyright (c) 2012 Eric Faurot <eric@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "includes.h"

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/socket.h>

#include <err.h>
#include <errno.h>
#include <event.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#include "ioev.h"
#include "iobuf.h"

#ifdef IO_TLS
#include <openssl/err.h>
#include <openssl/ssl.h>
#endif

enum {
	IO_STATE_NONE,
	IO_STATE_CONNECT,
	IO_STATE_CONNECT_TLS,
	IO_STATE_ACCEPT_TLS,
	IO_STATE_UP,

	IO_STATE_MAX,
};

#define IO_PAUSE_IN 		IO_IN
#define IO_PAUSE_OUT		IO_OUT
#define IO_READ			0x04
#define IO_WRITE		0x08
#define IO_RW			(IO_READ | IO_WRITE)
#define IO_RESET		0x10  /* internal */
#define IO_HELD			0x20  /* internal */

struct io {
	int		 sock;
	void		*arg;
	void		(*cb)(struct io*, int, void *);
	struct iobuf	 iobuf;
	size_t		 lowat;
	int		 timeout;
	int		 flags;
	int		 state;
	struct event	 ev;
	void		*tls;
	const char	*error; /* only valid immediately on callback */
};

const char* io_strflags(int);
const char* io_evstr(short);

void	_io_init(void);
void	io_hold(struct io *);
void	io_release(struct io *);
void	io_callback(struct io*, int);
void	io_dispatch(int, short, void *);
void	io_dispatch_connect(int, short, void *);
size_t	io_pending(struct io *);
size_t	io_queued(struct io*);
void	io_reset(struct io *, short, void (*)(int, short, void*));
void	io_frame_enter(const char *, struct io *, int);
void	io_frame_leave(struct io *);

#ifdef IO_TLS
void	ssl_error(const char *); /* XXX external */

static const char* io_tls_error(void);
void	io_dispatch_accept_tls(int, short, void *);
void	io_dispatch_connect_tls(int, short, void *);
void	io_dispatch_read_tls(int, short, void *);
void	io_dispatch_write_tls(int, short, void *);
void	io_reload_tls(struct io *io);
#endif

static struct io	*current = NULL;
static uint64_t		 frame = 0;
static int		_io_debug = 0;

#define io_debug(args...) do { if (_io_debug) printf(args); } while(0)

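/*
 * Typical lifecycle, as a sketch (hypothetical callback and argument
 * names; error handling elided):
 *
 *	struct io *io;
 *
 *	io = io_new();
 *	io_set_callback(io, my_cb, my_arg);
 *	io_set_timeout(io, 30000);
 *	if (io_connect(io, sa, NULL) == -1)
 *		... handle the error, see io_error() ...
 *	... run the libevent loop: events are reported to my_cb() ...
 *	io_free(io);
 */
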
const char*
io_strio(struct io *io)
{
	static char	buf[128];
	char		ssl[128];

	ssl[0] = '\0';
#ifdef IO_TLS
	if (io->tls) {
		(void)snprintf(ssl, sizeof ssl, " tls=%s:%s:%d",
		    SSL_get_version(io->tls),
		    SSL_get_cipher_name(io->tls),
		    SSL_get_cipher_bits(io->tls, NULL));
	}
#endif

	(void)snprintf(buf, sizeof buf,
	    "<io:%p fd=%d to=%d fl=%s%s ib=%zu ob=%zu>",
	    io, io->sock, io->timeout, io_strflags(io->flags), ssl,
	    io_pending(io), io_queued(io));

	return (buf);
}

#define CASE(x) case x : return #x

const char*
io_strevent(int evt)
{
	static char buf[32];

	switch (evt) {
	CASE(IO_CONNECTED);
	CASE(IO_TLSREADY);
	CASE(IO_DATAIN);
	CASE(IO_LOWAT);
	CASE(IO_DISCONNECTED);
	CASE(IO_TIMEOUT);
	CASE(IO_ERROR);
	default:
		(void)snprintf(buf, sizeof(buf), "IO_? %d", evt);
		return buf;
	}
}

void
io_set_nonblocking(int fd)
{
	int	flags;

	if ((flags = fcntl(fd, F_GETFL)) == -1)
		err(1, "io_set_nonblocking:fcntl(F_GETFL)");

	flags |= O_NONBLOCK;

	if (fcntl(fd, F_SETFL, flags) == -1)
		err(1, "io_set_nonblocking:fcntl(F_SETFL)");
}

void
io_set_nolinger(int fd)
{
	struct linger    l;

	memset(&l, 0, sizeof(l));
	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) == -1)
		err(1, "io_set_nolinger:setsockopt()");
}

/*
 * Event framing must not rely on an io pointer to refer to the "same" io
 * throughout the frame, because this is not always the case:
 *
 * 1) enter(addr0) -> free(addr0) -> leave(addr0) = SEGV
 * 2) enter(addr0) -> free(addr0) -> malloc == addr0 -> leave(addr0) = BAD!
 *
 * In both cases, the problem is that the io is freed in the callback, so
 * the pointer becomes invalid. If that happens, the user is required to
 * call io_free(), so we can adapt the frame state there.
 */
void
io_frame_enter(const char *where, struct io *io, int ev)
{
	io_debug("\n=== %" PRIu64 " ===\n"
	    "io_frame_enter(%s, %s, %s)\n",
	    frame, where, io_evstr(ev), io_strio(io));

	if (current)
		errx(1, "io_frame_enter: interleaved frames");

	current = io;

	io_hold(io);
}

void
io_frame_leave(struct io *io)
{
	io_debug("io_frame_leave(%" PRIu64 ")\n", frame);

	if (current && current != io)
		errx(1, "io_frame_leave: io mismatch");

	/* io has been cleared */
	if (current == NULL)
		goto done;

	/* TODO: There is a possible optimization here:
	 * In a typical half-duplex request/response scenario,
	 * the io is waiting to read a request, and when done, it queues
	 * the response in the output buffer and goes to write mode.
	 * There, the write event is set and will be triggered in the next
	 * event frame.  In most cases, the write call could be done
	 * immediately as part of the last read frame, thus avoiding a
	 * trip through the event loop machinery. So, as an optimization,
	 * we could detect that case here and force an event dispatch.
	 */

	/* Reload the io if it has not been reset already. */
	io_release(io);
	current = NULL;
    done:
	io_debug("=== /%" PRIu64 "\n", frame);

	frame += 1;
}

void
_io_init(void)
{
	static int init = 0;

	if (init)
		return;

	init = 1;
	_io_debug = getenv("IO_DEBUG") != NULL;
}

struct io *
io_new(void)
{
	struct io *io;

	_io_init();

	if ((io = calloc(1, sizeof(*io))) == NULL)
		return NULL;

	io->sock = -1;
	io->timeout = -1;

	if (iobuf_init(&io->iobuf, 0, 0) == -1) {
		free(io);
		return NULL;
	}

	return io;
}

void
io_free(struct io *io)
{
	io_debug("io_free(%p)\n", io);

	/* the current io is virtually dead */
	if (io == current)
		current = NULL;

#ifdef IO_TLS
	SSL_free(io->tls);
	io->tls = NULL;
#endif

	if (event_initialized(&io->ev))
		event_del(&io->ev);
	if (io->sock != -1) {
		close(io->sock);
		io->sock = -1;
	}

	iobuf_clear(&io->iobuf);
	free(io);
}

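/*
 * io_hold()/io_release() bracket a callback frame: while an io is held,
 * io_reload() is a no-op, and io_release() performs the deferred reload
 * unless io_reset() already rearmed the event (IO_RESET).
 */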
void
io_hold(struct io *io)
{
	io_debug("io_hold(%p)\n", io);

	if (io->flags & IO_HELD)
		errx(1, "io_hold: io is already held");

	io->flags &= ~IO_RESET;
	io->flags |= IO_HELD;
}

void
io_release(struct io *io)
{
	if (!(io->flags & IO_HELD))
		errx(1, "io_release: io is not held");

	io->flags &= ~IO_HELD;
	if (!(io->flags & IO_RESET))
		io_reload(io);
}

void
io_set_fd(struct io *io, int fd)
{
	io->sock = fd;
	if (fd != -1)
		io_reload(io);
}

void
io_set_callback(struct io *io, void(*cb)(struct io *, int, void *), void *arg)
{
	io->cb = cb;
	io->arg = arg;
}

void
io_set_timeout(struct io *io, int msec)
{
	io_debug("io_set_timeout(%p, %d)\n", io, msec);

	io->timeout = msec;
}

void
io_set_lowat(struct io *io, size_t lowat)
{
	io_debug("io_set_lowat(%p, %zu)\n", io, lowat);

	io->lowat = lowat;
}

void
io_pause(struct io *io, int dir)
{
	io_debug("io_pause(%p, %x)\n", io, dir);

	io->flags |= dir & (IO_PAUSE_IN | IO_PAUSE_OUT);
	io_reload(io);
}

void
io_resume(struct io *io, int dir)
{
	io_debug("io_resume(%p, %x)\n", io, dir);

	io->flags &= ~(dir & (IO_PAUSE_IN | IO_PAUSE_OUT));
	io_reload(io);
}

void
io_set_read(struct io *io)
{
	int	mode;

	io_debug("io_set_read(%p)\n", io);

	mode = io->flags & IO_RW;
	if (!(mode == 0 || mode == IO_WRITE))
		errx(1, "io_set_read(): full-duplex or reading");

	io->flags &= ~IO_RW;
	io->flags |= IO_READ;
	io_reload(io);
}

void
io_set_write(struct io *io)
{
	int	mode;

	io_debug("io_set_write(%p)\n", io);

	mode = io->flags & IO_RW;
	if (!(mode == 0 || mode == IO_READ))
		errx(1, "io_set_write(): full-duplex or writing");

	io->flags &= ~IO_RW;
	io->flags |= IO_WRITE;
	io_reload(io);
}

const char *
io_error(struct io *io)
{
	return io->error;
}

void *
io_tls(struct io *io)
{
	return io->tls;
}

int
io_fileno(struct io *io)
{
	return io->sock;
}

int
io_paused(struct io *io, int what)
{
	return (io->flags & (IO_PAUSE_IN | IO_PAUSE_OUT)) == what;
}

/*
 * Buffered output functions
 */

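/*
 * A minimal callback sketch (hypothetical; most events elided): echo
 * every input line back to the peer using the buffered I/O below.
 *
 *	static void
 *	echo_cb(struct io *io, int evt, void *arg)
 *	{
 *		char	*line;
 *		size_t	 len;
 *
 *		switch (evt) {
 *		case IO_DATAIN:
 *			while ((line = io_getline(io, &len)))
 *				io_printf(io, "%s\r\n", line);
 *			break;
 *		case IO_DISCONNECTED:
 *		case IO_ERROR:
 *			io_free(io);
 *			break;
 *		}
 *	}
 */
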
int
io_write(struct io *io, const void *buf, size_t len)
{
	int r;

	r = iobuf_queue(&io->iobuf, buf, len);

	io_reload(io);

	return r;
}

int
io_writev(struct io *io, const struct iovec *iov, int iovcount)
{
	int r;

	r = iobuf_queuev(&io->iobuf, iov, iovcount);

	io_reload(io);

	return r;
}

int
io_print(struct io *io, const char *s)
{
	return io_write(io, s, strlen(s));
}

int
io_printf(struct io *io, const char *fmt, ...)
{
	va_list ap;
	int r;

	va_start(ap, fmt);
	r = io_vprintf(io, fmt, ap);
	va_end(ap);

	return r;
}

int
io_vprintf(struct io *io, const char *fmt, va_list ap)
{
	char *buf;
	int len;

	len = vasprintf(&buf, fmt, ap);
	if (len == -1)
		return -1;
	len = io_write(io, buf, len);
	free(buf);

	return len;
}

size_t
io_queued(struct io *io)
{
	return iobuf_queued(&io->iobuf);
}

/*
 * Buffered input functions
 */

void *
io_data(struct io *io)
{
	return iobuf_data(&io->iobuf);
}

size_t
io_datalen(struct io *io)
{
	return iobuf_len(&io->iobuf);
}

char *
io_getline(struct io *io, size_t *sz)
{
	return iobuf_getline(&io->iobuf, sz);
}

void
io_drop(struct io *io, size_t sz)
{
	iobuf_drop(&io->iobuf, sz);
}


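/*
 * With no direction flag set (the default), the io is full-duplex and
 * both predicates below are true.
 */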
#define IO_READING(io) (((io)->flags & IO_RW) != IO_WRITE)
#define IO_WRITING(io) (((io)->flags & IO_RW) != IO_READ)

/*
 * Set up the necessary events as required by the current io state,
 * honouring duplex mode and i/o pauses.
 */
void
io_reload(struct io *io)
{
	short	events;

	/* io will be reloaded at release time */
	if (io->flags & IO_HELD)
		return;

	iobuf_normalize(&io->iobuf);

#ifdef IO_TLS
	if (io->tls) {
		io_reload_tls(io);
		return;
	}
#endif

	io_debug("io_reload(%p)\n", io);

	events = 0;
	if (IO_READING(io) && !(io->flags & IO_PAUSE_IN))
		events = EV_READ;
	if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) && io_queued(io))
		events |= EV_WRITE;

	io_reset(io, events, io_dispatch);
}

/* Set the requested event. */
void
io_reset(struct io *io, short events, void (*dispatch)(int, short, void*))
{
	struct timeval	tv, *ptv;

	io_debug("io_reset(%p, %s, %p) -> %s\n",
	    io, io_evstr(events), dispatch, io_strio(io));

	/*
	 * Indicate that the event has already been reset so that reload
	 * is not called on frame_leave.
	 */
	io->flags |= IO_RESET;

	if (event_initialized(&io->ev))
		event_del(&io->ev);

	/*
	 * The io is paused by the user, so we don't want the timeout to be
	 * effective.
	 */
	if (events == 0)
		return;

	event_set(&io->ev, io->sock, events, dispatch, io);
	if (io->timeout >= 0) {
		tv.tv_sec = io->timeout / 1000;
		tv.tv_usec = (io->timeout % 1000) * 1000;
		ptv = &tv;
	} else
		ptv = NULL;

	event_add(&io->ev, ptv);
}

size_t
io_pending(struct io *io)
{
	return iobuf_len(&io->iobuf);
}

const char*
io_strflags(int flags)
{
	static char	buf[64];

	buf[0] = '\0';

	switch (flags & IO_RW) {
	case 0:
		(void)strlcat(buf, "rw", sizeof buf);
		break;
	case IO_READ:
		(void)strlcat(buf, "R", sizeof buf);
		break;
	case IO_WRITE:
		(void)strlcat(buf, "W", sizeof buf);
		break;
	case IO_RW:
		(void)strlcat(buf, "RW", sizeof buf);
		break;
	}

	if (flags & IO_PAUSE_IN)
		(void)strlcat(buf, ",F_PI", sizeof buf);
	if (flags & IO_PAUSE_OUT)
		(void)strlcat(buf, ",F_PO", sizeof buf);

	return buf;
}

const char*
io_evstr(short ev)
{
	static char	buf[64];
	char		buf2[16];
	int		n;

	n = 0;
	buf[0] = '\0';

	if (ev == 0) {
		(void)strlcat(buf, "<NONE>", sizeof(buf));
		return buf;
	}

	if (ev & EV_TIMEOUT) {
		(void)strlcat(buf, "EV_TIMEOUT", sizeof(buf));
		ev &= ~EV_TIMEOUT;
		n++;
	}

	if (ev & EV_READ) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_READ", sizeof(buf));
		ev &= ~EV_READ;
		n++;
	}

	if (ev & EV_WRITE) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_WRITE", sizeof(buf));
		ev &= ~EV_WRITE;
		n++;
	}

	if (ev & EV_SIGNAL) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_SIGNAL", sizeof(buf));
		ev &= ~EV_SIGNAL;
		n++;
	}

	if (ev) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_?=0x", sizeof(buf));
		(void)snprintf(buf2, sizeof(buf2), "%hx", ev);
		(void)strlcat(buf, buf2, sizeof(buf));
	}

	return buf;
}

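/*
 * Default event handler for plain (non-TLS) sockets: flush queued
 * output first, then drain pending input, reporting IO_LOWAT and
 * IO_DATAIN to the user callback as appropriate.
 */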
void
io_dispatch(int fd, short ev, void *humppa)
{
	struct io	*io = humppa;
	size_t		 w;
	ssize_t		 n;
	int		 saved_errno;

	io_frame_enter("io_dispatch", io, ev);

	if (ev == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if (ev & EV_WRITE && (w = io_queued(io))) {
		if ((n = iobuf_write(&io->iobuf, io->sock)) < 0) {
			if (n == IOBUF_WANT_WRITE) /* kqueue bug? */
				goto read;
			if (n == IOBUF_CLOSED)
				io_callback(io, IO_DISCONNECTED);
			else {
				saved_errno = errno;
				io->error = strerror(errno);
				errno = saved_errno;
				io_callback(io, IO_ERROR);
			}
			goto leave;
		}
		if (w > io->lowat && w - n <= io->lowat)
			io_callback(io, IO_LOWAT);
	}
    read:

	if (ev & EV_READ) {
		iobuf_normalize(&io->iobuf);
		if ((n = iobuf_read(&io->iobuf, io->sock)) < 0) {
			if (n == IOBUF_CLOSED)
				io_callback(io, IO_DISCONNECTED);
			else {
				saved_errno = errno;
				io->error = strerror(errno);
				errno = saved_errno;
				io_callback(io, IO_ERROR);
			}
			goto leave;
		}
		if (n)
			io_callback(io, IO_DATAIN);
	}

    leave:
	io_frame_leave(io);
}

void
io_callback(struct io *io, int evt)
{
	io->cb(io, evt, io->arg);
}

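/*
 * Start a non-blocking connect(2) on a new socket, optionally binding
 * it to bsa first.  Returns the socket, or -1 on immediate failure.
 * Completion is reported asynchronously through io_dispatch_connect()
 * as IO_CONNECTED, IO_TIMEOUT or IO_ERROR.
 */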
int
io_connect(struct io *io, const struct sockaddr *sa, const struct sockaddr *bsa)
{
	int	sock, errno_save;

	if ((sock = socket(sa->sa_family, SOCK_STREAM, 0)) == -1)
		goto fail;

	io_set_nonblocking(sock);
	io_set_nolinger(sock);

	if (bsa && bind(sock, bsa, SA_LEN(bsa)) == -1)
		goto fail;

	if (connect(sock, sa, SA_LEN(sa)) == -1)
		if (errno != EINPROGRESS)
			goto fail;

	io->sock = sock;
	io_reset(io, EV_WRITE, io_dispatch_connect);

	return (sock);

    fail:
	if (sock != -1) {
		errno_save = errno;
		close(sock);
		errno = errno_save;
		io->error = strerror(errno);
	}
	return (-1);
}

void
io_dispatch_connect(int fd, short ev, void *humppa)
{
	struct io	*io = humppa;
	int		 r, e;
	socklen_t	 sl;

	io_frame_enter("io_dispatch_connect", io, ev);

	if (ev == EV_TIMEOUT) {
		close(fd);
		io->sock = -1;
		io_callback(io, IO_TIMEOUT);
	} else {
		sl = sizeof(e);
		r = getsockopt(fd, SOL_SOCKET, SO_ERROR, &e, &sl);
		if (r == -1) {
			warn("io_dispatch_connect: getsockopt");
			e = errno;
		}
		if (e) {
			close(fd);
			io->sock = -1;
			io->error = strerror(e);
			io_callback(io, e == ETIMEDOUT ? IO_TIMEOUT : IO_ERROR);
		} else {
			io->state = IO_STATE_UP;
			io_callback(io, IO_CONNECTED);
		}
	}

	io_frame_leave(io);
}

#ifdef IO_TLS

static const char*
io_tls_error(void)
{
	static char	buf[128];
	unsigned long	e;

	e = ERR_peek_last_error();
	if (e) {
		ERR_error_string(e, buf);
		return (buf);
	}

	return ("No TLS error");
}

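/*
 * Attach a caller-provided SSL handle to the io and start the TLS
 * handshake, as client (write mode) or server (read mode).  IO_TLSREADY
 * is reported once the handshake completes.
 */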
int
io_start_tls(struct io *io, void *tls)
{
	int	mode;

	mode = io->flags & IO_RW;
	if (mode == 0 || mode == IO_RW)
		errx(1, "io_start_tls(): full-duplex or unset");

	if (io->tls)
		errx(1, "io_start_tls(): TLS already started");
	io->tls = tls;

	if (SSL_set_fd(io->tls, io->sock) == 0) {
		ssl_error("io_start_tls:SSL_set_fd");
		return (-1);
	}

	if (mode == IO_WRITE) {
		io->state = IO_STATE_CONNECT_TLS;
		SSL_set_connect_state(io->tls);
		io_reset(io, EV_WRITE, io_dispatch_connect_tls);
	} else {
		io->state = IO_STATE_ACCEPT_TLS;
		SSL_set_accept_state(io->tls);
		io_reset(io, EV_READ, io_dispatch_accept_tls);
	}

	return (0);
}

void
io_dispatch_accept_tls(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 e, ret;

	io_frame_enter("io_dispatch_accept_tls", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if ((ret = SSL_accept(io->tls)) > 0) {
		io->state = IO_STATE_UP;
		io_callback(io, IO_TLSREADY);
		goto leave;
	}

	switch ((e = SSL_get_error(io->tls, ret))) {
	case SSL_ERROR_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_accept_tls);
		break;
	case SSL_ERROR_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_accept_tls);
		break;
	default:
		io->error = io_tls_error();
		ssl_error("io_dispatch_accept_tls:SSL_accept");
		io_callback(io, IO_ERROR);
		break;
	}

    leave:
	io_frame_leave(io);
}

void
io_dispatch_connect_tls(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 e, ret;

	io_frame_enter("io_dispatch_connect_tls", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if ((ret = SSL_connect(io->tls)) > 0) {
		io->state = IO_STATE_UP;
		io_callback(io, IO_TLSREADY);
		goto leave;
	}

	switch ((e = SSL_get_error(io->tls, ret))) {
	case SSL_ERROR_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_connect_tls);
		break;
	case SSL_ERROR_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_connect_tls);
		break;
	default:
		io->error = io_tls_error();
		ssl_error("io_dispatch_connect_tls:SSL_connect");
		io_callback(io, IO_TLSERROR);
		break;
	}

    leave:
	io_frame_leave(io);
}

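/*
 * A TLS read may have to wait for socket writability and vice versa
 * (e.g. during renegotiation), hence the cross dispatching on
 * IOBUF_WANT_READ/IOBUF_WANT_WRITE in the two handlers below.
 */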
void
io_dispatch_read_tls(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 n, saved_errno;

	io_frame_enter("io_dispatch_read_tls", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

    again:
	iobuf_normalize(&io->iobuf);
	switch ((n = iobuf_read_tls(&io->iobuf, (SSL*)io->tls))) {
	case IOBUF_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_read_tls);
		break;
	case IOBUF_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_read_tls);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		saved_errno = errno;
		io->error = strerror(errno);
		errno = saved_errno;
		io_callback(io, IO_ERROR);
		break;
	case IOBUF_TLSERROR:
		io->error = io_tls_error();
		ssl_error("io_dispatch_read_tls:SSL_read");
		io_callback(io, IO_ERROR);
		break;
	default:
		io_debug("io_dispatch_read_tls(...) -> r=%d\n", n);
		io_callback(io, IO_DATAIN);
		if (current == io && IO_READING(io) && SSL_pending(io->tls))
			goto again;
	}

    leave:
	io_frame_leave(io);
}

void
io_dispatch_write_tls(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 n, saved_errno;
	size_t		 w2, w;

	io_frame_enter("io_dispatch_write_tls", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	w = io_queued(io);
	switch ((n = iobuf_write_tls(&io->iobuf, (SSL*)io->tls))) {
	case IOBUF_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_write_tls);
		break;
	case IOBUF_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_write_tls);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		saved_errno = errno;
		io->error = strerror(errno);
		errno = saved_errno;
		io_callback(io, IO_ERROR);
		break;
	case IOBUF_TLSERROR:
		io->error = io_tls_error();
		ssl_error("io_dispatch_write_tls:SSL_write");
		io_callback(io, IO_ERROR);
		break;
	default:
		io_debug("io_dispatch_write_tls(...) -> w=%d\n", n);
		w2 = io_queued(io);
		if (w > io->lowat && w2 <= io->lowat)
			io_callback(io, IO_LOWAT);
		break;
	}

    leave:
	io_frame_leave(io);
}

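/* Pick the event and dispatcher matching the current TLS state. */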
void
io_reload_tls(struct io *io)
{
	short	ev = 0;
	void	(*dispatch)(int, short, void*) = NULL;

	switch (io->state) {
	case IO_STATE_CONNECT_TLS:
		ev = EV_WRITE;
		dispatch = io_dispatch_connect_tls;
		break;
	case IO_STATE_ACCEPT_TLS:
		ev = EV_READ;
		dispatch = io_dispatch_accept_tls;
		break;
	case IO_STATE_UP:
		ev = 0;
		if (IO_READING(io) && !(io->flags & IO_PAUSE_IN)) {
			ev = EV_READ;
			dispatch = io_dispatch_read_tls;
		} else if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) &&
		    io_queued(io)) {
			ev = EV_WRITE;
			dispatch = io_dispatch_write_tls;
		}
		if (!ev)
			return; /* paused */
		break;
	default:
		errx(1, "io_reload_tls(): bad state");
	}

	io_reset(io, ev, dispatch);
}

#endif /* IO_TLS */