/* $OpenBSD: io.c,v 1.1.1.1 2018/04/27 16:14:36 eric Exp $ */

/*
 * Copyright (c) 2012 Eric Faurot <eric@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/socket.h>

#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <netdb.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#include "io.h"
#include "iobuf.h"
#include "log.h"

#ifdef IO_SSL
#include <openssl/err.h>
#include <openssl/ssl.h>
#endif

enum {
	IO_STATE_DOWN,
	IO_STATE_UP,
	IO_STATE_CONNECT,
	IO_STATE_CONNECT_TLS,
	IO_STATE_ACCEPT_TLS
};

#define IO_PAUSE_IN	IO_IN
#define IO_PAUSE_OUT	IO_OUT

#define IO_READ		0x0100
#define IO_WRITE	0x0200
#define IO_RW		(IO_READ | IO_WRITE)
#define IO_RESET	0x1000
#define IO_HELD		0x2000
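
/*
 * Note on the internal flags (a summary inferred from their use below):
 * IO_READ/IO_WRITE select the current half-duplex direction,
 * IO_PAUSE_IN/IO_PAUSE_OUT (aliases for IO_IN/IO_OUT) suspend a
 * direction without changing the mode, IO_RESET records that the event
 * was already re-armed within the current frame, and IO_HELD protects
 * the io from being reloaded while a callback frame is running.
 */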

struct io {
	int		 sock;
	void		*arg;
	void		(*cb)(struct io*, int, void *);
	struct iobuf	 iobuf;
	size_t		 lowat;
	int		 timeout;
	int		 flags;
	int		 state;
	struct event	 ev;
	void		*tls;
	const char	*error; /* only valid immediately on callback */
	struct sockaddr	*bind;
	struct addrinfo	*ai;	/* for connecting */
};
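
/*
 * Typical client-side usage of this API (a minimal sketch added for
 * illustration, not part of the original code; it assumes the libevent
 * loop is set up elsewhere and that "ai" comes from getaddrinfo(3),
 * the list being consumed by io_connect()):
 *
 *	static void
 *	example_cb(struct io *io, int evt, void *arg)
 *	{
 *		switch (evt) {
 *		case IO_CONNECTED:
 *			io_set_write(io);
 *			io_printf(io, "hello\r\n");
 *			break;
 *		case IO_LOWAT:
 *			io_set_read(io);
 *			break;
 *		case IO_DATAIN:
 *			... consume io_data()/io_getline(), then io_drop() ...
 *			break;
 *		default:
 *			io_free(io);
 *		}
 *	}
 *
 *	io = io_new();
 *	io_set_callback(io, example_cb, NULL);
 *	io_set_timeout(io, 30 * 1000);
 *	io_connect(io, ai);
 */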

static const char* io_strflags(int);
static const char* io_strevents(short);

static void io_reload(struct io *);
static void io_reset(struct io *, short, void (*)(int, short, void*));
static void io_frame_enter(const char *, struct io *, int);
static void io_frame_leave(struct io *);
static void io_hold(struct io *);
static void io_release(struct io *);
static void io_callback(struct io*, int);
static void io_dispatch(int, short, void *);
static void io_dispatch_connect(int, short, void *);
static int io_connect_next(struct io *);

#ifdef IO_SSL
void ssl_error(const char *); /* XXX external */
static const char* io_ssl_error(void);
static void io_dispatch_accept_tls(int, short, void *);
static void io_dispatch_connect_tls(int, short, void *);
static void io_dispatch_read_tls(int, short, void *);
static void io_dispatch_write_tls(int, short, void *);
static void io_reload_tls(struct io *io);
#endif

static struct io *current = NULL;
static long long unsigned frame = 0;
static int _io_trace = 0;

static const char *states[] = {
	"DOWN",
	"UP",
	"CONNECT",
	"CONNECT_TLS",
	"ACCEPT_TLS"
};

#define io_debug(args...) do { if (_io_trace) log_debug(args); } while(0)
#define IO_READING(io) (((io)->flags & IO_RW) != IO_WRITE)
#define IO_WRITING(io) (((io)->flags & IO_RW) != IO_READ)

void
io_trace(int on)
{
	_io_trace = on;
}

const char*
io_strio(struct io *io)
{
	static char buf[128];
	char ssl[128];

	ssl[0] = '\0';
#ifdef IO_SSL
	if (io->tls) {
		(void)snprintf(ssl, sizeof ssl, " ssl=%s:%s:%d",
		    SSL_get_version(io->tls),
		    SSL_get_cipher_name(io->tls),
		    SSL_get_cipher_bits(io->tls, NULL));
	}
#endif
	(void)snprintf(buf, sizeof buf,
	    "<io:%p st=%s, fd=%d to=%d fl=%s%s ib=%zu ob=%zu>",
	    io, states[io->state], io->sock, io->timeout,
	    io_strflags(io->flags), ssl, io_datalen(io), io_queued(io));

	return buf;
}

const char*
io_strevent(int evt)
{
	static char buf[32];

	switch (evt) {
	case IO_CONNECTED:
		return "IO_CONNECTED";
	case IO_TLSREADY:
		return "IO_TLSREADY";
	case IO_DATAIN:
		return "IO_DATAIN";
	case IO_LOWAT:
		return "IO_LOWAT";
	case IO_CLOSED:
		return "IO_CLOSED";
	case IO_DISCONNECTED:
		return "IO_DISCONNECTED";
	case IO_TIMEOUT:
		return "IO_TIMEOUT";
	case IO_ERROR:
		return "IO_ERROR";
	case IO_TLSERROR:
		return "IO_TLSERROR";
	default:
		(void)snprintf(buf, sizeof(buf), "IO_? %d", evt);
		return buf;
	}
}

struct io *
io_new(void)
{
	struct io *io;

	io = calloc(1, sizeof(*io));
	if (io == NULL)
		return NULL;

	iobuf_init(&io->iobuf, 0, 0);
	io->sock = -1;
	io->timeout = -1;

	return io;
}

void
io_free(struct io *io)
{
	io_debug("%s(%p)", __func__, io);

	/* the current io is virtually dead */
	if (io == current)
		current = NULL;

#ifdef IO_SSL
	if (io->tls) {
		SSL_free(io->tls);
		io->tls = NULL;
	}
#endif

	if (io->ai)
		freeaddrinfo(io->ai);
	if (event_initialized(&io->ev))
		event_del(&io->ev);
	if (io->sock != -1) {
		(void)close(io->sock);
		io->sock = -1;
	}

	iobuf_clear(&io->iobuf);
	free(io->bind);
	free(io);
}

int
io_set_callback(struct io *io, void(*cb)(struct io *, int, void *), void *arg)
{
	io->cb = cb;
	io->arg = arg;

	return 0;
}

int
io_set_bindaddr(struct io *io, const struct sockaddr *sa)
{
	struct sockaddr *t;

	if (io->state != IO_STATE_DOWN) {
		errno = EISCONN;
		return -1;
	}

	t = malloc(sa->sa_len);
	if (t == NULL)
		return -1;
	memmove(t, sa, sa->sa_len);

	free(io->bind);
	io->bind = t;

	return 0;
}

int
io_set_bufsize(struct io *io, size_t sz)
{
	errno = ENOSYS;
	return -1;
}

void
io_set_timeout(struct io *io, int msec)
{
	io_debug("%s(%p, %d)", __func__, io, msec);

	io->timeout = msec;
}

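/*
 * Set the low watermark on the output buffer: when a write drains the
 * queued output from above this level to at or below it, the callback
 * receives IO_LOWAT (see io_dispatch() and io_dispatch_write_tls()).
 * With the default of 0, IO_LOWAT is reported once the output buffer
 * becomes empty.
 */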
void
io_set_lowat(struct io *io, size_t lowat)
{
	io_debug("%s(%p, %zu)", __func__, io, lowat);

	io->lowat = lowat;
}

const char *
io_error(struct io *io)
{
	const char *e;

	e = io->error;
	io->error = NULL;
	return e;
}

int
io_fileno(struct io *io)
{
	return io->sock;
}

int
io_attach(struct io *io, int sock)
{
	if (io->state != IO_STATE_DOWN) {
		errno = EISCONN;
		return -1;
	}

	io->state = IO_STATE_UP;
	io->sock = sock;
	io_reload(io);
	return 0;
}

int
io_detach(struct io *io)
{
	errno = ENOSYS;
	return -1;
}

int
io_close(struct io *io)
{
	errno = ENOSYS;
	return -1;
}

int
io_connect(struct io *io, struct addrinfo *ai)
{
	if (ai == NULL) {
		errno = EINVAL;
		fatal("%s", __func__);
		return -1;
	}

	if (io->state != IO_STATE_DOWN) {
		freeaddrinfo(ai);
		errno = EISCONN;
		fatal("%s", __func__);
		return -1;
	}

	io->ai = ai;
	return io_connect_next(io);
}

int
io_disconnect(struct io *io)
{
	errno = ENOSYS;
	fatal("%s", __func__);
	return -1;
}

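/*
 * Hand an already-configured SSL handle to the io and start the
 * handshake (sketch of the contract as implemented below; only
 * effective when built with IO_SSL).  The io must be in a half-duplex
 * direction: write mode performs a client-side connect, read mode a
 * server-side accept.  On success the callback eventually receives
 * IO_TLSREADY.
 */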
int
io_starttls(struct io *io, void *ssl)
{
#ifdef IO_SSL
	int mode;

	mode = io->flags & IO_RW;
	if (mode == 0 || mode == IO_RW)
		fatalx("%s: full-duplex or unset", __func__);

	if (io->tls)
		fatalx("%s: SSL already started", __func__);
	io->tls = ssl;

	if (SSL_set_fd(io->tls, io->sock) == 0) {
		ssl_error("io_start_tls:SSL_set_fd");
		return -1;
	}

	if (mode == IO_WRITE) {
		io->state = IO_STATE_CONNECT_TLS;
		SSL_set_connect_state(io->tls);
		io_reset(io, EV_WRITE, io_dispatch_connect_tls);
	} else {
		io->state = IO_STATE_ACCEPT_TLS;
		SSL_set_accept_state(io->tls);
		io_reset(io, EV_READ, io_dispatch_accept_tls);
	}

	return 0;
#else
	errno = ENOSYS;
	return -1;
#endif
}

void
io_pause(struct io *io, int dir)
{
	io_debug("%s(%p, %x)", __func__, io, dir);

	io->flags |= dir & (IO_IN | IO_OUT);
	io_reload(io);
}

void
io_resume(struct io *io, int dir)
{
	io_debug("%s(%p, %x)", __func__, io, dir);

	io->flags &= ~(dir & (IO_IN | IO_OUT));
	io_reload(io);
}

void
io_set_read(struct io *io)
{
	int mode;

	io_debug("%s(%p)", __func__, io);

	mode = io->flags & IO_RW;
	if (!(mode == 0 || mode == IO_WRITE))
		fatalx("%s: full-duplex or reading", __func__);

	io->flags &= ~IO_RW;
	io->flags |= IO_READ;
	io_reload(io);
}

void
io_set_write(struct io *io)
{
	int mode;

	io_debug("%s(%p)", __func__, io);

	mode = io->flags & IO_RW;
	if (!(mode == 0 || mode == IO_READ))
		fatalx("%s: full-duplex or writing", __func__);

	io->flags &= ~IO_RW;
	io->flags |= IO_WRITE;
	io_reload(io);
}

int
io_write(struct io *io, const void *buf, size_t len)
{
	int r;

	r = iobuf_queue(&io->iobuf, buf, len);

	io_reload(io);

	return r;
}

int
io_writev(struct io *io, const struct iovec *iov, int iovcount)
{
	int r;

	r = iobuf_queuev(&io->iobuf, iov, iovcount);

	io_reload(io);

	return r;
}

int
io_print(struct io *io, const char *s)
{
	return io_write(io, s, strlen(s));
}

int
io_printf(struct io *io, const char *fmt, ...)
{
	va_list ap;
	int r;

	va_start(ap, fmt);
	r = io_vprintf(io, fmt, ap);
	va_end(ap);

	return r;
}

int
io_vprintf(struct io *io, const char *fmt, va_list ap)
{

	char *buf;
	int len;

	len = vasprintf(&buf, fmt, ap);
	if (len == -1)
		return -1;

	len = io_write(io, buf, len);
	free(buf);

	return len;
}

size_t
io_queued(struct io *io)
{
	return iobuf_queued(&io->iobuf);
}

void *
io_data(struct io *io)
{
	return iobuf_data(&io->iobuf);
}

size_t
io_datalen(struct io *io)
{
	return iobuf_len(&io->iobuf);
}

char *
io_getline(struct io *io, size_t *sz)
{
	return iobuf_getline(&io->iobuf, sz);
}

void
io_drop(struct io *io, size_t sz)
{
	return iobuf_drop(&io->iobuf, sz);
}

const char*
io_strflags(int flags)
{
	static char buf[64];

	buf[0] = '\0';

	switch (flags & IO_RW) {
	case 0:
		(void)strlcat(buf, "rw", sizeof buf);
		break;
	case IO_READ:
		(void)strlcat(buf, "R", sizeof buf);
		break;
	case IO_WRITE:
		(void)strlcat(buf, "W", sizeof buf);
		break;
	case IO_RW:
		(void)strlcat(buf, "RW", sizeof buf);
		break;
	}

	if (flags & IO_PAUSE_IN)
		(void)strlcat(buf, ",F_PI", sizeof buf);
	if (flags & IO_PAUSE_OUT)
		(void)strlcat(buf, ",F_PO", sizeof buf);

	return buf;
}

const char*
io_strevents(short ev)
{
	static char buf[64];
	char buf2[16];
	int n;

	n = 0;
	buf[0] = '\0';

	if (ev == 0) {
		(void)strlcat(buf, "<NONE>", sizeof(buf));
		return buf;
	}

	if (ev & EV_TIMEOUT) {
		(void)strlcat(buf, "EV_TIMEOUT", sizeof(buf));
		ev &= ~EV_TIMEOUT;
		n++;
	}

	if (ev & EV_READ) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_READ", sizeof(buf));
		ev &= ~EV_READ;
		n++;
	}

	if (ev & EV_WRITE) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_WRITE", sizeof(buf));
		ev &= ~EV_WRITE;
		n++;
	}

	if (ev & EV_SIGNAL) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_SIGNAL", sizeof(buf));
		ev &= ~EV_SIGNAL;
		n++;
	}

	if (ev) {
		if (n)
			(void)strlcat(buf, "|", sizeof(buf));
		(void)strlcat(buf, "EV_?=0x", sizeof(buf));
		(void)snprintf(buf2, sizeof(buf2), "%hx", ev);
		(void)strlcat(buf, buf2, sizeof(buf));
	}

	return buf;
}

/*
 * Set up the necessary events as required by the current io state,
 * honouring duplex mode and i/o pause.
 */
static void
io_reload(struct io *io)
{
	short events;

	/* The io will be reloaded at release time. */
	if (io->flags & IO_HELD)
		return;

	/* Do nothing if no socket. */
	if (io->sock == -1)
		return;

#ifdef IO_SSL
	if (io->tls) {
		io_reload_tls(io);
		return;
	}
#endif

	io_debug("%s(%p)", __func__, io);

	events = 0;
	if (IO_READING(io) && !(io->flags & IO_PAUSE_IN))
		events = EV_READ;
	if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) && io_queued(io))
		events |= EV_WRITE;

	io_reset(io, events, io_dispatch);
}

static void
io_reset(struct io *io, short events, void (*dispatch)(int, short, void*))
{
	struct timeval tv, *ptv;

	io_debug("%s(%p, %s, %p) -> %s", __func__, io,
	    io_strevents(events), dispatch, io_strio(io));

	/*
	 * Indicate that the event has already been reset so that reload
	 * is not called on frame_leave.
	 */
	io->flags |= IO_RESET;

	if (event_initialized(&io->ev))
		event_del(&io->ev);

	/*
	 * The io is paused by the user, so we don't want the timeout to be
	 * effective.
	 */
	if (events == 0)
		return;

	event_set(&io->ev, io->sock, events, dispatch, io);
	if (io->timeout >= 0) {
		tv.tv_sec = io->timeout / 1000;
		tv.tv_usec = (io->timeout % 1000) * 1000;
		ptv = &tv;
	} else
		ptv = NULL;

	event_add(&io->ev, ptv);
}

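/*
 * Callback frame bookkeeping: io_frame_enter() marks the io as held so
 * that API calls made from within the user callback (io_set_read(),
 * io_pause(), io_write(), ...) do not re-arm the event immediately;
 * io_frame_leave() releases the io and reloads it, unless io_reset()
 * was already called or the io was freed during the frame.
 */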
static void
io_frame_enter(const char *where, struct io *io, int ev)
{
	io_debug("io: BEGIN %llu", frame);
	io_debug("%s(%s, %s, %s)", __func__, where, io_strevents(ev),
	    io_strio(io));

	if (current)
		fatalx("%s: interleaved frames", __func__);

	current = io;

	io_hold(io);
}

static void
io_frame_leave(struct io *io)
{
	io_debug("%s(%llu)", __func__, frame);

	if (current && current != io)
		fatalx("%s: io mismatch", __func__);

	/* The io has been cleared. */
	if (current == NULL)
		goto done;

	/*
	 * TODO: There is a possible optimization here:
	 * In a typical half-duplex request/response scenario,
	 * the io is waiting to read a request, and when done, it queues
	 * the response in the output buffer and switches to write mode.
	 * There, the write event is set and will be triggered in the next
	 * event frame.  In most cases, the write call could be done
	 * immediately as part of the last read frame, thus avoiding a
	 * pass through the event loop machinery.  So, as an optimization,
	 * we could detect that case here and force an event dispatch.
	 */

	/* Reload the io if it has not been reset already. */
	io_release(io);
	current = NULL;
done:
	io_debug("io: END %llu", frame);

	frame += 1;
}

static void
io_hold(struct io *io)
{
	io_debug("%s(%p)", __func__, io);

	if (io->flags & IO_HELD)
		fatalx("%s: already held", __func__);

	io->flags &= ~IO_RESET;
	io->flags |= IO_HELD;
}

static void
io_release(struct io *io)
{
	io_debug("%s(%p)", __func__, io);

	if (!(io->flags & IO_HELD))
		fatalx("%s: not held", __func__);

	io->flags &= ~IO_HELD;
	if (!(io->flags & IO_RESET))
		io_reload(io);
}

static void
io_callback(struct io *io, int evt)
{
	io_debug("%s(%s, %s)", __func__, io_strio(io), io_strevent(evt));

	io->cb(io, evt, io->arg);
}

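/*
 * Event handler for the plain (non-TLS) case: on EV_WRITE, flush the
 * queued output and report IO_LOWAT when the watermark is crossed; on
 * EV_READ, pull available data into the input buffer and report
 * IO_DATAIN.  EOF and errors map to IO_DISCONNECTED and IO_ERROR, a
 * pure timeout to IO_TIMEOUT.
 */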
static void
io_dispatch(int fd, short ev, void *arg)
{
	struct io *io = arg;
	size_t w;
	ssize_t n;
	int saved_errno;

	io_frame_enter(__func__, io, ev);

	if (ev == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if (ev & EV_WRITE && (w = io_queued(io))) {
		if ((n = iobuf_write(&io->iobuf, io->sock)) < 0) {
			if (n == IOBUF_WANT_WRITE) /* kqueue bug? */
				goto read;
			if (n == IOBUF_CLOSED)
				io_callback(io, IO_DISCONNECTED);
			else {
				log_warn("%s: iobuf_write", __func__);
				saved_errno = errno;
				io->error = strerror(errno);
				errno = saved_errno;
				io_callback(io, IO_ERROR);
			}
			goto leave;
		}
		if (w > io->lowat && w - n <= io->lowat)
			io_callback(io, IO_LOWAT);
	}
read:

	if (ev & EV_READ) {
		iobuf_normalize(&io->iobuf);
		if ((n = iobuf_read(&io->iobuf, io->sock)) < 0) {
			if (n == IOBUF_CLOSED)
				io_callback(io, IO_DISCONNECTED);
			else {
				log_warn("%s: iobuf_read", __func__);
				saved_errno = errno;
				io->error = strerror(errno);
				errno = saved_errno;
				io_callback(io, IO_ERROR);
			}
			goto leave;
		}
		if (n)
			io_callback(io, IO_DATAIN);
	}

leave:
	io_frame_leave(io);
}

static void
io_dispatch_connect(int fd, short ev, void *arg)
{
	struct io *io = arg;
	socklen_t sl;
	int r, e;

	io_frame_enter(__func__, io, ev);

	if (ev == EV_TIMEOUT)
		e = ETIMEDOUT;
	else {
		sl = sizeof(e);
		r = getsockopt(fd, SOL_SOCKET, SO_ERROR, &e, &sl);
		if (r == -1) {
			log_warn("%s: getsockopt", __func__);
			e = errno;
		}
		else if (e) {
			errno = e;
			log_warn("%s: (connect)", __func__);
		}
	}

	if (e == 0) {
		io->state = IO_STATE_UP;
		io_callback(io, IO_CONNECTED);
		goto done;
	}

	while (io->ai) {
		r = io_connect_next(io);
		if (r == 0)
			goto done;
		e = errno;
	}

	(void)close(fd);
	io->sock = -1;
	io->error = strerror(e);
	io->state = IO_STATE_DOWN;
	io_callback(io, e == ETIMEDOUT ? IO_TIMEOUT : IO_ERROR);
done:
	io_frame_leave(io);
}

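/*
 * Try the next stream address in the list: create a non-blocking
 * socket, optionally bind it to the requested local address, and start
 * a connect(2) that normally returns EINPROGRESS; completion is then
 * detected by io_dispatch_connect() via EV_WRITE and SO_ERROR.
 */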
static int
io_connect_next(struct io *io)
{
	struct addrinfo *ai;
	struct linger l;
	int saved_errno;

	while ((ai = io->ai)) {
		io->ai = ai->ai_next;
		ai->ai_next = NULL;
		if (ai->ai_socktype == SOCK_STREAM)
			break;
		freeaddrinfo(ai);
	}

	if (ai == NULL) {
		errno = ESOCKTNOSUPPORT;
		log_warn("%s", __func__);
		return -1;
	}

	if ((io->sock = socket(ai->ai_family, ai->ai_socktype | SOCK_NONBLOCK,
	    0)) == -1) {
		log_warn("%s: socket", __func__);
		goto fail;
	}

	memset(&l, 0, sizeof(l));
	if (setsockopt(io->sock, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) == -1) {
		log_warn("%s: setsockopt", __func__);
		goto fail;
	}

	if (io->bind && bind(io->sock, io->bind, io->bind->sa_len) == -1) {
		log_warn("%s: bind", __func__);
		goto fail;
	}

	if (connect(io->sock, ai->ai_addr, ai->ai_addr->sa_len) == -1)
		if (errno != EINPROGRESS) {
			log_warn("%s: connect", __func__);
			goto fail;
		}

	freeaddrinfo(ai);
	io->state = IO_STATE_CONNECT;
	io_reset(io, EV_WRITE, io_dispatch_connect);
	return 0;

fail:
	if (io->sock != -1) {
		saved_errno = errno;
		close(io->sock);
		errno = saved_errno;
		io->error = strerror(errno);
		io->sock = -1;
	}
	freeaddrinfo(ai);
	if (io->ai) {
		freeaddrinfo(io->ai);
		io->ai = NULL;
	}
	return -1;
}

#ifdef IO_SSL

static const char*
io_ssl_error(void)
{
	static char buf[128];
	unsigned long e;

	e = ERR_peek_last_error();
	if (e) {
		ERR_error_string(e, buf);
		return buf;
	}

	return "No SSL error";
}

static void
io_dispatch_accept_tls(int fd, short event, void *arg)
{
	struct io *io = arg;
	int e, ret;

	io_frame_enter(__func__, io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if ((ret = SSL_accept(io->tls)) > 0) {
		io->state = IO_STATE_UP;
		io_callback(io, IO_TLSREADY);
		goto leave;
	}

	switch ((e = SSL_get_error(io->tls, ret))) {
	case SSL_ERROR_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_accept_tls);
		break;
	case SSL_ERROR_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_accept_tls);
		break;
	default:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_accept_tls:SSL_accept");
		io_callback(io, IO_TLSERROR);
		break;
	}

leave:
	io_frame_leave(io);
}

static void
io_dispatch_connect_tls(int fd, short event, void *arg)
{
	struct io *io = arg;
	int e, ret;

	io_frame_enter(__func__, io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if ((ret = SSL_connect(io->tls)) > 0) {
		io->state = IO_STATE_UP;
		io_callback(io, IO_TLSREADY);
		goto leave;
	}

	switch ((e = SSL_get_error(io->tls, ret))) {
	case SSL_ERROR_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_connect_tls);
		break;
	case SSL_ERROR_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_connect_tls);
		break;
	default:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_connect_tls:SSL_connect");
		io_callback(io, IO_TLSERROR);
		break;
	}

leave:
	io_frame_leave(io);
}

static void
io_dispatch_read_tls(int fd, short event, void *arg)
{
	struct io *io = arg;
	int n, saved_errno;

	io_frame_enter(__func__, io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

again:
	iobuf_normalize(&io->iobuf);
	switch ((n = iobuf_read_ssl(&io->iobuf, (SSL*)io->tls))) {
	case IOBUF_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_read_tls);
		break;
	case IOBUF_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_read_tls);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		saved_errno = errno;
		io->error = strerror(errno);
		errno = saved_errno;
		log_warn("%s: iobuf_read_ssl", __func__);
		io_callback(io, IO_ERROR);
		break;
	case IOBUF_SSLERROR:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_read_tls:SSL_read");
		io_callback(io, IO_TLSERROR);
		break;
	default:
		io_debug("%s(...) -> r=%d", __func__, n);
		io_callback(io, IO_DATAIN);
		if (current == io && IO_READING(io) && SSL_pending(io->tls))
			goto again;
	}

leave:
	io_frame_leave(io);
}

static void
io_dispatch_write_tls(int fd, short event, void *arg)
{
	struct io *io = arg;
	size_t w2, w;
	int n, saved_errno;

	io_frame_enter(__func__, io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	w = io_queued(io);
	switch ((n = iobuf_write_ssl(&io->iobuf, (SSL*)io->tls))) {
	case IOBUF_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_write_tls);
		break;
	case IOBUF_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_write_tls);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		saved_errno = errno;
		io->error = strerror(errno);
		errno = saved_errno;
		log_warn("%s: iobuf_write_ssl", __func__);
		io_callback(io, IO_ERROR);
		break;
	case IOBUF_SSLERROR:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_write_tls:SSL_write");
		io_callback(io, IO_TLSERROR);
		break;
	default:
		io_debug("%s(...) -> w=%d", __func__, n);
		w2 = io_queued(io);
		if (w > io->lowat && w2 <= io->lowat)
			io_callback(io, IO_LOWAT);
		break;
	}

leave:
	io_frame_leave(io);
}

static void
io_reload_tls(struct io *io)
{
	short ev = 0;
	void (*dispatch)(int, short, void*) = NULL;

	switch (io->state) {
	case IO_STATE_CONNECT_TLS:
		ev = EV_WRITE;
		dispatch = io_dispatch_connect_tls;
		break;
	case IO_STATE_ACCEPT_TLS:
		ev = EV_READ;
		dispatch = io_dispatch_accept_tls;
		break;
	case IO_STATE_UP:
		ev = 0;
		if (IO_READING(io) && !(io->flags & IO_PAUSE_IN)) {
			ev = EV_READ;
			dispatch = io_dispatch_read_tls;
		}
		else if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) &&
		    io_queued(io)) {
			ev = EV_WRITE;
			dispatch = io_dispatch_write_tls;
		}
		if (!ev)
			return; /* paused */
		break;
	default:
		fatalx("%s: unexpected state %d", __func__, io->state);
	}

	io_reset(io, ev, dispatch);
}

#endif /* IO_SSL */