/*	$NetBSD: listener.c,v 1.6 2020/05/25 20:47:33 christos Exp $	*/

/*
 * Copyright (c) 2009-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#include <sys/types.h>

#ifdef _WIN32
#ifndef _WIN32_WINNT
/* Minimum required for InitializeCriticalSectionAndSpinCount */
#define _WIN32_WINNT 0x0403
#endif
#include <winsock2.h>
#include <ws2tcpip.h>
#include <mswsock.h>
#endif
#include <errno.h>
#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef EVENT__HAVE_FCNTL_H
#include <fcntl.h>
#endif
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif

#include "event2/listener.h"
#include "event2/util.h"
#include "event2/event.h"
#include "event2/event_struct.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "log-internal.h"
#include "evthread-internal.h"
#ifdef _WIN32
#include "iocp-internal.h"
#include "defer-internal.h"
#include "event-internal.h"
#endif

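/*
 * An evconnlistener dispatches through a small table of backend
 * operations.  Two backends are defined in this file: the generic
 * event-based one (evconnlistener_event_ops), and, on Windows, an
 * IOCP/AcceptEx one (evconnlistener_iocp_ops) used when the event_base
 * has an IOCP port.
 */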
struct evconnlistener_ops {
	int (*enable)(struct evconnlistener *);
	int (*disable)(struct evconnlistener *);
	void (*destroy)(struct evconnlistener *);
	void (*shutdown)(struct evconnlistener *);
	evutil_socket_t (*getfd)(struct evconnlistener *);
	struct event_base *(*getbase)(struct evconnlistener *);
};

struct evconnlistener {
	const struct evconnlistener_ops *ops;
	void *lock;
	evconnlistener_cb cb;
	evconnlistener_errorcb errorcb;
	void *user_data;
	unsigned flags;
	short refcnt;
	int accept4_flags;
	unsigned enabled : 1;
};

struct evconnlistener_event {
	struct evconnlistener base;
	struct event listener;
};

#ifdef _WIN32
struct evconnlistener_iocp {
	struct evconnlistener base;
	evutil_socket_t fd;
	struct event_base *event_base;
	struct event_iocp_port *port;
	short n_accepting;
	unsigned shutting_down : 1;
	unsigned event_added : 1;
	struct accepting_socket **accepting;
};
#endif

#define LOCK(listener) EVLOCK_LOCK((listener)->lock, 0)
#define UNLOCK(listener) EVLOCK_UNLOCK((listener)->lock, 0)

struct evconnlistener *
evconnlistener_new_async(struct event_base *base,
    evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
    evutil_socket_t fd); /* XXXX export this? */

static int event_listener_enable(struct evconnlistener *);
static int event_listener_disable(struct evconnlistener *);
static void event_listener_destroy(struct evconnlistener *);
static evutil_socket_t event_listener_getfd(struct evconnlistener *);
static struct event_base *event_listener_getbase(struct evconnlistener *);

#if 0
static void
listener_incref_and_lock(struct evconnlistener *listener)
{
	LOCK(listener);
	++listener->refcnt;
}
#endif

static int
listener_decref_and_unlock(struct evconnlistener *listener)
{
	int refcnt = --listener->refcnt;
	if (refcnt == 0) {
		listener->ops->destroy(listener);
		UNLOCK(listener);
		EVTHREAD_FREE_LOCK(listener->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		mm_free(listener);
		return 1;
	} else {
		UNLOCK(listener);
		return 0;
	}
}

static const struct evconnlistener_ops evconnlistener_event_ops = {
	event_listener_enable,
	event_listener_disable,
	event_listener_destroy,
	NULL, /* shutdown */
	event_listener_getfd,
	event_listener_getbase
};

static void listener_read_cb(evutil_socket_t, short, void *);

struct evconnlistener *
evconnlistener_new(struct event_base *base,
    evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
    evutil_socket_t fd)
{
	struct evconnlistener_event *lev;

#ifdef _WIN32
	if (base && event_base_get_iocp_(base)) {
		const struct win32_extension_fns *ext =
			event_get_win32_extension_fns_();
		if (ext->AcceptEx && ext->GetAcceptExSockaddrs)
			return evconnlistener_new_async(base, cb, ptr, flags,
				backlog, fd);
	}
#endif

	if (backlog > 0) {
		if (listen(fd, backlog) < 0)
			return NULL;
	} else if (backlog < 0) {
		if (listen(fd, 128) < 0)
			return NULL;
	}

	lev = mm_calloc(1, sizeof(struct evconnlistener_event));
	if (!lev)
		return NULL;

	lev->base.ops = &evconnlistener_event_ops;
	lev->base.cb = cb;
	lev->base.user_data = ptr;
	lev->base.flags = flags;
	lev->base.refcnt = 1;

	lev->base.accept4_flags = 0;
	if (!(flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING))
		lev->base.accept4_flags |= EVUTIL_SOCK_NONBLOCK;
	if (flags & LEV_OPT_CLOSE_ON_EXEC)
		lev->base.accept4_flags |= EVUTIL_SOCK_CLOEXEC;

	if (flags & LEV_OPT_THREADSAFE) {
		EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	}

	event_assign(&lev->listener, base, fd, EV_READ|EV_PERSIST,
	    listener_read_cb, lev);

	if (!(flags & LEV_OPT_DISABLED))
	    evconnlistener_enable(&lev->base);

	return &lev->base;
}

struct evconnlistener *
evconnlistener_new_bind(struct event_base *base, evconnlistener_cb cb,
    void *ptr, unsigned flags, int backlog, const struct sockaddr *sa,
    int socklen)
{
	struct evconnlistener *listener;
	evutil_socket_t fd;
	int on = 1;
	int family = sa ? sa->sa_family : AF_UNSPEC;
	int socktype = SOCK_STREAM | EVUTIL_SOCK_NONBLOCK;

	if (backlog == 0)
		return NULL;

	if (flags & LEV_OPT_CLOSE_ON_EXEC)
		socktype |= EVUTIL_SOCK_CLOEXEC;

	fd = evutil_socket_(family, socktype, 0);
	if (fd == -1)
		return NULL;

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void*)&on, sizeof(on))<0)
		goto err;

	if (flags & LEV_OPT_REUSEABLE) {
		if (evutil_make_listen_socket_reuseable(fd) < 0)
			goto err;
	}

	if (flags & LEV_OPT_REUSEABLE_PORT) {
		if (evutil_make_listen_socket_reuseable_port(fd) < 0)
			goto err;
	}

	if (flags & LEV_OPT_DEFERRED_ACCEPT) {
		if (evutil_make_tcp_listen_socket_deferred(fd) < 0)
			goto err;
	}

	if (sa) {
		if (bind(fd, sa, socklen)<0)
			goto err;
	}

	listener = evconnlistener_new(base, cb, ptr, flags, backlog, fd);
	if (!listener)
		goto err;

	return listener;
err:
	evutil_closesocket(fd);
	return NULL;
}
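
/*
 * A minimal usage sketch (illustrative only, not part of this file).
 * example_accept_cb, example_main, and the address 127.0.0.1:9995 are
 * made-up names and values for the example; a real server would hand
 * the accepted fd to a bufferevent instead of closing it.
 *
 *	#include <sys/socket.h>
 *	#include <event2/event.h>
 *	#include <event2/listener.h>
 *	#include <event2/util.h>
 *
 *	static void
 *	example_accept_cb(struct evconnlistener *listener, evutil_socket_t fd,
 *	    struct sockaddr *addr, int socklen, void *ctx)
 *	{
 *		evutil_closesocket(fd);
 *	}
 *
 *	int
 *	example_main(void)
 *	{
 *		struct event_base *base = event_base_new();
 *		struct evconnlistener *listener;
 *		struct sockaddr_storage ss;
 *		int slen = sizeof(ss);
 *
 *		if (!base)
 *			return 1;
 *		if (evutil_parse_sockaddr_port("127.0.0.1:9995",
 *		    (struct sockaddr *)&ss, &slen) < 0)
 *			return 1;
 *
 *		listener = evconnlistener_new_bind(base, example_accept_cb,
 *		    NULL, LEV_OPT_CLOSE_ON_FREE | LEV_OPT_REUSEABLE, -1,
 *		    (struct sockaddr *)&ss, slen);
 *		if (!listener)
 *			return 1;
 *
 *		event_base_dispatch(base);
 *		evconnlistener_free(listener);
 *		event_base_free(base);
 *		return 0;
 *	}
 */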

void
evconnlistener_free(struct evconnlistener *lev)
{
	LOCK(lev);
	lev->cb = NULL;
	lev->errorcb = NULL;
	if (lev->ops->shutdown)
		lev->ops->shutdown(lev);
	listener_decref_and_unlock(lev);
}

static void
event_listener_destroy(struct evconnlistener *lev)
{
	struct evconnlistener_event *lev_e =
	    EVUTIL_UPCAST(lev, struct evconnlistener_event, base);

	event_del(&lev_e->listener);
	if (lev->flags & LEV_OPT_CLOSE_ON_FREE)
		evutil_closesocket(event_get_fd(&lev_e->listener));
	event_debug_unassign(&lev_e->listener);
}

int
evconnlistener_enable(struct evconnlistener *lev)
{
	int r;
	LOCK(lev);
	lev->enabled = 1;
	if (lev->cb)
		r = lev->ops->enable(lev);
	else
		r = 0;
	UNLOCK(lev);
	return r;
}

int
evconnlistener_disable(struct evconnlistener *lev)
{
	int r;
	LOCK(lev);
	lev->enabled = 0;
	r = lev->ops->disable(lev);
	UNLOCK(lev);
	return r;
}

static int
event_listener_enable(struct evconnlistener *lev)
{
	struct evconnlistener_event *lev_e =
	    EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
	return event_add(&lev_e->listener, NULL);
}

static int
event_listener_disable(struct evconnlistener *lev)
{
	struct evconnlistener_event *lev_e =
	    EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
	return event_del(&lev_e->listener);
}

evutil_socket_t
evconnlistener_get_fd(struct evconnlistener *lev)
{
	evutil_socket_t fd;
	LOCK(lev);
	fd = lev->ops->getfd(lev);
	UNLOCK(lev);
	return fd;
}

static evutil_socket_t
event_listener_getfd(struct evconnlistener *lev)
{
	struct evconnlistener_event *lev_e =
	    EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
	return event_get_fd(&lev_e->listener);
}

struct event_base *
evconnlistener_get_base(struct evconnlistener *lev)
{
	struct event_base *base;
	LOCK(lev);
	base = lev->ops->getbase(lev);
	UNLOCK(lev);
	return base;
}

static struct event_base *
event_listener_getbase(struct evconnlistener *lev)
{
	struct evconnlistener_event *lev_e =
	    EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
	return event_get_base(&lev_e->listener);
}

void
evconnlistener_set_cb(struct evconnlistener *lev,
    evconnlistener_cb cb, void *arg)
{
	int enable = 0;
	LOCK(lev);
	if (lev->enabled && !lev->cb)
		enable = 1;
	lev->cb = cb;
	lev->user_data = arg;
	if (enable)
		evconnlistener_enable(lev);
	UNLOCK(lev);
}

void
evconnlistener_set_error_cb(struct evconnlistener *lev,
    evconnlistener_errorcb errorcb)
{
	LOCK(lev);
	lev->errorcb = errorcb;
	UNLOCK(lev);
}

static void
listener_read_cb(evutil_socket_t fd, short what, void *p)
{
	struct evconnlistener *lev = p;
	int err;
	evconnlistener_cb cb;
	evconnlistener_errorcb errorcb;
	void *user_data;
	LOCK(lev);
	while (1) {
		struct sockaddr_storage ss;
		ev_socklen_t socklen = sizeof(ss);
		evutil_socket_t new_fd = evutil_accept4_(fd, (struct sockaddr*)&ss, &socklen, lev->accept4_flags);
		if (new_fd < 0)
			break;
		if (socklen == 0) {
			/* This can happen with some older linux kernels in
			 * response to nmap. */
			evutil_closesocket(new_fd);
			continue;
		}

		if (lev->cb == NULL) {
			evutil_closesocket(new_fd);
			UNLOCK(lev);
			return;
		}
		/* Hold a temporary reference so the listener survives the
		 * user callback, which may call evconnlistener_free(). */
		++lev->refcnt;
		cb = lev->cb;
		user_data = lev->user_data;
		UNLOCK(lev);
		cb(lev, new_fd, (struct sockaddr*)&ss, (int)socklen,
		    user_data);
		LOCK(lev);
		if (lev->refcnt == 1) {
			/* Only our temporary reference remains: the callback
			 * dropped the last user reference, so release ours
			 * and stop. */
			int freed = listener_decref_and_unlock(lev);
			EVUTIL_ASSERT(freed);

			evutil_closesocket(new_fd);
			return;
		}
		--lev->refcnt;
	}
	err = evutil_socket_geterror(fd);
	if (EVUTIL_ERR_ACCEPT_RETRIABLE(err)) {
		UNLOCK(lev);
		return;
	}
	if (lev->errorcb != NULL) {
		++lev->refcnt;
		errorcb = lev->errorcb;
		user_data = lev->user_data;
		UNLOCK(lev);
		errorcb(lev, user_data);
		LOCK(lev);
		listener_decref_and_unlock(lev);
	} else {
		event_sock_warn(fd, "Error from accept() call");
	}
}

#ifdef _WIN32
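/*
 * IOCP (AcceptEx) backend.  Each accepting_socket below tracks one
 * outstanding overlapped AcceptEx call: start_accepting() creates the
 * accept socket and posts AcceptEx, accepted_socket_cb() runs when the
 * IOCP port reports completion, and accepted_socket_invoke_user_cb()
 * runs as a deferred callback to hand the connection to the user and
 * re-arm the accept.
 */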
struct accepting_socket {
	CRITICAL_SECTION lock;
	struct event_overlapped overlapped;
	SOCKET s;
	int error;
	struct event_callback deferred;
	struct evconnlistener_iocp *lev;
	ev_uint8_t buflen;
	ev_uint8_t family;
	unsigned free_on_cb:1;
	char addrbuf[1];
};

static void accepted_socket_cb(struct event_overlapped *o, ev_uintptr_t key,
    ev_ssize_t n, int ok);
static void accepted_socket_invoke_user_cb(struct event_callback *cb, void *arg);

static void
iocp_listener_event_add(struct evconnlistener_iocp *lev)
{
	if (lev->event_added)
		return;

	lev->event_added = 1;
	event_base_add_virtual_(lev->event_base);
}

static void
iocp_listener_event_del(struct evconnlistener_iocp *lev)
{
	if (!lev->event_added)
		return;

	lev->event_added = 0;
	event_base_del_virtual_(lev->event_base);
}

static struct accepting_socket *
new_accepting_socket(struct evconnlistener_iocp *lev, int family)
{
	struct accepting_socket *res;
	int addrlen;
	int buflen;

	if (family == AF_INET)
		addrlen = sizeof(struct sockaddr_in);
	else if (family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
	else
		return NULL;
	/* AcceptEx writes both the local and the remote address into the
	 * output buffer, and requires 16 extra bytes of padding for each. */
	buflen = (addrlen+16)*2;

	res = mm_calloc(1,sizeof(struct accepting_socket)-1+buflen);
	if (!res)
		return NULL;

	event_overlapped_init_(&res->overlapped, accepted_socket_cb);
	res->s = INVALID_SOCKET;
	res->lev = lev;
	res->buflen = buflen;
	res->family = family;

	event_deferred_cb_init_(&res->deferred,
	    event_base_get_npriorities(lev->event_base) / 2,
	    accepted_socket_invoke_user_cb, res);

	InitializeCriticalSectionAndSpinCount(&res->lock, 1000);

	return res;
}

static void
free_and_unlock_accepting_socket(struct accepting_socket *as)
{
	/* requires lock. */
	if (as->s != INVALID_SOCKET)
		closesocket(as->s);

	LeaveCriticalSection(&as->lock);
	DeleteCriticalSection(&as->lock);
	mm_free(as);
}

static int
start_accepting(struct accepting_socket *as)
{
	/* requires lock */
	const struct win32_extension_fns *ext = event_get_win32_extension_fns_();
	DWORD pending = 0;
	SOCKET s = socket(as->family, SOCK_STREAM, 0);
	int error = 0;

	if (!as->lev->base.enabled)
		return 0;

	if (s == INVALID_SOCKET) {
		error = WSAGetLastError();
		goto report_err;
	}

	/* XXXX It turns out we need to do this again later.  Does this call
	 * have any effect? */
	setsockopt(s, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
	    (char *)&as->lev->fd, sizeof(&as->lev->fd));

	if (!(as->lev->base.flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING))
		evutil_make_socket_nonblocking(s);

	if (event_iocp_port_associate_(as->lev->port, s, 1) < 0) {
		closesocket(s);
		return -1;
	}

	as->s = s;

	if (ext->AcceptEx(as->lev->fd, s, as->addrbuf, 0,
		as->buflen/2, as->buflen/2, &pending, &as->overlapped.overlapped))
	{
		/* Immediate success! */
		accepted_socket_cb(&as->overlapped, 1, 0, 1);
	} else {
		error = WSAGetLastError();
		if (error != ERROR_IO_PENDING) {
			goto report_err;
		}
	}

	return 0;

report_err:
	as->error = error;
	event_deferred_cb_schedule_(
		as->lev->event_base,
		&as->deferred);
	return 0;
}

static void
stop_accepting(struct accepting_socket *as)
{
	/* requires lock. */
	SOCKET s = as->s;
	as->s = INVALID_SOCKET;
	closesocket(s);
}

static void
accepted_socket_invoke_user_cb(struct event_callback *dcb, void *arg)
{
	struct accepting_socket *as = arg;

	struct sockaddr *sa_local=NULL, *sa_remote=NULL;
	int socklen_local=0, socklen_remote=0;
	const struct win32_extension_fns *ext = event_get_win32_extension_fns_();
	struct evconnlistener *lev = &as->lev->base;
	evutil_socket_t sock=-1;
	void *data;
	evconnlistener_cb cb=NULL;
	evconnlistener_errorcb errorcb=NULL;
	int error;

	EVUTIL_ASSERT(ext->GetAcceptExSockaddrs);

	LOCK(lev);
	EnterCriticalSection(&as->lock);
	if (as->free_on_cb) {
		free_and_unlock_accepting_socket(as);
		listener_decref_and_unlock(lev);
		return;
	}

	++lev->refcnt;

	error = as->error;
	if (error) {
		as->error = 0;
		errorcb = lev->errorcb;
	} else {
		ext->GetAcceptExSockaddrs(
			as->addrbuf, 0, as->buflen/2, as->buflen/2,
			&sa_local, &socklen_local, &sa_remote,
			&socklen_remote);
		sock = as->s;
		cb = lev->cb;
		as->s = INVALID_SOCKET;

		/* We need to call this so getsockname, getpeername, and
		 * shutdown work correctly on the accepted socket. */
		/* XXXX handle error? */
		setsockopt(sock, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
		    (char *)&as->lev->fd, sizeof(&as->lev->fd));
	}
	data = lev->user_data;

	LeaveCriticalSection(&as->lock);
	UNLOCK(lev);

	if (errorcb) {
		WSASetLastError(error);
		errorcb(lev, data);
	} else if (cb) {
		cb(lev, sock, sa_remote, socklen_remote, data);
	}

	LOCK(lev);
	if (listener_decref_and_unlock(lev))
		return;

	EnterCriticalSection(&as->lock);
	start_accepting(as);
	LeaveCriticalSection(&as->lock);
}

static void
accepted_socket_cb(struct event_overlapped *o, ev_uintptr_t key, ev_ssize_t n, int ok)
{
	struct accepting_socket *as =
	    EVUTIL_UPCAST(o, struct accepting_socket, overlapped);

	LOCK(&as->lev->base);
	EnterCriticalSection(&as->lock);
	if (ok) {
		/* XXXX Don't do this if some EV_MT flag is set. */
		event_deferred_cb_schedule_(
			as->lev->event_base,
			&as->deferred);
		LeaveCriticalSection(&as->lock);
	} else if (as->free_on_cb) {
		struct evconnlistener *lev = &as->lev->base;
		free_and_unlock_accepting_socket(as);
		listener_decref_and_unlock(lev);
		return;
	} else if (as->s == INVALID_SOCKET) {
		/* This is okay; we were disabled by iocp_listener_disable. */
		LeaveCriticalSection(&as->lock);
	} else {
		/* Some error on accept that we couldn't actually handle. */
		BOOL ok;
		DWORD transfer = 0, flags=0;
		event_sock_warn(as->s, "Unexpected error on AcceptEx");
		ok = WSAGetOverlappedResult(as->s, &o->overlapped,
		    &transfer, FALSE, &flags);
		if (ok) {
			/* well, that was confusing! */
			as->error = 1;
		} else {
			as->error = WSAGetLastError();
		}
		event_deferred_cb_schedule_(
			as->lev->event_base,
			&as->deferred);
		LeaveCriticalSection(&as->lock);
	}
	UNLOCK(&as->lev->base);
}

static int
iocp_listener_enable(struct evconnlistener *lev)
{
	int i;
	struct evconnlistener_iocp *lev_iocp =
	    EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);

	LOCK(lev);
	iocp_listener_event_add(lev_iocp);
	for (i = 0; i < lev_iocp->n_accepting; ++i) {
		struct accepting_socket *as = lev_iocp->accepting[i];
		if (!as)
			continue;
		EnterCriticalSection(&as->lock);
		if (!as->free_on_cb && as->s == INVALID_SOCKET)
			start_accepting(as);
		LeaveCriticalSection(&as->lock);
	}
	UNLOCK(lev);
	return 0;
}

static int
iocp_listener_disable_impl(struct evconnlistener *lev, int shutdown)
{
	int i;
	struct evconnlistener_iocp *lev_iocp =
	    EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);

	LOCK(lev);
	iocp_listener_event_del(lev_iocp);
	for (i = 0; i < lev_iocp->n_accepting; ++i) {
		struct accepting_socket *as = lev_iocp->accepting[i];
		if (!as)
			continue;
		EnterCriticalSection(&as->lock);
		if (!as->free_on_cb && as->s != INVALID_SOCKET) {
			if (shutdown)
				as->free_on_cb = 1;
			stop_accepting(as);
		}
		LeaveCriticalSection(&as->lock);
	}

	if (shutdown && lev->flags & LEV_OPT_CLOSE_ON_FREE)
		evutil_closesocket(lev_iocp->fd);

	UNLOCK(lev);
	return 0;
}

static int
iocp_listener_disable(struct evconnlistener *lev)
{
	return iocp_listener_disable_impl(lev,0);
}

static void
iocp_listener_destroy(struct evconnlistener *lev)
{
	struct evconnlistener_iocp *lev_iocp =
	    EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);

	if (! lev_iocp->shutting_down) {
		lev_iocp->shutting_down = 1;
		iocp_listener_disable_impl(lev,1);
	}
}

static evutil_socket_t
iocp_listener_getfd(struct evconnlistener *lev)
{
	struct evconnlistener_iocp *lev_iocp =
	    EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
	return lev_iocp->fd;
}
static struct event_base *
iocp_listener_getbase(struct evconnlistener *lev)
{
	struct evconnlistener_iocp *lev_iocp =
	    EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
	return lev_iocp->event_base;
}

static const struct evconnlistener_ops evconnlistener_iocp_ops = {
	iocp_listener_enable,
	iocp_listener_disable,
	iocp_listener_destroy,
	iocp_listener_destroy, /* shutdown */
	iocp_listener_getfd,
	iocp_listener_getbase
};

/* XXX define some way to override this. */
#define N_SOCKETS_PER_LISTENER 4

struct evconnlistener *
evconnlistener_new_async(struct event_base *base,
    evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
    evutil_socket_t fd)
{
	struct sockaddr_storage ss;
	int socklen = sizeof(ss);
	struct evconnlistener_iocp *lev;
	int i;

	flags |= LEV_OPT_THREADSAFE;

	if (!base || !event_base_get_iocp_(base))
		goto err;

	/* XXXX duplicate code */
	if (backlog > 0) {
		if (listen(fd, backlog) < 0)
			goto err;
	} else if (backlog < 0) {
		if (listen(fd, 128) < 0)
			goto err;
	}
	if (getsockname(fd, (struct sockaddr*)&ss, &socklen)) {
		event_sock_warn(fd, "getsockname");
		goto err;
	}
	lev = mm_calloc(1, sizeof(struct evconnlistener_iocp));
	if (!lev) {
		event_warn("calloc");
		goto err;
	}
	lev->base.ops = &evconnlistener_iocp_ops;
	lev->base.cb = cb;
	lev->base.user_data = ptr;
	lev->base.flags = flags;
	lev->base.refcnt = 1;
	lev->base.enabled = 1;

	lev->port = event_base_get_iocp_(base);
	lev->fd = fd;
	lev->event_base = base;

	if (event_iocp_port_associate_(lev->port, fd, 1) < 0)
		goto err_free_lev;

	EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);

	lev->n_accepting = N_SOCKETS_PER_LISTENER;
	lev->accepting = mm_calloc(lev->n_accepting,
	    sizeof(struct accepting_socket *));
	if (!lev->accepting) {
		event_warn("calloc");
		goto err_delete_lock;
	}
	for (i = 0; i < lev->n_accepting; ++i) {
		lev->accepting[i] = new_accepting_socket(lev, ss.ss_family);
		if (!lev->accepting[i]) {
			event_warnx("Couldn't create accepting socket");
			goto err_free_accepting;
		}
		if (cb && start_accepting(lev->accepting[i]) < 0) {
			event_warnx("Couldn't start accepting on socket");
			EnterCriticalSection(&lev->accepting[i]->lock);
			free_and_unlock_accepting_socket(lev->accepting[i]);
			goto err_free_accepting;
		}
		++lev->base.refcnt;
	}

	iocp_listener_event_add(lev);

	return &lev->base;

err_free_accepting:
	mm_free(lev->accepting);
	/* XXXX free the other elements. */
err_delete_lock:
	EVTHREAD_FREE_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
err_free_lev:
	mm_free(lev);
err:
	/* Don't close the fd, it is caller's responsibility. */
	return NULL;
}

#endif