xref: /minix/external/bsd/dhcpcd/dist/eloop.c (revision 9f20bfa6)
#include <sys/cdefs.h>
__RCSID("$NetBSD: eloop.c,v 1.11 2015/05/16 23:31:32 roy Exp $");

/*
 * dhcpcd - DHCP client daemon
 * Copyright (c) 2006-2015 Roy Marples <roy@marples.name>
 * All rights reserved

 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/time.h>

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* config.h should define HAVE_KQUEUE, HAVE_EPOLL, etc */
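/* For example, a NetBSD build would typically define HAVE_KQUEUE (and
 * possibly HAVE_KQUEUE1), while a Linux build would define HAVE_EPOLL;
 * with neither defined, the poll(2)/pselect(2) fallback below is
 * compiled in.  (Illustrative only; the exact contents of config.h are
 * generated per platform by dhcpcd's configure.) */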
#include "config.h"
#include "eloop.h"

#ifndef UNUSED
#define UNUSED(a) (void)((a))
#endif
#ifndef __unused
#ifdef __GNUC__
#define __unused   __attribute__((__unused__))
#else
#define __unused
#endif
#endif

#ifndef MSEC_PER_SEC
#define MSEC_PER_SEC	1000L
#define NSEC_PER_MSEC	1000000L
#endif

#if defined(HAVE_KQUEUE)
#include <sys/event.h>
#include <fcntl.h>
#ifdef __NetBSD__
/* udata is void * except on NetBSD;
 * lengths are int except on NetBSD. */
#define UPTR(x)	((intptr_t)(x))
#define LENC(x)	(x)
#else
#define UPTR(x)	(x)
#define LENC(x)	((int)(x))
#endif
#define eloop_event_setup_fds(eloop)
#elif defined(HAVE_EPOLL)
#include <sys/epoll.h>
#define eloop_event_setup_fds(eloop)
#else
#include <poll.h>
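/* Rebuild the pollfd array from the current event list so that
 * poll(2) watches the right descriptors; kqueue and epoll track
 * events in the kernel, hence the no-op macros above. */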
static void
eloop_event_setup_fds(struct eloop *eloop)
{
	struct eloop_event *e;
	size_t i;

	i = 0;
	TAILQ_FOREACH(e, &eloop->events, next) {
		eloop->fds[i].fd = e->fd;
		eloop->fds[i].events = 0;
		if (e->read_cb)
			eloop->fds[i].events |= POLLIN;
		if (e->write_cb)
			eloop->fds[i].events |= POLLOUT;
		eloop->fds[i].revents = 0;
		e->pollfd = &eloop->fds[i];
		i++;
	}
}

#ifndef pollts
/* Wrapper around pselect, to imitate the NetBSD pollts call. */
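/* Note: this emulation only checks POLLIN; POLLOUT events requested by
 * eloop_event_setup_fds() are not reported through this path. */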
#if !defined(__minix)
static int
#else /* defined(__minix) */
int
#endif /* defined(__minix) */
pollts(struct pollfd * fds, nfds_t nfds,
    const struct timespec *ts, const sigset_t *sigmask)
{
	fd_set read_fds;
	nfds_t n;
	int maxfd, r;
#if defined(__minix)
	sigset_t omask;
	struct timeval tv, *tvp;
#endif /* defined(__minix) */

	FD_ZERO(&read_fds);
	maxfd = 0;
	for (n = 0; n < nfds; n++) {
		if (fds[n].events & POLLIN) {
			FD_SET(fds[n].fd, &read_fds);
			if (fds[n].fd > maxfd)
				maxfd = fds[n].fd;
		}
	}

#if !defined(__minix)
	r = pselect(maxfd + 1, &read_fds, NULL, NULL, ts, sigmask);
#else /* defined(__minix) */
	/* XXX FIXME - horrible hack with race condition */
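	/* (A signal arriving between sigprocmask() and select() is handled
	 * before select() starts waiting, so it cannot interrupt the wait;
	 * pselect() performs the mask switch atomically to avoid exactly
	 * this window.) */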
	sigprocmask(SIG_SETMASK, sigmask, &omask);
	if (ts != NULL) {
		tv.tv_sec = ts->tv_sec;
		tv.tv_usec = ts->tv_nsec / 1000;
		tvp = &tv;
	} else
		tvp = NULL;
	r = select(maxfd + 1, &read_fds, NULL, NULL, tvp);
	sigprocmask(SIG_SETMASK, &omask, NULL);
#endif /* defined(__minix) */
	if (r > 0) {
		for (n = 0; n < nfds; n++) {
			fds[n].revents =
			    FD_ISSET(fds[n].fd, &read_fds) ? POLLIN : 0;
		}
	}

	return r;
}
#endif
#endif

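/*
 * Register read/write callbacks for fd.  Only one event may monitor a
 * given fd; adding an fd that is already watched updates its callbacks
 * in place instead of adding a second event.
 */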
int
eloop_event_add(struct eloop *eloop, int fd,
    void (*read_cb)(void *), void *read_cb_arg,
    void (*write_cb)(void *), void *write_cb_arg)
{
	struct eloop_event *e;
#if defined(HAVE_KQUEUE)
	struct kevent ke[2];
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#else
	struct pollfd *nfds;
#endif

	assert(eloop != NULL);
	assert(read_cb != NULL || write_cb != NULL);
	if (fd == -1) {
		errno = EINVAL;
		return -1;
	}

#ifdef HAVE_EPOLL
	memset(&epe, 0, sizeof(epe));
	epe.data.fd = fd;
	epe.events = EPOLLIN;
	if (write_cb)
		epe.events |= EPOLLOUT;
#endif

	/* We should only have one callback monitoring the fd */
	TAILQ_FOREACH(e, &eloop->events, next) {
		if (e->fd == fd) {
			int error;

#if defined(HAVE_KQUEUE)
			EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ, EV_ADD,
			    0, 0, UPTR(e));
			if (write_cb)
				EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
				    EV_ADD, 0, 0, UPTR(e));
			else if (e->write_cb)
				EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
				    EV_DELETE, 0, 0, UPTR(e));
			error = kevent(eloop->poll_fd, ke,
			    e->write_cb || write_cb ? 2 : 1, NULL, 0, NULL);
#elif defined(HAVE_EPOLL)
			epe.data.ptr = e;
			error = epoll_ctl(eloop->poll_fd, EPOLL_CTL_MOD,
			    fd, &epe);
#else
			error = 0;
#endif
			if (read_cb) {
				e->read_cb = read_cb;
				e->read_cb_arg = read_cb_arg;
			}
			if (write_cb) {
				e->write_cb = write_cb;
				e->write_cb_arg = write_cb_arg;
			}
			eloop_event_setup_fds(eloop);
			return error;
		}
	}

	/* Allocate a new event if no free ones already allocated */
	if ((e = TAILQ_FIRST(&eloop->free_events))) {
		TAILQ_REMOVE(&eloop->free_events, e, next);
	} else {
		e = malloc(sizeof(*e));
		if (e == NULL)
			goto err;
	}

	/* Ensure we can actually listen to it */
	eloop->events_len++;
#if !defined(HAVE_KQUEUE) && !defined(HAVE_EPOLL)
	if (eloop->events_len > eloop->fds_len) {
		nfds = realloc(eloop->fds,
		    sizeof(*eloop->fds) * (eloop->fds_len + 5));
		if (nfds == NULL)
			goto err;
		eloop->fds_len += 5;
		eloop->fds = nfds;
	}
#endif

	/* Now populate the structure and add it to the list */
	e->fd = fd;
	e->read_cb = read_cb;
	e->read_cb_arg = read_cb_arg;
	e->write_cb = write_cb;
	e->write_cb_arg = write_cb_arg;

#if defined(HAVE_KQUEUE)
	/* Submit only the entries initialised here; with a write-only
	 * callback the write event occupies ke[0], so an uninitialised
	 * entry is never passed to kevent(). */
	if (read_cb != NULL)
		EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ,
		    EV_ADD, 0, 0, UPTR(e));
	if (write_cb != NULL)
		EV_SET(&ke[read_cb != NULL ? 1 : 0], (uintptr_t)fd,
		    EVFILT_WRITE, EV_ADD, 0, 0, UPTR(e));
	if (kevent(eloop->poll_fd, ke,
	    read_cb != NULL && write_cb != NULL ? 2 : 1, NULL, 0, NULL) == -1)
		goto err;
#elif defined(HAVE_EPOLL)
	epe.data.ptr = e;
	if (epoll_ctl(eloop->poll_fd, EPOLL_CTL_ADD, fd, &epe) == -1)
		goto err;
#endif

	/* The order of events should not matter.
	 * However, some PPP servers love to close the link right after
	 * sending their final message. So to ensure dhcpcd processes this
	 * message (which is likely to be that the DHCP addresses are wrong)
	 * we insert new events at the queue head as the link fd will be
	 * the first event added. */
	TAILQ_INSERT_HEAD(&eloop->events, e, next);
	eloop_event_setup_fds(eloop);
	return 0;

err:
	if (e) {
		eloop->events_len--;
		TAILQ_INSERT_TAIL(&eloop->free_events, e, next);
	}
	return -1;
}

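/*
 * Stop watching fd.  With write_only set (and a read callback still
 * present) only the write callback is removed; otherwise the whole
 * event is deleted and recycled onto the free list.
 */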
void
eloop_event_delete_write(struct eloop *eloop, int fd, int write_only)
{
	struct eloop_event *e;
#if defined(HAVE_KQUEUE)
	struct kevent ke[2];
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#endif

	assert(eloop != NULL);

	TAILQ_FOREACH(e, &eloop->events, next) {
		if (e->fd == fd) {
			if (write_only && e->read_cb != NULL) {
				if (e->write_cb != NULL) {
					e->write_cb = NULL;
					e->write_cb_arg = NULL;
#if defined(HAVE_KQUEUE)
					EV_SET(&ke[0], (uintptr_t)fd,
					    EVFILT_WRITE, EV_DELETE,
					    0, 0, UPTR(NULL));
					kevent(eloop->poll_fd, ke, 1, NULL, 0,
					    NULL);
#elif defined(HAVE_EPOLL)
					memset(&epe, 0, sizeof(epe));
					epe.data.fd = e->fd;
					epe.data.ptr = e;
					epe.events = EPOLLIN;
					epoll_ctl(eloop->poll_fd,
					    EPOLL_CTL_MOD, fd, &epe);
#endif
				}
			} else {
				TAILQ_REMOVE(&eloop->events, e, next);
#if defined(HAVE_KQUEUE)
				EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ,
				    EV_DELETE, 0, 0, UPTR(NULL));
				if (e->write_cb)
					EV_SET(&ke[1], (uintptr_t)fd,
					    EVFILT_WRITE, EV_DELETE,
					    0, 0, UPTR(NULL));
				kevent(eloop->poll_fd, ke, e->write_cb ? 2 : 1,
				    NULL, 0, NULL);
#elif defined(HAVE_EPOLL)
				/* A NULL event is safe here: we rely on
				 * epoll_pwait, which was added to the kernel
				 * after the bug requiring a non-NULL event
				 * for EPOLL_CTL_DEL was fixed. */
				epoll_ctl(eloop->poll_fd, EPOLL_CTL_DEL,
				    fd, NULL);
#endif
				TAILQ_INSERT_TAIL(&eloop->free_events, e, next);
				eloop->events_len--;
			}
			eloop_event_setup_fds(eloop);
			break;
		}
	}
}

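/*
 * Arm a timeout: callback(arg) will run `when` (relative to now) on the
 * given queue.  An existing timeout with the same callback and argument
 * is rescheduled rather than duplicated, and the list is kept sorted
 * soonest-first so eloop_start() only needs to inspect its head.
 */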
int
eloop_q_timeout_add_tv(struct eloop *eloop, int queue,
    const struct timespec *when, void (*callback)(void *), void *arg)
{
	struct timespec now, w;
	struct eloop_timeout *t, *tt = NULL;

	assert(eloop != NULL);
	assert(when != NULL);
	assert(callback != NULL);

	clock_gettime(CLOCK_MONOTONIC, &now);
	timespecadd(&now, when, &w);
	/* Check for time_t overflow. */
	if (timespeccmp(&w, &now, <)) {
		errno = ERANGE;
		return -1;
	}

	/* Remove existing timeout if present */
	TAILQ_FOREACH(t, &eloop->timeouts, next) {
		if (t->callback == callback && t->arg == arg) {
			TAILQ_REMOVE(&eloop->timeouts, t, next);
			break;
		}
	}

	if (t == NULL) {
		/* No existing, so allocate or grab one from the free pool */
		if ((t = TAILQ_FIRST(&eloop->free_timeouts))) {
			TAILQ_REMOVE(&eloop->free_timeouts, t, next);
		} else {
			if ((t = malloc(sizeof(*t))) == NULL)
				return -1;
		}
	}

	t->when = w;
	t->callback = callback;
	t->arg = arg;
	t->queue = queue;

	/* The timeout list should be in chronological order,
	 * soonest first. */
	TAILQ_FOREACH(tt, &eloop->timeouts, next) {
		if (timespeccmp(&t->when, &tt->when, <)) {
			TAILQ_INSERT_BEFORE(tt, t, next);
			return 0;
		}
	}
	TAILQ_INSERT_TAIL(&eloop->timeouts, t, next);
	return 0;
}

int
eloop_q_timeout_add_sec(struct eloop *eloop, int queue, time_t when,
    void (*callback)(void *), void *arg)
{
	struct timespec tv;

	tv.tv_sec = when;
	tv.tv_nsec = 0;
	return eloop_q_timeout_add_tv(eloop, queue, &tv, callback, arg);
}

int
eloop_q_timeout_add_msec(struct eloop *eloop, int queue, long when,
    void (*callback)(void *), void *arg)
{
	struct timespec tv;

	tv.tv_sec = when / MSEC_PER_SEC;
	tv.tv_nsec = (when % MSEC_PER_SEC) * NSEC_PER_MSEC;
	return eloop_q_timeout_add_tv(eloop, queue, &tv, callback, arg);
}

#if !defined(HAVE_KQUEUE)
static int
eloop_timeout_add_now(struct eloop *eloop,
    void (*callback)(void *), void *arg)
{

	assert(eloop->timeout0 == NULL);
	eloop->timeout0 = callback;
	eloop->timeout0_arg = arg;
	return 0;
}
#endif

void
eloop_q_timeout_delete(struct eloop *eloop, int queue,
    void (*callback)(void *), void *arg)
{
	struct eloop_timeout *t, *tt;

	assert(eloop != NULL);

	TAILQ_FOREACH_SAFE(t, &eloop->timeouts, next, tt) {
		if ((queue == 0 || t->queue == queue) &&
		    t->arg == arg &&
		    (!callback || t->callback == callback))
		{
			TAILQ_REMOVE(&eloop->timeouts, t, next);
			TAILQ_INSERT_TAIL(&eloop->free_timeouts, t, next);
		}
	}
}

void
eloop_exit(struct eloop *eloop, int code)
{

	assert(eloop != NULL);

	eloop->exitcode = code;
	eloop->exitnow = 1;
}

#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
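/* Create the kernel polling descriptor (kqueue or epoll instance)
 * with close-on-exec set. */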
static int
eloop_open(struct eloop *eloop)
{

#if defined(HAVE_KQUEUE1)
	return (eloop->poll_fd = kqueue1(O_CLOEXEC));
#elif defined(HAVE_KQUEUE)
	int i;

	if ((eloop->poll_fd = kqueue()) == -1)
		return -1;
	if ((i = fcntl(eloop->poll_fd, F_GETFD, 0)) == -1 ||
	    fcntl(eloop->poll_fd, F_SETFD, i | FD_CLOEXEC) == -1)
	{
		close(eloop->poll_fd);
		eloop->poll_fd = -1;
		return -1;
	}

	return eloop->poll_fd;
#elif defined (HAVE_EPOLL)
	return (eloop->poll_fd = epoll_create1(EPOLL_CLOEXEC));
#endif
}

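/*
 * Close and reopen the polling descriptor, then re-register every
 * tracked signal (kqueue) and event with the kernel.  Called by
 * eloop_signal_set_cb() whenever the signal list changes.
 */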
int
eloop_requeue(struct eloop *eloop)
{
	struct eloop_event *e;
	int error;
#if defined(HAVE_KQUEUE)
	size_t i;
	struct kevent *ke;
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#endif

	assert(eloop != NULL);

	if (eloop->poll_fd != -1)
		close(eloop->poll_fd);
	if (eloop_open(eloop) == -1)
		return -1;
#if defined (HAVE_KQUEUE)
	i = eloop->signals_len;
	TAILQ_FOREACH(e, &eloop->events, next) {
		i++;
		if (e->write_cb)
			i++;
	}

	if ((ke = malloc(sizeof(*ke) * i)) == NULL)
		return -1;

	for (i = 0; i < eloop->signals_len; i++)
		EV_SET(&ke[i], (uintptr_t)eloop->signals[i],
		    EVFILT_SIGNAL, EV_ADD, 0, 0, UPTR(NULL));

	TAILQ_FOREACH(e, &eloop->events, next) {
		EV_SET(&ke[i], (uintptr_t)e->fd, EVFILT_READ,
		    EV_ADD, 0, 0, UPTR(e));
		i++;
		if (e->write_cb) {
			EV_SET(&ke[i], (uintptr_t)e->fd, EVFILT_WRITE,
			    EV_ADD, 0, 0, UPTR(e));
			i++;
		}
	}

	error = kevent(eloop->poll_fd, ke, LENC(i), NULL, 0, NULL);
	free(ke);

#elif defined(HAVE_EPOLL)

	error = 0;
	TAILQ_FOREACH(e, &eloop->events, next) {
		memset(&epe, 0, sizeof(epe));
		epe.data.fd = e->fd;
		epe.events = EPOLLIN;
		if (e->write_cb)
			epe.events |= EPOLLOUT;
		epe.data.ptr = e;
		if (epoll_ctl(eloop->poll_fd, EPOLL_CTL_ADD, e->fd, &epe) == -1)
			error = -1;
	}
#endif

	return error;
}
#endif

int
eloop_signal_set_cb(struct eloop *eloop,
    const int *signals, size_t signals_len,
    void (*signal_cb)(int, void *), void *signal_cb_ctx)
{

	assert(eloop != NULL);

	eloop->signals = signals;
	eloop->signals_len = signals_len;
	eloop->signal_cb = signal_cb;
	eloop->signal_cb_ctx = signal_cb_ctx;
	return eloop_requeue(eloop);
}

#ifndef HAVE_KQUEUE
struct eloop_siginfo {
	int sig;
	struct eloop *eloop;
};
static struct eloop_siginfo _eloop_siginfo;
static struct eloop *_eloop;

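/* Signal trampoline: eloop_signal3() runs in signal context and only
 * records which signal fired, scheduling eloop_signal1() through the
 * timeout0 slot; the user's signal_cb then runs from the main loop,
 * safely outside the handler. */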
static void
eloop_signal1(void *arg)
{
	struct eloop_siginfo *si = arg;

	si->eloop->signal_cb(si->sig, si->eloop->signal_cb_ctx);
}

static void
#if !defined(__minix)
eloop_signal3(int sig, __unused siginfo_t *siginfo, __unused void *arg)
#else /* defined(__minix) */
eloop_signal3(int sig)
#endif /* defined(__minix) */
{

	/* So that we can operate safely under a signal we instruct
	 * eloop to pass a copy of the siginfo structure to eloop_signal1
	 * as the very first thing to do. */
	_eloop_siginfo.eloop = _eloop;
	_eloop_siginfo.sig = sig;
	eloop_timeout_add_now(_eloop_siginfo.eloop,
	    eloop_signal1, &_eloop_siginfo);
}
#endif

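/*
 * Block all signals, saving the previous mask in oldset.  Without
 * kqueue, also install eloop_signal3() as the handler for each signal
 * in eloop->signals; those signals are then delivered only while
 * pselect()/epoll_pwait() temporarily applies the mask passed to
 * eloop_start().
 */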
int
eloop_signal_mask(struct eloop *eloop, sigset_t *oldset)
{
	sigset_t newset;
#ifndef HAVE_KQUEUE
	size_t i;
	struct sigaction sa;
#endif

	assert(eloop != NULL);

	sigfillset(&newset);
	if (sigprocmask(SIG_SETMASK, &newset, oldset) == -1)
		return -1;

#ifdef HAVE_KQUEUE
	UNUSED(eloop);
#else
	memset(&sa, 0, sizeof(sa));
#if !defined(__minix)
	sa.sa_sigaction = eloop_signal3;
	sa.sa_flags = SA_SIGINFO;
#else /* defined(__minix) */
	sa.sa_handler = eloop_signal3;
#endif /* defined(__minix) */
	sigemptyset(&sa.sa_mask);

	for (i = 0; i < eloop->signals_len; i++) {
		if (sigaction(eloop->signals[i], &sa, NULL) == -1)
			return -1;
	}
#endif
	return 0;
}

struct eloop *
eloop_new(void)
{
	struct eloop *eloop;
	struct timespec now;

	/* Check we have a working monotonic clock. */
	if (clock_gettime(CLOCK_MONOTONIC, &now) == -1)
		return NULL;

	eloop = calloc(1, sizeof(*eloop));
	if (eloop) {
		TAILQ_INIT(&eloop->events);
		TAILQ_INIT(&eloop->free_events);
		TAILQ_INIT(&eloop->timeouts);
		TAILQ_INIT(&eloop->free_timeouts);
		eloop->exitcode = EXIT_FAILURE;
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
		eloop->poll_fd = -1;
		eloop_open(eloop);
#endif
	}

	return eloop;
}

void eloop_free(struct eloop *eloop)
{
	struct eloop_event *e;
	struct eloop_timeout *t;

	if (eloop == NULL)
		return;

	while ((e = TAILQ_FIRST(&eloop->events))) {
		TAILQ_REMOVE(&eloop->events, e, next);
		free(e);
	}
	while ((e = TAILQ_FIRST(&eloop->free_events))) {
		TAILQ_REMOVE(&eloop->free_events, e, next);
		free(e);
	}
	while ((t = TAILQ_FIRST(&eloop->timeouts))) {
		TAILQ_REMOVE(&eloop->timeouts, t, next);
		free(t);
	}
	while ((t = TAILQ_FIRST(&eloop->free_timeouts))) {
		TAILQ_REMOVE(&eloop->free_timeouts, t, next);
		free(t);
	}
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
	close(eloop->poll_fd);
#else
	free(eloop->fds);
#endif
	free(eloop);
}

int
eloop_start(struct eloop *eloop, sigset_t *signals)
{
	int n;
	struct eloop_event *e;
	struct eloop_timeout *t;
	struct timespec now, ts, *tsp;
	void (*t0)(void *);
#if defined(HAVE_KQUEUE)
	struct kevent ke;
	UNUSED(signals);
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#endif
#ifndef HAVE_KQUEUE
	int timeout;

	_eloop = eloop;
#endif

	assert(eloop != NULL);

	for (;;) {
		if (eloop->exitnow)
			break;

		/* Run all timeouts first */
		if (eloop->timeout0) {
			t0 = eloop->timeout0;
			eloop->timeout0 = NULL;
			t0(eloop->timeout0_arg);
			continue;
		}
		if ((t = TAILQ_FIRST(&eloop->timeouts))) {
			clock_gettime(CLOCK_MONOTONIC, &now);
			if (timespeccmp(&now, &t->when, >)) {
				TAILQ_REMOVE(&eloop->timeouts, t, next);
				t->callback(t->arg);
				TAILQ_INSERT_TAIL(&eloop->free_timeouts, t, next);
				continue;
			}
			timespecsub(&t->when, &now, &ts);
			tsp = &ts;
		} else
			/* No timeouts, so wait forever */
			tsp = NULL;

		if (tsp == NULL && eloop->events_len == 0)
			break;

#ifndef HAVE_KQUEUE
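		/* poll(2) and epoll_wait(2) take their timeout in whole
		 * milliseconds as an int, so round up (never fire early)
		 * and clamp to INT_MAX. */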
		if (tsp == NULL)
			timeout = -1;
		else if (tsp->tv_sec > INT_MAX / 1000 ||
		    (tsp->tv_sec == INT_MAX / 1000 &&
		    (tsp->tv_nsec + 999999) / 1000000 > INT_MAX % 1000000))
			timeout = INT_MAX;
		else
			timeout = (int)(tsp->tv_sec * 1000 +
			    (tsp->tv_nsec + 999999) / 1000000);
#endif

#if defined(HAVE_KQUEUE)
		n = kevent(eloop->poll_fd, NULL, 0, &ke, 1, tsp);
#elif defined(HAVE_EPOLL)
		if (signals)
			n = epoll_pwait(eloop->poll_fd, &epe, 1,
			    timeout, signals);
		else
			n = epoll_wait(eloop->poll_fd, &epe, 1, timeout);
#else
		if (signals)
			n = pollts(eloop->fds, (nfds_t)eloop->events_len,
			    tsp, signals);
		else
			n = poll(eloop->fds, (nfds_t)eloop->events_len,
			    timeout);
#endif
		if (n == -1) {
			if (errno == EINTR)
				continue;
			return -errno;
		}

		/* Process any triggered events.
		 * We go back to the start after calling each callback in
		 * case the current or the next event is removed. */
#if defined(HAVE_KQUEUE)
		if (n) {
			if (ke.filter == EVFILT_SIGNAL) {
				eloop->signal_cb((int)ke.ident,
				    eloop->signal_cb_ctx);
				continue;
			}
			e = (struct eloop_event *)ke.udata;
			if (ke.filter == EVFILT_WRITE) {
				e->write_cb(e->write_cb_arg);
				continue;
			} else if (ke.filter == EVFILT_READ) {
				e->read_cb(e->read_cb_arg);
				continue;
			}
		}
#elif defined(HAVE_EPOLL)
		if (n) {
			e = (struct eloop_event *)epe.data.ptr;
			if (epe.events & EPOLLOUT && e->write_cb != NULL) {
				e->write_cb(e->write_cb_arg);
				continue;
			}
			if (epe.events &
			    (EPOLLIN | EPOLLERR | EPOLLHUP) &&
			    e->read_cb != NULL)
			{
				e->read_cb(e->read_cb_arg);
				continue;
			}
		}
#else
		if (n > 0) {
			TAILQ_FOREACH(e, &eloop->events, next) {
				if (e->pollfd->revents & POLLOUT &&
				    e->write_cb != NULL)
				{
					e->write_cb(e->write_cb_arg);
					break;
				}
				if (e->pollfd->revents && e->read_cb != NULL) {
					e->read_cb(e->read_cb_arg);
					break;
				}
			}
		}
#endif
	}

	return eloop->exitcode;
}
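
/*
 * A minimal usage sketch (illustrative only; the handler names, fd and
 * arg are hypothetical, and error handling is omitted):
 *
 *	static void read_handler(void *arg);
 *	static void expire_handler(void *arg);
 *
 *	struct eloop *eloop = eloop_new();
 *	eloop_event_add(eloop, fd, read_handler, arg, NULL, NULL);
 *	eloop_q_timeout_add_sec(eloop, 0, 10, expire_handler, arg);
 *	exit_code = eloop_start(eloop, NULL);
 *	eloop_free(eloop);
 */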