/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * eloop - portable event based main loop.
 * Copyright (c) 2006-2020 Roy Marples <roy@marples.name>
 * All rights reserved.

 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

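/*
 * Minimal usage sketch (illustrative only: fd, the callbacks and their
 * arguments below are placeholders, not part of this file):
 *
 *	struct eloop *eloop = eloop_new();
 *
 *	eloop_event_add(eloop, fd, read_cb, read_arg);
 *	eloop_q_timeout_add_sec(eloop, 1, 10, timeout_cb, timeout_arg);
 *	eloop_signal_set_cb(eloop, signals, nsignals, signal_cb, signal_arg);
 *	eloop_signal_mask(eloop, &sigmask);
 *
 *	exit_code = eloop_start(eloop, &sigmask);
 *	eloop_free(eloop);
 */
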
#include <sys/time.h>

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <poll.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* config.h should define HAVE_PPOLL, etc. */
#if defined(HAVE_CONFIG_H) && !defined(NO_CONFIG_H)
#include "config.h"
#endif

#if defined(HAVE_PPOLL)
#elif defined(HAVE_POLLTS)
#define ppoll pollts
#elif !defined(HAVE_PSELECT)
#pragma message("Compiling eloop with pselect(2) support.")
#define HAVE_PSELECT
#define ppoll eloop_ppoll
#endif

#include "eloop.h"

#ifndef UNUSED
#define UNUSED(a) (void)((a))
#endif
#ifndef __unused
#ifdef __GNUC__
#define __unused   __attribute__((__unused__))
#else
#define __unused
#endif
#endif

#ifdef HAVE_PSELECT
#include <sys/select.h>
#endif

/* Our structures require TAILQ macros, which really every libc should
 * ship as they are useful beyond belief.
 * Sadly some libcs don't have sys/queue.h and some that do lack the
 * TAILQ_FOREACH macro. For those that don't, the application using this
 * implementation will need to ship a working queue.h somewhere.
 * If config.h did not find sys/queue.h, QUEUE_H may be defined to name
 * an alternative header, otherwise queue.h is loaded from the current
 * directory. */
#ifndef TAILQ_FOREACH
#ifdef HAVE_SYS_QUEUE_H
#include <sys/queue.h>
#elif defined(QUEUE_H)
#define __QUEUE_HEADER(x) #x
#define _QUEUE_HEADER(x) __QUEUE_HEADER(x)
#include _QUEUE_HEADER(QUEUE_H)
#else
#include "queue.h"
#endif
#endif

#ifdef ELOOP_DEBUG
#include <stdio.h>
#endif

/*
 * time_t is a signed integer of an unspecified size.
 * To adjust for time_t wrapping, we need to work out the maximum signed
 * value and use that as a maximum.
 */
#ifndef TIME_MAX
#define	TIME_MAX	((1ULL << (sizeof(time_t) * NBBY - 1)) - 1)
#endif
/* The unsigned maximum is then simple - multiply by two and add one. */
#ifndef UTIME_MAX
#define	UTIME_MAX	(TIME_MAX * 2) + 1
#endif
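/*
 * For example, with a 32-bit time_t: TIME_MAX is 2^31 - 1 (2147483647)
 * and UTIME_MAX is 2^32 - 1 (4294967295).
 */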

struct eloop_event {
	TAILQ_ENTRY(eloop_event) next;
	int fd;
	void (*read_cb)(void *);
	void *read_cb_arg;
	void (*write_cb)(void *);
	void *write_cb_arg;
	struct pollfd *pollfd;
};

struct eloop_timeout {
	TAILQ_ENTRY(eloop_timeout) next;
	unsigned int seconds;
	unsigned int nseconds;
	void (*callback)(void *);
	void *arg;
	int queue;
};

struct eloop {
	TAILQ_HEAD (event_head, eloop_event) events;
	size_t nevents;
	struct event_head free_events;

	struct timespec now;
	TAILQ_HEAD (timeout_head, eloop_timeout) timeouts;
	struct timeout_head free_timeouts;

	void (*timeout0)(void *);
	void *timeout0_arg;
	const int *signals;
	size_t signals_len;
	void (*signal_cb)(int, void *);
	void *signal_cb_ctx;

	struct pollfd *fds;
	size_t nfds;

	int exitnow;
	int exitcode;
};

#ifdef HAVE_REALLOCARRAY
#define	eloop_realloca	reallocarray
#else
/* Handy routine to check for potential overflow.
 * reallocarray(3) and reallocarr(3) are not portable. */
#define SQRT_SIZE_MAX (((size_t)1) << (sizeof(size_t) * CHAR_BIT / 2))
static void *
eloop_realloca(void *ptr, size_t n, size_t size)
{

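	/* The product n * size cannot overflow when both operands are
	 * below sqrt(SIZE_MAX); the bitwise test detects that common
	 * case cheaply and skips the division. */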
	if ((n | size) >= SQRT_SIZE_MAX && n > SIZE_MAX / size) {
		errno = EOVERFLOW;
		return NULL;
	}
	return realloc(ptr, n * size);
}
#endif

#ifdef HAVE_PSELECT
/* Wrapper around pselect, to imitate the ppoll call. */
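/* Note that, unlike a real ppoll(2), this emulation is limited to
 * descriptors below FD_SETSIZE. */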
static int
eloop_ppoll(struct pollfd *fds, nfds_t nfds,
    const struct timespec *ts, const sigset_t *sigmask)
{
	fd_set read_fds, write_fds;
	nfds_t n;
	int maxfd, r;

	FD_ZERO(&read_fds);
	FD_ZERO(&write_fds);
	maxfd = 0;
	for (n = 0; n < nfds; n++) {
		if (fds[n].events & POLLIN) {
			FD_SET(fds[n].fd, &read_fds);
			if (fds[n].fd > maxfd)
				maxfd = fds[n].fd;
		}
		if (fds[n].events & POLLOUT) {
			FD_SET(fds[n].fd, &write_fds);
			if (fds[n].fd > maxfd)
				maxfd = fds[n].fd;
		}
	}

	r = pselect(maxfd + 1, &read_fds, &write_fds, NULL, ts, sigmask);
	if (r > 0) {
		for (n = 0; n < nfds; n++) {
			fds[n].revents =
			    FD_ISSET(fds[n].fd, &read_fds) ? POLLIN : 0;
			if (FD_ISSET(fds[n].fd, &write_fds))
				fds[n].revents |= POLLOUT;
		}
	}

	return r;
}
#endif

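/* Return the difference tsp - usp in whole seconds, coping with time_t
 * wrapping; any remaining nanoseconds are stored in *nsp when it is not
 * NULL. */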
unsigned long long
eloop_timespec_diff(const struct timespec *tsp, const struct timespec *usp,
    unsigned int *nsp)
{
	unsigned long long tsecs, usecs, secs;
	long nsecs;

	if (tsp->tv_sec < 0) /* time wrapped */
		tsecs = UTIME_MAX - (unsigned long long)(-tsp->tv_sec);
	else
		tsecs = (unsigned long long)tsp->tv_sec;
	if (usp->tv_sec < 0) /* time wrapped */
		usecs = UTIME_MAX - (unsigned long long)(-usp->tv_sec);
	else
		usecs = (unsigned long long)usp->tv_sec;

	if (usecs > tsecs) /* time wrapped */
		secs = (UTIME_MAX - usecs) + tsecs;
	else
		secs = tsecs - usecs;

	nsecs = tsp->tv_nsec - usp->tv_nsec;
	if (nsecs < 0) {
		if (secs == 0)
			nsecs = 0;
		else {
			secs--;
			nsecs += NSEC_PER_SEC;
		}
	}
	if (nsp != NULL)
		*nsp = (unsigned int)nsecs;
	return secs;
}

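/* Advance eloop->now to the current monotonic time and subtract the
 * elapsed interval from every queued timeout. */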
static void
eloop_reduce_timers(struct eloop *eloop)
{
	struct timespec now;
	unsigned long long secs;
	unsigned int nsecs;
	struct eloop_timeout *t;

	clock_gettime(CLOCK_MONOTONIC, &now);
	secs = eloop_timespec_diff(&now, &eloop->now, &nsecs);

	TAILQ_FOREACH(t, &eloop->timeouts, next) {
		if (secs > t->seconds) {
			t->seconds = 0;
			t->nseconds = 0;
		} else {
			t->seconds -= (unsigned int)secs;
			if (nsecs > t->nseconds) {
				if (t->seconds == 0)
					t->nseconds = 0;
				else {
					t->seconds--;
					t->nseconds = NSEC_PER_SEC
					    - (nsecs - t->nseconds);
				}
			} else
				t->nseconds -= nsecs;
		}
	}

	eloop->now = now;
}

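/* Rebuild the pollfd array so that it mirrors the event list, requesting
 * POLLIN and/or POLLOUT according to the registered callbacks. */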
static void
eloop_event_setup_fds(struct eloop *eloop)
{
	struct eloop_event *e;
	struct pollfd *pfd;

	pfd = eloop->fds;
	TAILQ_FOREACH(e, &eloop->events, next) {
#ifdef ELOOP_DEBUG
		fprintf(stderr, "%s(%d) fd=%d, rcb=%p, wcb=%p\n",
		    __func__, getpid(), e->fd, e->read_cb, e->write_cb);
#endif
		e->pollfd = pfd;
		pfd->fd = e->fd;
		pfd->events = 0;
		if (e->read_cb != NULL)
			pfd->events |= POLLIN;
		if (e->write_cb != NULL)
			pfd->events |= POLLOUT;
		pfd->revents = 0;
		pfd++;
	}
}

size_t
eloop_event_count(const struct eloop *eloop)
{

	return eloop->nevents;
}

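/* Register, or update, the read and/or write callbacks for fd.
 * Returns 0 on success, or -1 with errno set on failure. */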
int
eloop_event_add_rw(struct eloop *eloop, int fd,
    void (*read_cb)(void *), void *read_cb_arg,
    void (*write_cb)(void *), void *write_cb_arg)
{
	struct eloop_event *e;
	struct pollfd *pfd;

	assert(eloop != NULL);
	assert(read_cb != NULL || write_cb != NULL);
	if (fd == -1) {
		errno = EINVAL;
		return -1;
	}

	TAILQ_FOREACH(e, &eloop->events, next) {
		if (e->fd == fd)
			break;
	}

	if (e == NULL) {
		if (eloop->nevents + 1 > eloop->nfds) {
			pfd = eloop_realloca(eloop->fds, eloop->nevents + 1,
			    sizeof(*pfd));
			if (pfd == NULL)
				return -1;
			eloop->fds = pfd;
			eloop->nfds++;
		}

		e = TAILQ_FIRST(&eloop->free_events);
		if (e != NULL)
			TAILQ_REMOVE(&eloop->free_events, e, next);
		else {
			e = malloc(sizeof(*e));
			if (e == NULL)
				return -1;
		}
		TAILQ_INSERT_HEAD(&eloop->events, e, next);
		eloop->nevents++;
		e->fd = fd;
		e->read_cb = read_cb;
		e->read_cb_arg = read_cb_arg;
		e->write_cb = write_cb;
		e->write_cb_arg = write_cb_arg;
		goto setup;
	}

	if (read_cb) {
		e->read_cb = read_cb;
		e->read_cb_arg = read_cb_arg;
	}
	if (write_cb) {
		e->write_cb = write_cb;
		e->write_cb_arg = write_cb_arg;
	}

setup:
	eloop_event_setup_fds(eloop);
	return 0;
}

int
eloop_event_add(struct eloop *eloop, int fd,
    void (*read_cb)(void *), void *read_cb_arg)
{

	return eloop_event_add_rw(eloop, fd, read_cb, read_cb_arg, NULL, NULL);
}

int
eloop_event_add_w(struct eloop *eloop, int fd,
    void (*write_cb)(void *), void *write_cb_arg)
{

	return eloop_event_add_rw(eloop, fd, NULL, NULL, write_cb, write_cb_arg);
}

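/* Delete the write callback for fd, or the whole event when write_only
 * is false or no read callback remains.
 * Returns 1 on success, or -1 with errno set to ENOENT when fd is not
 * registered. */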
int
eloop_event_delete_write(struct eloop *eloop, int fd, int write_only)
{
	struct eloop_event *e;

	assert(eloop != NULL);

	TAILQ_FOREACH(e, &eloop->events, next) {
		if (e->fd == fd)
			break;
	}
	if (e == NULL) {
		errno = ENOENT;
		return -1;
	}

	if (write_only) {
		if (e->read_cb == NULL)
			goto remove;
		e->write_cb = NULL;
		e->write_cb_arg = NULL;
		goto done;
	}

remove:
	TAILQ_REMOVE(&eloop->events, e, next);
	TAILQ_INSERT_TAIL(&eloop->free_events, e, next);
	eloop->nevents--;

done:
	eloop_event_setup_fds(eloop);
	return 1;
}

/*
 * This implementation should cope with UINT_MAX seconds on a system
 * where time_t is INT32_MAX. It should also cope with the monotonic timer
 * wrapping, although this is highly unlikely.
 * unsigned int should match or be greater than any on wire specified timeout.
 */
static int
eloop_q_timeout_add(struct eloop *eloop, int queue,
    unsigned int seconds, unsigned int nseconds,
    void (*callback)(void *), void *arg)
{
	struct eloop_timeout *t, *tt = NULL;

	assert(eloop != NULL);
	assert(callback != NULL);
	assert(nseconds <= NSEC_PER_SEC);

	/* Remove existing timeout if present. */
	TAILQ_FOREACH(t, &eloop->timeouts, next) {
		if (t->callback == callback && t->arg == arg) {
			TAILQ_REMOVE(&eloop->timeouts, t, next);
			break;
		}
	}

	if (t == NULL) {
		/* No existing, so allocate or grab one from the free pool. */
		if ((t = TAILQ_FIRST(&eloop->free_timeouts))) {
			TAILQ_REMOVE(&eloop->free_timeouts, t, next);
		} else {
			if ((t = malloc(sizeof(*t))) == NULL)
				return -1;
		}
	}

	eloop_reduce_timers(eloop);

	t->seconds = seconds;
	t->nseconds = nseconds;
	t->callback = callback;
	t->arg = arg;
	t->queue = queue;

	/* The timeout list should be in chronological order,
	 * soonest first. */
	TAILQ_FOREACH(tt, &eloop->timeouts, next) {
		if (t->seconds < tt->seconds ||
		    (t->seconds == tt->seconds && t->nseconds < tt->nseconds))
		{
			TAILQ_INSERT_BEFORE(tt, t, next);
			return 0;
		}
	}
	TAILQ_INSERT_TAIL(&eloop->timeouts, t, next);
	return 0;
}

int
eloop_q_timeout_add_tv(struct eloop *eloop, int queue,
    const struct timespec *when, void (*callback)(void *), void *arg)
{

	if (when->tv_sec < 0 || (unsigned long)when->tv_sec > UINT_MAX) {
		errno = EINVAL;
		return -1;
	}
	if (when->tv_nsec < 0 || when->tv_nsec > NSEC_PER_SEC) {
		errno = EINVAL;
		return -1;
	}

	return eloop_q_timeout_add(eloop, queue,
	    (unsigned int)when->tv_sec, (unsigned int)when->tv_nsec,
	    callback, arg);
}

int
eloop_q_timeout_add_sec(struct eloop *eloop, int queue, unsigned int seconds,
    void (*callback)(void *), void *arg)
{

	return eloop_q_timeout_add(eloop, queue, seconds, 0, callback, arg);
}

int
eloop_q_timeout_add_msec(struct eloop *eloop, int queue, unsigned long when,
    void (*callback)(void *), void *arg)
{
	unsigned long seconds, nseconds;

	seconds = when / MSEC_PER_SEC;
	if (seconds > UINT_MAX) {
		errno = EINVAL;
		return -1;
	}

	nseconds = (when % MSEC_PER_SEC) * NSEC_PER_MSEC;
	return eloop_q_timeout_add(eloop, queue,
		(unsigned int)seconds, (unsigned int)nseconds, callback, arg);
}

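/* Queue a callback to run on the next pass of the event loop, ahead of
 * all other timeouts. Only one such callback may be pending at a time;
 * it is used by the signal trampoline below. */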
static int
eloop_timeout_add_now(struct eloop *eloop,
    void (*callback)(void *), void *arg)
{

	assert(eloop->timeout0 == NULL);
	eloop->timeout0 = callback;
	eloop->timeout0_arg = arg;
	return 0;
}

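/* Delete queued timeouts matching arg and, when given, callback.
 * A queue of 0 matches any queue. Returns the number of timeouts
 * removed. */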
int
eloop_q_timeout_delete(struct eloop *eloop, int queue,
    void (*callback)(void *), void *arg)
{
	struct eloop_timeout *t, *tt;
	int n;

	assert(eloop != NULL);

	n = 0;
	TAILQ_FOREACH_SAFE(t, &eloop->timeouts, next, tt) {
		if ((queue == 0 || t->queue == queue) &&
		    t->arg == arg &&
		    (!callback || t->callback == callback))
		{
			TAILQ_REMOVE(&eloop->timeouts, t, next);
			TAILQ_INSERT_TAIL(&eloop->free_timeouts, t, next);
			n++;
		}
	}
	return n;
}

void
eloop_exit(struct eloop *eloop, int code)
{

	assert(eloop != NULL);

	eloop->exitcode = code;
	eloop->exitnow = 1;
}

void
eloop_enter(struct eloop *eloop)
{

	eloop->exitnow = 0;
}

void
eloop_signal_set_cb(struct eloop *eloop,
    const int *signals, size_t signals_len,
    void (*signal_cb)(int, void *), void *signal_cb_ctx)
{

	assert(eloop != NULL);

	eloop->signals = signals;
	eloop->signals_len = signals_len;
	eloop->signal_cb = signal_cb;
	eloop->signal_cb_ctx = signal_cb_ctx;
}

struct eloop_siginfo {
	int sig;
	struct eloop *eloop;
};
static struct eloop_siginfo _eloop_siginfo;
static struct eloop *_eloop;

static void
eloop_signal1(void *arg)
{
	struct eloop_siginfo *si = arg;

	si->eloop->signal_cb(si->sig, si->eloop->signal_cb_ctx);
}

static void
eloop_signal3(int sig, __unused siginfo_t *siginfo, __unused void *arg)
{

	/* So that we can operate safely under a signal we instruct
	 * eloop to pass a copy of the siginfo structure to eloop_signal1
	 * as the very first thing to do. */
	_eloop_siginfo.eloop = _eloop;
	_eloop_siginfo.sig = sig;
	eloop_timeout_add_now(_eloop_siginfo.eloop,
	    eloop_signal1, &_eloop_siginfo);
}

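/* Block the signals registered with eloop_signal_set_cb(), saving the
 * previous mask in *oldset, and install eloop_signal3() as their
 * handler. */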
int
eloop_signal_mask(struct eloop *eloop, sigset_t *oldset)
{
	sigset_t newset;
	size_t i;
	struct sigaction sa = {
	    .sa_sigaction = eloop_signal3,
	    .sa_flags = SA_SIGINFO,
	};

	assert(eloop != NULL);

	sigemptyset(&newset);
	for (i = 0; i < eloop->signals_len; i++)
		sigaddset(&newset, eloop->signals[i]);
	if (sigprocmask(SIG_SETMASK, &newset, oldset) == -1)
		return -1;

	_eloop = eloop;
	sigemptyset(&sa.sa_mask);

	for (i = 0; i < eloop->signals_len; i++) {
		if (sigaction(eloop->signals[i], &sa, NULL) == -1)
			return -1;
	}
	return 0;
}

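/* Allocate and initialise a new event loop.
 * Returns NULL if allocation fails or no monotonic clock is available. */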
struct eloop *
eloop_new(void)
{
	struct eloop *eloop;

	eloop = calloc(1, sizeof(*eloop));
	if (eloop == NULL)
		return NULL;

	/* Check we have a working monotonic clock. */
	if (clock_gettime(CLOCK_MONOTONIC, &eloop->now) == -1) {
		free(eloop);
		return NULL;
	}

	TAILQ_INIT(&eloop->events);
	TAILQ_INIT(&eloop->free_events);
	TAILQ_INIT(&eloop->timeouts);
	TAILQ_INIT(&eloop->free_timeouts);
	eloop->exitcode = EXIT_FAILURE;

	return eloop;
}

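/* Release all events, timeouts and the pollfd array; the loop itself is
 * kept so that it may be reused or freed. */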
void
eloop_clear(struct eloop *eloop)
{
	struct eloop_event *e;
	struct eloop_timeout *t;

	if (eloop == NULL)
		return;

	eloop->nevents = 0;
	eloop->signals = NULL;
	eloop->signals_len = 0;

	while ((e = TAILQ_FIRST(&eloop->events))) {
		TAILQ_REMOVE(&eloop->events, e, next);
		free(e);
	}
	while ((e = TAILQ_FIRST(&eloop->free_events))) {
		TAILQ_REMOVE(&eloop->free_events, e, next);
		free(e);
	}
	while ((t = TAILQ_FIRST(&eloop->timeouts))) {
		TAILQ_REMOVE(&eloop->timeouts, t, next);
		free(t);
	}
	while ((t = TAILQ_FIRST(&eloop->free_timeouts))) {
		TAILQ_REMOVE(&eloop->free_timeouts, t, next);
		free(t);
	}

	free(eloop->fds);
	eloop->fds = NULL;
	eloop->nfds = 0;
}

void
eloop_free(struct eloop *eloop)
{

	eloop_clear(eloop);
	free(eloop);
}

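/* Run the event loop until eloop_exit() is called or there is nothing
 * left to wait on. Returns the exit code set by eloop_exit() (or
 * EXIT_FAILURE by default), or -errno should ppoll(2) fail. */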
int
eloop_start(struct eloop *eloop, sigset_t *signals)
{
	int n;
	struct eloop_event *e;
	struct eloop_timeout *t;
	void (*t0)(void *);
	struct timespec ts, *tsp;

	assert(eloop != NULL);

	for (;;) {
		if (eloop->exitnow)
			break;

		/* Run all timeouts first. */
		if (eloop->timeout0) {
			t0 = eloop->timeout0;
			eloop->timeout0 = NULL;
			t0(eloop->timeout0_arg);
			continue;
		}

		t = TAILQ_FIRST(&eloop->timeouts);
		if (t == NULL && eloop->nevents == 0)
			break;

		if (t != NULL)
			eloop_reduce_timers(eloop);

		if (t != NULL && t->seconds == 0 && t->nseconds == 0) {
			TAILQ_REMOVE(&eloop->timeouts, t, next);
			t->callback(t->arg);
			TAILQ_INSERT_TAIL(&eloop->free_timeouts, t, next);
			continue;
		}

		if (t != NULL) {
			if (t->seconds > INT_MAX) {
				ts.tv_sec = (time_t)INT_MAX;
				ts.tv_nsec = 0;
			} else {
				ts.tv_sec = (time_t)t->seconds;
				ts.tv_nsec = (long)t->nseconds;
			}
			tsp = &ts;
		} else
			tsp = NULL;

		n = ppoll(eloop->fds, (nfds_t)eloop->nevents, tsp, signals);
		if (n == -1) {
			if (errno == EINTR)
				continue;
			return -errno;
		}
		if (n == 0)
			continue;

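		/* Dispatch at most one callback and then re-poll, as the
		 * callback may have modified the list of events. */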
		TAILQ_FOREACH(e, &eloop->events, next) {
			if (e->pollfd->revents & POLLOUT) {
				if (e->write_cb != NULL) {
					e->write_cb(e->write_cb_arg);
					break;
				}
			}
			if (e->pollfd->revents) {
				if (e->read_cb != NULL) {
					e->read_cb(e->read_cb_arg);
					break;
				}
			}
		}
	}

	return eloop->exitcode;
}