/*	$NetBSD: event.c,v 1.6 2020/05/25 20:47:33 christos Exp $	*/

/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <ctype.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <limits.h>

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/event_compat.h"
#include "event-internal.h"
#include "defer-internal.h"
#include "evthread-internal.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "iocp-internal.h"
#include "changelist-internal.h"
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
#include "util-internal.h"


#ifdef EVENT__HAVE_WORKING_KQUEUE
#include "kqueue-internal.h"
#endif

#ifdef EVENT__HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef EVENT__HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef EVENT__HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef EVENT__HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef EVENT__HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef _WIN32
extern const struct eventop win32ops;
#endif

/* Array of backends in order of preference. */
static const struct eventop *eventops[] = {
#ifdef EVENT__HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef EVENT__HAVE_EPOLL
	&epollops,
#endif
#ifdef EVENT__HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef EVENT__HAVE_POLL
	&pollops,
#endif
#ifdef EVENT__HAVE_SELECT
	&selectops,
#endif
#ifdef _WIN32
	&win32ops,
#endif
	NULL
};

/* Global state; deprecated */
struct event_base *event_global_current_base_ = NULL;
#define current_base event_global_current_base_

/* Global state */

static void *event_self_cbarg_ptr_ = NULL;

/* Prototypes */
static void	event_queue_insert_active(struct event_base *, struct event_callback *);
static void	event_queue_insert_active_later(struct event_base *, struct event_callback *);
static void	event_queue_insert_timeout(struct event_base *, struct event *);
static void	event_queue_insert_inserted(struct event_base *, struct event *);
static void	event_queue_remove_active(struct event_base *, struct event_callback *);
static void	event_queue_remove_active_later(struct event_base *, struct event_callback *);
static void	event_queue_remove_timeout(struct event_base *, struct event *);
static void	event_queue_remove_inserted(struct event_base *, struct event *);
static void event_queue_make_later_events_active(struct event_base *base);

static int evthread_make_base_notifiable_nolock_(struct event_base *base);
static int event_del_(struct event *ev, int blocking);

#ifdef USE_REINSERT_TIMEOUT
/* This code seems buggy; only turn it on if we find out what the trouble is. */
static void	event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
#endif

static int	event_haveevents(struct event_base *);

static int	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);

static inline void	event_signal_closure(struct event_base *, struct event *ev);
static inline void	event_persist_closure(struct event_base *, struct event *ev);

static int	evthread_notify_base(struct event_base *base);

static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev);

#ifndef EVENT__DISABLE_DEBUG_MODE
/* These functions implement a hashtable of which 'struct event *' structures
 * have been setup or added.  We don't want to trust the content of the struct
 * event itself, since we're trying to work through cases where an event gets
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
 */

struct event_debug_entry {
	HT_ENTRY(event_debug_entry) node;
	const struct event *ptr;
	unsigned added : 1;
};

static inline unsigned
hash_debug_entry(const struct event_debug_entry *e)
{
	/* We need to do this silliness to convince compilers that we
	 * honestly mean to cast e->ptr to an integer, and discard any
	 * part of it that doesn't fit in an unsigned.
	 */
	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
	/* Our hashtable implementation is pretty sensitive to low bits,
	 * and every struct event is over 64 bytes in size, so we can
	 * just say >>6. */
	return (u >> 6);
}

static inline int
eq_debug_entry(const struct event_debug_entry *a,
    const struct event_debug_entry *b)
{
	return a->ptr == b->ptr;
}

int event_debug_mode_on_ = 0;
/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
static void *event_debug_map_lock_ = NULL;
#endif
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
	HT_INITIALIZER();

HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry)
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)

/* Macro: record that ev is now setup (that is, ready for an add) */
#define event_debug_note_setup_(ev) do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) {						\
			dent->added = 0;				\
		} else {						\
			dent = mm_malloc(sizeof(*dent));		\
			if (!dent)					\
				event_err(1,				\
				    "Out of memory in debugging code");	\
			dent->ptr = (ev);				\
			dent->added = 0;				\
			HT_INSERT(event_debug_map, &global_debug_map, dent); \
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	event_debug_mode_too_late = 1;					\
	} while (0)
/* Macro: record that ev is no longer setup */
#define event_debug_note_teardown_(ev) do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
		if (dent)						\
			mm_free(dent);					\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	event_debug_mode_too_late = 1;					\
	} while (0)
/* Macro: record that ev is now added */
#define event_debug_note_add_(ev)	do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) {						\
			dent->added = 1;				\
		} else {						\
			event_errx(EVENT_ERR_ABORT_,			\
			    "%s: noting an add on a non-setup event %p" \
			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
			    ", flags: 0x%x)",				\
			    __func__, (ev), (ev)->ev_events,		\
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	event_debug_mode_too_late = 1;					\
	} while (0)
/* Macro: record that ev is no longer added */
#define event_debug_note_del_(ev) do {					\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) {						\
			dent->added = 0;				\
		} else {						\
			event_errx(EVENT_ERR_ABORT_,			\
			    "%s: noting a del on a non-setup event %p"	\
			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
			    ", flags: 0x%x)",				\
			    __func__, (ev), (ev)->ev_events,		\
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	event_debug_mode_too_late = 1;					\
	} while (0)
/* Macro: assert that ev is setup (i.e., okay to add or inspect) */
#define event_debug_assert_is_setup_(ev) do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (!dent) {						\
			event_errx(EVENT_ERR_ABORT_,			\
			    "%s called on a non-initialized event %p"	\
			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
			    ", flags: 0x%x)",				\
			    __func__, (ev), (ev)->ev_events,		\
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	} while (0)
/* Macro: assert that ev is not added (i.e., okay to tear down or set
 * up again) */
#define event_debug_assert_not_added_(ev) do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent && dent->added) {				\
			event_errx(EVENT_ERR_ABORT_,			\
			    "%s called on an already added event %p"	\
			    " (events: 0x%x, fd: "EV_SOCK_FMT", "	\
			    "flags: 0x%x)",				\
			    __func__, (ev), (ev)->ev_events,		\
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	} while (0)
#else
#define event_debug_note_setup_(ev) \
	((void)0)
#define event_debug_note_teardown_(ev) \
	((void)0)
#define event_debug_note_add_(ev) \
	((void)0)
#define event_debug_note_del_(ev) \
	((void)0)
#define event_debug_assert_is_setup_(ev) \
	((void)0)
#define event_debug_assert_not_added_(ev) \
	((void)0)
#endif

#define EVENT_BASE_ASSERT_LOCKED(base)		\
	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)

/* How often (in seconds) do we check for changes in wall clock time relative
 * to monotonic time?  Set this to -1 for 'never.' */
#define CLOCK_SYNC_INTERVAL 5

/** Set 'tp' to the current time according to 'base'.  We must hold the lock
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
 * clock_gettime or gettimeofday as appropriate to find out the right time.
 * Return 0 on success, -1 on failure.
 */
static int
gettime(struct event_base *base, struct timeval *tp)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
		return -1;
	}

	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
	    < tp->tv_sec) {
		struct timeval tv;
		evutil_gettimeofday(&tv, NULL);
		evutil_timersub(&tv, tp, &base->tv_clock_diff);
		base->last_updated_clock_diff = tp->tv_sec;
	}

	return 0;
}

int
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
{
	int r;
	if (!base) {
		base = current_base;
		if (!current_base)
			return evutil_gettimeofday(tv, NULL);
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->tv_cache.tv_sec == 0) {
		r = evutil_gettimeofday(tv, NULL);
	} else {
		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
		r = 0;
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}

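/*
 * Illustrative sketch, not part of the build: how a callback might use
 * event_base_gettimeofday_cached() to get the loop's cached timestamp
 * instead of paying for a fresh gettimeofday() on every call.  The
 * callback name and its argument wiring are hypothetical.
 */
#if 0
static void
example_cached_time_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	struct timeval now;

	/* Inside a running loop this normally returns the cached time
	 * (adjusted to the wall clock); outside a loop it falls back to
	 * a real evutil_gettimeofday(). */
	if (event_base_gettimeofday_cached(base, &now) == 0)
		printf("callback ran at %ld.%06ld\n",
		    (long)now.tv_sec, (long)now.tv_usec);
}
#endif
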
/** Make 'base' have no current cached time. */
static inline void
clear_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
}

/** Replace the cached time in 'base' with the current time. */
static inline void
update_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
	    gettime(base, &base->tv_cache);
}

int
event_base_update_cache_time(struct event_base *base)
{

	if (!base) {
		base = current_base;
		if (!current_base)
			return -1;
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->running_loop)
		update_time_cache(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}

static inline struct event *
event_callback_to_event(struct event_callback *evcb)
{
	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
}

static inline struct event_callback *
event_to_event_callback(struct event *ev)
{
	return &ev->ev_evcallback;
}

struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new_with_config(NULL);

	if (base == NULL) {
		event_errx(1, "%s: Unable to construct event_base", __func__);
		return NULL;
	}

	current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();
	if (cfg) {
		base = event_base_new_with_config(cfg);
		event_config_free(cfg);
	}
	return base;
}

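/*
 * Illustrative sketch, not part of the build: the minimal lifecycle of a
 * base created with the default configuration.
 */
#if 0
static int
example_base_lifecycle(void)
{
	struct event_base *base = event_base_new();
	if (!base)
		return -1;	/* no usable backend, or out of memory */
	/* ... assign events to the base, run event_base_dispatch(base) ... */
	event_base_free(base);
	return 0;
}
#endif
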
/** Return true iff 'method' is the name of a method that 'cfg' tells us to
 * avoid. */
static int
event_config_is_avoided_method(const struct event_config *cfg,
    const char *method)
{
	struct event_config_entry *entry;

	TAILQ_FOREACH(entry, &cfg->entries, next) {
		if (entry->avoid_method != NULL &&
		    strcmp(entry->avoid_method, method) == 0)
			return (1);
	}

	return (0);
}

/** Return true iff 'method' is disabled according to the environment. */
static int
event_is_method_disabled(const char *name)
{
	char environment[64];
	int i;

	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
	for (i = 8; environment[i] != '\0'; ++i)
		environment[i] = EVUTIL_TOUPPER_(environment[i]);
	/* Note that evutil_getenv_() ignores the environment entirely if
	 * we're setuid */
	return (evutil_getenv_(environment) != NULL);
}

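/*
 * Illustrative sketch, not part of the build: disabling a backend from the
 * environment.  The loop above starts at i = 8 because "EVENT_NO" is eight
 * characters, so only the method name itself gets upcased; "epoll" is thus
 * checked as EVENT_NOEPOLL.  The variable is ignored for setuid programs,
 * and EVENT_BASE_FLAG_IGNORE_ENV suppresses the check entirely.
 */
#if 0
	setenv("EVENT_NOEPOLL", "1", 1);	/* fall through to the next backend */
	struct event_base *base = event_base_new();
#endif
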
int
event_base_get_features(const struct event_base *base)
{
	return base->evsel->features;
}

void
event_enable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_)
		event_errx(1, "%s was called twice!", __func__);
	if (event_debug_mode_too_late)
		event_errx(1, "%s must be called *before* creating any events "
		    "or event_bases", __func__);

	event_debug_mode_on_ = 1;

	HT_INIT(event_debug_map, &global_debug_map);
#endif
}

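/*
 * Illustrative sketch, not part of the build: debug mode must be enabled
 * before any event or event_base exists, so it is typically the first
 * libevent call in main().
 */
#if 0
int
main(int argc, char **argv)
{
	event_enable_debug_mode();	/* must precede all other event_* calls */
	struct event_base *base = event_base_new();
	/* ... */
	return 0;
}
#endif
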
void
event_disable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	struct event_debug_entry **ent, *victim;

	EVLOCK_LOCK(event_debug_map_lock_, 0);
	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
		victim = *ent;
		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
		mm_free(victim);
	}
	HT_CLEAR(event_debug_map, &global_debug_map);
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

	event_debug_mode_on_ = 0;
#endif
}

struct event_base *
event_base_new_with_config(const struct event_config *cfg)
{
	int i;
	struct event_base *base;
	int should_check_environment;

#ifndef EVENT__DISABLE_DEBUG_MODE
	event_debug_mode_too_late = 1;
#endif

	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
		event_warn("%s: calloc", __func__);
		return NULL;
	}

	if (cfg)
		base->flags = cfg->flags;

	should_check_environment =
	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));

	{
		struct timeval tmp;
		int precise_time =
		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
		int flags;
		if (should_check_environment && !precise_time) {
			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
			if (precise_time)
				base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
		}
		flags = precise_time ? EV_MONOT_PRECISE : 0;
		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);

		gettime(base, &tmp);
	}

	min_heap_ctor_(&base->timeheap);

	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;
	base->th_notify_fd[0] = -1;
	base->th_notify_fd[1] = -1;

	TAILQ_INIT(&base->active_later_queue);

	evmap_io_initmap_(&base->io);
	evmap_signal_initmap_(&base->sigmap);
	event_changelist_init_(&base->changelist);

	base->evbase = NULL;

	if (cfg) {
		memcpy(&base->max_dispatch_time,
		    &cfg->max_dispatch_interval, sizeof(struct timeval));
		base->limit_callbacks_after_prio =
		    cfg->limit_callbacks_after_prio;
	} else {
		base->max_dispatch_time.tv_sec = -1;
		base->limit_callbacks_after_prio = 1;
	}
	if (cfg && cfg->max_dispatch_callbacks >= 0) {
		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
	} else {
		base->max_dispatch_callbacks = INT_MAX;
	}
	if (base->max_dispatch_callbacks == INT_MAX &&
	    base->max_dispatch_time.tv_sec == -1)
		base->limit_callbacks_after_prio = INT_MAX;

	for (i = 0; eventops[i] && !base->evbase; i++) {
		if (cfg != NULL) {
			/* determine if this backend should be avoided */
			if (event_config_is_avoided_method(cfg,
				eventops[i]->name))
				continue;
			if ((eventops[i]->features & cfg->require_features)
			    != cfg->require_features)
				continue;
		}

		/* also obey the environment variables */
		if (should_check_environment &&
		    event_is_method_disabled(eventops[i]->name))
			continue;

		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL) {
		event_warnx("%s: no event mechanism available",
		    __func__);
		base->evsel = NULL;
		event_base_free(base);
		return NULL;
	}

	if (evutil_getenv_("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s", base->evsel->name);

	/* allocate a single active event queue */
	if (event_base_priority_init(base, 1) < 0) {
		event_base_free(base);
		return NULL;
	}

	/* prepare for threading */

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (EVTHREAD_LOCKING_ENABLED() &&
	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
		int r;
		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
		EVTHREAD_ALLOC_COND(base->current_event_cond);
		r = evthread_make_base_notifiable(base);
		if (r < 0) {
			event_warnx("%s: Unable to make base notifiable.", __func__);
			event_base_free(base);
			return NULL;
		}
	}
#endif

#ifdef _WIN32
	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
		event_base_start_iocp_(base, cfg->n_cpus_hint);
#endif

	return (base);
}

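/*
 * Illustrative sketch, not part of the build: constructing a base from an
 * explicit configuration.  Requiring EV_FEATURE_O1 rules out select/poll;
 * if no compiled-in backend satisfies the requested features, the
 * constructor returns NULL instead of silently degrading.
 */
#if 0
static struct event_base *
example_make_configured_base(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();

	if (cfg) {
		event_config_require_features(cfg, EV_FEATURE_O1);
		event_config_set_flag(cfg, EVENT_BASE_FLAG_PRECISE_TIMER);
		base = event_base_new_with_config(cfg);
		event_config_free(cfg);	/* the config can go once the base exists */
	}
	return base;	/* NULL if no backend matched */
}
#endif
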
int
event_base_start_iocp_(struct event_base *base, int n_cpus)
{
#ifdef _WIN32
	if (base->iocp)
		return 0;
	base->iocp = event_iocp_port_launch_(n_cpus);
	if (!base->iocp) {
		event_warnx("%s: Couldn't launch IOCP", __func__);
		return -1;
	}
	return 0;
#else
	return -1;
#endif
}

void
event_base_stop_iocp_(struct event_base *base)
{
#ifdef _WIN32
	int rv;

	if (!base->iocp)
		return;
	rv = event_iocp_shutdown_(base->iocp, -1);
	EVUTIL_ASSERT(rv >= 0);
	base->iocp = NULL;
#endif
}

static int
event_base_cancel_single_callback_(struct event_base *base,
    struct event_callback *evcb,
    int run_finalizers)
{
	int result = 0;

	if (evcb->evcb_flags & EVLIST_INIT) {
		struct event *ev = event_callback_to_event(evcb);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
			result = 1;
		}
	} else {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		event_callback_cancel_nolock_(base, evcb, 1);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		result = 1;
	}

	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			struct event *ev = event_callback_to_event(evcb);
			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
			break;
		}
		case EV_CLOSURE_CB_FINALIZE:
			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
			break;
		default:
			break;
		}
	}
	return result;
}

static void
event_base_free_(struct event_base *base, int run_finalizers)
{
	int i, n_deleted=0;
	struct event *ev;
	/* XXXX grab the lock? If there is contention when one thread frees
	 * the base, then the contending thread will be very sad soon. */

	/* event_base_free(NULL) is how to free the current_base if we
	 * made it with event_init and forgot to hold a reference to it. */
	if (base == NULL && current_base)
		base = current_base;
	/* Don't actually free NULL. */
	if (base == NULL) {
		event_warnx("%s: no base to free", __func__);
		return;
	}
	/* XXX(niels) - check for internal events first */

#ifdef _WIN32
	event_base_stop_iocp_(base);
#endif

	/* threading fds if we have them */
	if (base->th_notify_fd[0] != -1) {
		event_del(&base->th_notify);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Delete all non-internal events. */
	evmap_delete_all_(base);

	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		event_del(&ctl->timeout_event); /* Internal; doesn't count */
		event_debug_unassign(&ctl->timeout_event);
		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
			struct event *next = TAILQ_NEXT(ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
		mm_free(ctl);
	}
	if (base->common_timeout_queues)
		mm_free(base->common_timeout_queues);

	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb, *next;
		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
			next = TAILQ_NEXT(evcb, evcb_active_next);
			n_deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
			evcb = next;
		}
	}
	{
		struct event_callback *evcb;
		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
			n_deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
		}
	}


	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
			__func__, n_deleted));

	while (LIST_FIRST(&base->once_events)) {
		struct event_once *eonce = LIST_FIRST(&base->once_events);
		LIST_REMOVE(eonce, next_once);
		mm_free(eonce);
	}

	if (base->evsel != NULL && base->evsel->dealloc != NULL)
		base->evsel->dealloc(base);

	for (i = 0; i < base->nactivequeues; ++i)
		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));

	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
	min_heap_dtor_(&base->timeheap);

	mm_free(base->activequeues);

	evmap_io_clear_(&base->io);
	evmap_signal_clear_(&base->sigmap);
	event_changelist_freemem_(&base->changelist);

	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
	EVTHREAD_FREE_COND(base->current_event_cond);

	/* If we're freeing current_base, there won't be a current_base. */
	if (base == current_base)
		current_base = NULL;
	mm_free(base);
}

void
event_base_free_nofinalize(struct event_base *base)
{
	event_base_free_(base, 0);
}

void
event_base_free(struct event_base *base)
{
	event_base_free_(base, 1);
}

/* Fake eventop; used to disable the backend temporarily inside event_reinit
 * so that we can call event_del() on an event without telling the backend.
 */
static int
nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
    short events, void *fdinfo)
{
	return 0;
}
const struct eventop nil_eventop = {
	"nil",
	NULL, /* init: unused. */
	NULL, /* add: unused. */
	nil_backend_del, /* del: used, so needs to be killed. */
	NULL, /* dispatch: unused. */
	NULL, /* dealloc: unused. */
	0, 0, 0
};

/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel;
	int res = 0;
	int was_notifiable = 0;
	int had_signal_added = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	evsel = base->evsel;

	/* check if this event mechanism requires reinit on the backend */
	if (evsel->need_reinit) {
		/* We're going to call event_del() on our notify events (the
		 * ones that tell about signals and wakeup events).  But we
		 * don't actually want to tell the backend to change its
		 * state, since it might still share some resource (a kqueue,
		 * an epoll fd) with the parent process, and we don't want to
		 * delete the fds from _that_ backend, so we temporarily stub out
		 * the evsel with a replacement.
		 */
		base->evsel = &nil_eventop;
	}

	/* We need to re-create a new signal-notification fd and a new
	 * thread-notification fd.  Otherwise, we'll still share those with
	 * the parent process, which would make any notification sent to them
	 * get received by one or both of the event loops, more or less at
	 * random.
	 */
	if (base->sig.ev_signal_added) {
		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
		event_debug_unassign(&base->sig.ev_signal);
		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
		if (base->sig.ev_signal_pair[0] != -1)
			EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
		if (base->sig.ev_signal_pair[1] != -1)
			EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
		had_signal_added = 1;
		base->sig.ev_signal_added = 0;
	}
	if (base->th_notify_fn != NULL) {
		was_notifiable = 1;
		base->th_notify_fn = NULL;
	}
	if (base->th_notify_fd[0] != -1) {
		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Replace the original evsel. */
	base->evsel = evsel;

	if (evsel->need_reinit) {
		/* Reconstruct the backend through brute-force, so that we do
		 * not share any structures with the parent process. For some
		 * backends, this is necessary: epoll and kqueue, for
		 * instance, have events associated with a kernel
		 * structure. If we didn't reinitialize, we'd share that
		 * structure with the parent process, and any changes made by
		 * the parent would affect our backend's behavior (and vice
		 * versa).
		 */
		if (base->evsel->dealloc != NULL)
			base->evsel->dealloc(base);
		base->evbase = evsel->init(base);
		if (base->evbase == NULL) {
			event_errx(1,
			   "%s: could not reinitialize event mechanism",
			   __func__);
			res = -1;
			goto done;
		}

		/* Empty out the changelist (if any): we are starting from a
		 * blank slate. */
		event_changelist_freemem_(&base->changelist);

		/* Tell the event maps to re-inform the backend about all
		 * pending events. This will make the signal notification
		 * event get re-created if necessary. */
		if (evmap_reinit_(base) < 0)
			res = -1;
	} else {
		if (had_signal_added)
			res = evsig_init_(base);
	}

	/* If we were notifiable before, and nothing just exploded, become
	 * notifiable again. */
	if (was_notifiable && res == 0)
		res = evthread_make_base_notifiable_nolock_(base);

done:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (res);
}

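/*
 * Illustrative sketch, not part of the build: the intended use of
 * event_reinit() is in the child right after fork(), so the child stops
 * sharing kqueue/epoll state and notification fds with its parent.  The
 * 'base' variable is assumed to be a base created before the fork.
 */
#if 0
	pid_t pid = fork();
	if (pid == 0) {
		/* child: rebuild backend state before touching the base */
		if (event_reinit(base) == -1)
			event_warnx("event_reinit failed");
		event_base_dispatch(base);
	}
#endif
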
/* Get the monotonic time for this event_base's timer */
int
event_gettime_monotonic(struct event_base *base, struct timeval *tv)
{
	int rv = -1;

	if (base && tv) {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
	}

	return rv;
}

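/*
 * Illustrative sketch, not part of the build: timing an interval with the
 * base's own monotonic clock, which is immune to wall-clock steps.  The
 * 'base' variable is assumed to exist.
 */
#if 0
	struct timeval start, end, elapsed;

	event_gettime_monotonic(base, &start);
	/* ... work ... */
	event_gettime_monotonic(base, &end);
	evutil_timersub(&end, &start, &elapsed);
#endif
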
const char **
event_get_supported_methods(void)
{
	static const char **methods = NULL;
	const struct eventop **method;
	const char **tmp;
	int i = 0, k;

	/* count all methods */
	for (method = &eventops[0]; *method != NULL; ++method) {
		++i;
	}

	/* allocate one more than we need for the NULL pointer */
	tmp = mm_calloc((i + 1), sizeof(char *));
	if (tmp == NULL)
		return (NULL);

	/* populate the array with the supported methods */
	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
		tmp[i++] = eventops[k]->name;
	}
	tmp[i] = NULL;

	if (methods != NULL)
		mm_free((char**)methods);

	methods = tmp;

	return (methods);
}

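/*
 * Illustrative sketch, not part of the build: listing the compiled-in
 * backends.  The returned array is NULL-terminated and owned by libevent.
 */
#if 0
	const char **methods = event_get_supported_methods();
	int i;

	for (i = 0; methods && methods[i] != NULL; ++i)
		printf("supported method: %s\n", methods[i]);
#endif
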
struct event_config *
event_config_new(void)
{
	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));

	if (cfg == NULL)
		return (NULL);

	TAILQ_INIT(&cfg->entries);
	cfg->max_dispatch_interval.tv_sec = -1;
	cfg->max_dispatch_callbacks = INT_MAX;
	cfg->limit_callbacks_after_prio = 1;

	return (cfg);
}

static void
event_config_entry_free(struct event_config_entry *entry)
{
	if (entry->avoid_method != NULL)
		mm_free((char *)entry->avoid_method);
	mm_free(entry);
}

void
event_config_free(struct event_config *cfg)
{
	struct event_config_entry *entry;

	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
		TAILQ_REMOVE(&cfg->entries, entry, next);
		event_config_entry_free(entry);
	}
	mm_free(cfg);
}

int
event_config_set_flag(struct event_config *cfg, int flag)
{
	if (!cfg)
		return -1;
	cfg->flags |= flag;
	return 0;
}

int
event_config_avoid_method(struct event_config *cfg, const char *method)
{
	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
	if (entry == NULL)
		return (-1);

	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
		mm_free(entry);
		return (-1);
	}

	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);

	return (0);
}

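/*
 * Illustrative sketch, not part of the build: steering backend selection
 * by name.  Avoided methods are simply skipped in the eventops[]
 * preference order above, so avoiding "epoll" on Linux would typically
 * select poll next.
 */
#if 0
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();

	if (cfg && event_config_avoid_method(cfg, "epoll") == 0)
		base = event_base_new_with_config(cfg);
	if (cfg)
		event_config_free(cfg);
#endif
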
11092b3787f6Schristos int
event_config_require_features(struct event_config * cfg,int features)11102b3787f6Schristos event_config_require_features(struct event_config *cfg,
11112b3787f6Schristos     int features)
11122b3787f6Schristos {
11132b3787f6Schristos 	if (!cfg)
11142b3787f6Schristos 		return (-1);
11152b3787f6Schristos 	cfg->require_features = features;
11162b3787f6Schristos 	return (0);
11172b3787f6Schristos }
11182b3787f6Schristos 
11192b3787f6Schristos int
event_config_set_num_cpus_hint(struct event_config * cfg,int cpus)11202b3787f6Schristos event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
11212b3787f6Schristos {
11222b3787f6Schristos 	if (!cfg)
11232b3787f6Schristos 		return (-1);
11242b3787f6Schristos 	cfg->n_cpus_hint = cpus;
11252b3787f6Schristos 	return (0);
11262b3787f6Schristos }
11272b3787f6Schristos 
11282b3787f6Schristos int
event_config_set_max_dispatch_interval(struct event_config * cfg,const struct timeval * max_interval,int max_callbacks,int min_priority)11292b3787f6Schristos event_config_set_max_dispatch_interval(struct event_config *cfg,
11302b3787f6Schristos     const struct timeval *max_interval, int max_callbacks, int min_priority)
11312b3787f6Schristos {
11322b3787f6Schristos 	if (max_interval)
11332b3787f6Schristos 		memcpy(&cfg->max_dispatch_interval, max_interval,
11342b3787f6Schristos 		    sizeof(struct timeval));
11352b3787f6Schristos 	else
11362b3787f6Schristos 		cfg->max_dispatch_interval.tv_sec = -1;
11372b3787f6Schristos 	cfg->max_dispatch_callbacks =
11382b3787f6Schristos 	    max_callbacks >= 0 ? max_callbacks : INT_MAX;
11392b3787f6Schristos 	if (min_priority < 0)
11402b3787f6Schristos 		min_priority = 0;
11412b3787f6Schristos 	cfg->limit_callbacks_after_prio = min_priority;
11422b3787f6Schristos 	return (0);
11432b3787f6Schristos }
11442b3787f6Schristos 
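/*
 * Sketch (hypothetical caller; cfg is assumed): cap how long one pass of
 * the event loop may spend running callbacks.  Here, after at most 16
 * callbacks or 10 msec the loop re-checks for more urgent work;
 * priorities below 1 are exempt from the limit.
 */
#if 0
struct timeval msec10 = { 0, 10 * 1000 };
event_config_set_max_dispatch_interval(cfg, &msec10, 16, 1);
#endif
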
11452b3787f6Schristos int
11462b3787f6Schristos event_priority_init(int npriorities)
11472b3787f6Schristos {
11482b3787f6Schristos 	return event_base_priority_init(current_base, npriorities);
11492b3787f6Schristos }
11502b3787f6Schristos 
11512b3787f6Schristos int
11522b3787f6Schristos event_base_priority_init(struct event_base *base, int npriorities)
11532b3787f6Schristos {
11542b3787f6Schristos 	int i, r;
11552b3787f6Schristos 	r = -1;
11562b3787f6Schristos 
11572b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
11582b3787f6Schristos 
11592b3787f6Schristos 	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
11602b3787f6Schristos 	    || npriorities >= EVENT_MAX_PRIORITIES)
11612b3787f6Schristos 		goto err;
11622b3787f6Schristos 
11632b3787f6Schristos 	if (npriorities == base->nactivequeues)
11642b3787f6Schristos 		goto ok;
11652b3787f6Schristos 
11662b3787f6Schristos 	if (base->nactivequeues) {
11672b3787f6Schristos 		mm_free(base->activequeues);
11682b3787f6Schristos 		base->nactivequeues = 0;
11692b3787f6Schristos 	}
11702b3787f6Schristos 
11712b3787f6Schristos 	/* Allocate our priority queues */
11722b3787f6Schristos 	base->activequeues = (struct evcallback_list *)
11732b3787f6Schristos 	  mm_calloc(npriorities, sizeof(struct evcallback_list));
11742b3787f6Schristos 	if (base->activequeues == NULL) {
11752b3787f6Schristos 		event_warn("%s: calloc", __func__);
11762b3787f6Schristos 		goto err;
11772b3787f6Schristos 	}
11782b3787f6Schristos 	base->nactivequeues = npriorities;
11792b3787f6Schristos 
11802b3787f6Schristos 	for (i = 0; i < base->nactivequeues; ++i) {
11812b3787f6Schristos 		TAILQ_INIT(&base->activequeues[i]);
11822b3787f6Schristos 	}
11832b3787f6Schristos 
11842b3787f6Schristos ok:
11852b3787f6Schristos 	r = 0;
11862b3787f6Schristos err:
11872b3787f6Schristos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
11882b3787f6Schristos 	return (r);
11892b3787f6Schristos }
11902b3787f6Schristos 
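/*
 * Sketch (hypothetical caller; base and the two events are assumed):
 * priorities must be configured before any event becomes active, and each
 * event then picks its queue with event_priority_set().  Lower numbers
 * run first.
 */
#if 0
event_base_priority_init(base, 2);
event_priority_set(urgent_ev, 0);	/* dispatched before... */
event_priority_set(bulk_ev, 1);		/* ...this one */
#endif
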
11912b3787f6Schristos int
11922b3787f6Schristos event_base_get_npriorities(struct event_base *base)
11932b3787f6Schristos {
11952b3787f6Schristos 	int n;
11962b3787f6Schristos 	if (base == NULL)
11972b3787f6Schristos 		base = current_base;
11982b3787f6Schristos 
11992b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
12002b3787f6Schristos 	n = base->nactivequeues;
12012b3787f6Schristos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
12022b3787f6Schristos 	return (n);
12032b3787f6Schristos }
12042b3787f6Schristos 
12051b6f2cd4Schristos int
12061b6f2cd4Schristos event_base_get_num_events(struct event_base *base, unsigned int type)
12071b6f2cd4Schristos {
12081b6f2cd4Schristos 	int r = 0;
12091b6f2cd4Schristos 
12101b6f2cd4Schristos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
12111b6f2cd4Schristos 
12121b6f2cd4Schristos 	if (type & EVENT_BASE_COUNT_ACTIVE)
12131b6f2cd4Schristos 		r += base->event_count_active;
12141b6f2cd4Schristos 
12151b6f2cd4Schristos 	if (type & EVENT_BASE_COUNT_VIRTUAL)
12161b6f2cd4Schristos 		r += base->virtual_event_count;
12171b6f2cd4Schristos 
12181b6f2cd4Schristos 	if (type & EVENT_BASE_COUNT_ADDED)
12191b6f2cd4Schristos 		r += base->event_count;
12201b6f2cd4Schristos 
12211b6f2cd4Schristos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
12221b6f2cd4Schristos 
12231b6f2cd4Schristos 	return r;
12241b6f2cd4Schristos }
12251b6f2cd4Schristos 
12261b6f2cd4Schristos int
12271b6f2cd4Schristos event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
12281b6f2cd4Schristos {
12291b6f2cd4Schristos 	int r = 0;
12301b6f2cd4Schristos 
12311b6f2cd4Schristos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
12321b6f2cd4Schristos 
12331b6f2cd4Schristos 	if (type & EVENT_BASE_COUNT_ACTIVE) {
12341b6f2cd4Schristos 		r += base->event_count_active_max;
12351b6f2cd4Schristos 		if (clear)
12361b6f2cd4Schristos 			base->event_count_active_max = 0;
12371b6f2cd4Schristos 	}
12381b6f2cd4Schristos 
12391b6f2cd4Schristos 	if (type & EVENT_BASE_COUNT_VIRTUAL) {
12401b6f2cd4Schristos 		r += base->virtual_event_count_max;
12411b6f2cd4Schristos 		if (clear)
12421b6f2cd4Schristos 			base->virtual_event_count_max = 0;
12431b6f2cd4Schristos 	}
12441b6f2cd4Schristos 
12451b6f2cd4Schristos 	if (type & EVENT_BASE_COUNT_ADDED) {
12461b6f2cd4Schristos 		r += base->event_count_max;
12471b6f2cd4Schristos 		if (clear)
12481b6f2cd4Schristos 			base->event_count_max = 0;
12491b6f2cd4Schristos 	}
12501b6f2cd4Schristos 
12511b6f2cd4Schristos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
12521b6f2cd4Schristos 
12531b6f2cd4Schristos 	return r;
12541b6f2cd4Schristos }
12551b6f2cd4Schristos 
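/*
 * Sketch (hypothetical caller; base is assumed): reading the counters
 * maintained above.  Passing clear=1 resets the high-water marks after
 * they are reported.
 */
#if 0
int added = event_base_get_num_events(base, EVENT_BASE_COUNT_ADDED);
int active = event_base_get_num_events(base, EVENT_BASE_COUNT_ACTIVE);
int peak = event_base_get_max_events(base, EVENT_BASE_COUNT_ACTIVE, 1);
#endif
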
12562b3787f6Schristos /* Returns true iff we're currently watching any events. */
12572b3787f6Schristos static int
12582b3787f6Schristos event_haveevents(struct event_base *base)
12592b3787f6Schristos {
12602b3787f6Schristos 	/* Caller must hold th_base_lock */
12612b3787f6Schristos 	return (base->virtual_event_count > 0 || base->event_count > 0);
12622b3787f6Schristos }
12632b3787f6Schristos 
12642b3787f6Schristos /* "closure" function called when processing active signal events */
12652b3787f6Schristos static inline void
12662b3787f6Schristos event_signal_closure(struct event_base *base, struct event *ev)
12672b3787f6Schristos {
12682b3787f6Schristos 	short ncalls;
12692b3787f6Schristos 	int should_break;
12702b3787f6Schristos 
12712b3787f6Schristos 	/* Allows deletes to work */
12722b3787f6Schristos 	ncalls = ev->ev_ncalls;
12732b3787f6Schristos 	if (ncalls != 0)
12742b3787f6Schristos 		ev->ev_pncalls = &ncalls;
12752b3787f6Schristos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
12762b3787f6Schristos 	while (ncalls) {
12772b3787f6Schristos 		ncalls--;
12782b3787f6Schristos 		ev->ev_ncalls = ncalls;
12792b3787f6Schristos 		if (ncalls == 0)
12802b3787f6Schristos 			ev->ev_pncalls = NULL;
12812b3787f6Schristos 		(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
12822b3787f6Schristos 
12832b3787f6Schristos 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
12842b3787f6Schristos 		should_break = base->event_break;
12852b3787f6Schristos 		EVBASE_RELEASE_LOCK(base, th_base_lock);
12862b3787f6Schristos 
12872b3787f6Schristos 		if (should_break) {
12882b3787f6Schristos 			if (ncalls != 0)
12892b3787f6Schristos 				ev->ev_pncalls = NULL;
12902b3787f6Schristos 			return;
12912b3787f6Schristos 		}
12922b3787f6Schristos 	}
12932b3787f6Schristos }
12942b3787f6Schristos 
12952b3787f6Schristos /* Common timeouts are special timeouts that are handled as queues rather than
12962b3787f6Schristos  * in the minheap.  This is more efficient than the minheap if we happen to
12972b3787f6Schristos  * know that we're going to get several thousands of timeout events all with
12982b3787f6Schristos  * the same timeout value.
12992b3787f6Schristos  *
13002b3787f6Schristos  * Since all our timeout handling code assumes timevals can be copied,
13012b3787f6Schristos  * assigned, etc, we can't use a "magic pointer" to encode these common
13022b3787f6Schristos  * timeouts.  Searching through a list to see if every timeout is common could
13032b3787f6Schristos  * also get inefficient.  Instead, we take advantage of the fact that tv_usec
13042b3787f6Schristos  * is 32 bits long, but only uses 20 of those bits (since it can never be over
13052b3787f6Schristos  * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
13062b3787f6Schristos  * of index into the event_base's array of common timeouts.
13072b3787f6Schristos  */
13082b3787f6Schristos 
13092b3787f6Schristos #define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
13102b3787f6Schristos #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
13112b3787f6Schristos #define COMMON_TIMEOUT_IDX_SHIFT 20
13122b3787f6Schristos #define COMMON_TIMEOUT_MASK     0xf0000000
13132b3787f6Schristos #define COMMON_TIMEOUT_MAGIC    0x50000000
13142b3787f6Schristos 
13152b3787f6Schristos #define COMMON_TIMEOUT_IDX(tv) \
13162b3787f6Schristos 	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
13172b3787f6Schristos 
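/*
 * Worked example of the encoding (illustrative values): a common timeout
 * on queue index 3 with a duration of 250000 usec stores
 *
 *	tv_usec = 0x50000000			(COMMON_TIMEOUT_MAGIC, top 4 bits)
 *	        | (3 << COMMON_TIMEOUT_IDX_SHIFT)	(= 0x00300000)
 *	        | 250000			(= 0x0003d090, low 20 bits)
 *
 * Masking with MICROSECONDS_MASK recovers the real microseconds;
 * COMMON_TIMEOUT_IDX() recovers the queue index.
 */
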
13182b3787f6Schristos /** Return true iff 'tv' is a common timeout in 'base' */
13192b3787f6Schristos static inline int
13202b3787f6Schristos is_common_timeout(const struct timeval *tv,
13212b3787f6Schristos     const struct event_base *base)
13222b3787f6Schristos {
13232b3787f6Schristos 	int idx;
13242b3787f6Schristos 	if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
13252b3787f6Schristos 		return 0;
13262b3787f6Schristos 	idx = COMMON_TIMEOUT_IDX(tv);
13272b3787f6Schristos 	return idx < base->n_common_timeouts;
13282b3787f6Schristos }
13292b3787f6Schristos 
13302b3787f6Schristos /* True iff tv1 and tv2 have the same common-timeout index, or if neither
13312b3787f6Schristos  * one is a common timeout. */
13322b3787f6Schristos static inline int
13332b3787f6Schristos is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
13342b3787f6Schristos {
13352b3787f6Schristos 	return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
13362b3787f6Schristos 	    (tv2->tv_usec & ~MICROSECONDS_MASK);
13372b3787f6Schristos }
13382b3787f6Schristos 
13392b3787f6Schristos /** Requires that 'tv' is a common timeout.  Return the corresponding
13402b3787f6Schristos  * common_timeout_list. */
13412b3787f6Schristos static inline struct common_timeout_list *
13422b3787f6Schristos get_common_timeout_list(struct event_base *base, const struct timeval *tv)
13432b3787f6Schristos {
13442b3787f6Schristos 	return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
13452b3787f6Schristos }
13462b3787f6Schristos 
13472b3787f6Schristos #if 0
13482b3787f6Schristos static inline int
13492b3787f6Schristos common_timeout_ok(const struct timeval *tv,
13502b3787f6Schristos     struct event_base *base)
13512b3787f6Schristos {
13522b3787f6Schristos 	const struct timeval *expect =
13532b3787f6Schristos 	    &get_common_timeout_list(base, tv)->duration;
13542b3787f6Schristos 	return tv->tv_sec == expect->tv_sec &&
13552b3787f6Schristos 	    tv->tv_usec == expect->tv_usec;
13562b3787f6Schristos }
13572b3787f6Schristos #endif
13582b3787f6Schristos 
13592b3787f6Schristos /* Add the timeout for the first event in given common timeout list to the
13602b3787f6Schristos  * event_base's minheap. */
13612b3787f6Schristos static void
13622b3787f6Schristos common_timeout_schedule(struct common_timeout_list *ctl,
13632b3787f6Schristos     const struct timeval *now, struct event *head)
13642b3787f6Schristos {
13652b3787f6Schristos 	struct timeval timeout = head->ev_timeout;
13662b3787f6Schristos 	timeout.tv_usec &= MICROSECONDS_MASK;
13672b3787f6Schristos 	event_add_nolock_(&ctl->timeout_event, &timeout, 1);
13682b3787f6Schristos }
13692b3787f6Schristos 
13702b3787f6Schristos /* Callback: invoked when the timeout for a common timeout queue triggers.
13712b3787f6Schristos  * This means that (at least) the first event in that queue should be run,
13722b3787f6Schristos  * and the timeout should be rescheduled if there are more events. */
13732b3787f6Schristos static void
13742b3787f6Schristos common_timeout_callback(evutil_socket_t fd, short what, void *arg)
13752b3787f6Schristos {
13762b3787f6Schristos 	struct timeval now;
13772b3787f6Schristos 	struct common_timeout_list *ctl = arg;
13782b3787f6Schristos 	struct event_base *base = ctl->base;
13792b3787f6Schristos 	struct event *ev = NULL;
13802b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
13812b3787f6Schristos 	gettime(base, &now);
13822b3787f6Schristos 	while (1) {
13832b3787f6Schristos 		ev = TAILQ_FIRST(&ctl->events);
13842b3787f6Schristos 		if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
13852b3787f6Schristos 		    (ev->ev_timeout.tv_sec == now.tv_sec &&
13862b3787f6Schristos 			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
13872b3787f6Schristos 			break;
13881b6f2cd4Schristos 		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
13892b3787f6Schristos 		event_active_nolock_(ev, EV_TIMEOUT, 1);
13902b3787f6Schristos 	}
13912b3787f6Schristos 	if (ev)
13922b3787f6Schristos 		common_timeout_schedule(ctl, &now, ev);
13932b3787f6Schristos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
13942b3787f6Schristos }
13952b3787f6Schristos 
13962b3787f6Schristos #define MAX_COMMON_TIMEOUTS 256
13972b3787f6Schristos 
13982b3787f6Schristos const struct timeval *
13992b3787f6Schristos event_base_init_common_timeout(struct event_base *base,
14002b3787f6Schristos     const struct timeval *duration)
14012b3787f6Schristos {
14022b3787f6Schristos 	int i;
14032b3787f6Schristos 	struct timeval tv;
14042b3787f6Schristos 	const struct timeval *result=NULL;
14052b3787f6Schristos 	struct common_timeout_list *new_ctl;
14062b3787f6Schristos 
14072b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
14082b3787f6Schristos 	if (duration->tv_usec > 1000000) {
14092b3787f6Schristos 		memcpy(&tv, duration, sizeof(struct timeval));
14102b3787f6Schristos 		if (is_common_timeout(duration, base))
14112b3787f6Schristos 			tv.tv_usec &= MICROSECONDS_MASK;
14122b3787f6Schristos 		tv.tv_sec += tv.tv_usec / 1000000;
14132b3787f6Schristos 		tv.tv_usec %= 1000000;
14142b3787f6Schristos 		duration = &tv;
14152b3787f6Schristos 	}
14162b3787f6Schristos 	for (i = 0; i < base->n_common_timeouts; ++i) {
14172b3787f6Schristos 		const struct common_timeout_list *ctl =
14182b3787f6Schristos 		    base->common_timeout_queues[i];
14192b3787f6Schristos 		if (duration->tv_sec == ctl->duration.tv_sec &&
14202b3787f6Schristos 		    duration->tv_usec ==
14212b3787f6Schristos 		    (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
14222b3787f6Schristos 			EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
14232b3787f6Schristos 			result = &ctl->duration;
14242b3787f6Schristos 			goto done;
14252b3787f6Schristos 		}
14262b3787f6Schristos 	}
14272b3787f6Schristos 	if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
14282b3787f6Schristos 		event_warnx("%s: Too many common timeouts already in use; "
14292b3787f6Schristos 		    "we only support %d per event_base", __func__,
14302b3787f6Schristos 		    MAX_COMMON_TIMEOUTS);
14312b3787f6Schristos 		goto done;
14322b3787f6Schristos 	}
14332b3787f6Schristos 	if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
14342b3787f6Schristos 		int n = base->n_common_timeouts < 16 ? 16 :
14352b3787f6Schristos 		    base->n_common_timeouts*2;
14362b3787f6Schristos 		struct common_timeout_list **newqueues =
14372b3787f6Schristos 		    mm_realloc(base->common_timeout_queues,
14382b3787f6Schristos 			n*sizeof(struct common_timeout_queue *));
14392b3787f6Schristos 		if (!newqueues) {
14402b3787f6Schristos 			event_warn("%s: realloc",__func__);
14412b3787f6Schristos 			goto done;
14422b3787f6Schristos 		}
14432b3787f6Schristos 		base->n_common_timeouts_allocated = n;
14442b3787f6Schristos 		base->common_timeout_queues = newqueues;
14452b3787f6Schristos 	}
14462b3787f6Schristos 	new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
14472b3787f6Schristos 	if (!new_ctl) {
14482b3787f6Schristos 		event_warn("%s: calloc",__func__);
14492b3787f6Schristos 		goto done;
14502b3787f6Schristos 	}
14512b3787f6Schristos 	TAILQ_INIT(&new_ctl->events);
14522b3787f6Schristos 	new_ctl->duration.tv_sec = duration->tv_sec;
14532b3787f6Schristos 	new_ctl->duration.tv_usec =
14542b3787f6Schristos 	    duration->tv_usec | COMMON_TIMEOUT_MAGIC |
14552b3787f6Schristos 	    (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
14562b3787f6Schristos 	evtimer_assign(&new_ctl->timeout_event, base,
14572b3787f6Schristos 	    common_timeout_callback, new_ctl);
14582b3787f6Schristos 	new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
14592b3787f6Schristos 	event_priority_set(&new_ctl->timeout_event, 0);
14602b3787f6Schristos 	new_ctl->base = base;
14612b3787f6Schristos 	base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
14622b3787f6Schristos 	result = &new_ctl->duration;
14632b3787f6Schristos 
14642b3787f6Schristos done:
14652b3787f6Schristos 	if (result)
14662b3787f6Schristos 		EVUTIL_ASSERT(is_common_timeout(result, base));
14672b3787f6Schristos 
14682b3787f6Schristos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
14692b3787f6Schristos 	return result;
14702b3787f6Schristos }
14712b3787f6Schristos 
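/*
 * Sketch (hypothetical caller; base/ev are assumed): the timeval that
 * event_base_init_common_timeout() returns is copied into each event's
 * timeout.  Every event added with that exact value then lands on the
 * O(1) common-timeout queue instead of the minheap.
 */
#if 0
struct timeval tv = { 10, 0 };
const struct timeval *common = event_base_init_common_timeout(base, &tv);
if (common != NULL)
	memcpy(&tv, common, sizeof(tv));	/* tv now carries the magic bits */
event_add(ev, &tv);
#endif
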
14722b3787f6Schristos /* Closure function invoked when we're activating a persistent event. */
14732b3787f6Schristos static inline void
14742b3787f6Schristos event_persist_closure(struct event_base *base, struct event *ev)
14752b3787f6Schristos {
14761b6f2cd4Schristos 	void (*evcb_callback)(evutil_socket_t, short, void *);
14771b6f2cd4Schristos 
147850cc4415Schristos 	// Other fields of *ev that must be saved before we run the callback
147950cc4415Schristos 	evutil_socket_t evcb_fd;
148050cc4415Schristos 	short evcb_res;
148150cc4415Schristos 	void *evcb_arg;
148250cc4415Schristos 
14832b3787f6Schristos 	/* reschedule the persistent event if we have a timeout. */
14842b3787f6Schristos 	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
14852b3787f6Schristos 		/* If there was a timeout, we want it to run at an interval of
14862b3787f6Schristos 		 * ev_io_timeout after the last time it was _scheduled_ for,
14872b3787f6Schristos 		 * not ev_io_timeout after _now_.  If it fired for another
14882b3787f6Schristos 		 * reason, though, the timeout ought to start ticking _now_. */
14892b3787f6Schristos 		struct timeval run_at, relative_to, delay, now;
14902b3787f6Schristos 		ev_uint32_t usec_mask = 0;
14912b3787f6Schristos 		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
14922b3787f6Schristos 			&ev->ev_io_timeout));
14932b3787f6Schristos 		gettime(base, &now);
14942b3787f6Schristos 		if (is_common_timeout(&ev->ev_timeout, base)) {
14952b3787f6Schristos 			delay = ev->ev_io_timeout;
14962b3787f6Schristos 			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
14972b3787f6Schristos 			delay.tv_usec &= MICROSECONDS_MASK;
14982b3787f6Schristos 			if (ev->ev_res & EV_TIMEOUT) {
14992b3787f6Schristos 				relative_to = ev->ev_timeout;
15002b3787f6Schristos 				relative_to.tv_usec &= MICROSECONDS_MASK;
15012b3787f6Schristos 			} else {
15022b3787f6Schristos 				relative_to = now;
15032b3787f6Schristos 			}
15042b3787f6Schristos 		} else {
15052b3787f6Schristos 			delay = ev->ev_io_timeout;
15062b3787f6Schristos 			if (ev->ev_res & EV_TIMEOUT) {
15072b3787f6Schristos 				relative_to = ev->ev_timeout;
15082b3787f6Schristos 			} else {
15092b3787f6Schristos 				relative_to = now;
15102b3787f6Schristos 			}
15112b3787f6Schristos 		}
15122b3787f6Schristos 		evutil_timeradd(&relative_to, &delay, &run_at);
15132b3787f6Schristos 		if (evutil_timercmp(&run_at, &now, <)) {
15142b3787f6Schristos 			/* Looks like we missed at least one invocation due to
15152b3787f6Schristos 			 * a clock jump, not running the event loop for a
15162b3787f6Schristos 			 * while, really slow callbacks, or
15172b3787f6Schristos 			 * something. Reschedule relative to now.
15182b3787f6Schristos 			 */
15192b3787f6Schristos 			evutil_timeradd(&now, &delay, &run_at);
15202b3787f6Schristos 		}
15212b3787f6Schristos 		run_at.tv_usec |= usec_mask;
15222b3787f6Schristos 		event_add_nolock_(ev, &run_at, 1);
15232b3787f6Schristos 	}
15241b6f2cd4Schristos 
15251b6f2cd4Schristos 	// Save our callback before we release the lock
152650cc4415Schristos 	evcb_callback = ev->ev_callback;
152750cc4415Schristos 	evcb_fd = ev->ev_fd;
152850cc4415Schristos 	evcb_res = ev->ev_res;
152950cc4415Schristos 	evcb_arg = ev->ev_arg;
15301b6f2cd4Schristos 
15311b6f2cd4Schristos 	// Release the lock
15322b3787f6Schristos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
15331b6f2cd4Schristos 
15341b6f2cd4Schristos 	// Execute the callback
153550cc4415Schristos 	(evcb_callback)(evcb_fd, evcb_res, evcb_arg);
15362b3787f6Schristos }
15372b3787f6Schristos 
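/*
 * Sketch (hypothetical caller; tick_cb and base are assumed): the
 * rescheduling above is what makes a persistent timeout periodic.  This
 * timer fires about once per second, measured from when it was last
 * scheduled rather than from when the callback returned.
 */
#if 0
struct timeval one_sec = { 1, 0 };
struct event *tick = event_new(base, -1, EV_PERSIST, tick_cb, NULL);
event_add(tick, &one_sec);
#endif
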
15382b3787f6Schristos /*
15392b3787f6Schristos   Helper for event_process_active to process all the events in a single queue,
15402b3787f6Schristos   releasing the lock as we go.  This function requires that the lock be held
15412b3787f6Schristos   when it's invoked.  Returns -1 if we get a signal or an event_break that
15422b3787f6Schristos   means we should stop processing any active events now.  Otherwise returns
15432b3787f6Schristos   the number of non-internal event_callbacks that we processed.
15442b3787f6Schristos */
15452b3787f6Schristos static int
15462b3787f6Schristos event_process_active_single_queue(struct event_base *base,
15472b3787f6Schristos     struct evcallback_list *activeq,
15482b3787f6Schristos     int max_to_process, const struct timeval *endtime)
15492b3787f6Schristos {
15502b3787f6Schristos 	struct event_callback *evcb;
15512b3787f6Schristos 	int count = 0;
15522b3787f6Schristos 
15532b3787f6Schristos 	EVUTIL_ASSERT(activeq != NULL);
15542b3787f6Schristos 
15552b3787f6Schristos 	for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
15562b3787f6Schristos 		struct event *ev=NULL;
15572b3787f6Schristos 		if (evcb->evcb_flags & EVLIST_INIT) {
15582b3787f6Schristos 			ev = event_callback_to_event(evcb);
15592b3787f6Schristos 
15601b6f2cd4Schristos 			if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
15612b3787f6Schristos 				event_queue_remove_active(base, evcb);
15622b3787f6Schristos 			else
15631b6f2cd4Schristos 				event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
15642b3787f6Schristos 			event_debug((
15651b6f2cd4Schristos 			    "event_process_active: event: %p, %s%s%scall %p",
15662b3787f6Schristos 			    ev,
15672b3787f6Schristos 			    ev->ev_res & EV_READ ? "EV_READ " : " ",
15682b3787f6Schristos 			    ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
15691b6f2cd4Schristos 			    ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
15702b3787f6Schristos 			    ev->ev_callback));
15712b3787f6Schristos 		} else {
15722b3787f6Schristos 			event_queue_remove_active(base, evcb);
15732b3787f6Schristos 			event_debug(("event_process_active: event_callback %p, "
15742b3787f6Schristos 				"closure %d, call %p",
15752b3787f6Schristos 				evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
15762b3787f6Schristos 		}
15772b3787f6Schristos 
15782b3787f6Schristos 		if (!(evcb->evcb_flags & EVLIST_INTERNAL))
15792b3787f6Schristos 			++count;
15802b3787f6Schristos 
15822b3787f6Schristos 		base->current_event = evcb;
15832b3787f6Schristos #ifndef EVENT__DISABLE_THREAD_SUPPORT
15842b3787f6Schristos 		base->current_event_waiters = 0;
15852b3787f6Schristos #endif
15862b3787f6Schristos 
15872b3787f6Schristos 		switch (evcb->evcb_closure) {
15882b3787f6Schristos 		case EV_CLOSURE_EVENT_SIGNAL:
15891b6f2cd4Schristos 			EVUTIL_ASSERT(ev != NULL);
15902b3787f6Schristos 			event_signal_closure(base, ev);
15912b3787f6Schristos 			break;
15922b3787f6Schristos 		case EV_CLOSURE_EVENT_PERSIST:
15931b6f2cd4Schristos 			EVUTIL_ASSERT(ev != NULL);
15942b3787f6Schristos 			event_persist_closure(base, ev);
15952b3787f6Schristos 			break;
15961b6f2cd4Schristos 		case EV_CLOSURE_EVENT: {
159750cc4415Schristos 			void (*evcb_callback)(evutil_socket_t, short, void *);
15981b6f2cd4Schristos 			EVUTIL_ASSERT(ev != NULL);
159950cc4415Schristos 			evcb_callback = *ev->ev_callback;
16002b3787f6Schristos 			EVBASE_RELEASE_LOCK(base, th_base_lock);
16011b6f2cd4Schristos 			evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);
16021b6f2cd4Schristos 		}
16032b3787f6Schristos 		break;
16041b6f2cd4Schristos 		case EV_CLOSURE_CB_SELF: {
16051b6f2cd4Schristos 			void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
16062b3787f6Schristos 			EVBASE_RELEASE_LOCK(base, th_base_lock);
16071b6f2cd4Schristos 			evcb_selfcb(evcb, evcb->evcb_arg);
16081b6f2cd4Schristos 		}
16091b6f2cd4Schristos 		break;
16101b6f2cd4Schristos 		case EV_CLOSURE_EVENT_FINALIZE:
16111b6f2cd4Schristos 		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
161250cc4415Schristos 			void (*evcb_evfinalize)(struct event *, void *);
161350cc4415Schristos 			int evcb_closure = evcb->evcb_closure;
16141b6f2cd4Schristos 			EVUTIL_ASSERT(ev != NULL);
16151b6f2cd4Schristos 			base->current_event = NULL;
161650cc4415Schristos 			evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
16171b6f2cd4Schristos 			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
16181b6f2cd4Schristos 			EVBASE_RELEASE_LOCK(base, th_base_lock);
16191b6f2cd4Schristos 			evcb_evfinalize(ev, ev->ev_arg);
16201b6f2cd4Schristos 			event_debug_note_teardown_(ev);
162150cc4415Schristos 			if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
16221b6f2cd4Schristos 				mm_free(ev);
16231b6f2cd4Schristos 		}
16241b6f2cd4Schristos 		break;
16251b6f2cd4Schristos 		case EV_CLOSURE_CB_FINALIZE: {
16261b6f2cd4Schristos 			void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
16271b6f2cd4Schristos 			base->current_event = NULL;
16281b6f2cd4Schristos 			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
16291b6f2cd4Schristos 			EVBASE_RELEASE_LOCK(base, th_base_lock);
16301b6f2cd4Schristos 			evcb_cbfinalize(evcb, evcb->evcb_arg);
16311b6f2cd4Schristos 		}
16322b3787f6Schristos 		break;
16332b3787f6Schristos 		default:
16342b3787f6Schristos 			EVUTIL_ASSERT(0);
16352b3787f6Schristos 		}
16362b3787f6Schristos 
16372b3787f6Schristos 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
16382b3787f6Schristos 		base->current_event = NULL;
16392b3787f6Schristos #ifndef EVENT__DISABLE_THREAD_SUPPORT
16402b3787f6Schristos 		if (base->current_event_waiters) {
16412b3787f6Schristos 			base->current_event_waiters = 0;
16422b3787f6Schristos 			EVTHREAD_COND_BROADCAST(base->current_event_cond);
16432b3787f6Schristos 		}
16442b3787f6Schristos #endif
16452b3787f6Schristos 
16462b3787f6Schristos 		if (base->event_break)
16472b3787f6Schristos 			return -1;
16482b3787f6Schristos 		if (count >= max_to_process)
16492b3787f6Schristos 			return count;
16502b3787f6Schristos 		if (count && endtime) {
16512b3787f6Schristos 			struct timeval now;
16522b3787f6Schristos 			update_time_cache(base);
16532b3787f6Schristos 			gettime(base, &now);
16542b3787f6Schristos 			if (evutil_timercmp(&now, endtime, >=))
16552b3787f6Schristos 				return count;
16562b3787f6Schristos 		}
16572b3787f6Schristos 		if (base->event_continue)
16582b3787f6Schristos 			break;
16592b3787f6Schristos 	}
16602b3787f6Schristos 	return count;
16612b3787f6Schristos }
16622b3787f6Schristos 
16632b3787f6Schristos /*
16642b3787f6Schristos  * Active events are stored in priority queues.  Lower-numbered priorities
16652b3787f6Schristos  * are always processed before higher-numbered ones, so events on the
16662b3787f6Schristos  * low-numbered (more urgent) queues can starve the rest.
16672b3787f6Schristos  */
16682b3787f6Schristos 
16692b3787f6Schristos static int
16702b3787f6Schristos event_process_active(struct event_base *base)
16712b3787f6Schristos {
16722b3787f6Schristos 	/* Caller must hold th_base_lock */
16732b3787f6Schristos 	struct evcallback_list *activeq = NULL;
16742b3787f6Schristos 	int i, c = 0;
16752b3787f6Schristos 	const struct timeval *endtime;
16762b3787f6Schristos 	struct timeval tv;
16772b3787f6Schristos 	const int maxcb = base->max_dispatch_callbacks;
16782b3787f6Schristos 	const int limit_after_prio = base->limit_callbacks_after_prio;
16792b3787f6Schristos 	if (base->max_dispatch_time.tv_sec >= 0) {
16802b3787f6Schristos 		update_time_cache(base);
16812b3787f6Schristos 		gettime(base, &tv);
16822b3787f6Schristos 		evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
16832b3787f6Schristos 		endtime = &tv;
16842b3787f6Schristos 	} else {
16852b3787f6Schristos 		endtime = NULL;
16862b3787f6Schristos 	}
16872b3787f6Schristos 
16882b3787f6Schristos 	for (i = 0; i < base->nactivequeues; ++i) {
16892b3787f6Schristos 		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
16902b3787f6Schristos 			base->event_running_priority = i;
16912b3787f6Schristos 			activeq = &base->activequeues[i];
16922b3787f6Schristos 			if (i < limit_after_prio)
16932b3787f6Schristos 				c = event_process_active_single_queue(base, activeq,
16942b3787f6Schristos 				    INT_MAX, NULL);
16952b3787f6Schristos 			else
16962b3787f6Schristos 				c = event_process_active_single_queue(base, activeq,
16972b3787f6Schristos 				    maxcb, endtime);
16982b3787f6Schristos 			if (c < 0) {
16992b3787f6Schristos 				goto done;
17002b3787f6Schristos 			} else if (c > 0)
17012b3787f6Schristos 				break; /* Processed a real event; do not
17022b3787f6Schristos 					* consider lower-priority events */
17032b3787f6Schristos 			/* If we get here, all of the events we processed
17042b3787f6Schristos 			 * were internal.  Continue. */
17052b3787f6Schristos 		}
17062b3787f6Schristos 	}
17072b3787f6Schristos 
17082b3787f6Schristos done:
17092b3787f6Schristos 	base->event_running_priority = -1;
17102b3787f6Schristos 
17112b3787f6Schristos 	return c;
17122b3787f6Schristos }
17132b3787f6Schristos 
17142b3787f6Schristos /*
17152b3787f6Schristos  * Wait continuously for events.  We exit only if no events are left.
17162b3787f6Schristos  */
17172b3787f6Schristos 
17182b3787f6Schristos int
17192b3787f6Schristos event_dispatch(void)
17202b3787f6Schristos {
17212b3787f6Schristos 	return (event_loop(0));
17222b3787f6Schristos }
17232b3787f6Schristos 
17242b3787f6Schristos int
17252b3787f6Schristos event_base_dispatch(struct event_base *event_base)
17262b3787f6Schristos {
17272b3787f6Schristos 	return (event_base_loop(event_base, 0));
17282b3787f6Schristos }
17292b3787f6Schristos 
17302b3787f6Schristos const char *
17312b3787f6Schristos event_base_get_method(const struct event_base *base)
17322b3787f6Schristos {
17332b3787f6Schristos 	EVUTIL_ASSERT(base);
17342b3787f6Schristos 	return (base->evsel->name);
17352b3787f6Schristos }
17362b3787f6Schristos 
17372b3787f6Schristos /** Callback: used to implement event_base_loopexit by telling the event_base
17382b3787f6Schristos  * that it's time to exit its loop. */
17392b3787f6Schristos static void
17402b3787f6Schristos event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
17412b3787f6Schristos {
17422b3787f6Schristos 	struct event_base *base = arg;
17432b3787f6Schristos 	base->event_gotterm = 1;
17442b3787f6Schristos }
17452b3787f6Schristos 
17462b3787f6Schristos int
17472b3787f6Schristos event_loopexit(const struct timeval *tv)
17482b3787f6Schristos {
17492b3787f6Schristos 	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
17502b3787f6Schristos 		    current_base, tv));
17512b3787f6Schristos }
17522b3787f6Schristos 
17532b3787f6Schristos int
17542b3787f6Schristos event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
17552b3787f6Schristos {
17562b3787f6Schristos 	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
17572b3787f6Schristos 		    event_base, tv));
17582b3787f6Schristos }
17592b3787f6Schristos 
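/*
 * Sketch (hypothetical caller; base is assumed): loopexit drains the
 * current batch of active callbacks before returning, whereas loopbreak
 * (below) stops right after the callback that is running now.
 */
#if 0
struct timeval two_sec = { 2, 0 };
event_base_loopexit(base, &two_sec);	/* leave the loop in ~2 seconds */
event_base_loopexit(base, NULL);	/* leave after the current pass */
#endif
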
17602b3787f6Schristos int
17612b3787f6Schristos event_loopbreak(void)
17622b3787f6Schristos {
17632b3787f6Schristos 	return (event_base_loopbreak(current_base));
17642b3787f6Schristos }
17652b3787f6Schristos 
17662b3787f6Schristos int
17672b3787f6Schristos event_base_loopbreak(struct event_base *event_base)
17682b3787f6Schristos {
17692b3787f6Schristos 	int r = 0;
17702b3787f6Schristos 	if (event_base == NULL)
17712b3787f6Schristos 		return (-1);
17722b3787f6Schristos 
17732b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
17742b3787f6Schristos 	event_base->event_break = 1;
17752b3787f6Schristos 
17762b3787f6Schristos 	if (EVBASE_NEED_NOTIFY(event_base)) {
17772b3787f6Schristos 		r = evthread_notify_base(event_base);
17782b3787f6Schristos 	} else {
17792b3787f6Schristos 		r = (0);
17802b3787f6Schristos 	}
17812b3787f6Schristos 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
17822b3787f6Schristos 	return r;
17832b3787f6Schristos }
17842b3787f6Schristos 
17852b3787f6Schristos int
17862b3787f6Schristos event_base_loopcontinue(struct event_base *event_base)
17872b3787f6Schristos {
17882b3787f6Schristos 	int r = 0;
17892b3787f6Schristos 	if (event_base == NULL)
17902b3787f6Schristos 		return (-1);
17912b3787f6Schristos 
17922b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
17932b3787f6Schristos 	event_base->event_continue = 1;
17942b3787f6Schristos 
17952b3787f6Schristos 	if (EVBASE_NEED_NOTIFY(event_base)) {
17962b3787f6Schristos 		r = evthread_notify_base(event_base);
17972b3787f6Schristos 	} else {
17982b3787f6Schristos 		r = (0);
17992b3787f6Schristos 	}
18002b3787f6Schristos 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
18012b3787f6Schristos 	return r;
18022b3787f6Schristos }
18032b3787f6Schristos 
18042b3787f6Schristos int
18052b3787f6Schristos event_base_got_break(struct event_base *event_base)
18062b3787f6Schristos {
18072b3787f6Schristos 	int res;
18082b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
18092b3787f6Schristos 	res = event_base->event_break;
18102b3787f6Schristos 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
18112b3787f6Schristos 	return res;
18122b3787f6Schristos }
18132b3787f6Schristos 
18142b3787f6Schristos int
18152b3787f6Schristos event_base_got_exit(struct event_base *event_base)
18162b3787f6Schristos {
18172b3787f6Schristos 	int res;
18182b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
18192b3787f6Schristos 	res = event_base->event_gotterm;
18202b3787f6Schristos 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
18212b3787f6Schristos 	return res;
18222b3787f6Schristos }
18232b3787f6Schristos 
18242b3787f6Schristos /* not thread safe */
18252b3787f6Schristos 
18262b3787f6Schristos int
18272b3787f6Schristos event_loop(int flags)
18282b3787f6Schristos {
18292b3787f6Schristos 	return event_base_loop(current_base, flags);
18302b3787f6Schristos }
18312b3787f6Schristos 
18322b3787f6Schristos int
18332b3787f6Schristos event_base_loop(struct event_base *base, int flags)
18342b3787f6Schristos {
18352b3787f6Schristos 	const struct eventop *evsel = base->evsel;
18362b3787f6Schristos 	struct timeval tv;
18372b3787f6Schristos 	struct timeval *tv_p;
18382b3787f6Schristos 	int res, done, retval = 0;
18392b3787f6Schristos 
18402b3787f6Schristos 	/* Grab the lock.  We will release it inside evsel.dispatch, and again
18412b3787f6Schristos 	 * as we invoke user callbacks. */
18422b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
18432b3787f6Schristos 
18442b3787f6Schristos 	if (base->running_loop) {
18452b3787f6Schristos 		event_warnx("%s: reentrant invocation.  Only one event_base_loop"
18462b3787f6Schristos 		    " can run on each event_base at once.", __func__);
18472b3787f6Schristos 		EVBASE_RELEASE_LOCK(base, th_base_lock);
18482b3787f6Schristos 		return -1;
18492b3787f6Schristos 	}
18502b3787f6Schristos 
18512b3787f6Schristos 	base->running_loop = 1;
18522b3787f6Schristos 
18532b3787f6Schristos 	clear_time_cache(base);
18542b3787f6Schristos 
18552b3787f6Schristos 	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
18562b3787f6Schristos 		evsig_set_base_(base);
18572b3787f6Schristos 
18582b3787f6Schristos 	done = 0;
18592b3787f6Schristos 
18602b3787f6Schristos #ifndef EVENT__DISABLE_THREAD_SUPPORT
18612b3787f6Schristos 	base->th_owner_id = EVTHREAD_GET_ID();
18622b3787f6Schristos #endif
18632b3787f6Schristos 
18642b3787f6Schristos 	base->event_gotterm = base->event_break = 0;
18652b3787f6Schristos 
18662b3787f6Schristos 	while (!done) {
18672b3787f6Schristos 		base->event_continue = 0;
18682b3787f6Schristos 		base->n_deferreds_queued = 0;
18692b3787f6Schristos 
18702b3787f6Schristos 		/* Terminate the loop if we have been asked to */
18712b3787f6Schristos 		if (base->event_gotterm) {
18722b3787f6Schristos 			break;
18732b3787f6Schristos 		}
18742b3787f6Schristos 
18752b3787f6Schristos 		if (base->event_break) {
18762b3787f6Schristos 			break;
18772b3787f6Schristos 		}
18782b3787f6Schristos 
18792b3787f6Schristos 		tv_p = &tv;
18802b3787f6Schristos 		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
18812b3787f6Schristos 			timeout_next(base, &tv_p);
18822b3787f6Schristos 		} else {
18832b3787f6Schristos 			/*
18842b3787f6Schristos 			 * if we have active events, we just poll new events
18852b3787f6Schristos 			 * without waiting.
18862b3787f6Schristos 			 */
18872b3787f6Schristos 			evutil_timerclear(&tv);
18882b3787f6Schristos 		}
18892b3787f6Schristos 
18902b3787f6Schristos 		/* If we have no events, we just exit */
18912b3787f6Schristos 		if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
18922b3787f6Schristos 		    !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
18932b3787f6Schristos 			event_debug(("%s: no events registered.", __func__));
18942b3787f6Schristos 			retval = 1;
18952b3787f6Schristos 			goto done;
18962b3787f6Schristos 		}
18972b3787f6Schristos 
18982b3787f6Schristos 		event_queue_make_later_events_active(base);
18992b3787f6Schristos 
19002b3787f6Schristos 		clear_time_cache(base);
19012b3787f6Schristos 
19022b3787f6Schristos 		res = evsel->dispatch(base, tv_p);
19032b3787f6Schristos 
19042b3787f6Schristos 		if (res == -1) {
19052b3787f6Schristos 			event_debug(("%s: dispatch returned unsuccessfully.",
19062b3787f6Schristos 				__func__));
19072b3787f6Schristos 			retval = -1;
19082b3787f6Schristos 			goto done;
19092b3787f6Schristos 		}
19102b3787f6Schristos 
19112b3787f6Schristos 		update_time_cache(base);
19122b3787f6Schristos 
19132b3787f6Schristos 		timeout_process(base);
19142b3787f6Schristos 
19152b3787f6Schristos 		if (N_ACTIVE_CALLBACKS(base)) {
19162b3787f6Schristos 			int n = event_process_active(base);
19172b3787f6Schristos 			if ((flags & EVLOOP_ONCE)
19182b3787f6Schristos 			    && N_ACTIVE_CALLBACKS(base) == 0
19192b3787f6Schristos 			    && n != 0)
19202b3787f6Schristos 				done = 1;
19212b3787f6Schristos 		} else if (flags & EVLOOP_NONBLOCK)
19222b3787f6Schristos 			done = 1;
19232b3787f6Schristos 	}
19242b3787f6Schristos 	event_debug(("%s: asked to terminate loop.", __func__));
19252b3787f6Schristos 
19262b3787f6Schristos done:
19272b3787f6Schristos 	clear_time_cache(base);
19282b3787f6Schristos 	base->running_loop = 0;
19292b3787f6Schristos 
19302b3787f6Schristos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
19312b3787f6Schristos 
19322b3787f6Schristos 	return (retval);
19332b3787f6Schristos }
19342b3787f6Schristos 
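/*
 * Sketch (hypothetical caller; base is assumed): common ways of driving
 * the loop above.
 */
#if 0
event_base_dispatch(base);			/* same as event_base_loop(base, 0) */
event_base_loop(base, EVLOOP_NONBLOCK);		/* poll once without blocking */
event_base_loop(base, EVLOOP_ONCE);		/* block until one batch has run */
event_base_loop(base, EVLOOP_NO_EXIT_ON_EMPTY);	/* keep running with no events */
#endif
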
19352b3787f6Schristos /* One-time callback to implement event_base_once: invokes the user callback,
19362b3787f6Schristos  * then deletes the allocated storage */
19372b3787f6Schristos static void
19382b3787f6Schristos event_once_cb(evutil_socket_t fd, short events, void *arg)
19392b3787f6Schristos {
19402b3787f6Schristos 	struct event_once *eonce = arg;
19412b3787f6Schristos 
19422b3787f6Schristos 	(*eonce->cb)(fd, events, eonce->arg);
19432b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
19442b3787f6Schristos 	LIST_REMOVE(eonce, next_once);
19452b3787f6Schristos 	EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
19462b3787f6Schristos 	event_debug_unassign(&eonce->ev);
19472b3787f6Schristos 	mm_free(eonce);
19482b3787f6Schristos }
19492b3787f6Schristos 
19502b3787f6Schristos /* Not threadsafe; schedules the event once on current_base. */
19512b3787f6Schristos int
19522b3787f6Schristos event_once(evutil_socket_t fd, short events,
19532b3787f6Schristos     void (*callback)(evutil_socket_t, short, void *),
19542b3787f6Schristos     void *arg, const struct timeval *tv)
19552b3787f6Schristos {
19562b3787f6Schristos 	return event_base_once(current_base, fd, events, callback, arg, tv);
19572b3787f6Schristos }
19582b3787f6Schristos 
19592b3787f6Schristos /* Schedules an event once */
19602b3787f6Schristos int
19612b3787f6Schristos event_base_once(struct event_base *base, evutil_socket_t fd, short events,
19622b3787f6Schristos     void (*callback)(evutil_socket_t, short, void *),
19632b3787f6Schristos     void *arg, const struct timeval *tv)
19642b3787f6Schristos {
19652b3787f6Schristos 	struct event_once *eonce;
19662b3787f6Schristos 	int res = 0;
19672b3787f6Schristos 	int activate = 0;
19682b3787f6Schristos 
19692b3787f6Schristos 	/* We cannot support signals that just fire once, or persistent
19702b3787f6Schristos 	 * events. */
19712b3787f6Schristos 	if (events & (EV_SIGNAL|EV_PERSIST))
19722b3787f6Schristos 		return (-1);
19732b3787f6Schristos 
19742b3787f6Schristos 	if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
19752b3787f6Schristos 		return (-1);
19762b3787f6Schristos 
19772b3787f6Schristos 	eonce->cb = callback;
19782b3787f6Schristos 	eonce->arg = arg;
19792b3787f6Schristos 
19801b6f2cd4Schristos 	if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
19812b3787f6Schristos 		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
19822b3787f6Schristos 
19832b3787f6Schristos 		if (tv == NULL || ! evutil_timerisset(tv)) {
19842b3787f6Schristos 			/* If the event is going to become active immediately,
19852b3787f6Schristos 			 * don't put it on the timeout queue.  This is one
19862b3787f6Schristos 			 * idiom for scheduling a callback, so let's make
19872b3787f6Schristos 			 * it fast (and order-preserving). */
19882b3787f6Schristos 			activate = 1;
19892b3787f6Schristos 		}
19901b6f2cd4Schristos 	} else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
19911b6f2cd4Schristos 		events &= EV_READ|EV_WRITE|EV_CLOSED;
19922b3787f6Schristos 
19932b3787f6Schristos 		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
19942b3787f6Schristos 	} else {
19952b3787f6Schristos 		/* Bad event combination */
19962b3787f6Schristos 		mm_free(eonce);
19972b3787f6Schristos 		return (-1);
19982b3787f6Schristos 	}
19992b3787f6Schristos 
20002b3787f6Schristos 	if (res == 0) {
20012b3787f6Schristos 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
20022b3787f6Schristos 		if (activate)
20032b3787f6Schristos 			event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
20042b3787f6Schristos 		else
20052b3787f6Schristos 			res = event_add_nolock_(&eonce->ev, tv, 0);
20062b3787f6Schristos 
20072b3787f6Schristos 		if (res != 0) {
20082b3787f6Schristos 			mm_free(eonce);
20092b3787f6Schristos 			return (res);
20102b3787f6Schristos 		} else {
20112b3787f6Schristos 			LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
20122b3787f6Schristos 		}
20132b3787f6Schristos 		EVBASE_RELEASE_LOCK(base, th_base_lock);
20142b3787f6Schristos 	}
20152b3787f6Schristos 
20162b3787f6Schristos 	return (0);
20172b3787f6Schristos }
20182b3787f6Schristos 
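/*
 * Sketch (hypothetical caller; greet_cb and base are assumptions): a
 * one-shot timer.  The internal event_once storage is freed after the
 * callback runs, so there is nothing to clean up, but also no handle
 * with which to cancel it.
 */
#if 0
static void
greet_cb(evutil_socket_t fd, short what, void *arg)
{
	puts("half a second elapsed");
}

static void
schedule_greeting(struct event_base *base)
{
	struct timeval half_sec = { 0, 500000 };
	event_base_once(base, -1, EV_TIMEOUT, greet_cb, NULL, &half_sec);
}
#endif
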
20192b3787f6Schristos int
20202b3787f6Schristos event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
20212b3787f6Schristos {
20222b3787f6Schristos 	if (!base)
20232b3787f6Schristos 		base = current_base;
20242b3787f6Schristos 	if (arg == &event_self_cbarg_ptr_)
20252b3787f6Schristos 		arg = ev;
20262b3787f6Schristos 
20272b3787f6Schristos 	event_debug_assert_not_added_(ev);
20282b3787f6Schristos 
20292b3787f6Schristos 	ev->ev_base = base;
20302b3787f6Schristos 
20312b3787f6Schristos 	ev->ev_callback = callback;
20322b3787f6Schristos 	ev->ev_arg = arg;
20332b3787f6Schristos 	ev->ev_fd = fd;
20342b3787f6Schristos 	ev->ev_events = events;
20352b3787f6Schristos 	ev->ev_res = 0;
20362b3787f6Schristos 	ev->ev_flags = EVLIST_INIT;
20372b3787f6Schristos 	ev->ev_ncalls = 0;
20382b3787f6Schristos 	ev->ev_pncalls = NULL;
20392b3787f6Schristos 
20402b3787f6Schristos 	if (events & EV_SIGNAL) {
20411b6f2cd4Schristos 		if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
20422b3787f6Schristos 			event_warnx("%s: EV_SIGNAL is not compatible with "
20431b6f2cd4Schristos 			    "EV_READ, EV_WRITE or EV_CLOSED", __func__);
20442b3787f6Schristos 			return -1;
20452b3787f6Schristos 		}
20462b3787f6Schristos 		ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
20472b3787f6Schristos 	} else {
20482b3787f6Schristos 		if (events & EV_PERSIST) {
20492b3787f6Schristos 			evutil_timerclear(&ev->ev_io_timeout);
20502b3787f6Schristos 			ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
20512b3787f6Schristos 		} else {
20522b3787f6Schristos 			ev->ev_closure = EV_CLOSURE_EVENT;
20532b3787f6Schristos 		}
20542b3787f6Schristos 	}
20552b3787f6Schristos 
20562b3787f6Schristos 	min_heap_elem_init_(ev);
20572b3787f6Schristos 
20582b3787f6Schristos 	if (base != NULL) {
20592b3787f6Schristos 		/* by default, we put new events into the middle priority */
20602b3787f6Schristos 		ev->ev_pri = base->nactivequeues / 2;
20612b3787f6Schristos 	}
20622b3787f6Schristos 
20632b3787f6Schristos 	event_debug_note_setup_(ev);
20642b3787f6Schristos 
20652b3787f6Schristos 	return 0;
20662b3787f6Schristos }
20672b3787f6Schristos 
20682b3787f6Schristos int
20692b3787f6Schristos event_base_set(struct event_base *base, struct event *ev)
20702b3787f6Schristos {
20712b3787f6Schristos 	/* Only innocent events may be assigned to a different base */
20722b3787f6Schristos 	if (ev->ev_flags != EVLIST_INIT)
20732b3787f6Schristos 		return (-1);
20742b3787f6Schristos 
20752b3787f6Schristos 	event_debug_assert_is_setup_(ev);
20762b3787f6Schristos 
20772b3787f6Schristos 	ev->ev_base = base;
20782b3787f6Schristos 	ev->ev_pri = base->nactivequeues/2;
20792b3787f6Schristos 
20802b3787f6Schristos 	return (0);
20812b3787f6Schristos }
20822b3787f6Schristos 
20832b3787f6Schristos void
20842b3787f6Schristos event_set(struct event *ev, evutil_socket_t fd, short events,
20852b3787f6Schristos 	  void (*callback)(evutil_socket_t, short, void *), void *arg)
20862b3787f6Schristos {
20872b3787f6Schristos 	int r;
20882b3787f6Schristos 	r = event_assign(ev, current_base, fd, events, callback, arg);
20892b3787f6Schristos 	EVUTIL_ASSERT(r == 0);
20902b3787f6Schristos }
20912b3787f6Schristos 
20922b3787f6Schristos void *
20932b3787f6Schristos event_self_cbarg(void)
20942b3787f6Schristos {
20952b3787f6Schristos 	return &event_self_cbarg_ptr_;
20962b3787f6Schristos }
20972b3787f6Schristos 
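/*
 * Sketch (hypothetical caller; base is assumed): event_self_cbarg() lets
 * a callback receive the event being created, which is otherwise
 * impossible because the struct event does not exist yet when the
 * arguments to event_new() are evaluated.
 */
#if 0
static void
once_then_gone(evutil_socket_t fd, short what, void *arg)
{
	struct event *self = arg;	/* the event created below */
	event_free(self);
}

static void
setup(struct event_base *base)
{
	struct timeval five_sec = { 5, 0 };
	struct event *ev = event_new(base, -1, EV_TIMEOUT, once_then_gone,
	    event_self_cbarg());
	event_add(ev, &five_sec);
}
#endif
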
20982b3787f6Schristos struct event *
20992b3787f6Schristos event_base_get_running_event(struct event_base *base)
21002b3787f6Schristos {
21012b3787f6Schristos 	struct event *ev = NULL;
21022b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
21032b3787f6Schristos 	if (EVBASE_IN_THREAD(base)) {
21042b3787f6Schristos 		struct event_callback *evcb = base->current_event;
21052b3787f6Schristos 		if (evcb->evcb_flags & EVLIST_INIT)
21062b3787f6Schristos 			ev = event_callback_to_event(evcb);
21072b3787f6Schristos 	}
21082b3787f6Schristos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
21092b3787f6Schristos 	return ev;
21102b3787f6Schristos }
21112b3787f6Schristos 
21122b3787f6Schristos struct event *
21132b3787f6Schristos event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
21142b3787f6Schristos {
21152b3787f6Schristos 	struct event *ev;
21162b3787f6Schristos 	ev = mm_malloc(sizeof(struct event));
21172b3787f6Schristos 	if (ev == NULL)
21182b3787f6Schristos 		return (NULL);
21192b3787f6Schristos 	if (event_assign(ev, base, fd, events, cb, arg) < 0) {
21202b3787f6Schristos 		mm_free(ev);
21212b3787f6Schristos 		return (NULL);
21222b3787f6Schristos 	}
21232b3787f6Schristos 
21242b3787f6Schristos 	return (ev);
21252b3787f6Schristos }
21262b3787f6Schristos 
21272b3787f6Schristos void
21282b3787f6Schristos event_free(struct event *ev)
21292b3787f6Schristos {
21301b6f2cd4Schristos 	/* This assertion is disabled so that events which have been
21311b6f2cd4Schristos 	 * finalized remain a valid target for event_free(). */
21321b6f2cd4Schristos 	// event_debug_assert_is_setup_(ev);
21332b3787f6Schristos 
21342b3787f6Schristos 	/* make sure that this event won't be coming back to haunt us. */
21352b3787f6Schristos 	event_del(ev);
21362b3787f6Schristos 	event_debug_note_teardown_(ev);
21372b3787f6Schristos 	mm_free(ev);
21382b3787f6Schristos 
21392b3787f6Schristos }
21402b3787f6Schristos 
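/*
 * Sketch (hypothetical caller; sock and read_cb are assumptions): the
 * usual lifecycle of a heap-allocated event.  event_free() may be called
 * even while the event is pending; it deletes the event first.
 */
#if 0
struct event *ev = event_new(base, sock, EV_READ|EV_PERSIST, read_cb, NULL);
event_add(ev, NULL);
event_base_dispatch(base);
event_free(ev);		/* implies event_del(ev) */
#endif
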
21412b3787f6Schristos void
21422b3787f6Schristos event_debug_unassign(struct event *ev)
21432b3787f6Schristos {
21442b3787f6Schristos 	event_debug_assert_not_added_(ev);
21452b3787f6Schristos 	event_debug_note_teardown_(ev);
21462b3787f6Schristos 
21472b3787f6Schristos 	ev->ev_flags &= ~EVLIST_INIT;
21482b3787f6Schristos }
21492b3787f6Schristos 
21501b6f2cd4Schristos #define EVENT_FINALIZE_FREE_ 0x10000
21511b6f2cd4Schristos static int
21521b6f2cd4Schristos event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
21531b6f2cd4Schristos {
21541b6f2cd4Schristos 	ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
21551b6f2cd4Schristos 	    EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
21561b6f2cd4Schristos 
21571b6f2cd4Schristos 	event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
21581b6f2cd4Schristos 	ev->ev_closure = closure;
21591b6f2cd4Schristos 	ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
21601b6f2cd4Schristos 	event_active_nolock_(ev, EV_FINALIZE, 1);
21611b6f2cd4Schristos 	ev->ev_flags |= EVLIST_FINALIZING;
21621b6f2cd4Schristos 	return 0;
21631b6f2cd4Schristos }
21641b6f2cd4Schristos 
21651b6f2cd4Schristos static int
21661b6f2cd4Schristos event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
21671b6f2cd4Schristos {
21681b6f2cd4Schristos 	int r;
21691b6f2cd4Schristos 	struct event_base *base = ev->ev_base;
21701b6f2cd4Schristos 	if (EVUTIL_FAILURE_CHECK(!base)) {
21711b6f2cd4Schristos 		event_warnx("%s: event has no event_base set.", __func__);
21721b6f2cd4Schristos 		return -1;
21731b6f2cd4Schristos 	}
21741b6f2cd4Schristos 
21751b6f2cd4Schristos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
21761b6f2cd4Schristos 	r = event_finalize_nolock_(base, flags, ev, cb);
21771b6f2cd4Schristos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
21781b6f2cd4Schristos 	return r;
21791b6f2cd4Schristos }
21801b6f2cd4Schristos 
21811b6f2cd4Schristos int
21821b6f2cd4Schristos event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
21831b6f2cd4Schristos {
21841b6f2cd4Schristos 	return event_finalize_impl_(flags, ev, cb);
21851b6f2cd4Schristos }
21861b6f2cd4Schristos 
21871b6f2cd4Schristos int
21881b6f2cd4Schristos event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
21891b6f2cd4Schristos {
21901b6f2cd4Schristos 	return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
21911b6f2cd4Schristos }
21921b6f2cd4Schristos 
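/*
 * Sketch (hypothetical caller; discard_cb, ev, and the per-event state
 * are assumptions): with threads, event_free_finalize() replaces
 * event_free() when another thread may be running the event's callback.
 * The event is torn down from inside the loop, the finalizer runs once
 * that is safe, and the event itself is freed afterwards.
 */
#if 0
static void
discard_cb(struct event *ev, void *arg)
{
	free(arg);	/* per-event state; ev is freed for us afterwards */
}

static void
teardown(struct event *ev)
{
	event_free_finalize(0, ev, discard_cb);
}
#endif
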
21931b6f2cd4Schristos void
21941b6f2cd4Schristos event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
21951b6f2cd4Schristos {
21961b6f2cd4Schristos 	struct event *ev = NULL;
21971b6f2cd4Schristos 	if (evcb->evcb_flags & EVLIST_INIT) {
21981b6f2cd4Schristos 		ev = event_callback_to_event(evcb);
21991b6f2cd4Schristos 		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
22001b6f2cd4Schristos 	} else {
22011b6f2cd4Schristos 		event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
22021b6f2cd4Schristos 	}
22031b6f2cd4Schristos 
22041b6f2cd4Schristos 	evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
22051b6f2cd4Schristos 	evcb->evcb_cb_union.evcb_cbfinalize = cb;
22061b6f2cd4Schristos 	event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
22071b6f2cd4Schristos 	evcb->evcb_flags |= EVLIST_FINALIZING;
22081b6f2cd4Schristos }
22091b6f2cd4Schristos 
22101b6f2cd4Schristos void
22111b6f2cd4Schristos event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
22121b6f2cd4Schristos {
22131b6f2cd4Schristos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
22141b6f2cd4Schristos 	event_callback_finalize_nolock_(base, flags, evcb, cb);
22151b6f2cd4Schristos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
22161b6f2cd4Schristos }
22171b6f2cd4Schristos 
22181b6f2cd4Schristos /** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
22191b6f2cd4Schristos  * callback will be invoked on *one of them*, after they have *all* been
22201b6f2cd4Schristos  * finalized. */
22211b6f2cd4Schristos int
22221b6f2cd4Schristos event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
22231b6f2cd4Schristos {
22241b6f2cd4Schristos 	int n_pending = 0, i;
22251b6f2cd4Schristos 
22261b6f2cd4Schristos 	if (base == NULL)
22271b6f2cd4Schristos 		base = current_base;
22281b6f2cd4Schristos 
22291b6f2cd4Schristos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
22301b6f2cd4Schristos 
22311b6f2cd4Schristos 	event_debug(("%s: %d events finalizing", __func__, n_cbs));
22321b6f2cd4Schristos 
22331b6f2cd4Schristos 	/* At most one can be currently executing; the rest we just
22341b6f2cd4Schristos 	 * cancel... But we always make sure that the finalize callback
22351b6f2cd4Schristos 	 * runs. */
22361b6f2cd4Schristos 	for (i = 0; i < n_cbs; ++i) {
22371b6f2cd4Schristos 		struct event_callback *evcb = evcbs[i];
22381b6f2cd4Schristos 		if (evcb == base->current_event) {
22391b6f2cd4Schristos 			event_callback_finalize_nolock_(base, 0, evcb, cb);
22401b6f2cd4Schristos 			++n_pending;
22411b6f2cd4Schristos 		} else {
22421b6f2cd4Schristos 			event_callback_cancel_nolock_(base, evcb, 0);
22431b6f2cd4Schristos 		}
22441b6f2cd4Schristos 	}
22451b6f2cd4Schristos 
22461b6f2cd4Schristos 	if (n_pending == 0) {
22471b6f2cd4Schristos 		/* Just do the first one. */
22481b6f2cd4Schristos 		event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
22491b6f2cd4Schristos 	}
22501b6f2cd4Schristos 
22511b6f2cd4Schristos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
22521b6f2cd4Schristos 	return 0;
22531b6f2cd4Schristos }
22541b6f2cd4Schristos 
22552b3787f6Schristos /*
22562b3787f6Schristos  * Sets the priority of an event - if the event is already active,
22572b3787f6Schristos  * changing the priority will fail.
22582b3787f6Schristos  */
22592b3787f6Schristos 
22602b3787f6Schristos int
22612b3787f6Schristos event_priority_set(struct event *ev, int pri)
22622b3787f6Schristos {
22632b3787f6Schristos 	event_debug_assert_is_setup_(ev);
22642b3787f6Schristos 
22652b3787f6Schristos 	if (ev->ev_flags & EVLIST_ACTIVE)
22662b3787f6Schristos 		return (-1);
22672b3787f6Schristos 	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
22682b3787f6Schristos 		return (-1);
22692b3787f6Schristos 
22702b3787f6Schristos 	ev->ev_pri = pri;
22712b3787f6Schristos 
22722b3787f6Schristos 	return (0);
22732b3787f6Schristos }
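
/* Usage sketch (assumes a base configured with event_base_priority_init();
 * 'fd' and 'read_cb' are placeholders).  Smaller numbers run first: */
#if 0
	struct event *ev;
	event_base_priority_init(base, 2);	/* queues 0 (urgent) and 1 */
	ev = event_new(base, fd, EV_READ | EV_PERSIST, read_cb, NULL);
	if (event_priority_set(ev, 0) < 0) {
		/* fails if 'ev' is active or pri is out of range */
	}
	event_add(ev, NULL);
#endif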
22742b3787f6Schristos 
22752b3787f6Schristos /*
22762b3787f6Schristos  * Checks if a specific event is pending or scheduled.
22772b3787f6Schristos  */
22782b3787f6Schristos 
22792b3787f6Schristos int
22802b3787f6Schristos event_pending(const struct event *ev, short event, struct timeval *tv)
22812b3787f6Schristos {
22822b3787f6Schristos 	int flags = 0;
22832b3787f6Schristos 
22842b3787f6Schristos 	if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
22852b3787f6Schristos 		event_warnx("%s: event has no event_base set.", __func__);
22862b3787f6Schristos 		return 0;
22872b3787f6Schristos 	}
22882b3787f6Schristos 
22892b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
22902b3787f6Schristos 	event_debug_assert_is_setup_(ev);
22912b3787f6Schristos 
22922b3787f6Schristos 	if (ev->ev_flags & EVLIST_INSERTED)
22931b6f2cd4Schristos 		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
22942b3787f6Schristos 	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
22952b3787f6Schristos 		flags |= ev->ev_res;
22962b3787f6Schristos 	if (ev->ev_flags & EVLIST_TIMEOUT)
22972b3787f6Schristos 		flags |= EV_TIMEOUT;
22982b3787f6Schristos 
22991b6f2cd4Schristos 	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
23002b3787f6Schristos 
23012b3787f6Schristos 	/* See if there is a timeout that we should report */
23022b3787f6Schristos 	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
23032b3787f6Schristos 		struct timeval tmp = ev->ev_timeout;
23042b3787f6Schristos 		tmp.tv_usec &= MICROSECONDS_MASK;
23052b3787f6Schristos 		/* correctly remap to real time */
23062b3787f6Schristos 		evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
23072b3787f6Schristos 	}
23082b3787f6Schristos 
23092b3787f6Schristos 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
23102b3787f6Schristos 
23112b3787f6Schristos 	return (flags & event);
23122b3787f6Schristos }
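
/* Usage sketch: event_pending() both tests status and, through 'tv',
 * recovers the absolute expiry time of a pending timeout: */
#if 0
	struct timeval expiry;
	if (event_pending(ev, EV_READ | EV_TIMEOUT, &expiry)) {
		/* 'ev' is pending; if EV_TIMEOUT was among the returned
		 * flags, 'expiry' holds the wall-clock expiry computed
		 * from the base's internal clock above. */
	}
#endif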
23132b3787f6Schristos 
23142b3787f6Schristos int
23152b3787f6Schristos event_initialized(const struct event *ev)
23162b3787f6Schristos {
23172b3787f6Schristos 	if (!(ev->ev_flags & EVLIST_INIT))
23182b3787f6Schristos 		return 0;
23192b3787f6Schristos 
23202b3787f6Schristos 	return 1;
23212b3787f6Schristos }
23222b3787f6Schristos 
23232b3787f6Schristos void
23242b3787f6Schristos event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
23252b3787f6Schristos {
23262b3787f6Schristos 	event_debug_assert_is_setup_(event);
23272b3787f6Schristos 
23282b3787f6Schristos 	if (base_out)
23292b3787f6Schristos 		*base_out = event->ev_base;
23302b3787f6Schristos 	if (fd_out)
23312b3787f6Schristos 		*fd_out = event->ev_fd;
23322b3787f6Schristos 	if (events_out)
23332b3787f6Schristos 		*events_out = event->ev_events;
23342b3787f6Schristos 	if (callback_out)
23352b3787f6Schristos 		*callback_out = event->ev_callback;
23362b3787f6Schristos 	if (arg_out)
23372b3787f6Schristos 		*arg_out = event->ev_arg;
23382b3787f6Schristos }
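
/* Usage sketch: event_get_assignment() recovers everything that was passed
 * to event_assign()/event_new(), e.g. to move an event to another base
 * ('new_base' is a placeholder): */
#if 0
	struct event_base *old_base;
	evutil_socket_t fd;
	short events;
	event_callback_fn cb;
	void *arg;

	event_get_assignment(ev, &old_base, &fd, &events, &cb, &arg);
	event_del(ev);
	event_assign(ev, new_base, fd, events, cb, arg);
#endif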
23392b3787f6Schristos 
23402b3787f6Schristos size_t
23412b3787f6Schristos event_get_struct_event_size(void)
23422b3787f6Schristos {
23432b3787f6Schristos 	return sizeof(struct event);
23442b3787f6Schristos }
23452b3787f6Schristos 
23462b3787f6Schristos evutil_socket_t
23472b3787f6Schristos event_get_fd(const struct event *ev)
23482b3787f6Schristos {
23492b3787f6Schristos 	event_debug_assert_is_setup_(ev);
23502b3787f6Schristos 	return ev->ev_fd;
23512b3787f6Schristos }
23522b3787f6Schristos 
23532b3787f6Schristos struct event_base *
23542b3787f6Schristos event_get_base(const struct event *ev)
23552b3787f6Schristos {
23562b3787f6Schristos 	event_debug_assert_is_setup_(ev);
23572b3787f6Schristos 	return ev->ev_base;
23582b3787f6Schristos }
23592b3787f6Schristos 
23602b3787f6Schristos short
23612b3787f6Schristos event_get_events(const struct event *ev)
23622b3787f6Schristos {
23632b3787f6Schristos 	event_debug_assert_is_setup_(ev);
23642b3787f6Schristos 	return ev->ev_events;
23652b3787f6Schristos }
23662b3787f6Schristos 
23672b3787f6Schristos event_callback_fn
23682b3787f6Schristos event_get_callback(const struct event *ev)
23692b3787f6Schristos {
23702b3787f6Schristos 	event_debug_assert_is_setup_(ev);
23712b3787f6Schristos 	return ev->ev_callback;
23722b3787f6Schristos }
23732b3787f6Schristos 
23742b3787f6Schristos void *
23752b3787f6Schristos event_get_callback_arg(const struct event *ev)
23762b3787f6Schristos {
23772b3787f6Schristos 	event_debug_assert_is_setup_(ev);
23782b3787f6Schristos 	return ev->ev_arg;
23792b3787f6Schristos }
23802b3787f6Schristos 
23812b3787f6Schristos int
23822b3787f6Schristos event_get_priority(const struct event *ev)
23832b3787f6Schristos {
23842b3787f6Schristos 	event_debug_assert_is_setup_(ev);
23852b3787f6Schristos 	return ev->ev_pri;
23862b3787f6Schristos }
23872b3787f6Schristos 
23882b3787f6Schristos int
23892b3787f6Schristos event_add(struct event *ev, const struct timeval *tv)
23902b3787f6Schristos {
23912b3787f6Schristos 	int res;
23922b3787f6Schristos 
23932b3787f6Schristos 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
23942b3787f6Schristos 		event_warnx("%s: event has no event_base set.", __func__);
23952b3787f6Schristos 		return -1;
23962b3787f6Schristos 	}
23972b3787f6Schristos 
23982b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
23992b3787f6Schristos 
24002b3787f6Schristos 	res = event_add_nolock_(ev, tv, 0);
24012b3787f6Schristos 
24022b3787f6Schristos 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
24032b3787f6Schristos 
24042b3787f6Schristos 	return (res);
24052b3787f6Schristos }
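
/* Usage sketch: a NULL tv arms the event with no timeout; otherwise tv is
 * an interval relative to now.  A one-shot 5-second timer ('timer_cb' is
 * a placeholder): */
#if 0
	struct timeval five_sec = { 5, 0 };
	struct event *timer = evtimer_new(base, timer_cb, NULL);
	event_add(timer, &five_sec);
#endif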
24062b3787f6Schristos 
24072b3787f6Schristos /* Helper callback: wake an event_base from another thread.  This version
24082b3787f6Schristos  * works by writing a byte to one end of a socketpair, so that the event_base
24092b3787f6Schristos  * listening on the other end will wake up as the corresponding event
24102b3787f6Schristos  * triggers */
24112b3787f6Schristos static int
24122b3787f6Schristos evthread_notify_base_default(struct event_base *base)
24132b3787f6Schristos {
24142b3787f6Schristos 	char buf[1];
24152b3787f6Schristos 	int r;
24162b3787f6Schristos 	buf[0] = (char) 0;
24172b3787f6Schristos #ifdef _WIN32
24182b3787f6Schristos 	r = send(base->th_notify_fd[1], buf, 1, 0);
24192b3787f6Schristos #else
24202b3787f6Schristos 	r = write(base->th_notify_fd[1], buf, 1);
24212b3787f6Schristos #endif
24222b3787f6Schristos 	return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
24232b3787f6Schristos }
24242b3787f6Schristos 
24252b3787f6Schristos #ifdef EVENT__HAVE_EVENTFD
24262b3787f6Schristos /* Helper callback: wake an event_base from another thread.  This version
24272b3787f6Schristos  * assumes that you have a working eventfd() implementation. */
24282b3787f6Schristos static int
24292b3787f6Schristos evthread_notify_base_eventfd(struct event_base *base)
24302b3787f6Schristos {
24312b3787f6Schristos 	ev_uint64_t msg = 1;
24322b3787f6Schristos 	int r;
24332b3787f6Schristos 	do {
24342b3787f6Schristos 		r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
24352b3787f6Schristos 	} while (r < 0 && errno == EAGAIN);
24362b3787f6Schristos 
24372b3787f6Schristos 	return (r < 0) ? -1 : 0;
24382b3787f6Schristos }
24392b3787f6Schristos #endif
24402b3787f6Schristos 
24412b3787f6Schristos 
24422b3787f6Schristos /** Tell the thread currently running the event_loop for base (if any) that it
24432b3787f6Schristos  * needs to stop waiting in its dispatch function (if it is) and process all
24442b3787f6Schristos  * active callbacks. */
24452b3787f6Schristos static int
24462b3787f6Schristos evthread_notify_base(struct event_base *base)
24472b3787f6Schristos {
24482b3787f6Schristos 	EVENT_BASE_ASSERT_LOCKED(base);
24492b3787f6Schristos 	if (!base->th_notify_fn)
24502b3787f6Schristos 		return -1;
24512b3787f6Schristos 	if (base->is_notify_pending)
24522b3787f6Schristos 		return 0;
24532b3787f6Schristos 	base->is_notify_pending = 1;
24542b3787f6Schristos 	return base->th_notify_fn(base);
24552b3787f6Schristos }
24562b3787f6Schristos 
24572b3787f6Schristos /* Implementation function to remove a timeout on a currently pending event.
24582b3787f6Schristos  */
24592b3787f6Schristos int
24602b3787f6Schristos event_remove_timer_nolock_(struct event *ev)
24612b3787f6Schristos {
24622b3787f6Schristos 	struct event_base *base = ev->ev_base;
24632b3787f6Schristos 
24642b3787f6Schristos 	EVENT_BASE_ASSERT_LOCKED(base);
24652b3787f6Schristos 	event_debug_assert_is_setup_(ev);
24662b3787f6Schristos 
24672b3787f6Schristos 	event_debug(("event_remove_timer_nolock: event: %p", ev));
24682b3787f6Schristos 
24692b3787f6Schristos 	/* If it's not pending on a timeout, we don't need to do anything. */
24702b3787f6Schristos 	if (ev->ev_flags & EVLIST_TIMEOUT) {
24712b3787f6Schristos 		event_queue_remove_timeout(base, ev);
24722b3787f6Schristos 		evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
24732b3787f6Schristos 	}
24742b3787f6Schristos 
24752b3787f6Schristos 	return (0);
24762b3787f6Schristos }
24772b3787f6Schristos 
24782b3787f6Schristos int
24792b3787f6Schristos event_remove_timer(struct event *ev)
24802b3787f6Schristos {
24812b3787f6Schristos 	int res;
24822b3787f6Schristos 
24832b3787f6Schristos 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
24842b3787f6Schristos 		event_warnx("%s: event has no event_base set.", __func__);
24852b3787f6Schristos 		return -1;
24862b3787f6Schristos 	}
24872b3787f6Schristos 
24882b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
24892b3787f6Schristos 
24902b3787f6Schristos 	res = event_remove_timer_nolock_(ev);
24912b3787f6Schristos 
24922b3787f6Schristos 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
24932b3787f6Schristos 
24942b3787f6Schristos 	return (res);
24952b3787f6Schristos }
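
/* Usage sketch: drop only the timeout of a pending event, keeping its I/O
 * part armed - e.g. cancelling an idle timer once a peer proves live
 * ('conn_ev' is a placeholder): */
#if 0
	struct timeval idle = { 30, 0 };
	event_add(conn_ev, &idle);	/* EV_READ + 30s idle timeout */
	/* ... later, after the first byte arrives: */
	event_remove_timer(conn_ev);	/* EV_READ stays pending, timer gone */
#endif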
24962b3787f6Schristos 
24972b3787f6Schristos /* Implementation function to add an event.  Works just like event_add,
24982b3787f6Schristos  * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
24992b3787f6Schristos  * we treat tv as an absolute time, not as an interval to add to the current
25002b3787f6Schristos  * time */
25012b3787f6Schristos int
25022b3787f6Schristos event_add_nolock_(struct event *ev, const struct timeval *tv,
25032b3787f6Schristos     int tv_is_absolute)
25042b3787f6Schristos {
25052b3787f6Schristos 	struct event_base *base = ev->ev_base;
25062b3787f6Schristos 	int res = 0;
25072b3787f6Schristos 	int notify = 0;
25082b3787f6Schristos 
25092b3787f6Schristos 	EVENT_BASE_ASSERT_LOCKED(base);
25102b3787f6Schristos 	event_debug_assert_is_setup_(ev);
25112b3787f6Schristos 
25122b3787f6Schristos 	event_debug((
25131b6f2cd4Schristos 		 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
25142b3787f6Schristos 		 ev,
25152b3787f6Schristos 		 EV_SOCK_ARG(ev->ev_fd),
25162b3787f6Schristos 		 ev->ev_events & EV_READ ? "EV_READ " : " ",
25172b3787f6Schristos 		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
25181b6f2cd4Schristos 		 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
25192b3787f6Schristos 		 tv ? "EV_TIMEOUT " : " ",
25202b3787f6Schristos 		 ev->ev_callback));
25212b3787f6Schristos 
25222b3787f6Schristos 	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
25232b3787f6Schristos 
25241b6f2cd4Schristos 	if (ev->ev_flags & EVLIST_FINALIZING) {
25251b6f2cd4Schristos 		/* XXXX debug */
25261b6f2cd4Schristos 		return (-1);
25271b6f2cd4Schristos 	}
25281b6f2cd4Schristos 
25292b3787f6Schristos 	/*
25302b3787f6Schristos 	 * prepare for timeout insertion further below; if any step
25312b3787f6Schristos 	 * fails, we must not change any state.
25322b3787f6Schristos 	 */
25332b3787f6Schristos 	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
25342b3787f6Schristos 		if (min_heap_reserve_(&base->timeheap,
25352b3787f6Schristos 			1 + min_heap_size_(&base->timeheap)) == -1)
25362b3787f6Schristos 			return (-1);  /* ENOMEM == errno */
25372b3787f6Schristos 	}
25382b3787f6Schristos 
25392b3787f6Schristos 	/* If the main thread is currently executing a signal event's
25402b3787f6Schristos 	 * callback, and we are not the main thread, then we want to wait
25412b3787f6Schristos 	 * until the callback is done before we mess with the event, or else
25422b3787f6Schristos 	 * we can race on ev_ncalls and ev_pncalls below. */
25432b3787f6Schristos #ifndef EVENT__DISABLE_THREAD_SUPPORT
25442b3787f6Schristos 	if (base->current_event == event_to_event_callback(ev) &&
25452b3787f6Schristos 	    (ev->ev_events & EV_SIGNAL)
25462b3787f6Schristos 	    && !EVBASE_IN_THREAD(base)) {
25472b3787f6Schristos 		++base->current_event_waiters;
25482b3787f6Schristos 		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
25492b3787f6Schristos 	}
25502b3787f6Schristos #endif
25512b3787f6Schristos 
25521b6f2cd4Schristos 	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
25532b3787f6Schristos 	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
25541b6f2cd4Schristos 		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
25552b3787f6Schristos 			res = evmap_io_add_(base, ev->ev_fd, ev);
25562b3787f6Schristos 		else if (ev->ev_events & EV_SIGNAL)
25572b3787f6Schristos 			res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
25582b3787f6Schristos 		if (res != -1)
25592b3787f6Schristos 			event_queue_insert_inserted(base, ev);
25602b3787f6Schristos 		if (res == 1) {
25612b3787f6Schristos 			/* evmap says we need to notify the main thread. */
25622b3787f6Schristos 			notify = 1;
25632b3787f6Schristos 			res = 0;
25642b3787f6Schristos 		}
25652b3787f6Schristos 	}
25662b3787f6Schristos 
25672b3787f6Schristos 	/*
25682b3787f6Schristos 	 * we should change the timeout state only if the previous event
25692b3787f6Schristos 	 * addition succeeded.
25702b3787f6Schristos 	 */
25712b3787f6Schristos 	if (res != -1 && tv != NULL) {
25722b3787f6Schristos 		struct timeval now;
25732b3787f6Schristos 		int common_timeout;
25742b3787f6Schristos #ifdef USE_REINSERT_TIMEOUT
25752b3787f6Schristos 		int was_common;
25762b3787f6Schristos 		int old_timeout_idx;
25772b3787f6Schristos #endif
25782b3787f6Schristos 
25792b3787f6Schristos 		/*
25802b3787f6Schristos 		 * for persistent timeout events, we remember the
25812b3787f6Schristos 		 * timeout value and re-add the event.
25822b3787f6Schristos 		 *
25832b3787f6Schristos 		 * If tv_is_absolute, this was already set.
25842b3787f6Schristos 		 */
25852b3787f6Schristos 		if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
25862b3787f6Schristos 			ev->ev_io_timeout = *tv;
25872b3787f6Schristos 
25882b3787f6Schristos #ifndef USE_REINSERT_TIMEOUT
25892b3787f6Schristos 		if (ev->ev_flags & EVLIST_TIMEOUT) {
25902b3787f6Schristos 			event_queue_remove_timeout(base, ev);
25912b3787f6Schristos 		}
25922b3787f6Schristos #endif
25932b3787f6Schristos 
25942b3787f6Schristos 		/* Check if it is active due to a timeout.  Rescheduling
25952b3787f6Schristos 		 * this timeout before the callback can be executed
25962b3787f6Schristos 		 * removes it from the active list. */
25972b3787f6Schristos 		if ((ev->ev_flags & EVLIST_ACTIVE) &&
25982b3787f6Schristos 		    (ev->ev_res & EV_TIMEOUT)) {
25992b3787f6Schristos 			if (ev->ev_events & EV_SIGNAL) {
26002b3787f6Schristos 				/* See if we are just actively executing
26012b3787f6Schristos 				 * this event in a loop
26022b3787f6Schristos 				 */
26032b3787f6Schristos 				if (ev->ev_ncalls && ev->ev_pncalls) {
26042b3787f6Schristos 					/* Abort loop */
26052b3787f6Schristos 					*ev->ev_pncalls = 0;
26062b3787f6Schristos 				}
26072b3787f6Schristos 			}
26082b3787f6Schristos 
26092b3787f6Schristos 			event_queue_remove_active(base, event_to_event_callback(ev));
26102b3787f6Schristos 		}
26112b3787f6Schristos 
26122b3787f6Schristos 		gettime(base, &now);
26132b3787f6Schristos 
26142b3787f6Schristos 		common_timeout = is_common_timeout(tv, base);
26152b3787f6Schristos #ifdef USE_REINSERT_TIMEOUT
26162b3787f6Schristos 		was_common = is_common_timeout(&ev->ev_timeout, base);
26172b3787f6Schristos 		old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
26182b3787f6Schristos #endif
26192b3787f6Schristos 
26202b3787f6Schristos 		if (tv_is_absolute) {
26212b3787f6Schristos 			ev->ev_timeout = *tv;
26222b3787f6Schristos 		} else if (common_timeout) {
26232b3787f6Schristos 			struct timeval tmp = *tv;
26242b3787f6Schristos 			tmp.tv_usec &= MICROSECONDS_MASK;
26252b3787f6Schristos 			evutil_timeradd(&now, &tmp, &ev->ev_timeout);
26262b3787f6Schristos 			ev->ev_timeout.tv_usec |=
26272b3787f6Schristos 			    (tv->tv_usec & ~MICROSECONDS_MASK);
26282b3787f6Schristos 		} else {
26292b3787f6Schristos 			evutil_timeradd(&now, tv, &ev->ev_timeout);
26302b3787f6Schristos 		}
26312b3787f6Schristos 
26322b3787f6Schristos 		event_debug((
26332b3787f6Schristos 			 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
26342b3787f6Schristos 			 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
26352b3787f6Schristos 
26362b3787f6Schristos #ifdef USE_REINSERT_TIMEOUT
26372b3787f6Schristos 		event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
26382b3787f6Schristos #else
26392b3787f6Schristos 		event_queue_insert_timeout(base, ev);
26402b3787f6Schristos #endif
26412b3787f6Schristos 
26422b3787f6Schristos 		if (common_timeout) {
26432b3787f6Schristos 			struct common_timeout_list *ctl =
26442b3787f6Schristos 			    get_common_timeout_list(base, &ev->ev_timeout);
26452b3787f6Schristos 			if (ev == TAILQ_FIRST(&ctl->events)) {
26462b3787f6Schristos 				common_timeout_schedule(ctl, &now, ev);
26472b3787f6Schristos 			}
26482b3787f6Schristos 		} else {
26492b3787f6Schristos 			struct event* top = NULL;
26502b3787f6Schristos 			/* See if the earliest timeout is now earlier than it
26512b3787f6Schristos 			 * was before: if so, we will need to tell the main
26522b3787f6Schristos 			 * thread to wake up earlier than it would otherwise.
26532b3787f6Schristos 			 * We double check the timeout of the top element to
26542b3787f6Schristos 			 * handle time distortions due to system suspension.
26552b3787f6Schristos 			 */
26562b3787f6Schristos 			if (min_heap_elt_is_top_(ev))
26572b3787f6Schristos 				notify = 1;
26582b3787f6Schristos 			else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
26592b3787f6Schristos 					 evutil_timercmp(&top->ev_timeout, &now, <))
26602b3787f6Schristos 				notify = 1;
26612b3787f6Schristos 		}
26622b3787f6Schristos 	}
26632b3787f6Schristos 
26642b3787f6Schristos 	/* if we are not in the right thread, we need to wake up the loop */
26652b3787f6Schristos 	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
26662b3787f6Schristos 		evthread_notify_base(base);
26672b3787f6Schristos 
26682b3787f6Schristos 	event_debug_note_add_(ev);
26692b3787f6Schristos 
26702b3787f6Schristos 	return (res);
26712b3787f6Schristos }
26722b3787f6Schristos 
26731b6f2cd4Schristos static int
26741b6f2cd4Schristos event_del_(struct event *ev, int blocking)
26752b3787f6Schristos {
26762b3787f6Schristos 	int res;
26772b3787f6Schristos 
26782b3787f6Schristos 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
26792b3787f6Schristos 		event_warnx("%s: event has no event_base set.", __func__);
26802b3787f6Schristos 		return -1;
26812b3787f6Schristos 	}
26822b3787f6Schristos 
26832b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
26842b3787f6Schristos 
26851b6f2cd4Schristos 	res = event_del_nolock_(ev, blocking);
26862b3787f6Schristos 
26872b3787f6Schristos 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
26882b3787f6Schristos 
26892b3787f6Schristos 	return (res);
26902b3787f6Schristos }
26912b3787f6Schristos 
26922b3787f6Schristos int
26931b6f2cd4Schristos event_del(struct event *ev)
26941b6f2cd4Schristos {
26951b6f2cd4Schristos 	return event_del_(ev, EVENT_DEL_AUTOBLOCK);
26961b6f2cd4Schristos }
26971b6f2cd4Schristos 
26981b6f2cd4Schristos int
26991b6f2cd4Schristos event_del_block(struct event *ev)
27001b6f2cd4Schristos {
27011b6f2cd4Schristos 	return event_del_(ev, EVENT_DEL_BLOCK);
27021b6f2cd4Schristos }
27031b6f2cd4Schristos 
27041b6f2cd4Schristos int
27051b6f2cd4Schristos event_del_noblock(struct event *ev)
27061b6f2cd4Schristos {
27071b6f2cd4Schristos 	return event_del_(ev, EVENT_DEL_NOBLOCK);
27081b6f2cd4Schristos }
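
/* The three public wrappers differ only in whether deletion may wait for a
 * callback of 'ev' running in another thread to finish:
 *   event_del()         - waits, unless the event was created EV_FINALIZE
 *   event_del_block()   - always waits
 *   event_del_noblock() - never waits
 * A usage sketch ('arg' is a placeholder for the callback argument): */
#if 0
	event_del_block(ev);
	free(arg);	/* safe: no callback can still be using 'arg' */
#endif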
27091b6f2cd4Schristos 
27101b6f2cd4Schristos /** Helper for event_del: always called with th_base_lock held.
27111b6f2cd4Schristos  *
27121b6f2cd4Schristos  * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
27131b6f2cd4Schristos  * EVEN_IF_FINALIZING} values. See those for more information.
27141b6f2cd4Schristos  */
27151b6f2cd4Schristos int
27161b6f2cd4Schristos event_del_nolock_(struct event *ev, int blocking)
27172b3787f6Schristos {
27182b3787f6Schristos 	struct event_base *base;
27192b3787f6Schristos 	int res = 0, notify = 0;
27202b3787f6Schristos 
27212b3787f6Schristos 	event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
27222b3787f6Schristos 		ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
27232b3787f6Schristos 
27242b3787f6Schristos 	/* An event without a base has not been added */
27252b3787f6Schristos 	if (ev->ev_base == NULL)
27262b3787f6Schristos 		return (-1);
27272b3787f6Schristos 
27282b3787f6Schristos 	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
27292b3787f6Schristos 
27301b6f2cd4Schristos 	if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
27311b6f2cd4Schristos 		if (ev->ev_flags & EVLIST_FINALIZING) {
27321b6f2cd4Schristos 			/* XXXX Debug */
27331b6f2cd4Schristos 			return 0;
27341b6f2cd4Schristos 		}
27351b6f2cd4Schristos 	}
27361b6f2cd4Schristos 
27372b3787f6Schristos 	/* If the main thread is currently executing this event's callback,
27382b3787f6Schristos 	 * and we are not the main thread, then we want to wait until the
27392b3787f6Schristos 	 * callback is done before we start removing the event.  That way,
27402b3787f6Schristos 	 * when this function returns, it will be safe to free the
27412b3787f6Schristos 	 * user-supplied argument. */
27422b3787f6Schristos 	base = ev->ev_base;
27432b3787f6Schristos #ifndef EVENT__DISABLE_THREAD_SUPPORT
27441b6f2cd4Schristos 	if (blocking != EVENT_DEL_NOBLOCK &&
27451b6f2cd4Schristos 	    base->current_event == event_to_event_callback(ev) &&
27461b6f2cd4Schristos 	    !EVBASE_IN_THREAD(base) &&
27471b6f2cd4Schristos 	    (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
27482b3787f6Schristos 		++base->current_event_waiters;
27492b3787f6Schristos 		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
27502b3787f6Schristos 	}
27512b3787f6Schristos #endif
27522b3787f6Schristos 
27532b3787f6Schristos 	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
27542b3787f6Schristos 
27552b3787f6Schristos 	/* See if we are just active executing this event in a loop */
27562b3787f6Schristos 	/* See if we are just actively executing this event in a loop */
27572b3787f6Schristos 		if (ev->ev_ncalls && ev->ev_pncalls) {
27582b3787f6Schristos 			/* Abort loop */
27592b3787f6Schristos 			*ev->ev_pncalls = 0;
27602b3787f6Schristos 		}
27612b3787f6Schristos 	}
27622b3787f6Schristos 
27632b3787f6Schristos 	if (ev->ev_flags & EVLIST_TIMEOUT) {
27642b3787f6Schristos 		/* NOTE: We never need to notify the main thread because of a
27652b3787f6Schristos 		 * deleted timeout event: all that could happen if we don't is
27662b3787f6Schristos 		 * that the dispatch loop might wake up too early.  But the
27672b3787f6Schristos 		 * point of notifying the main thread _is_ to wake up the
27682b3787f6Schristos 		 * dispatch loop early anyway, so we wouldn't gain anything by
27692b3787f6Schristos 		 * doing it.
27702b3787f6Schristos 		 */
27712b3787f6Schristos 		event_queue_remove_timeout(base, ev);
27722b3787f6Schristos 	}
27732b3787f6Schristos 
27742b3787f6Schristos 	if (ev->ev_flags & EVLIST_ACTIVE)
27752b3787f6Schristos 		event_queue_remove_active(base, event_to_event_callback(ev));
27762b3787f6Schristos 	else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
27772b3787f6Schristos 		event_queue_remove_active_later(base, event_to_event_callback(ev));
27782b3787f6Schristos 
27792b3787f6Schristos 	if (ev->ev_flags & EVLIST_INSERTED) {
27802b3787f6Schristos 		event_queue_remove_inserted(base, ev);
27811b6f2cd4Schristos 		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
27822b3787f6Schristos 			res = evmap_io_del_(base, ev->ev_fd, ev);
27832b3787f6Schristos 		else
27842b3787f6Schristos 			res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
27852b3787f6Schristos 		if (res == 1) {
27862b3787f6Schristos 			/* evmap says we need to notify the main thread. */
27872b3787f6Schristos 			notify = 1;
27882b3787f6Schristos 			res = 0;
27892b3787f6Schristos 		}
27902b3787f6Schristos 	}
27912b3787f6Schristos 
27922b3787f6Schristos 	/* if we are not in the right thread, we need to wake up the loop */
27932b3787f6Schristos 	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
27942b3787f6Schristos 		evthread_notify_base(base);
27952b3787f6Schristos 
27962b3787f6Schristos 	event_debug_note_del_(ev);
27972b3787f6Schristos 
27982b3787f6Schristos 	return (res);
27992b3787f6Schristos }
28002b3787f6Schristos 
28012b3787f6Schristos void
28022b3787f6Schristos event_active(struct event *ev, int res, short ncalls)
28032b3787f6Schristos {
28042b3787f6Schristos 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
28052b3787f6Schristos 		event_warnx("%s: event has no event_base set.", __func__);
28062b3787f6Schristos 		return;
28072b3787f6Schristos 	}
28082b3787f6Schristos 
28092b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
28102b3787f6Schristos 
28112b3787f6Schristos 	event_debug_assert_is_setup_(ev);
28122b3787f6Schristos 
28132b3787f6Schristos 	event_active_nolock_(ev, res, ncalls);
28142b3787f6Schristos 
28152b3787f6Schristos 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
28162b3787f6Schristos }
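
/* Usage sketch: event_active() injects an activation from any thread, as
 * if the backend had reported the given result flags ('read_ev' is a
 * placeholder; the ncalls argument only matters for signal events): */
#if 0
	event_active(read_ev, EV_READ, 0);
#endif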
28172b3787f6Schristos 
28182b3787f6Schristos 
28192b3787f6Schristos void
28202b3787f6Schristos event_active_nolock_(struct event *ev, int res, short ncalls)
28212b3787f6Schristos {
28222b3787f6Schristos 	struct event_base *base;
28232b3787f6Schristos 
28242b3787f6Schristos 	event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
28252b3787f6Schristos 		ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
28262b3787f6Schristos 
28272b3787f6Schristos 	base = ev->ev_base;
28282b3787f6Schristos 	EVENT_BASE_ASSERT_LOCKED(base);
28292b3787f6Schristos 
28301b6f2cd4Schristos 	if (ev->ev_flags & EVLIST_FINALIZING) {
28311b6f2cd4Schristos 		/* XXXX debug */
28321b6f2cd4Schristos 		return;
28331b6f2cd4Schristos 	}
28341b6f2cd4Schristos 
28352b3787f6Schristos 	switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
28362b3787f6Schristos 	default:
28372b3787f6Schristos 	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
28382b3787f6Schristos 		EVUTIL_ASSERT(0);
28392b3787f6Schristos 		break;
28402b3787f6Schristos 	case EVLIST_ACTIVE:
28412b3787f6Schristos 		/* We get different kinds of events, add them together */
28422b3787f6Schristos 		ev->ev_res |= res;
28432b3787f6Schristos 		return;
28442b3787f6Schristos 	case EVLIST_ACTIVE_LATER:
28452b3787f6Schristos 		ev->ev_res |= res;
28462b3787f6Schristos 		break;
28472b3787f6Schristos 	case 0:
28482b3787f6Schristos 		ev->ev_res = res;
28492b3787f6Schristos 		break;
28502b3787f6Schristos 	}
28512b3787f6Schristos 
28522b3787f6Schristos 	if (ev->ev_pri < base->event_running_priority)
28532b3787f6Schristos 		base->event_continue = 1;
28542b3787f6Schristos 
28552b3787f6Schristos 	if (ev->ev_events & EV_SIGNAL) {
28562b3787f6Schristos #ifndef EVENT__DISABLE_THREAD_SUPPORT
28572b3787f6Schristos 		if (base->current_event == event_to_event_callback(ev) &&
28582b3787f6Schristos 		    !EVBASE_IN_THREAD(base)) {
28592b3787f6Schristos 			++base->current_event_waiters;
28602b3787f6Schristos 			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
28612b3787f6Schristos 		}
28622b3787f6Schristos #endif
28632b3787f6Schristos 		ev->ev_ncalls = ncalls;
28642b3787f6Schristos 		ev->ev_pncalls = NULL;
28652b3787f6Schristos 	}
28662b3787f6Schristos 
28672b3787f6Schristos 	event_callback_activate_nolock_(base, event_to_event_callback(ev));
28682b3787f6Schristos }
28692b3787f6Schristos 
28702b3787f6Schristos void
28712b3787f6Schristos event_active_later_(struct event *ev, int res)
28722b3787f6Schristos {
28732b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
28742b3787f6Schristos 	event_active_later_nolock_(ev, res);
28752b3787f6Schristos 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
28762b3787f6Schristos }
28772b3787f6Schristos 
28782b3787f6Schristos void
28792b3787f6Schristos event_active_later_nolock_(struct event *ev, int res)
28802b3787f6Schristos {
28812b3787f6Schristos 	struct event_base *base = ev->ev_base;
28822b3787f6Schristos 	EVENT_BASE_ASSERT_LOCKED(base);
28832b3787f6Schristos 
28842b3787f6Schristos 	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
28852b3787f6Schristos 		/* We get different kinds of events, add them together */
28862b3787f6Schristos 		ev->ev_res |= res;
28872b3787f6Schristos 		return;
28882b3787f6Schristos 	}
28892b3787f6Schristos 
28902b3787f6Schristos 	ev->ev_res = res;
28912b3787f6Schristos 
28922b3787f6Schristos 	event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
28932b3787f6Schristos }
28942b3787f6Schristos 
28952b3787f6Schristos int
28962b3787f6Schristos event_callback_activate_(struct event_base *base,
28972b3787f6Schristos     struct event_callback *evcb)
28982b3787f6Schristos {
28992b3787f6Schristos 	int r;
29002b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
29012b3787f6Schristos 	r = event_callback_activate_nolock_(base, evcb);
29022b3787f6Schristos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
29032b3787f6Schristos 	return r;
29042b3787f6Schristos }
29052b3787f6Schristos 
29062b3787f6Schristos int
29072b3787f6Schristos event_callback_activate_nolock_(struct event_base *base,
29082b3787f6Schristos     struct event_callback *evcb)
29092b3787f6Schristos {
29102b3787f6Schristos 	int r = 1;
29112b3787f6Schristos 
29121b6f2cd4Schristos 	if (evcb->evcb_flags & EVLIST_FINALIZING)
29131b6f2cd4Schristos 		return 0;
29141b6f2cd4Schristos 
29152b3787f6Schristos 	switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
29162b3787f6Schristos 	default:
29172b3787f6Schristos 		EVUTIL_ASSERT(0);
29182b3787f6Schristos 	case EVLIST_ACTIVE_LATER:
29192b3787f6Schristos 		event_queue_remove_active_later(base, evcb);
29202b3787f6Schristos 		r = 0;
29212b3787f6Schristos 		break;
29222b3787f6Schristos 	case EVLIST_ACTIVE:
29232b3787f6Schristos 		return 0;
29242b3787f6Schristos 	case 0:
29252b3787f6Schristos 		break;
29262b3787f6Schristos 	}
29272b3787f6Schristos 
29282b3787f6Schristos 	event_queue_insert_active(base, evcb);
29292b3787f6Schristos 
29302b3787f6Schristos 	if (EVBASE_NEED_NOTIFY(base))
29312b3787f6Schristos 		evthread_notify_base(base);
29322b3787f6Schristos 
29332b3787f6Schristos 	return r;
29342b3787f6Schristos }
29352b3787f6Schristos 
29362b3787f6Schristos void
29372b3787f6Schristos event_callback_activate_later_nolock_(struct event_base *base,
29382b3787f6Schristos     struct event_callback *evcb)
29392b3787f6Schristos {
29402b3787f6Schristos 	if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
29412b3787f6Schristos 		return;
29422b3787f6Schristos 
29432b3787f6Schristos 	event_queue_insert_active_later(base, evcb);
29442b3787f6Schristos 	if (EVBASE_NEED_NOTIFY(base))
29452b3787f6Schristos 		evthread_notify_base(base);
29462b3787f6Schristos }
29472b3787f6Schristos 
29482b3787f6Schristos void
29492b3787f6Schristos event_callback_init_(struct event_base *base,
29502b3787f6Schristos     struct event_callback *cb)
29512b3787f6Schristos {
29522b3787f6Schristos 	memset(cb, 0, sizeof(*cb));
29532b3787f6Schristos 	cb->evcb_pri = base->nactivequeues - 1;
29542b3787f6Schristos }
29552b3787f6Schristos 
29562b3787f6Schristos int
29572b3787f6Schristos event_callback_cancel_(struct event_base *base,
29582b3787f6Schristos     struct event_callback *evcb)
29592b3787f6Schristos {
29602b3787f6Schristos 	int r;
29612b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
29621b6f2cd4Schristos 	r = event_callback_cancel_nolock_(base, evcb, 0);
29632b3787f6Schristos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
29642b3787f6Schristos 	return r;
29652b3787f6Schristos }
29662b3787f6Schristos 
29672b3787f6Schristos int
29682b3787f6Schristos event_callback_cancel_nolock_(struct event_base *base,
29691b6f2cd4Schristos     struct event_callback *evcb, int even_if_finalizing)
29702b3787f6Schristos {
29711b6f2cd4Schristos 	if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
29721b6f2cd4Schristos 		return 0;
29731b6f2cd4Schristos 
29742b3787f6Schristos 	if (evcb->evcb_flags & EVLIST_INIT)
29751b6f2cd4Schristos 		return event_del_nolock_(event_callback_to_event(evcb),
29761b6f2cd4Schristos 		    even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
29772b3787f6Schristos 
29782b3787f6Schristos 	switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
29792b3787f6Schristos 	default:
29802b3787f6Schristos 	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
29812b3787f6Schristos 		EVUTIL_ASSERT(0);
29822b3787f6Schristos 		break;
29832b3787f6Schristos 	case EVLIST_ACTIVE:
29842b3787f6Schristos 		/* We get different kinds of events, add them together */
29852b3787f6Schristos 		event_queue_remove_active(base, evcb);
29862b3787f6Schristos 		return 0;
29872b3787f6Schristos 	case EVLIST_ACTIVE_LATER:
29882b3787f6Schristos 		event_queue_remove_active_later(base, evcb);
29892b3787f6Schristos 		break;
29902b3787f6Schristos 	case 0:
29912b3787f6Schristos 		break;
29922b3787f6Schristos 	}
29932b3787f6Schristos 
29942b3787f6Schristos 	return 0;
29952b3787f6Schristos }
29962b3787f6Schristos 
29972b3787f6Schristos void
29982b3787f6Schristos event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
29992b3787f6Schristos {
30002b3787f6Schristos 	memset(cb, 0, sizeof(*cb));
30012b3787f6Schristos 	cb->evcb_cb_union.evcb_selfcb = fn;
30022b3787f6Schristos 	cb->evcb_arg = arg;
30032b3787f6Schristos 	cb->evcb_pri = priority;
30042b3787f6Schristos 	cb->evcb_closure = EV_CLOSURE_CB_SELF;
30052b3787f6Schristos }
30062b3787f6Schristos 
30072b3787f6Schristos void
30082b3787f6Schristos event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
30092b3787f6Schristos {
30102b3787f6Schristos 	cb->evcb_pri = priority;
30112b3787f6Schristos }
30122b3787f6Schristos 
30132b3787f6Schristos void
30142b3787f6Schristos event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
30152b3787f6Schristos {
30162b3787f6Schristos 	if (!base)
30172b3787f6Schristos 		base = current_base;
30182b3787f6Schristos 	event_callback_cancel_(base, cb);
30192b3787f6Schristos }
30202b3787f6Schristos 
30212b3787f6Schristos #define MAX_DEFERREDS_QUEUED 32
30222b3787f6Schristos int
30232b3787f6Schristos event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
30242b3787f6Schristos {
30252b3787f6Schristos 	int r = 1;
30262b3787f6Schristos 	if (!base)
30272b3787f6Schristos 		base = current_base;
30282b3787f6Schristos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
30292b3787f6Schristos 	if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
30302b3787f6Schristos 		event_callback_activate_later_nolock_(base, cb);
30312b3787f6Schristos 	} else {
30322b3787f6Schristos 		++base->n_deferreds_queued;
30332b3787f6Schristos 		r = event_callback_activate_nolock_(base, cb);
30342b3787f6Schristos 	}
30352b3787f6Schristos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
30362b3787f6Schristos 	return r;
30372b3787f6Schristos }
30382b3787f6Schristos 
30392b3787f6Schristos static int
30402b3787f6Schristos timeout_next(struct event_base *base, struct timeval **tv_p)
30412b3787f6Schristos {
30422b3787f6Schristos 	/* Caller must hold th_base_lock */
30432b3787f6Schristos 	struct timeval now;
30442b3787f6Schristos 	struct event *ev;
30452b3787f6Schristos 	struct timeval *tv = *tv_p;
30462b3787f6Schristos 	int res = 0;
30472b3787f6Schristos 
30482b3787f6Schristos 	ev = min_heap_top_(&base->timeheap);
30492b3787f6Schristos 
30502b3787f6Schristos 	if (ev == NULL) {
30512b3787f6Schristos 		/* if no time-based events are active, wait for I/O */
30522b3787f6Schristos 		*tv_p = NULL;
30532b3787f6Schristos 		goto out;
30542b3787f6Schristos 	}
30552b3787f6Schristos 
30562b3787f6Schristos 	if (gettime(base, &now) == -1) {
30572b3787f6Schristos 		res = -1;
30582b3787f6Schristos 		goto out;
30592b3787f6Schristos 	}
30602b3787f6Schristos 
30612b3787f6Schristos 	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
30622b3787f6Schristos 		evutil_timerclear(tv);
30632b3787f6Schristos 		goto out;
30642b3787f6Schristos 	}
30652b3787f6Schristos 
30662b3787f6Schristos 	evutil_timersub(&ev->ev_timeout, &now, tv);
30672b3787f6Schristos 
30682b3787f6Schristos 	EVUTIL_ASSERT(tv->tv_sec >= 0);
30692b3787f6Schristos 	EVUTIL_ASSERT(tv->tv_usec >= 0);
30702b3787f6Schristos 	event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
30712b3787f6Schristos 
30722b3787f6Schristos out:
30732b3787f6Schristos 	return (res);
30742b3787f6Schristos }
30752b3787f6Schristos 
30762b3787f6Schristos /* Activate every event whose timeout has elapsed. */
30772b3787f6Schristos static void
30782b3787f6Schristos timeout_process(struct event_base *base)
30792b3787f6Schristos {
30802b3787f6Schristos 	/* Caller must hold lock. */
30812b3787f6Schristos 	struct timeval now;
30822b3787f6Schristos 	struct event *ev;
30832b3787f6Schristos 
30842b3787f6Schristos 	if (min_heap_empty_(&base->timeheap)) {
30852b3787f6Schristos 		return;
30862b3787f6Schristos 	}
30872b3787f6Schristos 
30882b3787f6Schristos 	gettime(base, &now);
30892b3787f6Schristos 
30902b3787f6Schristos 	while ((ev = min_heap_top_(&base->timeheap))) {
30912b3787f6Schristos 		if (evutil_timercmp(&ev->ev_timeout, &now, >))
30922b3787f6Schristos 			break;
30932b3787f6Schristos 
30942b3787f6Schristos 		/* delete this event from the I/O queues */
30951b6f2cd4Schristos 		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
30962b3787f6Schristos 
30972b3787f6Schristos 		event_debug(("timeout_process: event: %p, call %p",
30982b3787f6Schristos 			 ev, ev->ev_callback));
30992b3787f6Schristos 		event_active_nolock_(ev, EV_TIMEOUT, 1);
31002b3787f6Schristos 	}
31012b3787f6Schristos }
31022b3787f6Schristos 
31032b3787f6Schristos #if (EVLIST_INTERNAL >> 4) != 1
31042b3787f6Schristos #error "Mismatch for value of EVLIST_INTERNAL"
31052b3787f6Schristos #endif
31061b6f2cd4Schristos 
31071b6f2cd4Schristos #ifndef MAX
31081b6f2cd4Schristos #define MAX(a,b) (((a)>(b))?(a):(b))
31091b6f2cd4Schristos #endif
31101b6f2cd4Schristos 
31111b6f2cd4Schristos #define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
31121b6f2cd4Schristos 
31132b3787f6Schristos /* These are a fancy way to spell
31142b3787f6Schristos      if (flags & EVLIST_INTERNAL)
31152b3787f6Schristos          base->event_count--/++;
31162b3787f6Schristos */
31172b3787f6Schristos #define DECR_EVENT_COUNT(base,flags) \
31182b3787f6Schristos 	((base)->event_count -= (~((flags) >> 4) & 1))
31191b6f2cd4Schristos #define INCR_EVENT_COUNT(base,flags) do {					\
31201b6f2cd4Schristos 	((base)->event_count += (~((flags) >> 4) & 1));				\
31211b6f2cd4Schristos 	MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);		\
31221b6f2cd4Schristos } while (0)
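
/* Worked example of the bit trick: EVLIST_INTERNAL is bit 4 (0x10, checked
 * above), so ((flags) >> 4) & 1 is 1 for internal events and 0 otherwise;
 * complementing it yields an adjustment of 0 for internal events and 1 for
 * all others, keeping internal events out of event_count. */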
31232b3787f6Schristos 
31242b3787f6Schristos static void
31252b3787f6Schristos event_queue_remove_inserted(struct event_base *base, struct event *ev)
31262b3787f6Schristos {
31272b3787f6Schristos 	EVENT_BASE_ASSERT_LOCKED(base);
31282b3787f6Schristos 	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
31292b3787f6Schristos 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
31302b3787f6Schristos 		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
31312b3787f6Schristos 		return;
31322b3787f6Schristos 	}
31332b3787f6Schristos 	DECR_EVENT_COUNT(base, ev->ev_flags);
31342b3787f6Schristos 	ev->ev_flags &= ~EVLIST_INSERTED;
31352b3787f6Schristos }
31362b3787f6Schristos static void
31372b3787f6Schristos event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
31382b3787f6Schristos {
31392b3787f6Schristos 	EVENT_BASE_ASSERT_LOCKED(base);
31402b3787f6Schristos 	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
31412b3787f6Schristos 		event_errx(1, "%s: %p not on queue %x", __func__,
31422b3787f6Schristos 			   evcb, EVLIST_ACTIVE);
31432b3787f6Schristos 		return;
31442b3787f6Schristos 	}
31452b3787f6Schristos 	DECR_EVENT_COUNT(base, evcb->evcb_flags);
31462b3787f6Schristos 	evcb->evcb_flags &= ~EVLIST_ACTIVE;
31472b3787f6Schristos 	base->event_count_active--;
31482b3787f6Schristos 
31492b3787f6Schristos 	TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
31502b3787f6Schristos 	    evcb, evcb_active_next);
31512b3787f6Schristos }
31522b3787f6Schristos static void
31532b3787f6Schristos event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
31542b3787f6Schristos {
31552b3787f6Schristos 	EVENT_BASE_ASSERT_LOCKED(base);
31562b3787f6Schristos 	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
31572b3787f6Schristos 		event_errx(1, "%s: %p not on queue %x", __func__,
31582b3787f6Schristos 			   evcb, EVLIST_ACTIVE_LATER);
31592b3787f6Schristos 		return;
31602b3787f6Schristos 	}
31612b3787f6Schristos 	DECR_EVENT_COUNT(base, evcb->evcb_flags);
31622b3787f6Schristos 	evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
31632b3787f6Schristos 	base->event_count_active--;
31642b3787f6Schristos 
31652b3787f6Schristos 	TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
31662b3787f6Schristos }
31672b3787f6Schristos static void
31682b3787f6Schristos event_queue_remove_timeout(struct event_base *base, struct event *ev)
31692b3787f6Schristos {
31702b3787f6Schristos 	EVENT_BASE_ASSERT_LOCKED(base);
31712b3787f6Schristos 	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
31722b3787f6Schristos 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
31732b3787f6Schristos 		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
31742b3787f6Schristos 		return;
31752b3787f6Schristos 	}
31762b3787f6Schristos 	DECR_EVENT_COUNT(base, ev->ev_flags);
31772b3787f6Schristos 	ev->ev_flags &= ~EVLIST_TIMEOUT;
31782b3787f6Schristos 
31792b3787f6Schristos 	if (is_common_timeout(&ev->ev_timeout, base)) {
31802b3787f6Schristos 		struct common_timeout_list *ctl =
31812b3787f6Schristos 		    get_common_timeout_list(base, &ev->ev_timeout);
31822b3787f6Schristos 		TAILQ_REMOVE(&ctl->events, ev,
31832b3787f6Schristos 		    ev_timeout_pos.ev_next_with_common_timeout);
31842b3787f6Schristos 	} else {
31852b3787f6Schristos 		min_heap_erase_(&base->timeheap, ev);
31862b3787f6Schristos 	}
31872b3787f6Schristos }
31882b3787f6Schristos 
31892b3787f6Schristos #ifdef USE_REINSERT_TIMEOUT
31902b3787f6Schristos /* Remove and reinsert 'ev' into the timeout queue. */
31912b3787f6Schristos static void
31922b3787f6Schristos event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
31932b3787f6Schristos     int was_common, int is_common, int old_timeout_idx)
31942b3787f6Schristos {
31952b3787f6Schristos 	struct common_timeout_list *ctl;
31962b3787f6Schristos 	if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
31972b3787f6Schristos 		event_queue_insert_timeout(base, ev);
31982b3787f6Schristos 		return;
31992b3787f6Schristos 	}
32002b3787f6Schristos 
32012b3787f6Schristos 	switch ((was_common<<1) | is_common) {
32022b3787f6Schristos 	case 3: /* Changing from one common timeout to another */
32032b3787f6Schristos 		ctl = base->common_timeout_queues[old_timeout_idx];
32042b3787f6Schristos 		TAILQ_REMOVE(&ctl->events, ev,
32052b3787f6Schristos 		    ev_timeout_pos.ev_next_with_common_timeout);
32062b3787f6Schristos 		ctl = get_common_timeout_list(base, &ev->ev_timeout);
32072b3787f6Schristos 		insert_common_timeout_inorder(ctl, ev);
32082b3787f6Schristos 		break;
32092b3787f6Schristos 	case 2: /* Was common; is no longer common */
32102b3787f6Schristos 		ctl = base->common_timeout_queues[old_timeout_idx];
32112b3787f6Schristos 		TAILQ_REMOVE(&ctl->events, ev,
32122b3787f6Schristos 		    ev_timeout_pos.ev_next_with_common_timeout);
32132b3787f6Schristos 		min_heap_push_(&base->timeheap, ev);
32142b3787f6Schristos 		break;
32152b3787f6Schristos 	case 1: /* Wasn't common; has become common. */
32162b3787f6Schristos 		min_heap_erase_(&base->timeheap, ev);
32172b3787f6Schristos 		ctl = get_common_timeout_list(base, &ev->ev_timeout);
32182b3787f6Schristos 		insert_common_timeout_inorder(ctl, ev);
32192b3787f6Schristos 		break;
32202b3787f6Schristos 	case 0: /* was in heap; is still on heap. */
32212b3787f6Schristos 		min_heap_adjust_(&base->timeheap, ev);
32222b3787f6Schristos 		break;
32232b3787f6Schristos 	default:
32242b3787f6Schristos 		EVUTIL_ASSERT(0); /* unreachable */
32252b3787f6Schristos 		break;
32262b3787f6Schristos 	}
32272b3787f6Schristos }
32282b3787f6Schristos #endif
32292b3787f6Schristos 
32302b3787f6Schristos /* Add 'ev' to the common timeout list in 'ctl'. */
32312b3787f6Schristos static void
32322b3787f6Schristos insert_common_timeout_inorder(struct common_timeout_list *ctl,
32332b3787f6Schristos     struct event *ev)
32342b3787f6Schristos {
32352b3787f6Schristos 	struct event *e;
32362b3787f6Schristos 	/* By all logic, we should just be able to append 'ev' to the end of
32372b3787f6Schristos 	 * ctl->events, since the timeout on each 'ev' is set to {the common
32382b3787f6Schristos 	 * timeout} + {the time when we add the event}, and so the events
32392b3787f6Schristos 	 * should arrive in order of their timeeouts.  But just in case
32402b3787f6Schristos 	 * should arrive in order of their timeouts.  But just in case
32412b3787f6Schristos 	 * the end of 'ev' to find the right insertion point.
32422b3787f6Schristos 	 * the end of the list to find the right insertion point.
32432b3787f6Schristos 	TAILQ_FOREACH_REVERSE(e, &ctl->events,
32442b3787f6Schristos 	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
32452b3787f6Schristos 		/* This timercmp is a little sneaky, since both ev and e have
32462b3787f6Schristos 		 * magic values in tv_usec.  Fortunately, they ought to have
32472b3787f6Schristos 		 * the _same_ magic values in tv_usec.  Let's assert for that.
32482b3787f6Schristos 		 */
32492b3787f6Schristos 		EVUTIL_ASSERT(
32502b3787f6Schristos 			is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
32512b3787f6Schristos 		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
32522b3787f6Schristos 			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
32532b3787f6Schristos 			    ev_timeout_pos.ev_next_with_common_timeout);
32542b3787f6Schristos 			return;
32552b3787f6Schristos 		}
32562b3787f6Schristos 	}
32572b3787f6Schristos 	TAILQ_INSERT_HEAD(&ctl->events, ev,
32582b3787f6Schristos 	    ev_timeout_pos.ev_next_with_common_timeout);
32592b3787f6Schristos }
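
/* Usage sketch: common timeouts are created through the public
 * event_base_init_common_timeout().  Events added with the returned
 * timeval share one ordered list (see the insertion search above) instead
 * of individual min-heap entries, which is cheaper when many events use
 * the same interval: */
#if 0
	struct timeval one_min = { 60, 0 };
	const struct timeval *common =
	    event_base_init_common_timeout(base, &one_min);
	event_add(ev1, common);
	event_add(ev2, common);
#endif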
32602b3787f6Schristos 
32612b3787f6Schristos static void
32622b3787f6Schristos event_queue_insert_inserted(struct event_base *base, struct event *ev)
32632b3787f6Schristos {
32642b3787f6Schristos 	EVENT_BASE_ASSERT_LOCKED(base);
32652b3787f6Schristos 
32662b3787f6Schristos 	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
32672b3787f6Schristos 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
32682b3787f6Schristos 		    ev, EV_SOCK_ARG(ev->ev_fd));
32692b3787f6Schristos 		return;
32702b3787f6Schristos 	}
32712b3787f6Schristos 
32722b3787f6Schristos 	INCR_EVENT_COUNT(base, ev->ev_flags);
32732b3787f6Schristos 
32742b3787f6Schristos 	ev->ev_flags |= EVLIST_INSERTED;
32752b3787f6Schristos }
32762b3787f6Schristos 
32772b3787f6Schristos static void
32782b3787f6Schristos event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
32792b3787f6Schristos {
32802b3787f6Schristos 	EVENT_BASE_ASSERT_LOCKED(base);
32812b3787f6Schristos 
32822b3787f6Schristos 	if (evcb->evcb_flags & EVLIST_ACTIVE) {
32832b3787f6Schristos 		/* Double insertion is possible for active events */
32842b3787f6Schristos 		return;
32852b3787f6Schristos 	}
32862b3787f6Schristos 
32872b3787f6Schristos 	INCR_EVENT_COUNT(base, evcb->evcb_flags);
32882b3787f6Schristos 
32892b3787f6Schristos 	evcb->evcb_flags |= EVLIST_ACTIVE;
32902b3787f6Schristos 
32912b3787f6Schristos 	base->event_count_active++;
32921b6f2cd4Schristos 	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
32932b3787f6Schristos 	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
32942b3787f6Schristos 	TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
32952b3787f6Schristos 	    evcb, evcb_active_next);
32962b3787f6Schristos }
32972b3787f6Schristos 
32982b3787f6Schristos static void
32992b3787f6Schristos event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
33002b3787f6Schristos {
33012b3787f6Schristos 	EVENT_BASE_ASSERT_LOCKED(base);
33022b3787f6Schristos 	if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
33032b3787f6Schristos 		/* Double insertion is possible */
33042b3787f6Schristos 		return;
33052b3787f6Schristos 	}
33062b3787f6Schristos 
33072b3787f6Schristos 	INCR_EVENT_COUNT(base, evcb->evcb_flags);
33082b3787f6Schristos 	evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
33092b3787f6Schristos 	base->event_count_active++;
33101b6f2cd4Schristos 	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
33112b3787f6Schristos 	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
33122b3787f6Schristos 	TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
33132b3787f6Schristos }
33142b3787f6Schristos 
33152b3787f6Schristos static void
33162b3787f6Schristos event_queue_insert_timeout(struct event_base *base, struct event *ev)
33172b3787f6Schristos {
33182b3787f6Schristos 	EVENT_BASE_ASSERT_LOCKED(base);
33192b3787f6Schristos 
33202b3787f6Schristos 	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
33212b3787f6Schristos 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
33222b3787f6Schristos 		    ev, EV_SOCK_ARG(ev->ev_fd));
33232b3787f6Schristos 		return;
33242b3787f6Schristos 	}
33252b3787f6Schristos 
33262b3787f6Schristos 	INCR_EVENT_COUNT(base, ev->ev_flags);
33272b3787f6Schristos 
33282b3787f6Schristos 	ev->ev_flags |= EVLIST_TIMEOUT;
33292b3787f6Schristos 
33302b3787f6Schristos 	if (is_common_timeout(&ev->ev_timeout, base)) {
33312b3787f6Schristos 		struct common_timeout_list *ctl =
33322b3787f6Schristos 		    get_common_timeout_list(base, &ev->ev_timeout);
33332b3787f6Schristos 		insert_common_timeout_inorder(ctl, ev);
33342b3787f6Schristos 	} else {
33352b3787f6Schristos 		min_heap_push_(&base->timeheap, ev);
33362b3787f6Schristos 	}
33372b3787f6Schristos }
33382b3787f6Schristos 
33392b3787f6Schristos static void
33402b3787f6Schristos event_queue_make_later_events_active(struct event_base *base)
33412b3787f6Schristos {
33422b3787f6Schristos 	struct event_callback *evcb;
33432b3787f6Schristos 	EVENT_BASE_ASSERT_LOCKED(base);
33442b3787f6Schristos 
33452b3787f6Schristos 	while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
33462b3787f6Schristos 		TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
33472b3787f6Schristos 		evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
33482b3787f6Schristos 		EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
33492b3787f6Schristos 		TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
33502b3787f6Schristos 		base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
33512b3787f6Schristos 	}
33522b3787f6Schristos }
33532b3787f6Schristos 
33542b3787f6Schristos /* Functions for debugging */
33552b3787f6Schristos 
33562b3787f6Schristos const char *
33572b3787f6Schristos event_get_version(void)
33582b3787f6Schristos {
33592b3787f6Schristos 	return (EVENT__VERSION);
33602b3787f6Schristos }
33612b3787f6Schristos 
33622b3787f6Schristos ev_uint32_t
33632b3787f6Schristos event_get_version_number(void)
33642b3787f6Schristos {
33652b3787f6Schristos 	return (EVENT__NUMERIC_VERSION);
33662b3787f6Schristos }
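
/* Usage sketch: detect a mismatch between the headers compiled against and
 * the library linked against: */
#if 0
	if (event_get_version_number() != LIBEVENT_VERSION_NUMBER)
		fprintf(stderr, "built against %s but running %s\n",
		    LIBEVENT_VERSION, event_get_version());
#endif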
33672b3787f6Schristos 
33682b3787f6Schristos /*
33692b3787f6Schristos  * No thread-safe interface needed - the information should be the same
33702b3787f6Schristos  * for all threads.
33712b3787f6Schristos  */
33722b3787f6Schristos 
33732b3787f6Schristos const char *
33742b3787f6Schristos event_get_method(void)
33752b3787f6Schristos {
33762b3787f6Schristos 	return (current_base->evsel->name);
33772b3787f6Schristos }

#ifndef EVENT__DISABLE_MM_REPLACEMENT
static void *(*mm_malloc_fn_)(size_t sz) = NULL;
static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
static void (*mm_free_fn_)(void *p) = NULL;

void *
event_mm_malloc_(size_t sz)
{
	if (sz == 0)
		return NULL;

	if (mm_malloc_fn_)
		return mm_malloc_fn_(sz);
	else
		return malloc(sz);
}

void *
event_mm_calloc_(size_t count, size_t size)
{
	if (count == 0 || size == 0)
		return NULL;

	if (mm_malloc_fn_) {
		size_t sz = count * size;
		void *p = NULL;
		/* Refuse a multiplication that would overflow size_t. */
		if (count > EV_SIZE_MAX / size)
			goto error;
		p = mm_malloc_fn_(sz);
		if (p)
			return memset(p, 0, sz);
	} else {
		void *p = calloc(count, size);
#ifdef _WIN32
		/* Windows calloc doesn't reliably set ENOMEM */
		if (p == NULL)
			goto error;
#endif
		return p;
	}

error:
	errno = ENOMEM;
	return NULL;
}

char *
event_mm_strdup_(const char *str)
{
	if (!str) {
		errno = EINVAL;
		return NULL;
	}

	if (mm_malloc_fn_) {
		size_t ln = strlen(str);
		void *p = NULL;
		/* ln+1 below must not wrap around to zero. */
		if (ln == EV_SIZE_MAX)
			goto error;
		p = mm_malloc_fn_(ln+1);
		if (p)
			return memcpy(p, str, ln+1);
	} else
#ifdef _WIN32
		return _strdup(str);
#else
		return strdup(str);
#endif

error:
	errno = ENOMEM;
	return NULL;
}

void *
event_mm_realloc_(void *ptr, size_t sz)
{
	if (mm_realloc_fn_)
		return mm_realloc_fn_(ptr, sz);
	else
		return realloc(ptr, sz);
}

void
event_mm_free_(void *ptr)
{
	if (mm_free_fn_)
		mm_free_fn_(ptr);
	else
		free(ptr);
}

void
event_set_mem_functions(void *(*malloc_fn)(size_t sz),
			void *(*realloc_fn)(void *ptr, size_t sz),
			void (*free_fn)(void *ptr))
{
	mm_malloc_fn_ = malloc_fn;
	mm_realloc_fn_ = realloc_fn;
	mm_free_fn_ = free_fn;
}
#endif
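/*
 * Illustrative sketch (editor's addition): installing custom allocators via
 * event_set_mem_functions().  This must happen before any other libevent
 * call so that every allocation and the matching free go through the same
 * functions.  The counting wrappers below are hypothetical names, present
 * only to show the required signatures.
 */
#if 0
#include <event2/event.h>
#include <stdlib.h>

static size_t n_allocs;

static void *counting_malloc(size_t sz) { ++n_allocs; return malloc(sz); }
static void *counting_realloc(void *p, size_t sz) { return realloc(p, sz); }
static void counting_free(void *p) { free(p); }

int
main(void)
{
	/* Install before creating any base or event. */
	event_set_mem_functions(counting_malloc, counting_realloc, counting_free);
	/* ... create bases and events as usual ... */
	return 0;
}
#endif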

#ifdef EVENT__HAVE_EVENTFD
static void
evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
{
	ev_uint64_t msg;
	ev_ssize_t r;
	struct event_base *base = arg;

	r = read(fd, (void*) &msg, sizeof(msg));
	/* A failed read with EAGAIN only means the counter was already
	 * drained; anything else deserves a warning. */
	if (r < 0 && errno != EAGAIN) {
		event_sock_warn(fd, "Error reading from eventfd");
	}
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
#endif

static void
evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
{
	unsigned char buf[1024];
	struct event_base *base = arg;
#ifdef _WIN32
	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
		;
#else
	while (read(fd, (char*)buf, sizeof(buf)) > 0)
		;
#endif

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

int
evthread_make_base_notifiable(struct event_base *base)
{
	int r;
	if (!base)
		return -1;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	r = evthread_make_base_notifiable_nolock_(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
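/*
 * Illustrative sketch (editor's addition): a base normally becomes
 * notifiable on its own once thread support is enabled before the base is
 * created; calling evthread_make_base_notifiable() by hand is only needed
 * in unusual setups.  This sketch assumes pthreads support was compiled in.
 */
#if 0
#include <event2/event.h>
#include <event2/thread.h>

static struct event_base *
make_threaded_base(void)
{
	struct event_base *base;
	if (evthread_use_pthreads() < 0)
		return NULL;
	if ((base = event_base_new()) == NULL)
		return NULL;
	/* Usually already done by event_base_new() once locking is on. */
	if (evthread_make_base_notifiable(base) < 0) {
		event_base_free(base);
		return NULL;
	}
	return base;
}
#endif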

static int
evthread_make_base_notifiable_nolock_(struct event_base *base)
{
	void (*cb)(evutil_socket_t, short, void *);
	int (*notify)(struct event_base *);

	if (base->th_notify_fn != NULL) {
		/* The base is already notifiable: we're doing fine. */
		return 0;
	}

#if defined(EVENT__HAVE_WORKING_KQUEUE)
	if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
		base->th_notify_fn = event_kq_notify_base_;
		/* No need to add an event here; the backend can wake
		 * itself up just fine. */
		return 0;
	}
#endif

#ifdef EVENT__HAVE_EVENTFD
	base->th_notify_fd[0] = evutil_eventfd_(0,
	    EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
	if (base->th_notify_fd[0] >= 0) {
		base->th_notify_fd[1] = -1;
		notify = evthread_notify_base_eventfd;
		cb = evthread_notify_drain_eventfd;
	} else
#endif
	if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
		notify = evthread_notify_base_default;
		cb = evthread_notify_drain_default;
	} else {
		return -1;
	}

	base->th_notify_fn = notify;

	/* prepare an event that we can use for wakeup */
	event_assign(&base->th_notify, base, base->th_notify_fd[0],
	    EV_READ|EV_PERSIST, cb, base);

	/* we need to mark this as an internal event */
	base->th_notify.ev_flags |= EVLIST_INTERNAL;
	event_priority_set(&base->th_notify, 0);

	return event_add_nolock_(&base->th_notify, NULL, 0);
}

int
event_base_foreach_event_nolock_(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	int r, i;
	unsigned u;
	struct event *ev;

	/* Start out with all the EVLIST_INSERTED events. */
	if ((r = evmap_foreach_event_(base, fn, arg)))
		return r;

	/* Okay, now we deal with those events that have timeouts and are in
	 * the min-heap. */
	for (u = 0; u < base->timeheap.n; ++u) {
		ev = base->timeheap.p[u];
		if (ev->ev_flags & EVLIST_INSERTED) {
			/* we already processed this one */
			continue;
		}
		if ((r = fn(base, ev, arg)))
			return r;
	}

	/* Now for the events in one of the common-timeout queues. */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		TAILQ_FOREACH(ev, &ctl->events,
		    ev_timeout_pos.ev_next_with_common_timeout) {
			if (ev->ev_flags & EVLIST_INSERTED) {
				/* we already processed this one */
				continue;
			}
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	/* Finally, we deal with all the active events that we haven't
	 * touched yet. */
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
				/* This isn't an event (EVLIST_INIT clear), or
				 * we already processed it (EVLIST_INSERTED or
				 * EVLIST_TIMEOUT set). */
				continue;
			}
			ev = event_callback_to_event(evcb);
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	return 0;
}

/* Helper for event_base_dump_events: called on each event in the event base;
 * dumps only the inserted events. */
static int
dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
		return 0;

	fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s",
	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
	    (e->ev_events&EV_READ)?" Read":"",
	    (e->ev_events&EV_WRITE)?" Write":"",
	    (e->ev_events&EV_CLOSED)?" EOF":"",
	    (e->ev_events&EV_SIGNAL)?" Signal":"",
	    (e->ev_events&EV_PERSIST)?" Persist":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
	if (e->ev_flags & EVLIST_TIMEOUT) {
		struct timeval tv;
		tv.tv_sec = e->ev_timeout.tv_sec;
		tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
		evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
		fprintf(output, " Timeout=%ld.%06d",
		    (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
	}
	fputc('\n', output);

	return 0;
}

/* Helper for event_base_dump_events: called on each event in the event base;
 * dumps only the active events. */
static int
dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
		return 0;

	fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
	    (e->ev_res&EV_READ)?" Read":"",
	    (e->ev_res&EV_WRITE)?" Write":"",
	    (e->ev_res&EV_CLOSED)?" EOF":"",
	    (e->ev_res&EV_SIGNAL)?" Signal":"",
	    (e->ev_res&EV_TIMEOUT)?" Timeout":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
	    (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");

	return 0;
}

int
event_base_foreach_event(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	int r;
	if ((!fn) || (!base)) {
		return -1;
	}
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	r = event_base_foreach_event_nolock_(base, fn, arg);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
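/*
 * Illustrative sketch (editor's addition): counting the events attached to
 * a base with event_base_foreach_event().  Returning 0 from the callback
 * continues iteration; a nonzero return stops it and becomes the return
 * value of event_base_foreach_event().
 */
#if 0
#include <event2/event.h>

static int
count_one_event(const struct event_base *b, const struct event *e, void *arg)
{
	int *np = arg;
	(void)b; (void)e;
	++*np;
	return 0;	/* keep iterating */
}

static int
count_events(struct event_base *base)
{
	int n = 0;
	event_base_foreach_event(base, count_one_event, &n);
	return n;
}
#endif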


void
event_base_dump_events(struct event_base *base, FILE *output)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	fprintf(output, "Inserted events:\n");
	event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);

	fprintf(output, "Active events:\n");
	event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
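/*
 * Illustrative sketch (editor's addition): dumping a base's event tables to
 * stderr from an ordinary debug hook, e.g. when a loop appears stuck.
 */
#if 0
#include <event2/event.h>
#include <stdio.h>

static void
debug_dump(struct event_base *base)
{
	event_base_dump_events(base, stderr);
}
#endif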

void
event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_active_by_signal(struct event_base *base, int sig)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	evmap_signal_active_(base, sig, 1);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
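/*
 * Illustrative sketch (editor's addition): forcing every EV_READ event on a
 * given fd to run as if the fd had become readable, without touching the
 * kernel.  Handy for injecting wakeups in tests.
 */
#if 0
#include <event2/event.h>

static void
kick_readers(struct event_base *base, evutil_socket_t fd)
{
	event_base_active_by_fd(base, fd, EV_READ);
}
#endif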


void
event_base_add_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->virtual_event_count++;
	MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_del_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	EVUTIL_ASSERT(base->virtual_event_count > 0);
	base->virtual_event_count--;
	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

static void
event_free_debug_globals_locks(void)
{
#ifndef EVENT__DISABLE_THREAD_SUPPORT
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_map_lock_ != NULL) {
		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
		event_debug_map_lock_ = NULL;
		evthreadimpl_disable_lock_debugging_();
	}
#endif /* EVENT__DISABLE_DEBUG_MODE */
#endif /* EVENT__DISABLE_THREAD_SUPPORT */
	return;
}

static void
event_free_debug_globals(void)
{
	event_free_debug_globals_locks();
}

static void
event_free_evsig_globals(void)
{
	evsig_free_globals_();
}

static void
event_free_evutil_globals(void)
{
	evutil_free_globals_();
}

static void
event_free_globals(void)
{
	event_free_debug_globals();
	event_free_evsig_globals();
	event_free_evutil_globals();
}

void
libevent_global_shutdown(void)
{
	event_disable_debug_mode();
	event_free_globals();
}
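/*
 * Illustrative sketch (editor's addition): libevent_global_shutdown() frees
 * only global state, so it must run after every base and event has already
 * been freed, typically as the very last step before exit.
 */
#if 0
#include <event2/event.h>

int
main(void)
{
	struct event_base *base = event_base_new();
	/* ... add events, run event_base_dispatch(base) ... */
	event_base_free(base);
	libevent_global_shutdown();	/* no libevent calls after this point */
	return 0;
}
#endif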

#ifndef EVENT__DISABLE_THREAD_SUPPORT
int
event_global_setup_locks_(const int enable_locks)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
#endif
	if (evsig_global_setup_locks_(enable_locks) < 0)
		return -1;
	if (evutil_global_setup_locks_(enable_locks) < 0)
		return -1;
	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
		return -1;
	return 0;
}
#endif

void
event_base_assert_ok_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	event_base_assert_ok_nolock_(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_assert_ok_nolock_(struct event_base *base)
{
	int i;
	int count;

	/* First do checks on the per-fd and per-signal lists */
	evmap_check_integrity_(base);

	/* Check the heap property */
	for (i = 1; i < (int)base->timeheap.n; ++i) {
		int parent = (i - 1) / 2;
		struct event *ev, *p_ev;
		ev = base->timeheap.p[i];
		p_ev = base->timeheap.p[parent];
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
	}

	/* Check that the common timeouts are fine */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl = base->common_timeout_queues[i];
		struct event *last = NULL, *ev;

		EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);

		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
			if (last)
				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout, base));
			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
			last = ev;
		}
	}

	/* Check the active queues. */
	count = 0;
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
			EVUTIL_ASSERT(evcb->evcb_pri == i);
			++count;
		}
	}

	{
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
			++count;
		}
	}
	EVUTIL_ASSERT(count == base->event_count_active);
}