xref: /netbsd/external/bsd/libevent/dist/event.c (revision 4109d450)
1 /*	$NetBSD: event.c,v 1.5 2021/04/07 03:36:48 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
5  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 #include "event2/event-config.h"
30 #include <sys/cdefs.h>
31 __RCSID("$NetBSD: event.c,v 1.5 2021/04/07 03:36:48 christos Exp $");
32 #include "evconfig-private.h"
33 
34 #ifdef _WIN32
35 #include <winsock2.h>
36 #define WIN32_LEAN_AND_MEAN
37 #include <windows.h>
38 #undef WIN32_LEAN_AND_MEAN
39 #endif
40 #include <sys/types.h>
41 #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
42 #include <sys/time.h>
43 #endif
44 #include <sys/queue.h>
45 #ifdef EVENT__HAVE_SYS_SOCKET_H
46 #include <sys/socket.h>
47 #endif
48 #include <stdio.h>
49 #include <stdlib.h>
50 #ifdef EVENT__HAVE_UNISTD_H
51 #include <unistd.h>
52 #endif
53 #include <ctype.h>
54 #include <errno.h>
55 #include <signal.h>
56 #include <string.h>
57 #include <time.h>
58 #include <limits.h>
59 #ifdef EVENT__HAVE_FCNTL_H
60 #include <fcntl.h>
61 #endif
62 
63 #include "event2/event.h"
64 #include "event2/event_struct.h"
65 #include "event2/event_compat.h"
66 #include "event-internal.h"
67 #include "defer-internal.h"
68 #include "evthread-internal.h"
69 #include "event2/thread.h"
70 #include "event2/util.h"
71 #include "log-internal.h"
72 #include "evmap-internal.h"
73 #include "iocp-internal.h"
74 #include "changelist-internal.h"
75 #define HT_NO_CACHE_HASH_VALUES
76 #include "ht-internal.h"
77 #include "util-internal.h"
78 
79 
80 #ifdef EVENT__HAVE_WORKING_KQUEUE
81 #include "kqueue-internal.h"
82 #endif
83 
84 #ifdef EVENT__HAVE_EVENT_PORTS
85 extern const struct eventop evportops;
86 #endif
87 #ifdef EVENT__HAVE_SELECT
88 extern const struct eventop selectops;
89 #endif
90 #ifdef EVENT__HAVE_POLL
91 extern const struct eventop pollops;
92 #endif
93 #ifdef EVENT__HAVE_EPOLL
94 extern const struct eventop epollops;
95 #endif
96 #ifdef EVENT__HAVE_WORKING_KQUEUE
97 extern const struct eventop kqops;
98 #endif
99 #ifdef EVENT__HAVE_DEVPOLL
100 extern const struct eventop devpollops;
101 #endif
102 #ifdef _WIN32
103 extern const struct eventop win32ops;
104 #endif
105 
106 /* Array of backends in order of preference. */
107 static const struct eventop *eventops[] = {
108 #ifdef EVENT__HAVE_EVENT_PORTS
109 	&evportops,
110 #endif
111 #ifdef EVENT__HAVE_WORKING_KQUEUE
112 	&kqops,
113 #endif
114 #ifdef EVENT__HAVE_EPOLL
115 	&epollops,
116 #endif
117 #ifdef EVENT__HAVE_DEVPOLL
118 	&devpollops,
119 #endif
120 #ifdef EVENT__HAVE_POLL
121 	&pollops,
122 #endif
123 #ifdef EVENT__HAVE_SELECT
124 	&selectops,
125 #endif
126 #ifdef _WIN32
127 	&win32ops,
128 #endif
129 	NULL
130 };
131 
132 /* Global state; deprecated */
133 EVENT2_EXPORT_SYMBOL
134 struct event_base *event_global_current_base_ = NULL;
135 #define current_base event_global_current_base_
136 
137 /* Global state */
138 
139 static void *event_self_cbarg_ptr_ = NULL;
140 
141 /* Prototypes */
142 static void	event_queue_insert_active(struct event_base *, struct event_callback *);
143 static void	event_queue_insert_active_later(struct event_base *, struct event_callback *);
144 static void	event_queue_insert_timeout(struct event_base *, struct event *);
145 static void	event_queue_insert_inserted(struct event_base *, struct event *);
146 static void	event_queue_remove_active(struct event_base *, struct event_callback *);
147 static void	event_queue_remove_active_later(struct event_base *, struct event_callback *);
148 static void	event_queue_remove_timeout(struct event_base *, struct event *);
149 static void	event_queue_remove_inserted(struct event_base *, struct event *);
150 static void event_queue_make_later_events_active(struct event_base *base);
151 
152 static int evthread_make_base_notifiable_nolock_(struct event_base *base);
153 static int event_del_(struct event *ev, int blocking);
154 
155 #ifdef USE_REINSERT_TIMEOUT
156 /* This code seems buggy; only turn it on if we find out what the trouble is. */
157 static void	event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
158 #endif
159 
160 static int	event_haveevents(struct event_base *);
161 
162 static int	event_process_active(struct event_base *);
163 
164 static int	timeout_next(struct event_base *, struct timeval **);
165 static void	timeout_process(struct event_base *);
166 
167 static inline void	event_signal_closure(struct event_base *, struct event *ev);
168 static inline void	event_persist_closure(struct event_base *, struct event *ev);
169 
170 static int	evthread_notify_base(struct event_base *base);
171 
172 static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
173     struct event *ev);
174 
175 #ifndef EVENT__DISABLE_DEBUG_MODE
176 /* These functions implement a hashtable of which 'struct event *' structures
177  * have been setup or added.  We don't want to trust the content of the struct
178  * event itself, since we're trying to work through cases where an event gets
179  * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
180  */
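/*
 * Illustrative usage sketch (stack_ev is a hypothetical caller-side event):
 * debug mode has to be switched on before any event or event_base exists,
 * and an event that is never deleted can be released from the table with
 * event_debug_unassign():
 *
 *	event_enable_debug_mode();
 *	struct event_base *base = event_base_new();
 *	...
 *	event_debug_unassign(&stack_ev);
 */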
181 
182 struct event_debug_entry {
183 	HT_ENTRY(event_debug_entry) node;
184 	const struct event *ptr;
185 	unsigned added : 1;
186 };
187 
188 static inline unsigned
189 hash_debug_entry(const struct event_debug_entry *e)
190 {
191 	/* We need to do this silliness to convince compilers that we
192 	 * honestly mean to cast e->ptr to an integer, and discard any
193 	 * part of it that doesn't fit in an unsigned.
194 	 */
195 	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
196 	/* Our hashtable implementation is pretty sensitive to low bits,
197 	 * and every struct event is over 64 bytes in size, so we can
198 	 * just say >>6. */
199 	return (u >> 6);
200 }
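/* Worked example (illustrative numbers only): two heap-allocated events at
 * 0x7f0000001000 and 0x7f0000001080 (128 bytes apart, consistent with every
 * struct event being over 64 bytes) hash to 0x...40 and 0x...42 after the
 * >>6, so consecutive allocations still differ in their low hash bits even
 * though the low bits of the pointers themselves are identical.
 */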
201 
202 static inline int
203 eq_debug_entry(const struct event_debug_entry *a,
204     const struct event_debug_entry *b)
205 {
206 	return a->ptr == b->ptr;
207 }
208 
209 int event_debug_mode_on_ = 0;
210 
211 
212 #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
213 /**
214  * @brief debug mode variable which is set for any function/structure that needs
215  *        to be shared across threads (if thread support is enabled).
216  *
217  *        When and if evthreads are initialized, this variable will be evaluated,
218  *        and if set to something other than zero, this means the evthread setup
219  *        functions were called out of order.
220  *
221  *        See: "Locks and threading" in the documentation.
222  */
223 int event_debug_created_threadable_ctx_ = 0;
224 #endif
225 
226 /* Set if it's too late to enable event_debug_mode. */
227 static int event_debug_mode_too_late = 0;
228 #ifndef EVENT__DISABLE_THREAD_SUPPORT
229 static void *event_debug_map_lock_ = NULL;
230 #endif
231 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
232 	HT_INITIALIZER();
233 
234 HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
235     eq_debug_entry)
236 HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
237     eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
238 
239 /* record that ev is now setup (that is, ready for an add) */
240 static void event_debug_note_setup_(const struct event *ev)
241 {
242 	struct event_debug_entry *dent, find;
243 
244 	if (!event_debug_mode_on_)
245 		goto out;
246 
247 	find.ptr = ev;
248 	EVLOCK_LOCK(event_debug_map_lock_, 0);
249 	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
250 	if (dent) {
251 		dent->added = 0;
252 	} else {
253 		dent = mm_malloc(sizeof(*dent));
254 		if (!dent)
255 			event_err(1,
256 			    "Out of memory in debugging code");
257 		dent->ptr = ev;
258 		dent->added = 0;
259 		HT_INSERT(event_debug_map, &global_debug_map, dent);
260 	}
261 	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
262 
263 out:
264 	event_debug_mode_too_late = 1;
265 }
266 /* record that ev is no longer setup */
267 static void event_debug_note_teardown_(const struct event *ev)
268 {
269 	struct event_debug_entry *dent, find;
270 
271 	if (!event_debug_mode_on_)
272 		goto out;
273 
274 	find.ptr = ev;
275 	EVLOCK_LOCK(event_debug_map_lock_, 0);
276 	dent = HT_REMOVE(event_debug_map, &global_debug_map, &find);
277 	if (dent)
278 		mm_free(dent);
279 	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
280 
281 out:
282 	event_debug_mode_too_late = 1;
283 }
284 /* record that ev is now added */
285 static void event_debug_note_add_(const struct event *ev)
286 {
287 	struct event_debug_entry *dent,find;
288 
289 	if (!event_debug_mode_on_)
290 		goto out;
291 
292 	find.ptr = ev;
293 	EVLOCK_LOCK(event_debug_map_lock_, 0);
294 	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
295 	if (dent) {
296 		dent->added = 1;
297 	} else {
298 		event_errx(EVENT_ERR_ABORT_,
299 		    "%s: noting an add on a non-setup event %p"
300 		    " (events: 0x%x, fd: "EV_SOCK_FMT
301 		    ", flags: 0x%x)",
302 		    __func__, ev, ev->ev_events,
303 		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
304 	}
305 	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
306 
307 out:
308 	event_debug_mode_too_late = 1;
309 }
310 /* record that ev is no longer added */
311 static void event_debug_note_del_(const struct event *ev)
312 {
313 	struct event_debug_entry *dent, find;
314 
315 	if (!event_debug_mode_on_)
316 		goto out;
317 
318 	find.ptr = ev;
319 	EVLOCK_LOCK(event_debug_map_lock_, 0);
320 	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
321 	if (dent) {
322 		dent->added = 0;
323 	} else {
324 		event_errx(EVENT_ERR_ABORT_,
325 		    "%s: noting a del on a non-setup event %p"
326 		    " (events: 0x%x, fd: "EV_SOCK_FMT
327 		    ", flags: 0x%x)",
328 		    __func__, ev, ev->ev_events,
329 		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
330 	}
331 	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
332 
333 out:
334 	event_debug_mode_too_late = 1;
335 }
336 /* assert that ev is setup (i.e., okay to add or inspect) */
337 static void event_debug_assert_is_setup_(const struct event *ev)
338 {
339 	struct event_debug_entry *dent, find;
340 
341 	if (!event_debug_mode_on_)
342 		return;
343 
344 	find.ptr = ev;
345 	EVLOCK_LOCK(event_debug_map_lock_, 0);
346 	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
347 	if (!dent) {
348 		event_errx(EVENT_ERR_ABORT_,
349 		    "%s called on a non-initialized event %p"
350 		    " (events: 0x%x, fd: "EV_SOCK_FMT
351 		    ", flags: 0x%x)",
352 		    __func__, ev, ev->ev_events,
353 		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
354 	}
355 	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
356 }
357 /* assert that ev is not added (i.e., okay to tear down or set up again) */
358 static void event_debug_assert_not_added_(const struct event *ev)
359 {
360 	struct event_debug_entry *dent, find;
361 
362 	if (!event_debug_mode_on_)
363 		return;
364 
365 	find.ptr = ev;
366 	EVLOCK_LOCK(event_debug_map_lock_, 0);
367 	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
368 	if (dent && dent->added) {
369 		event_errx(EVENT_ERR_ABORT_,
370 		    "%s called on an already added event %p"
371 		    " (events: 0x%x, fd: "EV_SOCK_FMT", "
372 		    "flags: 0x%x)",
373 		    __func__, ev, ev->ev_events,
374 		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
375 	}
376 	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
377 }
378 static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
379 {
380 	if (!event_debug_mode_on_)
381 		return;
382 	if (fd < 0)
383 		return;
384 
385 #ifndef _WIN32
386 	{
387 		int flags;
388 		if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) {
389 			EVUTIL_ASSERT(flags & O_NONBLOCK);
390 		}
391 	}
392 #endif
393 }
394 #else
395 static void event_debug_note_setup_(const struct event *ev) { (void)ev; }
396 static void event_debug_note_teardown_(const struct event *ev) { (void)ev; }
397 static void event_debug_note_add_(const struct event *ev) { (void)ev; }
398 static void event_debug_note_del_(const struct event *ev) { (void)ev; }
399 static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; }
400 static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; }
401 static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; }
402 #endif
403 
404 #define EVENT_BASE_ASSERT_LOCKED(base)		\
405 	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
406 
407 /* How often (in seconds) do we check for changes in wall clock time relative
408  * to monotonic time?  Set this to -1 for 'never.' */
409 #define CLOCK_SYNC_INTERVAL 5
410 
411 /** Set 'tp' to the current time according to 'base'.  We must hold the lock
412  * on 'base'.  If there is a cached time, return it.  Otherwise, use
413  * clock_gettime or gettimeofday as appropriate to find out the right time.
414  * Return 0 on success, -1 on failure.
415  */
416 static int
417 gettime(struct event_base *base, struct timeval *tp)
418 {
419 	EVENT_BASE_ASSERT_LOCKED(base);
420 
421 	if (base->tv_cache.tv_sec) {
422 		*tp = base->tv_cache;
423 		return (0);
424 	}
425 
426 	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
427 		return -1;
428 	}
429 
430 	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
431 	    < tp->tv_sec) {
432 		struct timeval tv;
433 		evutil_gettimeofday(&tv,NULL);
434 		evutil_timersub(&tv, tp, &base->tv_clock_diff);
435 		base->last_updated_clock_diff = tp->tv_sec;
436 	}
437 
438 	return 0;
439 }
440 
441 int
442 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
443 {
444 	int r;
445 	if (!base) {
446 		base = current_base;
447 		if (!current_base)
448 			return evutil_gettimeofday(tv, NULL);
449 	}
450 
451 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
452 	if (base->tv_cache.tv_sec == 0) {
453 		r = evutil_gettimeofday(tv, NULL);
454 	} else {
455 		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
456 		r = 0;
457 	}
458 	EVBASE_RELEASE_LOCK(base, th_base_lock);
459 	return r;
460 }
461 
462 /** Make 'base' have no current cached time. */
463 static inline void
464 clear_time_cache(struct event_base *base)
465 {
466 	base->tv_cache.tv_sec = 0;
467 }
468 
469 /** Replace the cached time in 'base' with the current time. */
470 static inline void
471 update_time_cache(struct event_base *base)
472 {
473 	base->tv_cache.tv_sec = 0;
474 	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
475 	    gettime(base, &base->tv_cache);
476 }
477 
478 int
479 event_base_update_cache_time(struct event_base *base)
480 {
481 
482 	if (!base) {
483 		base = current_base;
484 		if (!current_base)
485 			return -1;
486 	}
487 
488 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
489 	if (base->running_loop)
490 		update_time_cache(base);
491 	EVBASE_RELEASE_LOCK(base, th_base_lock);
492 	return 0;
493 }
494 
495 static inline struct event *
496 event_callback_to_event(struct event_callback *evcb)
497 {
498 	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
499 	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
500 }
501 
502 static inline struct event_callback *
503 event_to_event_callback(struct event *ev)
504 {
505 	return &ev->ev_evcallback;
506 }
507 
508 struct event_base *
509 event_init(void)
510 {
511 	struct event_base *base = event_base_new_with_config(NULL);
512 
513 	if (base == NULL) {
514 		event_errx(1, "%s: Unable to construct event_base", __func__);
515 		return NULL;
516 	}
517 
518 	current_base = base;
519 
520 	return (base);
521 }
522 
523 struct event_base *
524 event_base_new(void)
525 {
526 	struct event_base *base = NULL;
527 	struct event_config *cfg = event_config_new();
528 	if (cfg) {
529 		base = event_base_new_with_config(cfg);
530 		event_config_free(cfg);
531 	}
532 	return base;
533 }
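/*
 * Illustrative usage sketch of the constructors above (my_timeout_cb is a
 * hypothetical callback; the libevent calls are the public API):
 *
 *	struct event_base *base = event_base_new();
 *	struct timeval one_sec = { 1, 0 };
 *	struct event *ev = evtimer_new(base, my_timeout_cb, NULL);
 *	evtimer_add(ev, &one_sec);
 *	event_base_dispatch(base);
 *	event_free(ev);
 *	event_base_free(base);
 */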
534 
535 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
536  * avoid. */
537 static int
538 event_config_is_avoided_method(const struct event_config *cfg,
539     const char *method)
540 {
541 	struct event_config_entry *entry;
542 
543 	TAILQ_FOREACH(entry, &cfg->entries, next) {
544 		if (entry->avoid_method != NULL &&
545 		    strcmp(entry->avoid_method, method) == 0)
546 			return (1);
547 	}
548 
549 	return (0);
550 }
551 
552 /** Return true iff 'method' is disabled according to the environment. */
553 static int
554 event_is_method_disabled(const char *name)
555 {
556 	char environment[64];
557 	int i;
558 
559 	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
560 	for (i = 8; environment[i] != '\0'; ++i)
561 		environment[i] = EVUTIL_TOUPPER_(environment[i]);
562 	/* Note that evutil_getenv_() ignores the environment entirely if
563 	 * we're setuid */
564 	return (evutil_getenv_(environment) != NULL);
565 }
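/*
 * Illustrative sketch: the loop above upper-cases everything after the
 * 8-character "EVENT_NO" prefix, so a backend can be vetoed from the
 * environment (ignored for setuid programs), e.g.:
 *
 *	$ EVENT_NOEPOLL=1 ./my_server        (hypothetical binary name)
 *
 * which makes event_base_new_with_config() skip epollops and fall through
 * to the next available entry in eventops[] (typically poll, then select).
 */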
566 
567 int
568 event_base_get_features(const struct event_base *base)
569 {
570 	return base->evsel->features;
571 }
572 
573 void
574 event_enable_debug_mode(void)
575 {
576 #ifndef EVENT__DISABLE_DEBUG_MODE
577 	if (event_debug_mode_on_)
578 		event_errx(1, "%s was called twice!", __func__);
579 	if (event_debug_mode_too_late)
580 		event_errx(1, "%s must be called *before* creating any events "
581 		    "or event_bases",__func__);
582 
583 	event_debug_mode_on_ = 1;
584 
585 	HT_INIT(event_debug_map, &global_debug_map);
586 #endif
587 }
588 
589 void
590 event_disable_debug_mode(void)
591 {
592 #ifndef EVENT__DISABLE_DEBUG_MODE
593 	struct event_debug_entry **ent, *victim;
594 
595 	EVLOCK_LOCK(event_debug_map_lock_, 0);
596 	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
597 		victim = *ent;
598 		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
599 		mm_free(victim);
600 	}
601 	HT_CLEAR(event_debug_map, &global_debug_map);
602 	EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
603 
604 	event_debug_mode_on_  = 0;
605 #endif
606 }
607 
608 struct event_base *
609 event_base_new_with_config(const struct event_config *cfg)
610 {
611 	int i;
612 	struct event_base *base;
613 	int should_check_environment;
614 
615 #ifndef EVENT__DISABLE_DEBUG_MODE
616 	event_debug_mode_too_late = 1;
617 #endif
618 
619 	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
620 		event_warn("%s: calloc", __func__);
621 		return NULL;
622 	}
623 
624 	if (cfg)
625 		base->flags = cfg->flags;
626 
627 	should_check_environment =
628 	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
629 
630 	{
631 		struct timeval tmp;
632 		int precise_time =
633 		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
634 		int flags;
635 		if (should_check_environment && !precise_time) {
636 			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
637 			if (precise_time) {
638 				base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
639 			}
640 		}
641 		flags = precise_time ? EV_MONOT_PRECISE : 0;
642 		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
643 
644 		gettime(base, &tmp);
645 	}
646 
647 	min_heap_ctor_(&base->timeheap);
648 
649 	base->sig.ev_signal_pair[0] = -1;
650 	base->sig.ev_signal_pair[1] = -1;
651 	base->th_notify_fd[0] = -1;
652 	base->th_notify_fd[1] = -1;
653 
654 	TAILQ_INIT(&base->active_later_queue);
655 
656 	evmap_io_initmap_(&base->io);
657 	evmap_signal_initmap_(&base->sigmap);
658 	event_changelist_init_(&base->changelist);
659 
660 	base->evbase = NULL;
661 
662 	if (cfg) {
663 		memcpy(&base->max_dispatch_time,
664 		    &cfg->max_dispatch_interval, sizeof(struct timeval));
665 		base->limit_callbacks_after_prio =
666 		    cfg->limit_callbacks_after_prio;
667 	} else {
668 		base->max_dispatch_time.tv_sec = -1;
669 		base->limit_callbacks_after_prio = 1;
670 	}
671 	if (cfg && cfg->max_dispatch_callbacks >= 0) {
672 		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
673 	} else {
674 		base->max_dispatch_callbacks = INT_MAX;
675 	}
676 	if (base->max_dispatch_callbacks == INT_MAX &&
677 	    base->max_dispatch_time.tv_sec == -1)
678 		base->limit_callbacks_after_prio = INT_MAX;
679 
680 	for (i = 0; eventops[i] && !base->evbase; i++) {
681 		if (cfg != NULL) {
682 			/* determine if this backend should be avoided */
683 			if (event_config_is_avoided_method(cfg,
684 				eventops[i]->name))
685 				continue;
686 			if ((eventops[i]->features & cfg->require_features)
687 			    != cfg->require_features)
688 				continue;
689 		}
690 
691 		/* also obey the environment variables */
692 		if (should_check_environment &&
693 		    event_is_method_disabled(eventops[i]->name))
694 			continue;
695 
696 		base->evsel = eventops[i];
697 
698 		base->evbase = base->evsel->init(base);
699 	}
700 
701 	if (base->evbase == NULL) {
702 		event_warnx("%s: no event mechanism available",
703 		    __func__);
704 		base->evsel = NULL;
705 		event_base_free(base);
706 		return NULL;
707 	}
708 
709 	if (evutil_getenv_("EVENT_SHOW_METHOD"))
710 		event_msgx("libevent using: %s", base->evsel->name);
711 
712 	/* allocate a single active event queue */
713 	if (event_base_priority_init(base, 1) < 0) {
714 		event_base_free(base);
715 		return NULL;
716 	}
717 
718 	/* prepare for threading */
719 
720 #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
721 	event_debug_created_threadable_ctx_ = 1;
722 #endif
723 
724 #ifndef EVENT__DISABLE_THREAD_SUPPORT
725 	if (EVTHREAD_LOCKING_ENABLED() &&
726 	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
727 		int r;
728 		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
729 		EVTHREAD_ALLOC_COND(base->current_event_cond);
730 		r = evthread_make_base_notifiable(base);
731 		if (r<0) {
732 			event_warnx("%s: Unable to make base notifiable.", __func__);
733 			event_base_free(base);
734 			return NULL;
735 		}
736 	}
737 #endif
738 
739 #ifdef _WIN32
740 	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
741 		event_base_start_iocp_(base, cfg->n_cpus_hint);
742 #endif
743 
744 	return (base);
745 }
746 
747 int
748 event_base_start_iocp_(struct event_base *base, int n_cpus)
749 {
750 #ifdef _WIN32
751 	if (base->iocp)
752 		return 0;
753 	base->iocp = event_iocp_port_launch_(n_cpus);
754 	if (!base->iocp) {
755 		event_warnx("%s: Couldn't launch IOCP", __func__);
756 		return -1;
757 	}
758 	return 0;
759 #else
760 	return -1;
761 #endif
762 }
763 
764 void
765 event_base_stop_iocp_(struct event_base *base)
766 {
767 #ifdef _WIN32
768 	int rv;
769 
770 	if (!base->iocp)
771 		return;
772 	rv = event_iocp_shutdown_(base->iocp, -1);
773 	EVUTIL_ASSERT(rv >= 0);
774 	base->iocp = NULL;
775 #endif
776 }
777 
778 static int
779 event_base_cancel_single_callback_(struct event_base *base,
780     struct event_callback *evcb,
781     int run_finalizers)
782 {
783 	int result = 0;
784 
785 	if (evcb->evcb_flags & EVLIST_INIT) {
786 		struct event *ev = event_callback_to_event(evcb);
787 		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
788 			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
789 			result = 1;
790 		}
791 	} else {
792 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
793 		event_callback_cancel_nolock_(base, evcb, 1);
794 		EVBASE_RELEASE_LOCK(base, th_base_lock);
795 		result = 1;
796 	}
797 
798 	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
799 		switch (evcb->evcb_closure) {
800 		case EV_CLOSURE_EVENT_FINALIZE:
801 		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
802 			struct event *ev = event_callback_to_event(evcb);
803 			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
804 			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
805 				mm_free(ev);
806 			break;
807 		}
808 		case EV_CLOSURE_CB_FINALIZE:
809 			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
810 			break;
811 		default:
812 			break;
813 		}
814 	}
815 	return result;
816 }
817 
818 static int event_base_free_queues_(struct event_base *base, int run_finalizers)
819 {
820 	int deleted = 0, i;
821 
822 	for (i = 0; i < base->nactivequeues; ++i) {
823 		struct event_callback *evcb, *next;
824 		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
825 			next = TAILQ_NEXT(evcb, evcb_active_next);
826 			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
827 			evcb = next;
828 		}
829 	}
830 
831 	{
832 		struct event_callback *evcb;
833 		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
834 			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
835 		}
836 	}
837 
838 	return deleted;
839 }
840 
841 static void
842 event_base_free_(struct event_base *base, int run_finalizers)
843 {
844 	int i, n_deleted=0;
845 	struct event *ev;
846 	/* XXXX grab the lock? If there is contention when one thread frees
847 	 * the base, then the contending thread will be very sad soon. */
848 
849 	/* event_base_free(NULL) is how to free the current_base if we
850 	 * made it with event_init and forgot to hold a reference to it. */
851 	if (base == NULL && current_base)
852 		base = current_base;
853 	/* Don't actually free NULL. */
854 	if (base == NULL) {
855 		event_warnx("%s: no base to free", __func__);
856 		return;
857 	}
858 	/* XXX(niels) - check for internal events first */
859 
860 #ifdef _WIN32
861 	event_base_stop_iocp_(base);
862 #endif
863 
864 	/* close the threading notification fds, if we have them */
865 	if (base->th_notify_fd[0] != -1) {
866 		event_del(&base->th_notify);
867 		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
868 		if (base->th_notify_fd[1] != -1)
869 			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
870 		base->th_notify_fd[0] = -1;
871 		base->th_notify_fd[1] = -1;
872 		event_debug_unassign(&base->th_notify);
873 	}
874 
875 	/* Delete all non-internal events. */
876 	evmap_delete_all_(base);
877 
878 	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
879 		event_del(ev);
880 		++n_deleted;
881 	}
882 	for (i = 0; i < base->n_common_timeouts; ++i) {
883 		struct common_timeout_list *ctl =
884 		    base->common_timeout_queues[i];
885 		event_del(&ctl->timeout_event); /* Internal; doesn't count */
886 		event_debug_unassign(&ctl->timeout_event);
887 		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
888 			struct event *next = TAILQ_NEXT(ev,
889 			    ev_timeout_pos.ev_next_with_common_timeout);
890 			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
891 				event_del(ev);
892 				++n_deleted;
893 			}
894 			ev = next;
895 		}
896 		mm_free(ctl);
897 	}
898 	if (base->common_timeout_queues)
899 		mm_free(base->common_timeout_queues);
900 
901 	for (;;) {
902 		/* A finalizer callback can register yet another finalizer, and
903 		 * if that new finalizer lands in active_later_queue it can be
904 		 * moved to activequeues, so we would still have events in
905 		 * activequeues after this function returns, which is not what
906 		 * we want (we even have an assertion for this).  So keep
907 		 * looping until a pass frees nothing.
908 		 *
909 		 * A simple case is a bufferevent with an underlying bufferevent (i.e. filters). */
910 		int ii = event_base_free_queues_(base, run_finalizers);
911 		event_debug(("%s: %d events freed", __func__, ii));
912 		if (!ii) {
913 			break;
914 		}
915 		n_deleted += ii;
916 	}
917 
918 	if (n_deleted)
919 		event_debug(("%s: %d events were still set in base",
920 			__func__, n_deleted));
921 
922 	while (LIST_FIRST(&base->once_events)) {
923 		struct event_once *eonce = LIST_FIRST(&base->once_events);
924 		LIST_REMOVE(eonce, next_once);
925 		mm_free(eonce);
926 	}
927 
928 	if (base->evsel != NULL && base->evsel->dealloc != NULL)
929 		base->evsel->dealloc(base);
930 
931 	for (i = 0; i < base->nactivequeues; ++i)
932 		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
933 
934 	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
935 	min_heap_dtor_(&base->timeheap);
936 
937 	mm_free(base->activequeues);
938 
939 	evmap_io_clear_(&base->io);
940 	evmap_signal_clear_(&base->sigmap);
941 	event_changelist_freemem_(&base->changelist);
942 
943 	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
944 	EVTHREAD_FREE_COND(base->current_event_cond);
945 
946 	/* If we're freeing current_base, there won't be a current_base. */
947 	if (base == current_base)
948 		current_base = NULL;
949 	mm_free(base);
950 }
951 
952 void
953 event_base_free_nofinalize(struct event_base *base)
954 {
955 	event_base_free_(base, 0);
956 }
957 
958 void
959 event_base_free(struct event_base *base)
960 {
961 	event_base_free_(base, 1);
962 }
963 
964 /* Fake eventop; used to disable the backend temporarily inside event_reinit
965  * so that we can call event_del() on an event without telling the backend.
966  */
967 static int
968 nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
969     short events, void *fdinfo)
970 {
971 	return 0;
972 }
973 const struct eventop nil_eventop = {
974 	"nil",
975 	NULL, /* init: unused. */
976 	NULL, /* add: unused. */
977 	nil_backend_del, /* del: used, so needs to be killed. */
978 	NULL, /* dispatch: unused. */
979 	NULL, /* dealloc: unused. */
980 	0, 0, 0
981 };
982 
983 /* reinitialize the event base after a fork */
984 int
985 event_reinit(struct event_base *base)
986 {
987 	const struct eventop *evsel;
988 	int res = 0;
989 	int was_notifiable = 0;
990 	int had_signal_added = 0;
991 
992 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
993 
994 	evsel = base->evsel;
995 
996 	/* check if this event mechanism requires reinit on the backend */
997 	if (evsel->need_reinit) {
998 		/* We're going to call event_del() on our notify events (the
999 		 * ones that tell about signals and wakeup events).  But we
1000 		 * don't actually want to tell the backend to change its
1001 		 * state, since it might still share some resource (a kqueue,
1002 		 * an epoll fd) with the parent process, and we don't want to
1003 		 * delete the fds from _that_ backend, so we temporarily stub out
1004 		 * the evsel with a replacement.
1005 		 */
1006 		base->evsel = &nil_eventop;
1007 	}
1008 
1009 	/* We need to create a new signal-notification fd and a new
1010 	 * thread-notification fd.  Otherwise, we'll still share those with
1011 	 * the parent process, which would make any notification sent to them
1012 	 * get received by one or both of the event loops, more or less at
1013 	 * random.
1014 	 */
1015 	if (base->sig.ev_signal_added) {
1016 		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
1017 		event_debug_unassign(&base->sig.ev_signal);
1018 		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
1019 		had_signal_added = 1;
1020 		base->sig.ev_signal_added = 0;
1021 	}
1022 	if (base->sig.ev_signal_pair[0] != -1)
1023 		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
1024 	if (base->sig.ev_signal_pair[1] != -1)
1025 		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
1026 	if (base->th_notify_fn != NULL) {
1027 		was_notifiable = 1;
1028 		base->th_notify_fn = NULL;
1029 	}
1030 	if (base->th_notify_fd[0] != -1) {
1031 		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
1032 		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
1033 		if (base->th_notify_fd[1] != -1)
1034 			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
1035 		base->th_notify_fd[0] = -1;
1036 		base->th_notify_fd[1] = -1;
1037 		event_debug_unassign(&base->th_notify);
1038 	}
1039 
1040 	/* Replace the original evsel. */
1041 	base->evsel = evsel;
1042 
1043 	if (evsel->need_reinit) {
1044 		/* Reconstruct the backend through brute-force, so that we do
1045 		 * not share any structures with the parent process. For some
1046 		 * backends, this is necessary: epoll and kqueue, for
1047 		 * instance, have events associated with a kernel
1048 		 * structure. If we didn't reinitialize, we'd share that
1049 		 * structure with the parent process, and any changes made by
1050 		 * the parent would affect our backend's behavior (and vice
1051 		 * versa).
1052 		 */
1053 		if (base->evsel->dealloc != NULL)
1054 			base->evsel->dealloc(base);
1055 		base->evbase = evsel->init(base);
1056 		if (base->evbase == NULL) {
1057 			event_errx(1,
1058 			   "%s: could not reinitialize event mechanism",
1059 			   __func__);
1060 			res = -1;
1061 			goto done;
1062 		}
1063 
1064 		/* Empty out the changelist (if any): we are starting from a
1065 		 * blank slate. */
1066 		event_changelist_freemem_(&base->changelist);
1067 
1068 		/* Tell the event maps to re-inform the backend about all
1069 		 * pending events. This will make the signal notification
1070 		 * event get re-created if necessary. */
1071 		if (evmap_reinit_(base) < 0)
1072 			res = -1;
1073 	} else {
1074 		res = evsig_init_(base);
1075 		if (res == 0 && had_signal_added) {
1076 			res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
1077 			if (res == 0)
1078 				base->sig.ev_signal_added = 1;
1079 		}
1080 	}
1081 
1082 	/* If we were notifiable before, and nothing just exploded, become
1083 	 * notifiable again. */
1084 	if (was_notifiable && res == 0)
1085 		res = evthread_make_base_notifiable_nolock_(base);
1086 
1087 done:
1088 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1089 	return (res);
1090 }
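/*
 * Illustrative usage sketch for the function above (error handling reduced
 * to a bare exit): after fork(), the child must not keep driving a
 * kqueue/epoll fd it still shares with the parent, so it re-arms its base
 * before dispatching.
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		if (event_reinit(base) < 0)
 *			exit(1);
 *		event_base_dispatch(base);
 *	}
 */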
1091 
1092 /* Get the monotonic time for this event_base's timer */
1093 int
1094 event_gettime_monotonic(struct event_base *base, struct timeval *tv)
1095 {
1096 	int rv = -1;
1097 
1098 	if (base && tv) {
1099 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1100 		rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
1101 		EVBASE_RELEASE_LOCK(base, th_base_lock);
1102 	}
1103 
1104 	return rv;
1105 }
1106 
1107 const char **
1108 event_get_supported_methods(void)
1109 {
1110 	static const char **methods = NULL;
1111 	const struct eventop **method;
1112 	const char **tmp;
1113 	int i = 0, k;
1114 
1115 	/* count all methods */
1116 	for (method = &eventops[0]; *method != NULL; ++method) {
1117 		++i;
1118 	}
1119 
1120 	/* allocate one more than we need for the NULL pointer */
1121 	tmp = mm_calloc((i + 1), sizeof(char *));
1122 	if (tmp == NULL)
1123 		return (NULL);
1124 
1125 	/* populate the array with the supported methods */
1126 	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
1127 		tmp[i++] = eventops[k]->name;
1128 	}
1129 	tmp[i] = NULL;
1130 
1131 	if (methods != NULL)
1132 		mm_free(__UNCONST(methods));
1133 
1134 	methods = tmp;
1135 
1136 	return (methods);
1137 }
1138 
1139 struct event_config *
1140 event_config_new(void)
1141 {
1142 	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
1143 
1144 	if (cfg == NULL)
1145 		return (NULL);
1146 
1147 	TAILQ_INIT(&cfg->entries);
1148 	cfg->max_dispatch_interval.tv_sec = -1;
1149 	cfg->max_dispatch_callbacks = INT_MAX;
1150 	cfg->limit_callbacks_after_prio = 1;
1151 
1152 	return (cfg);
1153 }
1154 
1155 static void
1156 event_config_entry_free(struct event_config_entry *entry)
1157 {
1158 	if (entry->avoid_method != NULL)
1159 		mm_free(__UNCONST(entry->avoid_method));
1160 	mm_free(entry);
1161 }
1162 
1163 void
1164 event_config_free(struct event_config *cfg)
1165 {
1166 	struct event_config_entry *entry;
1167 
1168 	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
1169 		TAILQ_REMOVE(&cfg->entries, entry, next);
1170 		event_config_entry_free(entry);
1171 	}
1172 	mm_free(cfg);
1173 }
1174 
1175 int
1176 event_config_set_flag(struct event_config *cfg, int flag)
1177 {
1178 	if (!cfg)
1179 		return -1;
1180 	cfg->flags |= flag;
1181 	return 0;
1182 }
1183 
1184 int
1185 event_config_avoid_method(struct event_config *cfg, const char *method)
1186 {
1187 	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
1188 	if (entry == NULL)
1189 		return (-1);
1190 
1191 	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
1192 		mm_free(entry);
1193 		return (-1);
1194 	}
1195 
1196 	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
1197 
1198 	return (0);
1199 }
1200 
1201 int
1202 event_config_require_features(struct event_config *cfg,
1203     int features)
1204 {
1205 	if (!cfg)
1206 		return (-1);
1207 	cfg->require_features = features;
1208 	return (0);
1209 }
1210 
1211 int
1212 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
1213 {
1214 	if (!cfg)
1215 		return (-1);
1216 	cfg->n_cpus_hint = cpus;
1217 	return (0);
1218 }
1219 
1220 int
1221 event_config_set_max_dispatch_interval(struct event_config *cfg,
1222     const struct timeval *max_interval, int max_callbacks, int min_priority)
1223 {
1224 	if (max_interval)
1225 		memcpy(&cfg->max_dispatch_interval, max_interval,
1226 		    sizeof(struct timeval));
1227 	else
1228 		cfg->max_dispatch_interval.tv_sec = -1;
1229 	cfg->max_dispatch_callbacks =
1230 	    max_callbacks >= 0 ? max_callbacks : INT_MAX;
1231 	if (min_priority < 0)
1232 		min_priority = 0;
1233 	cfg->limit_callbacks_after_prio = min_priority;
1234 	return (0);
1235 }
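/*
 * Illustrative configuration sketch: cap each dispatch pass at roughly 10 ms
 * or 5 callbacks, with the cap only enforced once priority 1 (and
 * higher-numbered, less urgent queues) is reached:
 *
 *	struct event_config *cfg = event_config_new();
 *	struct timeval ten_ms = { 0, 10000 };
 *	event_config_set_max_dispatch_interval(cfg, &ten_ms, 5, 1);
 *	struct event_base *base = event_base_new_with_config(cfg);
 *	event_config_free(cfg);
 */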
1236 
1237 int
1238 event_priority_init(int npriorities)
1239 {
1240 	return event_base_priority_init(current_base, npriorities);
1241 }
1242 
1243 int
1244 event_base_priority_init(struct event_base *base, int npriorities)
1245 {
1246 	int i, r;
1247 	r = -1;
1248 
1249 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1250 
1251 	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
1252 	    || npriorities >= EVENT_MAX_PRIORITIES)
1253 		goto err;
1254 
1255 	if (npriorities == base->nactivequeues)
1256 		goto ok;
1257 
1258 	if (base->nactivequeues) {
1259 		mm_free(base->activequeues);
1260 		base->nactivequeues = 0;
1261 	}
1262 
1263 	/* Allocate our priority queues */
1264 	base->activequeues = (struct evcallback_list *)
1265 	  mm_calloc(npriorities, sizeof(struct evcallback_list));
1266 	if (base->activequeues == NULL) {
1267 		event_warn("%s: calloc", __func__);
1268 		goto err;
1269 	}
1270 	base->nactivequeues = npriorities;
1271 
1272 	for (i = 0; i < base->nactivequeues; ++i) {
1273 		TAILQ_INIT(&base->activequeues[i]);
1274 	}
1275 
1276 ok:
1277 	r = 0;
1278 err:
1279 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1280 	return (r);
1281 }
1282 
1283 int
1284 event_base_get_npriorities(struct event_base *base)
1285 {
1286 
1287 	int n;
1288 	if (base == NULL)
1289 		base = current_base;
1290 
1291 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1292 	n = base->nactivequeues;
1293 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1294 	return (n);
1295 }
1296 
1297 int
1298 event_base_get_num_events(struct event_base *base, unsigned int type)
1299 {
1300 	int r = 0;
1301 
1302 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1303 
1304 	if (type & EVENT_BASE_COUNT_ACTIVE)
1305 		r += base->event_count_active;
1306 
1307 	if (type & EVENT_BASE_COUNT_VIRTUAL)
1308 		r += base->virtual_event_count;
1309 
1310 	if (type & EVENT_BASE_COUNT_ADDED)
1311 		r += base->event_count;
1312 
1313 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1314 
1315 	return r;
1316 }
1317 
1318 int
1319 event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
1320 {
1321 	int r = 0;
1322 
1323 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1324 
1325 	if (type & EVENT_BASE_COUNT_ACTIVE) {
1326 		r += base->event_count_active_max;
1327 		if (clear)
1328 			base->event_count_active_max = 0;
1329 	}
1330 
1331 	if (type & EVENT_BASE_COUNT_VIRTUAL) {
1332 		r += base->virtual_event_count_max;
1333 		if (clear)
1334 			base->virtual_event_count_max = 0;
1335 	}
1336 
1337 	if (type & EVENT_BASE_COUNT_ADDED) {
1338 		r += base->event_count_max;
1339 		if (clear)
1340 			base->event_count_max = 0;
1341 	}
1342 
1343 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1344 
1345 	return r;
1346 }
1347 
1348 /* Returns true iff we're currently watching any events. */
1349 static int
1350 event_haveevents(struct event_base *base)
1351 {
1352 	/* Caller must hold th_base_lock */
1353 	return (base->virtual_event_count > 0 || base->event_count > 0);
1354 }
1355 
1356 /* "closure" function called when processing active signal events */
1357 static inline void
1358 event_signal_closure(struct event_base *base, struct event *ev)
1359 {
1360 	short ncalls;
1361 	int should_break;
1362 
1363 	/* Allows deletes to work */
1364 	ncalls = ev->ev_ncalls;
1365 	if (ncalls != 0)
1366 		ev->ev_pncalls = &ncalls;
1367 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1368 	while (ncalls) {
1369 		ncalls--;
1370 		ev->ev_ncalls = ncalls;
1371 		if (ncalls == 0)
1372 			ev->ev_pncalls = NULL;
1373 		(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1374 
1375 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1376 		should_break = base->event_break;
1377 		EVBASE_RELEASE_LOCK(base, th_base_lock);
1378 
1379 		if (should_break) {
1380 			if (ncalls != 0)
1381 				ev->ev_pncalls = NULL;
1382 			return;
1383 		}
1384 	}
1385 }
1386 
1387 /* Common timeouts are special timeouts that are handled as queues rather than
1388  * in the minheap.  This is more efficient than the minheap if we happen to
1389  * know that we're going to get several thousands of timeout events all with
1390  * the same timeout value.
1391  *
1392  * Since all our timeout handling code assumes timevals can be copied,
1393  * assigned, etc, we can't use a "magic pointer" to encode these common
1394  * timeouts.  Searching through a list to see if every timeout is common could
1395  * also get inefficient.  Instead, we take advantage of the fact that tv_usec
1396  * is 32 bits long, but only uses 20 of those bits (since it can never be over
1397  * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
1398  * of index into the event_base's array of common timeouts.
1399  */
1400 
1401 #define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
1402 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1403 #define COMMON_TIMEOUT_IDX_SHIFT 20
1404 #define COMMON_TIMEOUT_MASK     0xf0000000
1405 #define COMMON_TIMEOUT_MAGIC    0x50000000
1406 
1407 #define COMMON_TIMEOUT_IDX(tv) \
1408 	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
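/* Worked example (illustrative): a registered common timeout of 2.5 seconds
 * stored at index 3 is handed back to callers as the timeval
 *
 *	tv_sec  = 2
 *	tv_usec = 500000 | COMMON_TIMEOUT_MAGIC | (3 << COMMON_TIMEOUT_IDX_SHIFT)
 *	        = 0x0007a120 | 0x50000000 | 0x00300000 = 0x5037a120
 *
 * COMMON_TIMEOUT_IDX() recovers the 3, and masking with MICROSECONDS_MASK
 * recovers the real 500000-microsecond remainder.
 */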
1409 
1410 /** Return true iff 'tv' is a common timeout in 'base' */
1411 static inline int
1412 is_common_timeout(const struct timeval *tv,
1413     const struct event_base *base)
1414 {
1415 	int idx;
1416 	if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1417 		return 0;
1418 	idx = COMMON_TIMEOUT_IDX(tv);
1419 	return idx < base->n_common_timeouts;
1420 }
1421 
1422 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1423  * one is a common timeout. */
1424 static inline int
1425 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1426 {
1427 	return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1428 	    (tv2->tv_usec & ~MICROSECONDS_MASK);
1429 }
1430 
1431 /** Requires that 'tv' is a common timeout.  Return the corresponding
1432  * common_timeout_list. */
1433 static inline struct common_timeout_list *
1434 get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1435 {
1436 	return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1437 }
1438 
1439 #if 0
1440 static inline int
1441 common_timeout_ok(const struct timeval *tv,
1442     struct event_base *base)
1443 {
1444 	const struct timeval *expect =
1445 	    &get_common_timeout_list(base, tv)->duration;
1446 	return tv->tv_sec == expect->tv_sec &&
1447 	    tv->tv_usec == expect->tv_usec;
1448 }
1449 #endif
1450 
1451 /* Add the timeout for the first event in the given common timeout list to the
1452  * event_base's minheap. */
1453 static void
1454 common_timeout_schedule(struct common_timeout_list *ctl,
1455     const struct timeval *now, struct event *head)
1456 {
1457 	struct timeval timeout = head->ev_timeout;
1458 	timeout.tv_usec &= MICROSECONDS_MASK;
1459 	event_add_nolock_(&ctl->timeout_event, &timeout, 1);
1460 }
1461 
1462 /* Callback: invoked when the timeout for a common timeout queue triggers.
1463  * This means that (at least) the first event in that queue should be run,
1464  * and the timeout should be rescheduled if there are more events. */
1465 static void
1466 common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1467 {
1468 	struct timeval now;
1469 	struct common_timeout_list *ctl = arg;
1470 	struct event_base *base = ctl->base;
1471 	struct event *ev = NULL;
1472 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1473 	gettime(base, &now);
1474 	while (1) {
1475 		ev = TAILQ_FIRST(&ctl->events);
1476 		if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1477 		    (ev->ev_timeout.tv_sec == now.tv_sec &&
1478 			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1479 			break;
1480 		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1481 		event_active_nolock_(ev, EV_TIMEOUT, 1);
1482 	}
1483 	if (ev)
1484 		common_timeout_schedule(ctl, &now, ev);
1485 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1486 }
1487 
1488 #define MAX_COMMON_TIMEOUTS 256
1489 
1490 const struct timeval *
1491 event_base_init_common_timeout(struct event_base *base,
1492     const struct timeval *duration)
1493 {
1494 	int i;
1495 	struct timeval tv;
1496 	const struct timeval *result=NULL;
1497 	struct common_timeout_list *new_ctl;
1498 
1499 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1500 	if (duration->tv_usec > 1000000) {
1501 		memcpy(&tv, duration, sizeof(struct timeval));
1502 		if (is_common_timeout(duration, base))
1503 			tv.tv_usec &= MICROSECONDS_MASK;
1504 		tv.tv_sec += tv.tv_usec / 1000000;
1505 		tv.tv_usec %= 1000000;
1506 		duration = &tv;
1507 	}
1508 	for (i = 0; i < base->n_common_timeouts; ++i) {
1509 		const struct common_timeout_list *ctl =
1510 		    base->common_timeout_queues[i];
1511 		if (duration->tv_sec == ctl->duration.tv_sec &&
1512 		    duration->tv_usec ==
1513 		    (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1514 			EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1515 			result = &ctl->duration;
1516 			goto done;
1517 		}
1518 	}
1519 	if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1520 		event_warnx("%s: Too many common timeouts already in use; "
1521 		    "we only support %d per event_base", __func__,
1522 		    MAX_COMMON_TIMEOUTS);
1523 		goto done;
1524 	}
1525 	if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1526 		int n = base->n_common_timeouts < 16 ? 16 :
1527 		    base->n_common_timeouts*2;
1528 		struct common_timeout_list **newqueues =
1529 		    mm_realloc(base->common_timeout_queues,
1530 			n*sizeof(struct common_timeout_queue *));
1531 		if (!newqueues) {
1532 			event_warn("%s: realloc",__func__);
1533 			goto done;
1534 		}
1535 		base->n_common_timeouts_allocated = n;
1536 		base->common_timeout_queues = newqueues;
1537 	}
1538 	new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1539 	if (!new_ctl) {
1540 		event_warn("%s: calloc",__func__);
1541 		goto done;
1542 	}
1543 	TAILQ_INIT(&new_ctl->events);
1544 	new_ctl->duration.tv_sec = duration->tv_sec;
1545 	new_ctl->duration.tv_usec =
1546 	    duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1547 	    (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1548 	evtimer_assign(&new_ctl->timeout_event, base,
1549 	    common_timeout_callback, new_ctl);
1550 	new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1551 	event_priority_set(&new_ctl->timeout_event, 0);
1552 	new_ctl->base = base;
1553 	base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1554 	result = &new_ctl->duration;
1555 
1556 done:
1557 	if (result)
1558 		EVUTIL_ASSERT(is_common_timeout(result, base));
1559 
1560 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1561 	return result;
1562 }
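/*
 * Illustrative usage sketch: callers that add very many events with the same
 * interval ask the base for the flagged timeval once and reuse it, e.g.:
 *
 *	struct timeval five_sec = { 5, 0 };
 *	const struct timeval *common =
 *	    event_base_init_common_timeout(base, &five_sec);
 *	event_add(ev1, common);
 *	event_add(ev2, common);
 *
 * Both events then land in a single common_timeout_list instead of the
 * minheap.
 */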
1563 
1564 /* Closure function invoked when we're activating a persistent event. */
1565 static inline void
1566 event_persist_closure(struct event_base *base, struct event *ev)
1567 {
1568 	void (*evcb_callback)(evutil_socket_t, short, void *);
1569 
1570 	/* Other fields of *ev that must be stored before executing the callback */
1571 	evutil_socket_t evcb_fd;
1572 	short evcb_res;
1573 	void *evcb_arg;
1574 
1575 	/* reschedule the persistent event if we have a timeout. */
1576 	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1577 		/* If there was a timeout, we want it to run at an interval of
1578 		 * ev_io_timeout after the last time it was _scheduled_ for,
1579 		 * not ev_io_timeout after _now_.  If it fired for another
1580 		 * reason, though, the timeout ought to start ticking _now_. */
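		/* Illustrative numbers: a 5-second persistent timer scheduled
		 * for t=10 that actually fires at t=10.3 is re-armed for
		 * t=15, not t=15.3, so the period does not drift; if it was
		 * activated by EV_READ instead of EV_TIMEOUT, the next
		 * timeout is 5 seconds from now. */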
1581 		struct timeval run_at, relative_to, delay, now;
1582 		ev_uint32_t usec_mask = 0;
1583 		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1584 			&ev->ev_io_timeout));
1585 		gettime(base, &now);
1586 		if (is_common_timeout(&ev->ev_timeout, base)) {
1587 			delay = ev->ev_io_timeout;
1588 			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1589 			delay.tv_usec &= MICROSECONDS_MASK;
1590 			if (ev->ev_res & EV_TIMEOUT) {
1591 				relative_to = ev->ev_timeout;
1592 				relative_to.tv_usec &= MICROSECONDS_MASK;
1593 			} else {
1594 				relative_to = now;
1595 			}
1596 		} else {
1597 			delay = ev->ev_io_timeout;
1598 			if (ev->ev_res & EV_TIMEOUT) {
1599 				relative_to = ev->ev_timeout;
1600 			} else {
1601 				relative_to = now;
1602 			}
1603 		}
1604 		evutil_timeradd(&relative_to, &delay, &run_at);
1605 		if (evutil_timercmp(&run_at, &now, <)) {
1606 			/* Looks like we missed at least one invocation due to
1607 			 * a clock jump, not running the event loop for a
1608 			 * while, really slow callbacks, or
1609 			 * something. Reschedule relative to now.
1610 			 */
1611 			evutil_timeradd(&now, &delay, &run_at);
1612 		}
1613 		run_at.tv_usec |= usec_mask;
1614 		event_add_nolock_(ev, &run_at, 1);
1615 	}
1616 
1617 	// Save our callback before we release the lock
1618 	evcb_callback = ev->ev_callback;
1619 	evcb_fd = ev->ev_fd;
1620 	evcb_res = ev->ev_res;
1621 	evcb_arg = ev->ev_arg;
1622 
1623 	// Release the lock
1624 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1625 
1626 	// Execute the callback
1627 	(evcb_callback)(evcb_fd, evcb_res, evcb_arg);
1628 }
1629 
1630 /*
1631   Helper for event_process_active to process all the events in a single queue,
1632   releasing the lock as we go.  This function requires that the lock be held
1633   when it's invoked.  Returns -1 if we get a signal or an event_break that
1634   means we should stop processing any active events now.  Otherwise returns
1635   the number of non-internal event_callbacks that we processed.
1636 */
1637 static int
1638 event_process_active_single_queue(struct event_base *base,
1639     struct evcallback_list *activeq,
1640     int max_to_process, const struct timeval *endtime)
1641 {
1642 	struct event_callback *evcb;
1643 	int count = 0;
1644 
1645 	EVUTIL_ASSERT(activeq != NULL);
1646 
1647 	for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
1648 		struct event *ev=NULL;
1649 		if (evcb->evcb_flags & EVLIST_INIT) {
1650 			ev = event_callback_to_event(evcb);
1651 
1652 			if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
1653 				event_queue_remove_active(base, evcb);
1654 			else
1655 				event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1656 			event_debug((
1657 			    "event_process_active: event: %p, %s%s%scall %p",
1658 			    ev,
1659 			    ev->ev_res & EV_READ ? "EV_READ " : " ",
1660 			    ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1661 			    ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
1662 			    ev->ev_callback));
1663 		} else {
1664 			event_queue_remove_active(base, evcb);
1665 			event_debug(("event_process_active: event_callback %p, "
1666 				"closure %d, call %p",
1667 				evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
1668 		}
1669 
1670 		if (!(evcb->evcb_flags & EVLIST_INTERNAL))
1671 			++count;
1672 
1673 
1674 		base->current_event = evcb;
1675 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1676 		base->current_event_waiters = 0;
1677 #endif
1678 
1679 		switch (evcb->evcb_closure) {
1680 		case EV_CLOSURE_EVENT_SIGNAL:
1681 			EVUTIL_ASSERT(ev != NULL);
1682 			event_signal_closure(base, ev);
1683 			break;
1684 		case EV_CLOSURE_EVENT_PERSIST:
1685 			EVUTIL_ASSERT(ev != NULL);
1686 			event_persist_closure(base, ev);
1687 			break;
1688 		case EV_CLOSURE_EVENT: {
1689 			void (*evcb_callback)(evutil_socket_t, short, void *);
1690 			short res;
1691 			EVUTIL_ASSERT(ev != NULL);
1692 			evcb_callback = *ev->ev_callback;
1693 			res = ev->ev_res;
1694 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1695 			evcb_callback(ev->ev_fd, res, ev->ev_arg);
1696 		}
1697 		break;
1698 		case EV_CLOSURE_CB_SELF: {
1699 			void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
1700 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1701 			evcb_selfcb(evcb, evcb->evcb_arg);
1702 		}
1703 		break;
1704 		case EV_CLOSURE_EVENT_FINALIZE:
1705 		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
1706 			void (*evcb_evfinalize)(struct event *, void *);
1707 			int evcb_closure = evcb->evcb_closure;
1708 			EVUTIL_ASSERT(ev != NULL);
1709 			base->current_event = NULL;
1710 			evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
1711 			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1712 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1713 			event_debug_note_teardown_(ev);
1714 			evcb_evfinalize(ev, ev->ev_arg);
1715 			if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
1716 				mm_free(ev);
1717 		}
1718 		break;
1719 		case EV_CLOSURE_CB_FINALIZE: {
1720 			void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
1721 			base->current_event = NULL;
1722 			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1723 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1724 			evcb_cbfinalize(evcb, evcb->evcb_arg);
1725 		}
1726 		break;
1727 		default:
1728 			EVUTIL_ASSERT(0);
1729 		}
1730 
1731 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1732 		base->current_event = NULL;
1733 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1734 		if (base->current_event_waiters) {
1735 			base->current_event_waiters = 0;
1736 			EVTHREAD_COND_BROADCAST(base->current_event_cond);
1737 		}
1738 #endif
1739 
1740 		if (base->event_break)
1741 			return -1;
1742 		if (count >= max_to_process)
1743 			return count;
1744 		if (count && endtime) {
1745 			struct timeval now;
1746 			update_time_cache(base);
1747 			gettime(base, &now);
1748 			if (evutil_timercmp(&now, endtime, >=))
1749 				return count;
1750 		}
1751 		if (base->event_continue)
1752 			break;
1753 	}
1754 	return count;
1755 }
1756 
1757 /*
1758  * Active events are stored in priority queues.  Lower priorities are always
1759  * processed before higher priorities.  Low priority events can starve high
1760  * priority ones.
1761  */
1762 
1763 static int
1764 event_process_active(struct event_base *base)
1765 {
1766 	/* Caller must hold th_base_lock */
1767 	struct evcallback_list *activeq = NULL;
1768 	int i, c = 0;
1769 	const struct timeval *endtime;
1770 	struct timeval tv;
1771 	const int maxcb = base->max_dispatch_callbacks;
1772 	const int limit_after_prio = base->limit_callbacks_after_prio;
1773 	if (base->max_dispatch_time.tv_sec >= 0) {
1774 		update_time_cache(base);
1775 		gettime(base, &tv);
1776 		evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
1777 		endtime = &tv;
1778 	} else {
1779 		endtime = NULL;
1780 	}
1781 
1782 	for (i = 0; i < base->nactivequeues; ++i) {
1783 		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1784 			base->event_running_priority = i;
1785 			activeq = &base->activequeues[i];
1786 			if (i < limit_after_prio)
1787 				c = event_process_active_single_queue(base, activeq,
1788 				    INT_MAX, NULL);
1789 			else
1790 				c = event_process_active_single_queue(base, activeq,
1791 				    maxcb, endtime);
1792 			if (c < 0) {
1793 				goto done;
1794 			} else if (c > 0)
1795 				break; /* Processed a real event; do not
1796 					* consider lower-priority events */
1797 			/* If we get here, all of the events we processed
1798 			 * were internal.  Continue. */
1799 		}
1800 	}
1801 
1802 done:
1803 	base->event_running_priority = -1;
1804 
1805 	return c;
1806 }
1807 
1808 /*
1809  * Wait continuously for events.  We exit only if no events are left.
1810  */
1811 
1812 int
1813 event_dispatch(void)
1814 {
1815 	return (event_loop(0));
1816 }
1817 
1818 int
1819 event_base_dispatch(struct event_base *event_base)
1820 {
1821 	return (event_base_loop(event_base, 0));
1822 }
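/*
 * Illustrative sketch (not part of libevent, kept under #if 0 so it is
 * never compiled): a minimal caller of event_base_dispatch().  All names
 * in this block are hypothetical; it only shows the intended calling
 * pattern of the public API above.
 */
#if 0
static void
on_timer(evutil_socket_t fd, short what, void *arg)
{
	(void)fd; (void)what;
	event_base_loopbreak(arg);	/* stop the loop from a callback */
}

static int
run_example_loop(void)
{
	struct event_base *base = event_base_new();
	struct timeval one_sec = { 1, 0 };
	struct event *tick;

	if (base == NULL)
		return -1;
	tick = evtimer_new(base, on_timer, base);
	evtimer_add(tick, &one_sec);
	/* Blocks until no events remain or the loop is broken/exited. */
	event_base_dispatch(base);
	event_free(tick);
	event_base_free(base);
	return 0;
}
#endif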
1823 
1824 const char *
1825 event_base_get_method(const struct event_base *base)
1826 {
1827 	EVUTIL_ASSERT(base);
1828 	return (base->evsel->name);
1829 }
1830 
1831 /** Callback: used to implement event_base_loopexit by telling the event_base
1832  * that it's time to exit its loop. */
1833 static void
1834 event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1835 {
1836 	struct event_base *base = arg;
1837 	base->event_gotterm = 1;
1838 }
1839 
1840 int
1841 event_loopexit(const struct timeval *tv)
1842 {
1843 	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1844 		    current_base, tv));
1845 }
1846 
1847 int
1848 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1849 {
1850 	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1851 		    event_base, tv));
1852 }
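/*
 * Illustrative sketch (not part of libevent, not compiled): bounding a
 * dispatch loop with event_base_loopexit().  A NULL timeout exits after
 * the current round of callbacks; a timeval exits once that interval has
 * elapsed.  The function name is hypothetical.
 */
#if 0
static void
run_for_ten_seconds(struct event_base *base)
{
	struct timeval ten_sec = { 10, 0 };

	/* Ask the loop to stop roughly 10 seconds from now... */
	event_base_loopexit(base, &ten_sec);
	/* ...then run it; this returns once the exit event has fired. */
	event_base_dispatch(base);
}
#endif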
1853 
1854 int
1855 event_loopbreak(void)
1856 {
1857 	return (event_base_loopbreak(current_base));
1858 }
1859 
1860 int
1861 event_base_loopbreak(struct event_base *event_base)
1862 {
1863 	int r = 0;
1864 	if (event_base == NULL)
1865 		return (-1);
1866 
1867 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1868 	event_base->event_break = 1;
1869 
1870 	if (EVBASE_NEED_NOTIFY(event_base)) {
1871 		r = evthread_notify_base(event_base);
1872 	} else {
1873 		r = (0);
1874 	}
1875 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1876 	return r;
1877 }
1878 
1879 int
1880 event_base_loopcontinue(struct event_base *event_base)
1881 {
1882 	int r = 0;
1883 	if (event_base == NULL)
1884 		return (-1);
1885 
1886 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1887 	event_base->event_continue = 1;
1888 
1889 	if (EVBASE_NEED_NOTIFY(event_base)) {
1890 		r = evthread_notify_base(event_base);
1891 	} else {
1892 		r = (0);
1893 	}
1894 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1895 	return r;
1896 }
1897 
1898 int
1899 event_base_got_break(struct event_base *event_base)
1900 {
1901 	int res;
1902 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1903 	res = event_base->event_break;
1904 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1905 	return res;
1906 }
1907 
1908 int
1909 event_base_got_exit(struct event_base *event_base)
1910 {
1911 	int res;
1912 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1913 	res = event_base->event_gotterm;
1914 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1915 	return res;
1916 }
1917 
1918 /* not thread safe */
1919 
1920 int
1921 event_loop(int flags)
1922 {
1923 	return event_base_loop(current_base, flags);
1924 }
1925 
1926 int
1927 event_base_loop(struct event_base *base, int flags)
1928 {
1929 	const struct eventop *evsel = base->evsel;
1930 	struct timeval tv;
1931 	struct timeval *tv_p;
1932 	int res, done, retval = 0;
1933 
1934 	/* Grab the lock.  We will release it inside evsel.dispatch, and again
1935 	 * as we invoke user callbacks. */
1936 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1937 
1938 	if (base->running_loop) {
1939 		event_warnx("%s: reentrant invocation.  Only one event_base_loop"
1940 		    " can run on each event_base at once.", __func__);
1941 		EVBASE_RELEASE_LOCK(base, th_base_lock);
1942 		return -1;
1943 	}
1944 
1945 	base->running_loop = 1;
1946 
1947 	clear_time_cache(base);
1948 
1949 	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1950 		evsig_set_base_(base);
1951 
1952 	done = 0;
1953 
1954 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1955 	base->th_owner_id = EVTHREAD_GET_ID();
1956 #endif
1957 
1958 	base->event_gotterm = base->event_break = 0;
1959 
1960 	while (!done) {
1961 		base->event_continue = 0;
1962 		base->n_deferreds_queued = 0;
1963 
1964 		/* Terminate the loop if we have been asked to */
1965 		if (base->event_gotterm) {
1966 			break;
1967 		}
1968 
1969 		if (base->event_break) {
1970 			break;
1971 		}
1972 
1973 		tv_p = &tv;
1974 		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1975 			timeout_next(base, &tv_p);
1976 		} else {
1977 			/*
1978 			 * if we have active events, we just poll new events
1979 			 * without waiting.
1980 			 */
1981 			evutil_timerclear(&tv);
1982 		}
1983 
1984 		/* If we have no events, we just exit */
1985 		if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
1986 		    !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
1987 			event_debug(("%s: no events registered.", __func__));
1988 			retval = 1;
1989 			goto done;
1990 		}
1991 
1992 		event_queue_make_later_events_active(base);
1993 
1994 		clear_time_cache(base);
1995 
1996 		res = evsel->dispatch(base, tv_p);
1997 
1998 		if (res == -1) {
1999 			event_debug(("%s: dispatch returned unsuccessfully.",
2000 				__func__));
2001 			retval = -1;
2002 			goto done;
2003 		}
2004 
2005 		update_time_cache(base);
2006 
2007 		timeout_process(base);
2008 
2009 		if (N_ACTIVE_CALLBACKS(base)) {
2010 			int n = event_process_active(base);
2011 			if ((flags & EVLOOP_ONCE)
2012 			    && N_ACTIVE_CALLBACKS(base) == 0
2013 			    && n != 0)
2014 				done = 1;
2015 		} else if (flags & EVLOOP_NONBLOCK)
2016 			done = 1;
2017 	}
2018 	event_debug(("%s: asked to terminate loop.", __func__));
2019 
2020 done:
2021 	clear_time_cache(base);
2022 	base->running_loop = 0;
2023 
2024 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2025 
2026 	return (retval);
2027 }
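/*
 * Illustrative sketch (not part of libevent, not compiled): driving
 * event_base_loop() directly with its flags.  EVLOOP_NONBLOCK polls once
 * without waiting; EVLOOP_ONCE waits for events, runs their callbacks,
 * then returns.  The function name is hypothetical.
 */
#if 0
static void
poll_then_wait(struct event_base *base)
{
	/* Check for ready events without blocking. */
	if (event_base_loop(base, EVLOOP_NONBLOCK) == 1)
		return;	/* 1 means no events were registered */

	/* Block until at least one active callback has been run. */
	event_base_loop(base, EVLOOP_ONCE);
}
#endif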
2028 
2029 /* One-time callback to implement event_base_once: invokes the user callback,
2030  * then deletes the allocated storage */
2031 static void
2032 event_once_cb(evutil_socket_t fd, short events, void *arg)
2033 {
2034 	struct event_once *eonce = arg;
2035 
2036 	(*eonce->cb)(fd, events, eonce->arg);
2037 	EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
2038 	LIST_REMOVE(eonce, next_once);
2039 	EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
2040 	event_debug_unassign(&eonce->ev);
2041 	mm_free(eonce);
2042 }
2043 
2044 /* not threadsafe, event scheduled once. */
2045 int
2046 event_once(evutil_socket_t fd, short events,
2047     void (*callback)(evutil_socket_t, short, void *),
2048     void *arg, const struct timeval *tv)
2049 {
2050 	return event_base_once(current_base, fd, events, callback, arg, tv);
2051 }
2052 
2053 /* Schedules an event once */
2054 int
2055 event_base_once(struct event_base *base, evutil_socket_t fd, short events,
2056     void (*callback)(evutil_socket_t, short, void *),
2057     void *arg, const struct timeval *tv)
2058 {
2059 	struct event_once *eonce;
2060 	int res = 0;
2061 	int activate = 0;
2062 
2063 	if (!base)
2064 		return (-1);
2065 
2066 	/* We cannot support signals that just fire once, or persistent
2067 	 * events. */
2068 	if (events & (EV_SIGNAL|EV_PERSIST))
2069 		return (-1);
2070 
2071 	if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
2072 		return (-1);
2073 
2074 	eonce->cb = callback;
2075 	eonce->arg = arg;
2076 
2077 	if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
2078 		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
2079 
2080 		if (tv == NULL || ! evutil_timerisset(tv)) {
2081 			/* If the event is going to become active immediately,
2082 			 * don't put it on the timeout queue.  This is one
2083 			 * idiom for scheduling a callback, so let's make
2084 			 * it fast (and order-preserving). */
2085 			activate = 1;
2086 		}
2087 	} else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
2088 		events &= EV_READ|EV_WRITE|EV_CLOSED;
2089 
2090 		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
2091 	} else {
2092 		/* Bad event combination */
2093 		mm_free(eonce);
2094 		return (-1);
2095 	}
2096 
2097 	if (res == 0) {
2098 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2099 		if (activate)
2100 			event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
2101 		else
2102 			res = event_add_nolock_(&eonce->ev, tv, 0);
2103 
2104 		if (res != 0) {
2105 			mm_free(eonce);
2106 			return (res);
2107 		} else {
2108 			LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
2109 		}
2110 		EVBASE_RELEASE_LOCK(base, th_base_lock);
2111 	}
2112 
2113 	return (0);
2114 }
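/*
 * Illustrative sketch (not part of libevent, not compiled): scheduling
 * one-shot callbacks with event_base_once(), which allocates and frees
 * the per-call storage internally.  Callback, string, and fd names are
 * hypothetical.
 */
#if 0
static void
greet_once(evutil_socket_t fd, short what, void *arg)
{
	(void)fd; (void)what;
	puts((const char *)arg);
}

static void
schedule_one_shots(struct event_base *base, evutil_socket_t sock)
{
	struct timeval half_sec = { 0, 500000 };

	/* Pure timeout: fires once after about half a second. */
	event_base_once(base, -1, EV_TIMEOUT, greet_once,
	    (void *)"timer fired", &half_sec);

	/* One-shot read: fires the first time 'sock' becomes readable. */
	event_base_once(base, sock, EV_READ, greet_once,
	    (void *)"socket readable", NULL);
}
#endif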
2115 
2116 int
2117 event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
2118 {
2119 	if (!base)
2120 		base = current_base;
2121 	if (arg == &event_self_cbarg_ptr_)
2122 		arg = ev;
2123 
2124 	if (!(events & EV_SIGNAL))
2125 		event_debug_assert_socket_nonblocking_(fd);
2126 	event_debug_assert_not_added_(ev);
2127 
2128 	ev->ev_base = base;
2129 
2130 	ev->ev_callback = callback;
2131 	ev->ev_arg = arg;
2132 	ev->ev_fd = fd;
2133 	ev->ev_events = events;
2134 	ev->ev_res = 0;
2135 	ev->ev_flags = EVLIST_INIT;
2136 	ev->ev_ncalls = 0;
2137 	ev->ev_pncalls = NULL;
2138 
2139 	if (events & EV_SIGNAL) {
2140 		if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
2141 			event_warnx("%s: EV_SIGNAL is not compatible with "
2142 			    "EV_READ, EV_WRITE or EV_CLOSED", __func__);
2143 			return -1;
2144 		}
2145 		ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
2146 	} else {
2147 		if (events & EV_PERSIST) {
2148 			evutil_timerclear(&ev->ev_io_timeout);
2149 			ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
2150 		} else {
2151 			ev->ev_closure = EV_CLOSURE_EVENT;
2152 		}
2153 	}
2154 
2155 	min_heap_elem_init_(ev);
2156 
2157 	if (base != NULL) {
2158 		/* by default, we put new events into the middle priority */
2159 		ev->ev_pri = base->nactivequeues / 2;
2160 	}
2161 
2162 	event_debug_note_setup_(ev);
2163 
2164 	return 0;
2165 }
2166 
2167 int
2168 event_base_set(struct event_base *base, struct event *ev)
2169 {
2170 	/* Only an event that is initialized but not added or active may be
2170 	 * assigned to a different base */
2171 	if (ev->ev_flags != EVLIST_INIT)
2172 		return (-1);
2173 
2174 	event_debug_assert_is_setup_(ev);
2175 
2176 	ev->ev_base = base;
2177 	ev->ev_pri = base->nactivequeues/2;
2178 
2179 	return (0);
2180 }
2181 
2182 void
2183 event_set(struct event *ev, evutil_socket_t fd, short events,
2184 	  void (*callback)(evutil_socket_t, short, void *), void *arg)
2185 {
2186 	int r;
2187 	r = event_assign(ev, current_base, fd, events, callback, arg);
2188 	EVUTIL_ASSERT(r == 0);
2189 }
2190 
2191 void *
2192 event_self_cbarg(void)
2193 {
2194 	return &event_self_cbarg_ptr_;
2195 }
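/*
 * Illustrative sketch (not part of libevent, not compiled): using
 * event_self_cbarg() so a callback receives its own struct event, e.g.
 * to delete or free itself.  Names are hypothetical.
 */
#if 0
static void
free_self_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event *self = arg;	/* substituted by event_self_cbarg() */
	(void)fd; (void)what;
	event_free(self);
}

static void
add_self_freeing_timer(struct event_base *base)
{
	struct timeval two_sec = { 2, 0 };
	struct event *ev = evtimer_new(base, free_self_cb, event_self_cbarg());

	evtimer_add(ev, &two_sec);
}
#endif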
2196 
2197 struct event *
2198 event_base_get_running_event(struct event_base *base)
2199 {
2200 	struct event *ev = NULL;
2201 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2202 	if (EVBASE_IN_THREAD(base)) {
2203 		struct event_callback *evcb = base->current_event;
2204 		if (evcb->evcb_flags & EVLIST_INIT)
2205 			ev = event_callback_to_event(evcb);
2206 	}
2207 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2208 	return ev;
2209 }
2210 
2211 struct event *
2212 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
2213 {
2214 	struct event *ev;
2215 	ev = mm_malloc(sizeof(struct event));
2216 	if (ev == NULL)
2217 		return (NULL);
2218 	if (event_assign(ev, base, fd, events, cb, arg) < 0) {
2219 		mm_free(ev);
2220 		return (NULL);
2221 	}
2222 
2223 	return (ev);
2224 }
2225 
2226 void
2227 event_free(struct event *ev)
2228 {
2229 	/* This is disabled, so that events which have been finalized can
2230 	 * still be a valid target for event_free(). */
2231 	/* event_debug_assert_is_setup_(ev); */
2232 
2233 	/* make sure that this event won't be coming back to haunt us. */
2234 	event_del(ev);
2235 	event_debug_note_teardown_(ev);
2236 	mm_free(ev);
2237 
2238 }
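/*
 * Illustrative sketch (not part of libevent, not compiled): the usual
 * lifecycle of a heap-allocated event - event_new(), event_add(), and
 * event_free() once it is no longer needed.  Callback and socket names
 * are hypothetical.
 */
#if 0
static void
on_readable(evutil_socket_t fd, short what, void *arg)
{
	(void)fd; (void)what; (void)arg;
	/* read from fd here */
}

static struct event *
watch_socket(struct event_base *base, evutil_socket_t sock)
{
	/* EV_PERSIST keeps the event added after each callback. */
	struct event *ev = event_new(base, sock, EV_READ | EV_PERSIST,
	    on_readable, NULL);

	if (ev == NULL || event_add(ev, NULL) < 0) {
		if (ev)
			event_free(ev);	/* event_free() also deletes it */
		return NULL;
	}
	return ev;
}
#endif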
2239 
2240 void
2241 event_debug_unassign(struct event *ev)
2242 {
2243 	event_debug_assert_not_added_(ev);
2244 	event_debug_note_teardown_(ev);
2245 
2246 	ev->ev_flags &= ~EVLIST_INIT;
2247 }
2248 
2249 #define EVENT_FINALIZE_FREE_ 0x10000
2250 static int
2251 event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2252 {
2253 	ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
2254 	    EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
2255 
2256 	event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2257 	ev->ev_closure = closure;
2258 	ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
2259 	event_active_nolock_(ev, EV_FINALIZE, 1);
2260 	ev->ev_flags |= EVLIST_FINALIZING;
2261 	return 0;
2262 }
2263 
2264 static int
2265 event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2266 {
2267 	int r;
2268 	struct event_base *base = ev->ev_base;
2269 	if (EVUTIL_FAILURE_CHECK(!base)) {
2270 		event_warnx("%s: event has no event_base set.", __func__);
2271 		return -1;
2272 	}
2273 
2274 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2275 	r = event_finalize_nolock_(base, flags, ev, cb);
2276 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2277 	return r;
2278 }
2279 
2280 int
2281 event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2282 {
2283 	return event_finalize_impl_(flags, ev, cb);
2284 }
2285 
2286 int
2287 event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2288 {
2289 	return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
2290 }
2291 
2292 void
2293 event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2294 {
2295 	struct event *ev = NULL;
2296 	if (evcb->evcb_flags & EVLIST_INIT) {
2297 		ev = event_callback_to_event(evcb);
2298 		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2299 	} else {
2300 		event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
2301 	}
2302 
2303 	evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
2304 	evcb->evcb_cb_union.evcb_cbfinalize = cb;
2305 	event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
2306 	evcb->evcb_flags |= EVLIST_FINALIZING;
2307 }
2308 
2309 void
2310 event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2311 {
2312 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2313 	event_callback_finalize_nolock_(base, flags, evcb, cb);
2314 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2315 }
2316 
2317 /** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
2318  * callback will be invoked on *one of them*, after they have *all* been
2319  * finalized. */
2320 int
2321 event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
2322 {
2323 	int n_pending = 0, i;
2324 
2325 	if (base == NULL)
2326 		base = current_base;
2327 
2328 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2329 
2330 	event_debug(("%s: %d events finalizing", __func__, n_cbs));
2331 
2332 	/* At most one can be currently executing; the rest we just
2333 	 * cancel... But we always make sure that the finalize callback
2334 	 * runs. */
2335 	for (i = 0; i < n_cbs; ++i) {
2336 		struct event_callback *evcb = evcbs[i];
2337 		if (evcb == base->current_event) {
2338 			event_callback_finalize_nolock_(base, 0, evcb, cb);
2339 			++n_pending;
2340 		} else {
2341 			event_callback_cancel_nolock_(base, evcb, 0);
2342 		}
2343 	}
2344 
2345 	if (n_pending == 0) {
2346 		/* Just do the first one. */
2347 		event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
2348 	}
2349 
2350 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2351 	return 0;
2352 }
2353 
2354 /*
2355  * Sets the priority of an event.  If the event is already active,
2356  * changing the priority will fail.
2357  */
2358 
2359 int
2360 event_priority_set(struct event *ev, int pri)
2361 {
2362 	event_debug_assert_is_setup_(ev);
2363 
2364 	if (ev->ev_flags & EVLIST_ACTIVE)
2365 		return (-1);
2366 	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
2367 		return (-1);
2368 
2369 	ev->ev_pri = pri;
2370 
2371 	return (0);
2372 }
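/*
 * Illustrative sketch (not part of libevent, not compiled): priorities
 * are configured on the base with event_base_priority_init() before
 * dispatching, and event_priority_set() must run before the event
 * becomes active.  Names are hypothetical.
 */
#if 0
static void
use_priorities(struct event_base *base, struct event *urgent, struct event *bulk)
{
	/* Two priority levels: 0 runs before 1. */
	event_base_priority_init(base, 2);

	event_priority_set(urgent, 0);
	event_priority_set(bulk, 1);
}
#endif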
2373 
2374 /*
2375  * Checks if a specific event is pending or scheduled.
2376  */
2377 
2378 int
2379 event_pending(const struct event *ev, short event, struct timeval *tv)
2380 {
2381 	int flags = 0;
2382 
2383 	if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2384 		event_warnx("%s: event has no event_base set.", __func__);
2385 		return 0;
2386 	}
2387 
2388 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2389 	event_debug_assert_is_setup_(ev);
2390 
2391 	if (ev->ev_flags & EVLIST_INSERTED)
2392 		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2393 	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2394 		flags |= ev->ev_res;
2395 	if (ev->ev_flags & EVLIST_TIMEOUT)
2396 		flags |= EV_TIMEOUT;
2397 
2398 	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2399 
2400 	/* See if there is a timeout that we should report */
2401 	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2402 		struct timeval tmp = ev->ev_timeout;
2403 		tmp.tv_usec &= MICROSECONDS_MASK;
2404 		/* correctly remap to real time */
2405 		evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2406 	}
2407 
2408 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2409 
2410 	return (flags & event);
2411 }
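/*
 * Illustrative sketch (not part of libevent, not compiled): querying an
 * event with event_pending().  The mask selects which kinds of pending
 * status to report; a non-NULL timeval receives the scheduled expiry
 * when EV_TIMEOUT is both requested and pending.  Names are hypothetical.
 */
#if 0
static void
report_pending(const struct event *ev)
{
	struct timeval expiry;

	if (event_pending(ev, EV_READ | EV_WRITE, NULL))
		printf("still waiting for I/O\n");

	if (event_pending(ev, EV_TIMEOUT, &expiry))
		printf("timeout scheduled for %ld.%06ld\n",
		    (long)expiry.tv_sec, (long)expiry.tv_usec);
}
#endif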
2412 
2413 int
2414 event_initialized(const struct event *ev)
2415 {
2416 	if (!(ev->ev_flags & EVLIST_INIT))
2417 		return 0;
2418 
2419 	return 1;
2420 }
2421 
2422 void
2423 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2424 {
2425 	event_debug_assert_is_setup_(event);
2426 
2427 	if (base_out)
2428 		*base_out = event->ev_base;
2429 	if (fd_out)
2430 		*fd_out = event->ev_fd;
2431 	if (events_out)
2432 		*events_out = event->ev_events;
2433 	if (callback_out)
2434 		*callback_out = event->ev_callback;
2435 	if (arg_out)
2436 		*arg_out = event->ev_arg;
2437 }
2438 
2439 size_t
2440 event_get_struct_event_size(void)
2441 {
2442 	return sizeof(struct event);
2443 }
2444 
2445 evutil_socket_t
2446 event_get_fd(const struct event *ev)
2447 {
2448 	event_debug_assert_is_setup_(ev);
2449 	return ev->ev_fd;
2450 }
2451 
2452 struct event_base *
2453 event_get_base(const struct event *ev)
2454 {
2455 	event_debug_assert_is_setup_(ev);
2456 	return ev->ev_base;
2457 }
2458 
2459 short
2460 event_get_events(const struct event *ev)
2461 {
2462 	event_debug_assert_is_setup_(ev);
2463 	return ev->ev_events;
2464 }
2465 
2466 event_callback_fn
2467 event_get_callback(const struct event *ev)
2468 {
2469 	event_debug_assert_is_setup_(ev);
2470 	return ev->ev_callback;
2471 }
2472 
2473 void *
2474 event_get_callback_arg(const struct event *ev)
2475 {
2476 	event_debug_assert_is_setup_(ev);
2477 	return ev->ev_arg;
2478 }
2479 
2480 int
2481 event_get_priority(const struct event *ev)
2482 {
2483 	event_debug_assert_is_setup_(ev);
2484 	return ev->ev_pri;
2485 }
2486 
2487 int
2488 event_add(struct event *ev, const struct timeval *tv)
2489 {
2490 	int res;
2491 
2492 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2493 		event_warnx("%s: event has no event_base set.", __func__);
2494 		return -1;
2495 	}
2496 
2497 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2498 
2499 	res = event_add_nolock_(ev, tv, 0);
2500 
2501 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2502 
2503 	return (res);
2504 }
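/*
 * Illustrative sketch (not part of libevent, not compiled): event_add()
 * with a timeout.  With a timeval, the callback also fires with
 * EV_TIMEOUT if the fd stays idle for that long; re-adding a pending
 * event just reschedules its timeout.  Names are hypothetical.
 */
#if 0
static int
add_read_with_idle_timeout(struct event *read_ev)
{
	struct timeval idle = { 30, 0 };	/* 30-second idle timeout */

	return event_add(read_ev, &idle);
}
#endif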
2505 
2506 /* Helper callback: wake an event_base from another thread.  This version
2507  * works by writing a byte to one end of a socketpair, so that the event_base
2508  * listening on the other end will wake up as the corresponding event
2509  * triggers */
2510 static int
2511 evthread_notify_base_default(struct event_base *base)
2512 {
2513 	char buf[1];
2514 	int r;
2515 	buf[0] = (char) 0;
2516 #ifdef _WIN32
2517 	r = send(base->th_notify_fd[1], buf, 1, 0);
2518 #else
2519 	r = write(base->th_notify_fd[1], buf, 1);
2520 #endif
2521 	return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
2522 }
2523 
2524 #ifdef EVENT__HAVE_EVENTFD
2525 /* Helper callback: wake an event_base from another thread.  This version
2526  * assumes that you have a working eventfd() implementation. */
2527 static int
2528 evthread_notify_base_eventfd(struct event_base *base)
2529 {
2530 	ev_uint64_t msg = 1;
2531 	int r;
2532 	do {
2533 		r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2534 	} while (r < 0 && errno == EAGAIN);
2535 
2536 	return (r < 0) ? -1 : 0;
2537 }
2538 #endif
2539 
2540 
2541 /** Tell the thread currently running the event_loop for base (if any) that it
2542  * needs to stop waiting in its dispatch function (if it is) and process all
2543  * active callbacks. */
2544 static int
2545 evthread_notify_base(struct event_base *base)
2546 {
2547 	EVENT_BASE_ASSERT_LOCKED(base);
2548 	if (!base->th_notify_fn)
2549 		return -1;
2550 	if (base->is_notify_pending)
2551 		return 0;
2552 	base->is_notify_pending = 1;
2553 	return base->th_notify_fn(base);
2554 }
2555 
2556 /* Implementation function to remove a timeout on a currently pending event.
2557  */
2558 int
2559 event_remove_timer_nolock_(struct event *ev)
2560 {
2561 	struct event_base *base = ev->ev_base;
2562 
2563 	EVENT_BASE_ASSERT_LOCKED(base);
2564 	event_debug_assert_is_setup_(ev);
2565 
2566 	event_debug(("event_remove_timer_nolock: event: %p", ev));
2567 
2568 	/* If it's not pending on a timeout, we don't need to do anything. */
2569 	if (ev->ev_flags & EVLIST_TIMEOUT) {
2570 		event_queue_remove_timeout(base, ev);
2571 		evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
2572 	}
2573 
2574 	return (0);
2575 }
2576 
2577 int
2578 event_remove_timer(struct event *ev)
2579 {
2580 	int res;
2581 
2582 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2583 		event_warnx("%s: event has no event_base set.", __func__);
2584 		return -1;
2585 	}
2586 
2587 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2588 
2589 	res = event_remove_timer_nolock_(ev);
2590 
2591 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2592 
2593 	return (res);
2594 }
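/*
 * Illustrative sketch (not part of libevent, not compiled):
 * event_remove_timer() drops only the timeout part of a pending event,
 * leaving any fd or signal registration in place.  Names are
 * hypothetical.
 */
#if 0
static void
cancel_idle_timeout(struct event *read_ev)
{
	/* The event keeps waiting for EV_READ, but will no longer fire
	 * with EV_TIMEOUT. */
	event_remove_timer(read_ev);
}
#endif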
2595 
2596 /* Implementation function to add an event.  Works just like event_add,
2597  * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
2598  * we treat tv as an absolute time, not as an interval to add to the current
2599  * time */
2600 int
2601 event_add_nolock_(struct event *ev, const struct timeval *tv,
2602     int tv_is_absolute)
2603 {
2604 	struct event_base *base = ev->ev_base;
2605 	int res = 0;
2606 	int notify = 0;
2607 
2608 	EVENT_BASE_ASSERT_LOCKED(base);
2609 	event_debug_assert_is_setup_(ev);
2610 
2611 	event_debug((
2612 		 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2613 		 ev,
2614 		 EV_SOCK_ARG(ev->ev_fd),
2615 		 ev->ev_events & EV_READ ? "EV_READ " : " ",
2616 		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2617 		 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2618 		 tv ? "EV_TIMEOUT " : " ",
2619 		 ev->ev_callback));
2620 
2621 	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2622 
2623 	if (ev->ev_flags & EVLIST_FINALIZING) {
2624 		/* XXXX debug */
2625 		return (-1);
2626 	}
2627 
2628 	/*
2629 	 * prepare for timeout insertion further below, if we get a
2630 	 * failure on any step, we should not change any state.
2631 	 */
2632 	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2633 		if (min_heap_reserve_(&base->timeheap,
2634 			1 + min_heap_size_(&base->timeheap)) == -1)
2635 			return (-1);  /* ENOMEM == errno */
2636 	}
2637 
2638 	/* If the main thread is currently executing a signal event's
2639 	 * callback, and we are not the main thread, then we want to wait
2640 	 * until the callback is done before we mess with the event, or else
2641 	 * we can race on ev_ncalls and ev_pncalls below. */
2642 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2643 	if (base->current_event == event_to_event_callback(ev) &&
2644 	    (ev->ev_events & EV_SIGNAL)
2645 	    && !EVBASE_IN_THREAD(base)) {
2646 		++base->current_event_waiters;
2647 		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2648 	}
2649 #endif
2650 
2651 	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2652 	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2653 		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2654 			res = evmap_io_add_(base, ev->ev_fd, ev);
2655 		else if (ev->ev_events & EV_SIGNAL)
2656 			res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2657 		if (res != -1)
2658 			event_queue_insert_inserted(base, ev);
2659 		if (res == 1) {
2660 			/* evmap says we need to notify the main thread. */
2661 			notify = 1;
2662 			res = 0;
2663 		}
2664 	}
2665 
2666 	/*
2667 	 * we should change the timeout state only if the previous event
2668 	 * addition succeeded.
2669 	 */
2670 	if (res != -1 && tv != NULL) {
2671 		struct timeval now;
2672 		int common_timeout;
2673 #ifdef USE_REINSERT_TIMEOUT
2674 		int was_common;
2675 		int old_timeout_idx;
2676 #endif
2677 
2678 		/*
2679 		 * for persistent timeout events, we remember the
2680 		 * timeout value and re-add the event.
2681 		 *
2682 		 * If tv_is_absolute, this was already set.
2683 		 */
2684 		if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2685 			ev->ev_io_timeout = *tv;
2686 
2687 #ifndef USE_REINSERT_TIMEOUT
2688 		if (ev->ev_flags & EVLIST_TIMEOUT) {
2689 			event_queue_remove_timeout(base, ev);
2690 		}
2691 #endif
2692 
2693 		/* Check if it is active due to a timeout.  Rescheduling
2694 		 * this timeout before the callback can be executed
2695 		 * removes it from the active list. */
2696 		if ((ev->ev_flags & EVLIST_ACTIVE) &&
2697 		    (ev->ev_res & EV_TIMEOUT)) {
2698 			if (ev->ev_events & EV_SIGNAL) {
2699 				/* See if we are just active executing
2700 				 * this event in a loop
2701 				 */
2702 				if (ev->ev_ncalls && ev->ev_pncalls) {
2703 					/* Abort loop */
2704 					*ev->ev_pncalls = 0;
2705 				}
2706 			}
2707 
2708 			event_queue_remove_active(base, event_to_event_callback(ev));
2709 		}
2710 
2711 		gettime(base, &now);
2712 
2713 		common_timeout = is_common_timeout(tv, base);
2714 #ifdef USE_REINSERT_TIMEOUT
2715 		was_common = is_common_timeout(&ev->ev_timeout, base);
2716 		old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2717 #endif
2718 
2719 		if (tv_is_absolute) {
2720 			ev->ev_timeout = *tv;
2721 		} else if (common_timeout) {
2722 			struct timeval tmp = *tv;
2723 			tmp.tv_usec &= MICROSECONDS_MASK;
2724 			evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2725 			ev->ev_timeout.tv_usec |=
2726 			    (tv->tv_usec & ~MICROSECONDS_MASK);
2727 		} else {
2728 			evutil_timeradd(&now, tv, &ev->ev_timeout);
2729 		}
2730 
2731 		event_debug((
2732 			 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2733 			 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
2734 
2735 #ifdef USE_REINSERT_TIMEOUT
2736 		event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2737 #else
2738 		event_queue_insert_timeout(base, ev);
2739 #endif
2740 
2741 		if (common_timeout) {
2742 			struct common_timeout_list *ctl =
2743 			    get_common_timeout_list(base, &ev->ev_timeout);
2744 			if (ev == TAILQ_FIRST(&ctl->events)) {
2745 				common_timeout_schedule(ctl, &now, ev);
2746 			}
2747 		} else {
2748 			struct event* top = NULL;
2749 			/* See if the earliest timeout is now earlier than it
2750 			 * was before: if so, we will need to tell the main
2751 			 * thread to wake up earlier than it would otherwise.
2752 			 * We double check the timeout of the top element to
2753 			 * handle time distortions due to system suspension.
2754 			 */
2755 			if (min_heap_elt_is_top_(ev))
2756 				notify = 1;
2757 			else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2758 					 evutil_timercmp(&top->ev_timeout, &now, <))
2759 				notify = 1;
2760 		}
2761 	}
2762 
2763 	/* if we are not in the right thread, we need to wake up the loop */
2764 	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2765 		evthread_notify_base(base);
2766 
2767 	event_debug_note_add_(ev);
2768 
2769 	return (res);
2770 }
2771 
2772 static int
2773 event_del_(struct event *ev, int blocking)
2774 {
2775 	int res;
2776 	struct event_base *base = ev->ev_base;
2777 
2778 	if (EVUTIL_FAILURE_CHECK(!base)) {
2779 		event_warnx("%s: event has no event_base set.", __func__);
2780 		return -1;
2781 	}
2782 
2783 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2784 	res = event_del_nolock_(ev, blocking);
2785 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2786 
2787 	return (res);
2788 }
2789 
2790 int
2791 event_del(struct event *ev)
2792 {
2793 	return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2794 }
2795 
2796 int
2797 event_del_block(struct event *ev)
2798 {
2799 	return event_del_(ev, EVENT_DEL_BLOCK);
2800 }
2801 
2802 int
2803 event_del_noblock(struct event *ev)
2804 {
2805 	return event_del_(ev, EVENT_DEL_NOBLOCK);
2806 }
2807 
2808 /** Helper for event_del: always called with th_base_lock held.
2809  *
2810  * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2811  * EVEN_IF_FINALIZING} values. See those for more information.
2812  */
2813 int
2814 event_del_nolock_(struct event *ev, int blocking)
2815 {
2816 	struct event_base *base;
2817 	int res = 0, notify = 0;
2818 
2819 	event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2820 		ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2821 
2822 	/* An event without a base has not been added */
2823 	if (ev->ev_base == NULL)
2824 		return (-1);
2825 
2826 	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2827 
2828 	if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2829 		if (ev->ev_flags & EVLIST_FINALIZING) {
2830 			/* XXXX Debug */
2831 			return 0;
2832 		}
2833 	}
2834 
2835 	base = ev->ev_base;
2836 
2837 	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2838 
2839 	/* See if we are just active executing this event in a loop */
2840 	if (ev->ev_events & EV_SIGNAL) {
2841 		if (ev->ev_ncalls && ev->ev_pncalls) {
2842 			/* Abort loop */
2843 			*ev->ev_pncalls = 0;
2844 		}
2845 	}
2846 
2847 	if (ev->ev_flags & EVLIST_TIMEOUT) {
2848 		/* NOTE: We never need to notify the main thread because of a
2849 		 * deleted timeout event: all that could happen if we don't is
2850 		 * that the dispatch loop might wake up too early.  But the
2851 		 * point of notifying the main thread _is_ to wake up the
2852 		 * dispatch loop early anyway, so we wouldn't gain anything by
2853 		 * doing it.
2854 		 */
2855 		event_queue_remove_timeout(base, ev);
2856 	}
2857 
2858 	if (ev->ev_flags & EVLIST_ACTIVE)
2859 		event_queue_remove_active(base, event_to_event_callback(ev));
2860 	else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2861 		event_queue_remove_active_later(base, event_to_event_callback(ev));
2862 
2863 	if (ev->ev_flags & EVLIST_INSERTED) {
2864 		event_queue_remove_inserted(base, ev);
2865 		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2866 			res = evmap_io_del_(base, ev->ev_fd, ev);
2867 		else
2868 			res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2869 		if (res == 1) {
2870 			/* evmap says we need to notify the main thread. */
2871 			notify = 1;
2872 			res = 0;
2873 		}
2874 		/* If we do not have events, let's notify event base so it can
2875 		 * exit without waiting */
2876 		if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base))
2877 			notify = 1;
2878 	}
2879 
2880 	/* if we are not in the right thread, we need to wake up the loop */
2881 	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2882 		evthread_notify_base(base);
2883 
2884 	event_debug_note_del_(ev);
2885 
2886 	/* If the main thread is currently executing this event's callback,
2887 	 * and we are not the main thread, then we want to wait until the
2888 	 * callback is done before returning. That way, when this function
2889 	 * returns, it will be safe to free the user-supplied argument.
2890 	 */
2891 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2892 	if (blocking != EVENT_DEL_NOBLOCK &&
2893 	    base->current_event == event_to_event_callback(ev) &&
2894 	    !EVBASE_IN_THREAD(base) &&
2895 	    (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
2896 		++base->current_event_waiters;
2897 		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2898 	}
2899 #endif
2900 
2901 	return (res);
2902 }
2903 
2904 void
2905 event_active(struct event *ev, int res, short ncalls)
2906 {
2907 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2908 		event_warnx("%s: event has no event_base set.", __func__);
2909 		return;
2910 	}
2911 
2912 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2913 
2914 	event_debug_assert_is_setup_(ev);
2915 
2916 	event_active_nolock_(ev, res, ncalls);
2917 
2918 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2919 }
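/*
 * Illustrative sketch (not part of libevent, not compiled):
 * event_active() queues an event's callback by hand, without waiting for
 * its fd or timeout.  Names are hypothetical.
 */
#if 0
static void
kick_event(struct event *ev)
{
	/* Run the callback on the next pass of the loop, reporting
	 * EV_READ as the reason; ncalls is only used for signals. */
	event_active(ev, EV_READ, 0);
}
#endif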
2920 
2921 
2922 void
2923 event_active_nolock_(struct event *ev, int res, short ncalls)
2924 {
2925 	struct event_base *base;
2926 
2927 	event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2928 		ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2929 
2930 	base = ev->ev_base;
2931 	EVENT_BASE_ASSERT_LOCKED(base);
2932 
2933 	if (ev->ev_flags & EVLIST_FINALIZING) {
2934 		/* XXXX debug */
2935 		return;
2936 	}
2937 
2938 	switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2939 	default:
2940 	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2941 		EVUTIL_ASSERT(0);
2942 		break;
2943 	case EVLIST_ACTIVE:
2944 		/* We get different kinds of events, add them together */
2945 		ev->ev_res |= res;
2946 		return;
2947 	case EVLIST_ACTIVE_LATER:
2948 		ev->ev_res |= res;
2949 		break;
2950 	case 0:
2951 		ev->ev_res = res;
2952 		break;
2953 	}
2954 
2955 	if (ev->ev_pri < base->event_running_priority)
2956 		base->event_continue = 1;
2957 
2958 	if (ev->ev_events & EV_SIGNAL) {
2959 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2960 		if (base->current_event == event_to_event_callback(ev) &&
2961 		    !EVBASE_IN_THREAD(base)) {
2962 			++base->current_event_waiters;
2963 			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2964 		}
2965 #endif
2966 		ev->ev_ncalls = ncalls;
2967 		ev->ev_pncalls = NULL;
2968 	}
2969 
2970 	event_callback_activate_nolock_(base, event_to_event_callback(ev));
2971 }
2972 
2973 void
2974 event_active_later_(struct event *ev, int res)
2975 {
2976 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2977 	event_active_later_nolock_(ev, res);
2978 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2979 }
2980 
2981 void
2982 event_active_later_nolock_(struct event *ev, int res)
2983 {
2984 	struct event_base *base = ev->ev_base;
2985 	EVENT_BASE_ASSERT_LOCKED(base);
2986 
2987 	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2988 		/* We get different kinds of events, add them together */
2989 		ev->ev_res |= res;
2990 		return;
2991 	}
2992 
2993 	ev->ev_res = res;
2994 
2995 	event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
2996 }
2997 
2998 int
2999 event_callback_activate_(struct event_base *base,
3000     struct event_callback *evcb)
3001 {
3002 	int r;
3003 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3004 	r = event_callback_activate_nolock_(base, evcb);
3005 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3006 	return r;
3007 }
3008 
3009 int
3010 event_callback_activate_nolock_(struct event_base *base,
3011     struct event_callback *evcb)
3012 {
3013 	int r = 1;
3014 
3015 	if (evcb->evcb_flags & EVLIST_FINALIZING)
3016 		return 0;
3017 
3018 	switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3019 	default:
3020 		EVUTIL_ASSERT(0);
3021 		EVUTIL_FALLTHROUGH;
3022 	case EVLIST_ACTIVE_LATER:
3023 		event_queue_remove_active_later(base, evcb);
3024 		r = 0;
3025 		break;
3026 	case EVLIST_ACTIVE:
3027 		return 0;
3028 	case 0:
3029 		break;
3030 	}
3031 
3032 	event_queue_insert_active(base, evcb);
3033 
3034 	if (EVBASE_NEED_NOTIFY(base))
3035 		evthread_notify_base(base);
3036 
3037 	return r;
3038 }
3039 
3040 int
3041 event_callback_activate_later_nolock_(struct event_base *base,
3042     struct event_callback *evcb)
3043 {
3044 	if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
3045 		return 0;
3046 
3047 	event_queue_insert_active_later(base, evcb);
3048 	if (EVBASE_NEED_NOTIFY(base))
3049 		evthread_notify_base(base);
3050 	return 1;
3051 }
3052 
3053 void
3054 event_callback_init_(struct event_base *base,
3055     struct event_callback *cb)
3056 {
3057 	memset(cb, 0, sizeof(*cb));
3058 	cb->evcb_pri = base->nactivequeues - 1;
3059 }
3060 
3061 int
3062 event_callback_cancel_(struct event_base *base,
3063     struct event_callback *evcb)
3064 {
3065 	int r;
3066 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3067 	r = event_callback_cancel_nolock_(base, evcb, 0);
3068 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3069 	return r;
3070 }
3071 
3072 int
3073 event_callback_cancel_nolock_(struct event_base *base,
3074     struct event_callback *evcb, int even_if_finalizing)
3075 {
3076 	if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
3077 		return 0;
3078 
3079 	if (evcb->evcb_flags & EVLIST_INIT)
3080 		return event_del_nolock_(event_callback_to_event(evcb),
3081 		    even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
3082 
3083 	switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3084 	default:
3085 	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3086 		EVUTIL_ASSERT(0);
3087 		break;
3088 	case EVLIST_ACTIVE:
3089 		/* We get different kinds of events, add them together */
3090 		event_queue_remove_active(base, evcb);
3091 		return 0;
3092 	case EVLIST_ACTIVE_LATER:
3093 		event_queue_remove_active_later(base, evcb);
3094 		break;
3095 	case 0:
3096 		break;
3097 	}
3098 
3099 	return 0;
3100 }
3101 
3102 void
3103 event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
3104 {
3105 	memset(cb, 0, sizeof(*cb));
3106 	cb->evcb_cb_union.evcb_selfcb = fn;
3107 	cb->evcb_arg = arg;
3108 	cb->evcb_pri = priority;
3109 	cb->evcb_closure = EV_CLOSURE_CB_SELF;
3110 }
3111 
3112 void
3113 event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
3114 {
3115 	cb->evcb_pri = priority;
3116 }
3117 
3118 void
3119 event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3120 {
3121 	if (!base)
3122 		base = current_base;
3123 	event_callback_cancel_(base, cb);
3124 }
3125 
3126 #define MAX_DEFERREDS_QUEUED 32
3127 int
3128 event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3129 {
3130 	int r = 1;
3131 	if (!base)
3132 		base = current_base;
3133 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3134 	if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3135 		r = event_callback_activate_later_nolock_(base, cb);
3136 	} else {
3137 		r = event_callback_activate_nolock_(base, cb);
3138 		if (r) {
3139 			++base->n_deferreds_queued;
3140 		}
3141 	}
3142 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3143 	return r;
3144 }
3145 
3146 static int
3147 timeout_next(struct event_base *base, struct timeval **tv_p)
3148 {
3149 	/* Caller must hold th_base_lock */
3150 	struct timeval now;
3151 	struct event *ev;
3152 	struct timeval *tv = *tv_p;
3153 	int res = 0;
3154 
3155 	ev = min_heap_top_(&base->timeheap);
3156 
3157 	if (ev == NULL) {
3158 		/* if no time-based events are active wait for I/O */
3159 		*tv_p = NULL;
3160 		goto out;
3161 	}
3162 
3163 	if (gettime(base, &now) == -1) {
3164 		res = -1;
3165 		goto out;
3166 	}
3167 
3168 	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3169 		evutil_timerclear(tv);
3170 		goto out;
3171 	}
3172 
3173 	evutil_timersub(&ev->ev_timeout, &now, tv);
3174 
3175 	EVUTIL_ASSERT(tv->tv_sec >= 0);
3176 	EVUTIL_ASSERT(tv->tv_usec >= 0);
3177 	event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
3178 
3179 out:
3180 	return (res);
3181 }
3182 
3183 /* Activate every event whose timeout has elapsed. */
3184 static void
3185 timeout_process(struct event_base *base)
3186 {
3187 	/* Caller must hold lock. */
3188 	struct timeval now;
3189 	struct event *ev;
3190 
3191 	if (min_heap_empty_(&base->timeheap)) {
3192 		return;
3193 	}
3194 
3195 	gettime(base, &now);
3196 
3197 	while ((ev = min_heap_top_(&base->timeheap))) {
3198 		if (evutil_timercmp(&ev->ev_timeout, &now, >))
3199 			break;
3200 
3201 		/* delete this event from the I/O queues */
3202 		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3203 
3204 		event_debug(("timeout_process: event: %p, call %p",
3205 			 ev, ev->ev_callback));
3206 		event_active_nolock_(ev, EV_TIMEOUT, 1);
3207 	}
3208 }
3209 
3210 #ifndef MAX
3211 #define MAX(a,b) (((a)>(b))?(a):(b))
3212 #endif
3213 
3214 #define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3215 
3216 /* These are a fancy way to spell
3217      if (~flags & EVLIST_INTERNAL)
3218          base->event_count--/++;
3219 */
3220 #define DECR_EVENT_COUNT(base,flags) \
3221 	((base)->event_count -= !((flags) & EVLIST_INTERNAL))
3222 #define INCR_EVENT_COUNT(base,flags) do {					\
3223 	((base)->event_count += !((flags) & EVLIST_INTERNAL));			\
3224 	MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);		\
3225 } while (0)
3226 
3227 static void
3228 event_queue_remove_inserted(struct event_base *base, struct event *ev)
3229 {
3230 	EVENT_BASE_ASSERT_LOCKED(base);
3231 	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3232 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3233 		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3234 		return;
3235 	}
3236 	DECR_EVENT_COUNT(base, ev->ev_flags);
3237 	ev->ev_flags &= ~EVLIST_INSERTED;
3238 }
3239 static void
3240 event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3241 {
3242 	EVENT_BASE_ASSERT_LOCKED(base);
3243 	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3244 		event_errx(1, "%s: %p not on queue %x", __func__,
3245 			   evcb, EVLIST_ACTIVE);
3246 		return;
3247 	}
3248 	DECR_EVENT_COUNT(base, evcb->evcb_flags);
3249 	evcb->evcb_flags &= ~EVLIST_ACTIVE;
3250 	base->event_count_active--;
3251 
3252 	TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3253 	    evcb, evcb_active_next);
3254 }
3255 static void
3256 event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3257 {
3258 	EVENT_BASE_ASSERT_LOCKED(base);
3259 	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3260 		event_errx(1, "%s: %p not on queue %x", __func__,
3261 			   evcb, EVLIST_ACTIVE_LATER);
3262 		return;
3263 	}
3264 	DECR_EVENT_COUNT(base, evcb->evcb_flags);
3265 	evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3266 	base->event_count_active--;
3267 
3268 	TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3269 }
3270 static void
3271 event_queue_remove_timeout(struct event_base *base, struct event *ev)
3272 {
3273 	EVENT_BASE_ASSERT_LOCKED(base);
3274 	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3275 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3276 		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3277 		return;
3278 	}
3279 	DECR_EVENT_COUNT(base, ev->ev_flags);
3280 	ev->ev_flags &= ~EVLIST_TIMEOUT;
3281 
3282 	if (is_common_timeout(&ev->ev_timeout, base)) {
3283 		struct common_timeout_list *ctl =
3284 		    get_common_timeout_list(base, &ev->ev_timeout);
3285 		TAILQ_REMOVE(&ctl->events, ev,
3286 		    ev_timeout_pos.ev_next_with_common_timeout);
3287 	} else {
3288 		min_heap_erase_(&base->timeheap, ev);
3289 	}
3290 }
3291 
3292 #ifdef USE_REINSERT_TIMEOUT
3293 /* Remove and reinsert 'ev' into the timeout queue. */
3294 static void
3295 event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3296     int was_common, int is_common, int old_timeout_idx)
3297 {
3298 	struct common_timeout_list *ctl;
3299 	if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3300 		event_queue_insert_timeout(base, ev);
3301 		return;
3302 	}
3303 
3304 	switch ((was_common<<1) | is_common) {
3305 	case 3: /* Changing from one common timeout to another */
3306 		ctl = base->common_timeout_queues[old_timeout_idx];
3307 		TAILQ_REMOVE(&ctl->events, ev,
3308 		    ev_timeout_pos.ev_next_with_common_timeout);
3309 		ctl = get_common_timeout_list(base, &ev->ev_timeout);
3310 		insert_common_timeout_inorder(ctl, ev);
3311 		break;
3312 	case 2: /* Was common; is no longer common */
3313 		ctl = base->common_timeout_queues[old_timeout_idx];
3314 		TAILQ_REMOVE(&ctl->events, ev,
3315 		    ev_timeout_pos.ev_next_with_common_timeout);
3316 		min_heap_push_(&base->timeheap, ev);
3317 		break;
3318 	case 1: /* Wasn't common; has become common. */
3319 		min_heap_erase_(&base->timeheap, ev);
3320 		ctl = get_common_timeout_list(base, &ev->ev_timeout);
3321 		insert_common_timeout_inorder(ctl, ev);
3322 		break;
3323 	case 0: /* was in heap; is still on heap. */
3324 		min_heap_adjust_(&base->timeheap, ev);
3325 		break;
3326 	default:
3327 		EVUTIL_ASSERT(0); /* unreachable */
3328 		break;
3329 	}
3330 }
3331 #endif
3332 
3333 /* Add 'ev' to the common timeout list in 'ctl'. */
3334 static void
3335 insert_common_timeout_inorder(struct common_timeout_list *ctl,
3336     struct event *ev)
3337 {
3338 	struct event *e;
3339 	/* By all logic, we should just be able to append 'ev' to the end of
3340 	 * ctl->events, since the timeout on each 'ev' is set to {the common
3341 	 * timeout} + {the time when we add the event}, and so the events
3342 	 * should arrive in order of their timeouts.  But just in case
3343 	 * there's some wacky threading issue going on, we do a search from
3344 	 * the end of 'ctl->events' to find the right insertion point.
3345 	 */
3346 	TAILQ_FOREACH_REVERSE(e, &ctl->events,
3347 	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3348 		/* This timercmp is a little sneaky, since both ev and e have
3349 		 * magic values in tv_usec.  Fortunately, they ought to have
3350 		 * the _same_ magic values in tv_usec.  Let's assert for that.
3351 		 */
3352 		EVUTIL_ASSERT(
3353 			is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3354 		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3355 			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3356 			    ev_timeout_pos.ev_next_with_common_timeout);
3357 			return;
3358 		}
3359 	}
3360 	TAILQ_INSERT_HEAD(&ctl->events, ev,
3361 	    ev_timeout_pos.ev_next_with_common_timeout);
3362 }
3363 
3364 static void
3365 event_queue_insert_inserted(struct event_base *base, struct event *ev)
3366 {
3367 	EVENT_BASE_ASSERT_LOCKED(base);
3368 
3369 	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3370 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3371 		    ev, EV_SOCK_ARG(ev->ev_fd));
3372 		return;
3373 	}
3374 
3375 	INCR_EVENT_COUNT(base, ev->ev_flags);
3376 
3377 	ev->ev_flags |= EVLIST_INSERTED;
3378 }
3379 
3380 static void
3381 event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3382 {
3383 	EVENT_BASE_ASSERT_LOCKED(base);
3384 
3385 	if (evcb->evcb_flags & EVLIST_ACTIVE) {
3386 		/* Double insertion is possible for active events */
3387 		return;
3388 	}
3389 
3390 	INCR_EVENT_COUNT(base, evcb->evcb_flags);
3391 
3392 	evcb->evcb_flags |= EVLIST_ACTIVE;
3393 
3394 	base->event_count_active++;
3395 	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3396 	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3397 	TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3398 	    evcb, evcb_active_next);
3399 }
3400 
3401 static void
3402 event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3403 {
3404 	EVENT_BASE_ASSERT_LOCKED(base);
3405 	if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3406 		/* Double insertion is possible */
3407 		return;
3408 	}
3409 
3410 	INCR_EVENT_COUNT(base, evcb->evcb_flags);
3411 	evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3412 	base->event_count_active++;
3413 	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3414 	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3415 	TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3416 }
3417 
3418 static void
3419 event_queue_insert_timeout(struct event_base *base, struct event *ev)
3420 {
3421 	EVENT_BASE_ASSERT_LOCKED(base);
3422 
3423 	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3424 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3425 		    ev, EV_SOCK_ARG(ev->ev_fd));
3426 		return;
3427 	}
3428 
3429 	INCR_EVENT_COUNT(base, ev->ev_flags);
3430 
3431 	ev->ev_flags |= EVLIST_TIMEOUT;
3432 
3433 	if (is_common_timeout(&ev->ev_timeout, base)) {
3434 		struct common_timeout_list *ctl =
3435 		    get_common_timeout_list(base, &ev->ev_timeout);
3436 		insert_common_timeout_inorder(ctl, ev);
3437 	} else {
3438 		min_heap_push_(&base->timeheap, ev);
3439 	}
3440 }
3441 
3442 static void
3443 event_queue_make_later_events_active(struct event_base *base)
3444 {
3445 	struct event_callback *evcb;
3446 	EVENT_BASE_ASSERT_LOCKED(base);
3447 
3448 	while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3449 		TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3450 		evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3451 		EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3452 		TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3453 		base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3454 	}
3455 }
3456 
3457 /* Functions for debugging */
3458 
3459 const char *
3460 event_get_version(void)
3461 {
3462 	return (EVENT__VERSION);
3463 }
3464 
3465 ev_uint32_t
3466 event_get_version_number(void)
3467 {
3468 	return (EVENT__NUMERIC_VERSION);
3469 }
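/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): applications commonly compare the run-time version against the
 * headers they were compiled with.  LIBEVENT_VERSION and
 * LIBEVENT_VERSION_NUMBER come from <event2/event.h>.
 *
 *	if (event_get_version_number() != LIBEVENT_VERSION_NUMBER)
 *		fprintf(stderr, "libevent header/library mismatch: %s vs %s\n",
 *		    LIBEVENT_VERSION, event_get_version());
 */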
3470 
3471 /*
3472  * No thread-safe interface needed - the information should be the same
3473  * for all threads.
3474  */
3475 
3476 const char *
3477 event_get_method(void)
3478 {
3479 	return (current_base->evsel->name);
3480 }
3481 
3482 #ifndef EVENT__DISABLE_MM_REPLACEMENT
3483 static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3484 static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3485 static void (*mm_free_fn_)(void *p) = NULL;
3486 
3487 void *
3488 event_mm_malloc_(size_t sz)
3489 {
3490 	if (sz == 0)
3491 		return NULL;
3492 
3493 	if (mm_malloc_fn_)
3494 		return mm_malloc_fn_(sz);
3495 	else
3496 		return malloc(sz);
3497 }
3498 
3499 void *
3500 event_mm_calloc_(size_t count, size_t size)
3501 {
3502 	if (count == 0 || size == 0)
3503 		return NULL;
3504 
3505 	if (mm_malloc_fn_) {
3506 		size_t sz = count * size;
3507 		void *p = NULL;
3508 		if (count > EV_SIZE_MAX / size)
3509 			goto error;
3510 		p = mm_malloc_fn_(sz);
3511 		if (p)
3512 			return memset(p, 0, sz);
3513 	} else {
3514 		void *p = calloc(count, size);
3515 #ifdef _WIN32
3516 		/* Windows calloc doesn't reliably set ENOMEM */
3517 		if (p == NULL)
3518 			goto error;
3519 #endif
3520 		return p;
3521 	}
3522 
3523 error:
3524 	errno = ENOMEM;
3525 	return NULL;
3526 }
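/*
 * Editor's note on the overflow guard above (illustrative): the check
 * "count > EV_SIZE_MAX / size" rejects products that would wrap around.
 * For example, with a 32-bit size_t (EV_SIZE_MAX == 0xffffffff),
 * count == 0x10000 and size == 0x10001 would silently multiply to 0x10000
 * after truncation; since EV_SIZE_MAX / 0x10001 == 0xffff < 0x10000, the
 * request is refused and ENOMEM is reported instead.
 */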
3527 
3528 char *
3529 event_mm_strdup_(const char *str)
3530 {
3531 	if (!str) {
3532 		errno = EINVAL;
3533 		return NULL;
3534 	}
3535 
3536 	if (mm_malloc_fn_) {
3537 		size_t ln = strlen(str);
3538 		void *p = NULL;
3539 		if (ln == EV_SIZE_MAX)
3540 			goto error;
3541 		p = mm_malloc_fn_(ln+1);
3542 		if (p)
3543 			return memcpy(p, str, ln+1);
3544 	} else
3545 #ifdef _WIN32
3546 		return _strdup(str);
3547 #else
3548 		return strdup(str);
3549 #endif
3550 
3551 error:
3552 	errno = ENOMEM;
3553 	return NULL;
3554 }
3555 
3556 void *
3557 event_mm_realloc_(void *ptr, size_t sz)
3558 {
3559 	if (mm_realloc_fn_)
3560 		return mm_realloc_fn_(ptr, sz);
3561 	else
3562 		return realloc(ptr, sz);
3563 }
3564 
3565 void
3566 event_mm_free_(void *ptr)
3567 {
3568 	if (mm_free_fn_)
3569 		mm_free_fn_(ptr);
3570 	else
3571 		free(ptr);
3572 }
3573 
3574 void
3575 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3576 			void *(*realloc_fn)(void *ptr, size_t sz),
3577 			void (*free_fn)(void *ptr))
3578 {
3579 	mm_malloc_fn_ = malloc_fn;
3580 	mm_realloc_fn_ = realloc_fn;
3581 	mm_free_fn_ = free_fn;
3582 }
3583 #endif
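/*
 * Illustrative usage sketch (editor's addition; the counting wrappers and
 * n_allocs are hypothetical, only event_set_mem_functions() itself is
 * libevent API).  Custom allocators should be installed before any other
 * libevent call that might allocate memory.
 *
 *	static size_t n_allocs;
 *
 *	static void *count_malloc(size_t sz) { ++n_allocs; return malloc(sz); }
 *	static void *count_realloc(void *p, size_t sz) { return realloc(p, sz); }
 *	static void count_free(void *p) { free(p); }
 *
 *	event_set_mem_functions(count_malloc, count_realloc, count_free);
 */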
3584 
3585 #ifdef EVENT__HAVE_EVENTFD
3586 static void
3587 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3588 {
3589 	ev_uint64_t msg;
3590 	ev_ssize_t r;
3591 	struct event_base *base = arg;
3592 
3593 	r = read(fd, (void*) &msg, sizeof(msg));
3594 	if (r<0 && errno != EAGAIN) {
3595 		event_sock_warn(fd, "Error reading from eventfd");
3596 	}
3597 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3598 	base->is_notify_pending = 0;
3599 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3600 }
3601 #endif
3602 
3603 static void
3604 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3605 {
3606 	unsigned char buf[1024];
3607 	struct event_base *base = arg;
3608 #ifdef _WIN32
3609 	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3610 		;
3611 #else
3612 	while (read(fd, (char*)buf, sizeof(buf)) > 0)
3613 		;
3614 #endif
3615 
3616 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3617 	base->is_notify_pending = 0;
3618 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3619 }
3620 
3621 int
3622 evthread_make_base_notifiable(struct event_base *base)
3623 {
3624 	int r;
3625 	if (!base)
3626 		return -1;
3627 
3628 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3629 	r = evthread_make_base_notifiable_nolock_(base);
3630 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3631 	return r;
3632 }
3633 
3634 static int
3635 evthread_make_base_notifiable_nolock_(struct event_base *base)
3636 {
3637 	void (*cb)(evutil_socket_t, short, void *);
3638 	int (*notify)(struct event_base *);
3639 
3640 	if (base->th_notify_fn != NULL) {
3641 		/* The base is already notifiable: we're doing fine. */
3642 		return 0;
3643 	}
3644 
3645 #if defined(EVENT__HAVE_WORKING_KQUEUE)
3646 	if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3647 		base->th_notify_fn = event_kq_notify_base_;
3648 		/* No need to add an event here; the backend can wake
3649 		 * itself up just fine. */
3650 		return 0;
3651 	}
3652 #endif
3653 
3654 #ifdef EVENT__HAVE_EVENTFD
3655 	base->th_notify_fd[0] = evutil_eventfd_(0,
3656 	    EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3657 	if (base->th_notify_fd[0] >= 0) {
3658 		base->th_notify_fd[1] = -1;
3659 		notify = evthread_notify_base_eventfd;
3660 		cb = evthread_notify_drain_eventfd;
3661 	} else
3662 #endif
3663 	if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3664 		notify = evthread_notify_base_default;
3665 		cb = evthread_notify_drain_default;
3666 	} else {
3667 		return -1;
3668 	}
3669 
3670 	base->th_notify_fn = notify;
3671 
3672 	/* prepare an event that we can use for wakeup */
3673 	event_assign(&base->th_notify, base, base->th_notify_fd[0],
3674 				 EV_READ|EV_PERSIST, cb, base);
3675 
3676 	/* We need to mark this as an internal event. */
3677 	base->th_notify.ev_flags |= EVLIST_INTERNAL;
3678 	event_priority_set(&base->th_notify, 0);
3679 
3680 	return event_add_nolock_(&base->th_notify, NULL, 0);
3681 }
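/*
 * Illustrative usage sketch (editor's addition): a base normally becomes
 * notifiable automatically when it is created after threading support has
 * been enabled; evthread_make_base_notifiable() is for a base that was
 * created earlier.  Assumes libevent_pthreads is linked in.
 *
 *	evthread_use_pthreads();                  // from <event2/thread.h>
 *	struct event_base *base = event_base_new();
 *	// ...or, for a base created before evthread_use_pthreads():
 *	// evthread_make_base_notifiable(old_base);
 */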
3682 
3683 int
3684 event_base_foreach_event_nolock_(struct event_base *base,
3685     event_base_foreach_event_cb fn, void *arg)
3686 {
3687 	int r, i;
3688 	unsigned u;
3689 	struct event *ev;
3690 
3691 	/* Start out with all the EVLIST_INSERTED events. */
3692 	if ((r = evmap_foreach_event_(base, fn, arg)))
3693 		return r;
3694 
3695 	/* Okay, now we deal with those events that have timeouts and are in
3696 	 * the min-heap. */
3697 	for (u = 0; u < base->timeheap.n; ++u) {
3698 		ev = base->timeheap.p[u];
3699 		if (ev->ev_flags & EVLIST_INSERTED) {
3700 			/* we already processed this one */
3701 			continue;
3702 		}
3703 		if ((r = fn(base, ev, arg)))
3704 			return r;
3705 	}
3706 
3707 	/* Now for the events in the common-timeout queues, which are not
3708 	 * in the min-heap. */
3709 	for (i = 0; i < base->n_common_timeouts; ++i) {
3710 		struct common_timeout_list *ctl =
3711 		    base->common_timeout_queues[i];
3712 		TAILQ_FOREACH(ev, &ctl->events,
3713 		    ev_timeout_pos.ev_next_with_common_timeout) {
3714 			if (ev->ev_flags & EVLIST_INSERTED) {
3715 				/* we already processed this one */
3716 				continue;
3717 			}
3718 			if ((r = fn(base, ev, arg)))
3719 				return r;
3720 		}
3721 	}
3722 
3723 	/* Finally, we deal with all the active events that we haven't touched
3724 	 * yet. */
3725 	for (i = 0; i < base->nactivequeues; ++i) {
3726 		struct event_callback *evcb;
3727 		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3728 			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3729 				/* This isn't an event (EVLIST_INIT clear), or
3730 				 * we already processed it (EVLIST_INSERTED or
3731 				 * EVLIST_TIMEOUT set). */
3732 				continue;
3733 			}
3734 			ev = event_callback_to_event(evcb);
3735 			if ((r = fn(base, ev, arg)))
3736 				return r;
3737 		}
3738 	}
3739 
3740 	return 0;
3741 }
3742 
3743 /* Helper for event_base_dump_events: called on each event in the event base;
3744  * dumps only the inserted events. */
3745 static int
3746 dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3747 {
3748 	FILE *output = arg;
3749 	const char *gloss = (e->ev_events & EV_SIGNAL) ?
3750 	    "sig" : "fd ";
3751 
3752 	if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3753 		return 0;
3754 
3755 	fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s",
3756 	    e, gloss, EV_SOCK_ARG(e->ev_fd),
3757 	    (e->ev_events&EV_READ)?" Read":"",
3758 	    (e->ev_events&EV_WRITE)?" Write":"",
3759 	    (e->ev_events&EV_CLOSED)?" EOF":"",
3760 	    (e->ev_events&EV_SIGNAL)?" Signal":"",
3761 	    (e->ev_events&EV_PERSIST)?" Persist":"",
3762 	    (e->ev_events&EV_ET)?" ET":"",
3763 	    (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3764 	if (e->ev_flags & EVLIST_TIMEOUT) {
3765 		struct timeval tv;
3766 		tv.tv_sec = e->ev_timeout.tv_sec;
3767 		tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3768 		evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3769 		fprintf(output, " Timeout=%ld.%06d",
3770 		    (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3771 	}
3772 	fputc('\n', output);
3773 
3774 	return 0;
3775 }
3776 
3777 /* Helper for event_base_dump_events: called on each event in the event base;
3778  * dumps only the active events. */
3779 static int
3780 dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3781 {
3782 	FILE *output = arg;
3783 	const char *gloss = (e->ev_events & EV_SIGNAL) ?
3784 	    "sig" : "fd ";
3785 
3786 	if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3787 		return 0;
3788 
3789 	fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3790 	    e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3791 	    (e->ev_res&EV_READ)?" Read":"",
3792 	    (e->ev_res&EV_WRITE)?" Write":"",
3793 	    (e->ev_res&EV_CLOSED)?" EOF":"",
3794 	    (e->ev_res&EV_SIGNAL)?" Signal":"",
3795 	    (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3796 	    (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3797 	    (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3798 
3799 	return 0;
3800 }
3801 
3802 int
3803 event_base_foreach_event(struct event_base *base,
3804     event_base_foreach_event_cb fn, void *arg)
3805 {
3806 	int r;
3807 	if ((!fn) || (!base)) {
3808 		return -1;
3809 	}
3810 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3811 	r = event_base_foreach_event_nolock_(base, fn, arg);
3812 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3813 	return r;
3814 }
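/*
 * Illustrative usage sketch (editor's addition; count_cb and n_events are
 * hypothetical names): the callback receives every event known to the base,
 * and iteration stops early as soon as it returns non-zero.
 *
 *	static int count_cb(const struct event_base *base,
 *	    const struct event *ev, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;              // keep iterating
 *	}
 *
 *	int n_events = 0;
 *	event_base_foreach_event(base, count_cb, &n_events);
 */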
3815 
3816 
3817 void
3818 event_base_dump_events(struct event_base *base, FILE *output)
3819 {
3820 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3821 	fprintf(output, "Inserted events:\n");
3822 	event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3823 
3824 	fprintf(output, "Active events:\n");
3825 	event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3826 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3827 }
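/*
 * Illustrative usage (editor's addition): dumping to stderr is a handy way
 * to inspect a live base while debugging.
 *
 *	event_base_dump_events(base, stderr);
 */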
3828 
3829 void
3830 event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3831 {
3832 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3833 
3834 	/* Activate any non-timer events. */
3835 	if (!(events & EV_TIMEOUT)) {
3836 		evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3837 	} else {
3838 		/* If we want to activate timer events, loop and activate each event
3839 		 * with the same fd in both the timeheap and the common-timeout lists. */
3840 		int i;
3841 		unsigned u;
3842 		struct event *ev;
3843 
3844 		for (u = 0; u < base->timeheap.n; ++u) {
3845 			ev = base->timeheap.p[u];
3846 			if (ev->ev_fd == fd) {
3847 				event_active_nolock_(ev, EV_TIMEOUT, 1);
3848 			}
3849 		}
3850 
3851 		for (i = 0; i < base->n_common_timeouts; ++i) {
3852 			struct common_timeout_list *ctl = base->common_timeout_queues[i];
3853 			TAILQ_FOREACH(ev, &ctl->events,
3854 				ev_timeout_pos.ev_next_with_common_timeout) {
3855 				if (ev->ev_fd == fd) {
3856 					event_active_nolock_(ev, EV_TIMEOUT, 1);
3857 				}
3858 			}
3859 		}
3860 	}
3861 
3862 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3863 }
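/*
 * Illustrative usage (editor's addition; sockfd is a placeholder): simulate
 * readiness on a descriptor, e.g. from a test harness, without any actual
 * I/O taking place.
 *
 *	event_base_active_by_fd(base, sockfd, EV_READ|EV_WRITE);
 */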
3864 
3865 void
3866 event_base_active_by_signal(struct event_base *base, int sig)
3867 {
3868 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3869 	evmap_signal_active_(base, sig, 1);
3870 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3871 }
3872 
3873 
3874 void
3875 event_base_add_virtual_(struct event_base *base)
3876 {
3877 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3878 	base->virtual_event_count++;
3879 	MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3880 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3881 }
3882 
3883 void
3884 event_base_del_virtual_(struct event_base *base)
3885 {
3886 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3887 	EVUTIL_ASSERT(base->virtual_event_count > 0);
3888 	base->virtual_event_count--;
3889 	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3890 		evthread_notify_base(base);
3891 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3892 }
3893 
3894 static void
3895 event_free_debug_globals_locks(void)
3896 {
3897 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3898 #ifndef EVENT__DISABLE_DEBUG_MODE
3899 	if (event_debug_map_lock_ != NULL) {
3900 		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3901 		event_debug_map_lock_ = NULL;
3902 		evthreadimpl_disable_lock_debugging_();
3903 	}
3904 #endif /* EVENT__DISABLE_DEBUG_MODE */
3905 #endif /* EVENT__DISABLE_THREAD_SUPPORT */
3906 	return;
3907 }
3908 
3909 static void
3910 event_free_debug_globals(void)
3911 {
3912 	event_free_debug_globals_locks();
3913 }
3914 
3915 static void
3916 event_free_evsig_globals(void)
3917 {
3918 	evsig_free_globals_();
3919 }
3920 
3921 static void
3922 event_free_evutil_globals(void)
3923 {
3924 	evutil_free_globals_();
3925 }
3926 
3927 static void
3928 event_free_globals(void)
3929 {
3930 	event_free_debug_globals();
3931 	event_free_evsig_globals();
3932 	event_free_evutil_globals();
3933 }
3934 
3935 void
3936 libevent_global_shutdown(void)
3937 {
3938 	event_disable_debug_mode();
3939 	event_free_globals();
3940 }
3941 
3942 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3943 int
3944 event_global_setup_locks_(const int enable_locks)
3945 {
3946 #ifndef EVENT__DISABLE_DEBUG_MODE
3947 	EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
3948 #endif
3949 	if (evsig_global_setup_locks_(enable_locks) < 0)
3950 		return -1;
3951 	if (evutil_global_setup_locks_(enable_locks) < 0)
3952 		return -1;
3953 	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
3954 		return -1;
3955 	return 0;
3956 }
3957 #endif
3958 
3959 void
3960 event_base_assert_ok_(struct event_base *base)
3961 {
3962 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3963 	event_base_assert_ok_nolock_(base);
3964 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3965 }
3966 
3967 void
3968 event_base_assert_ok_nolock_(struct event_base *base)
3969 {
3970 	int i;
3971 	int count;
3972 
3973 	/* First do checks on the per-fd and per-signal lists */
3974 	evmap_check_integrity_(base);
3975 
3976 	/* Check the heap property */
3977 	for (i = 1; i < (int)base->timeheap.n; ++i) {
3978 		int parent = (i - 1) / 2;
3979 		struct event *ev, *p_ev;
3980 		ev = base->timeheap.p[i];
3981 		p_ev = base->timeheap.p[parent];
3982 		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3983 		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
3984 		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
3985 	}
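	/*
	 * Editor's note (explanatory, added): the timeheap is an implicit
	 * binary min-heap stored in the array p[]; the parent of slot i lives
	 * at (i - 1) / 2 and its children at 2*i + 1 and 2*i + 2, so the loop
	 * above verifies that every event expires no earlier than its parent
	 * and that p[0] always holds the soonest timeout.
	 */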
3986 
3987 	/* Check that the common timeouts are fine */
3988 	for (i = 0; i < base->n_common_timeouts; ++i) {
3989 		struct common_timeout_list *ctl = base->common_timeout_queues[i];
3990 		struct event *last=NULL, *ev;
3991 
3992 		EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
3993 
3994 		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
3995 			if (last)
3996 				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
3997 			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3998 			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
3999 			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
4000 			last = ev;
4001 		}
4002 	}
4003 
4004 	/* Check the active queues. */
4005 	count = 0;
4006 	for (i = 0; i < base->nactivequeues; ++i) {
4007 		struct event_callback *evcb;
4008 		EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
4009 		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
4010 			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
4011 			EVUTIL_ASSERT(evcb->evcb_pri == i);
4012 			++count;
4013 		}
4014 	}
4015 
4016 	{
4017 		struct event_callback *evcb;
4018 		TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
4019 			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
4020 			++count;
4021 		}
4022 	}
4023 	EVUTIL_ASSERT(count == base->event_count_active);
4024 }
4025