1 /*
2 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
3 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27 #include "event2/event-config.h"
28 #include "evconfig-private.h"
29
30 #ifdef _WIN32
31 #include <winsock2.h>
32 #define WIN32_LEAN_AND_MEAN
33 #include <windows.h>
34 #undef WIN32_LEAN_AND_MEAN
35 #endif
36 #include <sys/types.h>
37 #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
38 #include <sys/time.h>
39 #endif
40 #include <sys/queue.h>
41 #ifdef EVENT__HAVE_SYS_SOCKET_H
42 #include <sys/socket.h>
43 #endif
44 #include <stdio.h>
45 #include <stdlib.h>
46 #ifdef EVENT__HAVE_UNISTD_H
47 #include <unistd.h>
48 #endif
49 #include <ctype.h>
50 #include <errno.h>
51 #include <signal.h>
52 #include <string.h>
53 #include <time.h>
54 #include <limits.h>
55
56 #include "event2/event.h"
57 #include "event2/event_struct.h"
58 #include "event2/event_compat.h"
59 #include "event-internal.h"
60 #include "defer-internal.h"
61 #include "evthread-internal.h"
62 #include "event2/thread.h"
63 #include "event2/util.h"
64 #include "log-internal.h"
65 #include "evmap-internal.h"
66 #include "iocp-internal.h"
67 #include "changelist-internal.h"
68 #define HT_NO_CACHE_HASH_VALUES
69 #include "ht-internal.h"
70 #include "util-internal.h"
71
72
73 #ifdef EVENT__HAVE_WORKING_KQUEUE
74 #include "kqueue-internal.h"
75 #endif
76
77 #ifdef EVENT__HAVE_EVENT_PORTS
78 extern const struct eventop evportops;
79 #endif
80 #ifdef EVENT__HAVE_SELECT
81 extern const struct eventop selectops;
82 #endif
83 #ifdef EVENT__HAVE_POLL
84 extern const struct eventop pollops;
85 #endif
86 #ifdef EVENT__HAVE_EPOLL
87 extern const struct eventop epollops;
88 #endif
89 #ifdef EVENT__HAVE_WORKING_KQUEUE
90 extern const struct eventop kqops;
91 #endif
92 #ifdef EVENT__HAVE_DEVPOLL
93 extern const struct eventop devpollops;
94 #endif
95 #ifdef _WIN32
96 extern const struct eventop win32ops;
97 #endif
98
99 /* Array of backends in order of preference. */
100 static const struct eventop *eventops[] = {
101 #ifdef EVENT__HAVE_EVENT_PORTS
102 &evportops,
103 #endif
104 #ifdef EVENT__HAVE_WORKING_KQUEUE
105 &kqops,
106 #endif
107 #ifdef EVENT__HAVE_EPOLL
108 &epollops,
109 #endif
110 #ifdef EVENT__HAVE_DEVPOLL
111 &devpollops,
112 #endif
113 #ifdef EVENT__HAVE_POLL
114 &pollops,
115 #endif
116 #ifdef EVENT__HAVE_SELECT
117 &selectops,
118 #endif
119 #ifdef _WIN32
120 &win32ops,
121 #endif
122 NULL
123 };
124
125 /* Global state; deprecated */
126 struct event_base *event_global_current_base_ = NULL;
127 #define current_base event_global_current_base_
128
129 /* Global state */
130
131 static void *event_self_cbarg_ptr_ = NULL;
132
133 /* Prototypes */
134 static void event_queue_insert_active(struct event_base *, struct event_callback *);
135 static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
136 static void event_queue_insert_timeout(struct event_base *, struct event *);
137 static void event_queue_insert_inserted(struct event_base *, struct event *);
138 static void event_queue_remove_active(struct event_base *, struct event_callback *);
139 static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
140 static void event_queue_remove_timeout(struct event_base *, struct event *);
141 static void event_queue_remove_inserted(struct event_base *, struct event *);
142 static void event_queue_make_later_events_active(struct event_base *base);
143
144 static int evthread_make_base_notifiable_nolock_(struct event_base *base);
145 static int event_del_(struct event *ev, int blocking);
146
147 #ifdef USE_REINSERT_TIMEOUT
148 /* This code seems buggy; only turn it on if we find out what the trouble is. */
149 static void event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
150 #endif
151
152 static int event_haveevents(struct event_base *);
153
154 static int event_process_active(struct event_base *);
155
156 static int timeout_next(struct event_base *, struct timeval **);
157 static void timeout_process(struct event_base *);
158
159 static inline void event_signal_closure(struct event_base *, struct event *ev);
160 static inline void event_persist_closure(struct event_base *, struct event *ev);
161
162 static int evthread_notify_base(struct event_base *base);
163
164 static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
165 struct event *ev);
166
167 #ifndef EVENT__DISABLE_DEBUG_MODE
168 /* These functions implement a hashtable of which 'struct event *' structures
169 * have been setup or added. We don't want to trust the content of the struct
170 * event itself, since we're trying to work through cases where an event gets
171 * clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
172 */
173
174 struct event_debug_entry {
175 HT_ENTRY(event_debug_entry) node;
176 const struct event *ptr;
177 unsigned added : 1;
178 };
179
180 static inline unsigned
181 hash_debug_entry(const struct event_debug_entry *e)
182 {
183 /* We need to do this silliness to convince compilers that we
184 * honestly mean to cast e->ptr to an integer, and discard any
185 * part of it that doesn't fit in an unsigned.
186 */
187 unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
188 /* Our hashtable implementation is pretty sensitive to low bits,
189 * and every struct event is over 64 bytes in size, so we can
190 * just say >>6. */
191 return (u >> 6);
192 }
193
194 static inline int
195 eq_debug_entry(const struct event_debug_entry *a,
196 const struct event_debug_entry *b)
197 {
198 return a->ptr == b->ptr;
199 }
200
201 int event_debug_mode_on_ = 0;
202 /* Set if it's too late to enable event_debug_mode. */
203 static int event_debug_mode_too_late = 0;
204 #ifndef EVENT__DISABLE_THREAD_SUPPORT
205 static void *event_debug_map_lock_ = NULL;
206 #endif
207 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
208 HT_INITIALIZER();
209
210 HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
211 eq_debug_entry)
212 HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
213 eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
214
215 /* Macro: record that ev is now setup (that is, ready for an add) */
216 #define event_debug_note_setup_(ev) do { \
217 if (event_debug_mode_on_) { \
218 struct event_debug_entry *dent,find; \
219 find.ptr = (ev); \
220 EVLOCK_LOCK(event_debug_map_lock_, 0); \
221 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
222 if (dent) { \
223 dent->added = 0; \
224 } else { \
225 dent = mm_malloc(sizeof(*dent)); \
226 if (!dent) \
227 event_err(1, \
228 "Out of memory in debugging code"); \
229 dent->ptr = (ev); \
230 dent->added = 0; \
231 HT_INSERT(event_debug_map, &global_debug_map, dent); \
232 } \
233 EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
234 } \
235 event_debug_mode_too_late = 1; \
236 } while (0)
237 /* Macro: record that ev is no longer setup */
238 #define event_debug_note_teardown_(ev) do { \
239 if (event_debug_mode_on_) { \
240 struct event_debug_entry *dent,find; \
241 find.ptr = (ev); \
242 EVLOCK_LOCK(event_debug_map_lock_, 0); \
243 dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
244 if (dent) \
245 mm_free(dent); \
246 EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
247 } \
248 event_debug_mode_too_late = 1; \
249 } while (0)
250 /* Macro: record that ev is now added */
251 #define event_debug_note_add_(ev) do { \
252 if (event_debug_mode_on_) { \
253 struct event_debug_entry *dent,find; \
254 find.ptr = (ev); \
255 EVLOCK_LOCK(event_debug_map_lock_, 0); \
256 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
257 if (dent) { \
258 dent->added = 1; \
259 } else { \
260 event_errx(EVENT_ERR_ABORT_, \
261 "%s: noting an add on a non-setup event %p" \
262 " (events: 0x%x, fd: "EV_SOCK_FMT \
263 ", flags: 0x%x)", \
264 __func__, (ev), (ev)->ev_events, \
265 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
266 } \
267 EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
268 } \
269 event_debug_mode_too_late = 1; \
270 } while (0)
271 /* Macro: record that ev is no longer added */
272 #define event_debug_note_del_(ev) do { \
273 if (event_debug_mode_on_) { \
274 struct event_debug_entry *dent,find; \
275 find.ptr = (ev); \
276 EVLOCK_LOCK(event_debug_map_lock_, 0); \
277 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
278 if (dent) { \
279 dent->added = 0; \
280 } else { \
281 event_errx(EVENT_ERR_ABORT_, \
282 "%s: noting a del on a non-setup event %p" \
283 " (events: 0x%x, fd: "EV_SOCK_FMT \
284 ", flags: 0x%x)", \
285 __func__, (ev), (ev)->ev_events, \
286 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
287 } \
288 EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
289 } \
290 event_debug_mode_too_late = 1; \
291 } while (0)
292 /* Macro: assert that ev is setup (i.e., okay to add or inspect) */
293 #define event_debug_assert_is_setup_(ev) do { \
294 if (event_debug_mode_on_) { \
295 struct event_debug_entry *dent,find; \
296 find.ptr = (ev); \
297 EVLOCK_LOCK(event_debug_map_lock_, 0); \
298 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
299 if (!dent) { \
300 event_errx(EVENT_ERR_ABORT_, \
301 "%s called on a non-initialized event %p" \
302 " (events: 0x%x, fd: "EV_SOCK_FMT\
303 ", flags: 0x%x)", \
304 __func__, (ev), (ev)->ev_events, \
305 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
306 } \
307 EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
308 } \
309 } while (0)
310 /* Macro: assert that ev is not added (i.e., okay to tear down or set
311 * up again) */
312 #define event_debug_assert_not_added_(ev) do { \
313 if (event_debug_mode_on_) { \
314 struct event_debug_entry *dent,find; \
315 find.ptr = (ev); \
316 EVLOCK_LOCK(event_debug_map_lock_, 0); \
317 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
318 if (dent && dent->added) { \
319 event_errx(EVENT_ERR_ABORT_, \
320 "%s called on an already added event %p" \
321 " (events: 0x%x, fd: "EV_SOCK_FMT", " \
322 "flags: 0x%x)", \
323 __func__, (ev), (ev)->ev_events, \
324 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
325 } \
326 EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
327 } \
328 } while (0)
329 #else
330 #define event_debug_note_setup_(ev) \
331 ((void)0)
332 #define event_debug_note_teardown_(ev) \
333 ((void)0)
334 #define event_debug_note_add_(ev) \
335 ((void)0)
336 #define event_debug_note_del_(ev) \
337 ((void)0)
338 #define event_debug_assert_is_setup_(ev) \
339 ((void)0)
340 #define event_debug_assert_not_added_(ev) \
341 ((void)0)
342 #endif
343
344 #define EVENT_BASE_ASSERT_LOCKED(base) \
345 EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
346
347 /* How often (in seconds) do we check for changes in wall clock time relative
348 * to monotonic time? Set this to -1 for 'never.' */
349 #define CLOCK_SYNC_INTERVAL 5
350
351 /** Set 'tp' to the current time according to 'base'. We must hold the lock
352 * on 'base'. If there is a cached time, return it. Otherwise, use
353 * clock_gettime or gettimeofday as appropriate to find out the right time.
354 * Return 0 on success, -1 on failure.
355 */
356 static int
357 gettime(struct event_base *base, struct timeval *tp)
358 {
359 EVENT_BASE_ASSERT_LOCKED(base);
360
361 if (base->tv_cache.tv_sec) {
362 *tp = base->tv_cache;
363 return (0);
364 }
365
366 if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
367 return -1;
368 }
369
370 if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
371 < tp->tv_sec) {
372 struct timeval tv;
373 evutil_gettimeofday(&tv,NULL);
374 evutil_timersub(&tv, tp, &base->tv_clock_diff);
375 base->last_updated_clock_diff = tp->tv_sec;
376 }
377
378 return 0;
379 }
380
381 int
382 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
383 {
384 int r;
385 if (!base) {
386 base = current_base;
387 if (!current_base)
388 return evutil_gettimeofday(tv, NULL);
389 }
390
391 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
392 if (base->tv_cache.tv_sec == 0) {
393 r = evutil_gettimeofday(tv, NULL);
394 } else {
395 evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
396 r = 0;
397 }
398 EVBASE_RELEASE_LOCK(base, th_base_lock);
399 return r;
400 }
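/* Example (sketch): from inside an event callback, the cached time avoids an
 * extra syscall per call while the loop is running; 'base' here stands for
 * the caller's event_base.
 *
 *     struct timeval now;
 *     event_base_gettimeofday_cached(base, &now);
 */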
401
402 /** Make 'base' have no current cached time. */
403 static inline void
404 clear_time_cache(struct event_base *base)
405 {
406 base->tv_cache.tv_sec = 0;
407 }
408
409 /** Replace the cached time in 'base' with the current time. */
410 static inline void
411 update_time_cache(struct event_base *base)
412 {
413 base->tv_cache.tv_sec = 0;
414 if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
415 gettime(base, &base->tv_cache);
416 }
417
418 int
419 event_base_update_cache_time(struct event_base *base)
420 {
421
422 if (!base) {
423 base = current_base;
424 if (!current_base)
425 return -1;
426 }
427
428 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
429 if (base->running_loop)
430 update_time_cache(base);
431 EVBASE_RELEASE_LOCK(base, th_base_lock);
432 return 0;
433 }
434
435 static inline struct event *
436 event_callback_to_event(struct event_callback *evcb)
437 {
438 EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
439 return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
440 }
441
442 static inline struct event_callback *
443 event_to_event_callback(struct event *ev)
444 {
445 return &ev->ev_evcallback;
446 }
447
448 struct event_base *
449 event_init(void)
450 {
451 struct event_base *base = event_base_new_with_config(NULL);
452
453 if (base == NULL) {
454 event_errx(1, "%s: Unable to construct event_base", __func__);
455 return NULL;
456 }
457
458 current_base = base;
459
460 return (base);
461 }
462
463 struct event_base *
464 event_base_new(void)
465 {
466 struct event_base *base = NULL;
467 struct event_config *cfg = event_config_new();
468 if (cfg) {
469 base = event_base_new_with_config(cfg);
470 event_config_free(cfg);
471 }
472 return base;
473 }
474
475 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
476 * avoid. */
477 static int
478 event_config_is_avoided_method(const struct event_config *cfg,
479 const char *method)
480 {
481 struct event_config_entry *entry;
482
483 TAILQ_FOREACH(entry, &cfg->entries, next) {
484 if (entry->avoid_method != NULL &&
485 strcmp(entry->avoid_method, method) == 0)
486 return (1);
487 }
488
489 return (0);
490 }
491
492 /** Return true iff 'method' is disabled according to the environment. */
493 static int
494 event_is_method_disabled(const char *name)
495 {
496 char environment[64];
497 int i;
498
499 evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
500 for (i = 8; environment[i] != '\0'; ++i)
501 environment[i] = EVUTIL_TOUPPER_(environment[i]);
502 /* Note that evutil_getenv_() ignores the environment entirely if
503 * we're setuid */
504 return (evutil_getenv_(environment) != NULL);
505 }
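/* Example (sketch): each backend can be disabled at run time by exporting
 * EVENT_NO<METHOD> before starting the program (ignored when setuid), e.g.
 *
 *     EVENT_NOEPOLL=1 ./myprog     skip epoll, fall back to poll/select
 *     EVENT_NOKQUEUE=1 ./myprog    skip kqueue on BSD/macOS
 */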
506
507 int
508 event_base_get_features(const struct event_base *base)
509 {
510 return base->evsel->features;
511 }
512
513 void
514 event_enable_debug_mode(void)
515 {
516 #ifndef EVENT__DISABLE_DEBUG_MODE
517 if (event_debug_mode_on_)
518 event_errx(1, "%s was called twice!", __func__);
519 if (event_debug_mode_too_late)
520 event_errx(1, "%s must be called *before* creating any events "
521 "or event_bases",__func__);
522
523 event_debug_mode_on_ = 1;
524
525 HT_INIT(event_debug_map, &global_debug_map);
526 #endif
527 }
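/* Example (sketch): debug mode must be switched on before any event or
 * event_base exists, typically first thing in main():
 *
 *     event_enable_debug_mode();
 *     struct event_base *base = event_base_new();
 */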
528
529 void
530 event_disable_debug_mode(void)
531 {
532 #ifndef EVENT__DISABLE_DEBUG_MODE
533 struct event_debug_entry **ent, *victim;
534
535 EVLOCK_LOCK(event_debug_map_lock_, 0);
536 for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
537 victim = *ent;
538 ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
539 mm_free(victim);
540 }
541 HT_CLEAR(event_debug_map, &global_debug_map);
542 EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
543
544 event_debug_mode_on_ = 0;
545 #endif
546 }
547
548 struct event_base *
549 event_base_new_with_config(const struct event_config *cfg)
550 {
551 int i;
552 struct event_base *base;
553 int should_check_environment;
554
555 #ifndef EVENT__DISABLE_DEBUG_MODE
556 event_debug_mode_too_late = 1;
557 #endif
558
559 if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
560 event_warn("%s: calloc", __func__);
561 return NULL;
562 }
563
564 if (cfg)
565 base->flags = cfg->flags;
566
567 should_check_environment =
568 !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
569
570 {
571 struct timeval tmp;
572 int precise_time =
573 cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
574 int flags;
575 if (should_check_environment && !precise_time) {
576 precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
577 			if (precise_time) base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
578 }
579 flags = precise_time ? EV_MONOT_PRECISE : 0;
580 evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
581
582 gettime(base, &tmp);
583 }
584
585 min_heap_ctor_(&base->timeheap);
586
587 base->sig.ev_signal_pair[0] = -1;
588 base->sig.ev_signal_pair[1] = -1;
589 base->th_notify_fd[0] = -1;
590 base->th_notify_fd[1] = -1;
591
592 TAILQ_INIT(&base->active_later_queue);
593
594 evmap_io_initmap_(&base->io);
595 evmap_signal_initmap_(&base->sigmap);
596 event_changelist_init_(&base->changelist);
597
598 base->evbase = NULL;
599
600 if (cfg) {
601 memcpy(&base->max_dispatch_time,
602 &cfg->max_dispatch_interval, sizeof(struct timeval));
603 base->limit_callbacks_after_prio =
604 cfg->limit_callbacks_after_prio;
605 } else {
606 base->max_dispatch_time.tv_sec = -1;
607 base->limit_callbacks_after_prio = 1;
608 }
609 if (cfg && cfg->max_dispatch_callbacks >= 0) {
610 base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
611 } else {
612 base->max_dispatch_callbacks = INT_MAX;
613 }
614 if (base->max_dispatch_callbacks == INT_MAX &&
615 base->max_dispatch_time.tv_sec == -1)
616 base->limit_callbacks_after_prio = INT_MAX;
617
618 for (i = 0; eventops[i] && !base->evbase; i++) {
619 if (cfg != NULL) {
620 /* determine if this backend should be avoided */
621 if (event_config_is_avoided_method(cfg,
622 eventops[i]->name))
623 continue;
624 if ((eventops[i]->features & cfg->require_features)
625 != cfg->require_features)
626 continue;
627 }
628
629 /* also obey the environment variables */
630 if (should_check_environment &&
631 event_is_method_disabled(eventops[i]->name))
632 continue;
633
634 base->evsel = eventops[i];
635
636 base->evbase = base->evsel->init(base);
637 }
638
639 if (base->evbase == NULL) {
640 event_warnx("%s: no event mechanism available",
641 __func__);
642 base->evsel = NULL;
643 event_base_free(base);
644 return NULL;
645 }
646
647 if (evutil_getenv_("EVENT_SHOW_METHOD"))
648 event_msgx("libevent using: %s", base->evsel->name);
649
650 /* allocate a single active event queue */
651 if (event_base_priority_init(base, 1) < 0) {
652 event_base_free(base);
653 return NULL;
654 }
655
656 /* prepare for threading */
657
658 #ifndef EVENT__DISABLE_THREAD_SUPPORT
659 if (EVTHREAD_LOCKING_ENABLED() &&
660 (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
661 int r;
662 EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
663 EVTHREAD_ALLOC_COND(base->current_event_cond);
664 r = evthread_make_base_notifiable(base);
665 if (r<0) {
666 event_warnx("%s: Unable to make base notifiable.", __func__);
667 event_base_free(base);
668 return NULL;
669 }
670 }
671 #endif
672
673 #ifdef _WIN32
674 if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
675 event_base_start_iocp_(base, cfg->n_cpus_hint);
676 #endif
677
678 return (base);
679 }
680
681 int
682 event_base_start_iocp_(struct event_base *base, int n_cpus)
683 {
684 #ifdef _WIN32
685 if (base->iocp)
686 return 0;
687 base->iocp = event_iocp_port_launch_(n_cpus);
688 if (!base->iocp) {
689 event_warnx("%s: Couldn't launch IOCP", __func__);
690 return -1;
691 }
692 return 0;
693 #else
694 return -1;
695 #endif
696 }
697
698 void
699 event_base_stop_iocp_(struct event_base *base)
700 {
701 #ifdef _WIN32
702 int rv;
703
704 if (!base->iocp)
705 return;
706 rv = event_iocp_shutdown_(base->iocp, -1);
707 EVUTIL_ASSERT(rv >= 0);
708 base->iocp = NULL;
709 #endif
710 }
711
712 static int
713 event_base_cancel_single_callback_(struct event_base *base,
714 struct event_callback *evcb,
715 int run_finalizers)
716 {
717 int result = 0;
718
719 if (evcb->evcb_flags & EVLIST_INIT) {
720 struct event *ev = event_callback_to_event(evcb);
721 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
722 event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
723 result = 1;
724 }
725 } else {
726 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
727 event_callback_cancel_nolock_(base, evcb, 1);
728 EVBASE_RELEASE_LOCK(base, th_base_lock);
729 result = 1;
730 }
731
732 if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
733 switch (evcb->evcb_closure) {
734 case EV_CLOSURE_EVENT_FINALIZE:
735 case EV_CLOSURE_EVENT_FINALIZE_FREE: {
736 struct event *ev = event_callback_to_event(evcb);
737 ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
738 if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
739 mm_free(ev);
740 break;
741 }
742 case EV_CLOSURE_CB_FINALIZE:
743 evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
744 break;
745 default:
746 break;
747 }
748 }
749 return result;
750 }
751
752 static void
753 event_base_free_(struct event_base *base, int run_finalizers)
754 {
755 int i, n_deleted=0;
756 struct event *ev;
757 /* XXXX grab the lock? If there is contention when one thread frees
758 * the base, then the contending thread will be very sad soon. */
759
760 /* event_base_free(NULL) is how to free the current_base if we
761 * made it with event_init and forgot to hold a reference to it. */
762 if (base == NULL && current_base)
763 base = current_base;
764 /* Don't actually free NULL. */
765 if (base == NULL) {
766 event_warnx("%s: no base to free", __func__);
767 return;
768 }
769 /* XXX(niels) - check for internal events first */
770
771 #ifdef _WIN32
772 event_base_stop_iocp_(base);
773 #endif
774
775 /* threading fds if we have them */
776 if (base->th_notify_fd[0] != -1) {
777 event_del(&base->th_notify);
778 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
779 if (base->th_notify_fd[1] != -1)
780 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
781 base->th_notify_fd[0] = -1;
782 base->th_notify_fd[1] = -1;
783 event_debug_unassign(&base->th_notify);
784 }
785
786 /* Delete all non-internal events. */
787 evmap_delete_all_(base);
788
789 while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
790 event_del(ev);
791 ++n_deleted;
792 }
793 for (i = 0; i < base->n_common_timeouts; ++i) {
794 struct common_timeout_list *ctl =
795 base->common_timeout_queues[i];
796 event_del(&ctl->timeout_event); /* Internal; doesn't count */
797 event_debug_unassign(&ctl->timeout_event);
798 for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
799 struct event *next = TAILQ_NEXT(ev,
800 ev_timeout_pos.ev_next_with_common_timeout);
801 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
802 event_del(ev);
803 ++n_deleted;
804 }
805 ev = next;
806 }
807 mm_free(ctl);
808 }
809 if (base->common_timeout_queues)
810 mm_free(base->common_timeout_queues);
811
812 for (i = 0; i < base->nactivequeues; ++i) {
813 struct event_callback *evcb, *next;
814 for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
815 next = TAILQ_NEXT(evcb, evcb_active_next);
816 n_deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
817 evcb = next;
818 }
819 }
820 {
821 struct event_callback *evcb;
822 while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
823 n_deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
824 }
825 }
826
827
828 if (n_deleted)
829 event_debug(("%s: %d events were still set in base",
830 __func__, n_deleted));
831
832 while (LIST_FIRST(&base->once_events)) {
833 struct event_once *eonce = LIST_FIRST(&base->once_events);
834 LIST_REMOVE(eonce, next_once);
835 mm_free(eonce);
836 }
837
838 if (base->evsel != NULL && base->evsel->dealloc != NULL)
839 base->evsel->dealloc(base);
840
841 for (i = 0; i < base->nactivequeues; ++i)
842 EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
843
844 EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
845 min_heap_dtor_(&base->timeheap);
846
847 mm_free(base->activequeues);
848
849 evmap_io_clear_(&base->io);
850 evmap_signal_clear_(&base->sigmap);
851 event_changelist_freemem_(&base->changelist);
852
853 EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
854 EVTHREAD_FREE_COND(base->current_event_cond);
855
856 /* If we're freeing current_base, there won't be a current_base. */
857 if (base == current_base)
858 current_base = NULL;
859 mm_free(base);
860 }
861
862 void
863 event_base_free_nofinalize(struct event_base *base)
864 {
865 event_base_free_(base, 0);
866 }
867
868 void
869 event_base_free(struct event_base *base)
870 {
871 event_base_free_(base, 1);
872 }
873
874 /* Fake eventop; used to disable the backend temporarily inside event_reinit
875 * so that we can call event_del() on an event without telling the backend.
876 */
877 static int
878 nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
879 short events, void *fdinfo)
880 {
881 return 0;
882 }
883 const struct eventop nil_eventop = {
884 "nil",
885 NULL, /* init: unused. */
886 NULL, /* add: unused. */
887 nil_backend_del, /* del: used, so needs to be killed. */
888 NULL, /* dispatch: unused. */
889 NULL, /* dealloc: unused. */
890 0, 0, 0
891 };
892
893 /* reinitialize the event base after a fork */
894 int
895 event_reinit(struct event_base *base)
896 {
897 const struct eventop *evsel;
898 int res = 0;
899 int was_notifiable = 0;
900 int had_signal_added = 0;
901
902 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
903
904 evsel = base->evsel;
905
906 /* check if this event mechanism requires reinit on the backend */
907 if (evsel->need_reinit) {
908 /* We're going to call event_del() on our notify events (the
909 * ones that tell about signals and wakeup events). But we
910 * don't actually want to tell the backend to change its
911 * state, since it might still share some resource (a kqueue,
912 * an epoll fd) with the parent process, and we don't want to
913 		 * delete the fds from _that_ backend, so we temporarily stub out
914 * the evsel with a replacement.
915 */
916 base->evsel = &nil_eventop;
917 }
918
919 /* We need to re-create a new signal-notification fd and a new
920 * thread-notification fd. Otherwise, we'll still share those with
921 * the parent process, which would make any notification sent to them
922 * get received by one or both of the event loops, more or less at
923 * random.
924 */
925 if (base->sig.ev_signal_added) {
926 event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
927 event_debug_unassign(&base->sig.ev_signal);
928 memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
929 if (base->sig.ev_signal_pair[0] != -1)
930 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
931 if (base->sig.ev_signal_pair[1] != -1)
932 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
933 had_signal_added = 1;
934 base->sig.ev_signal_added = 0;
935 }
936 if (base->th_notify_fn != NULL) {
937 was_notifiable = 1;
938 base->th_notify_fn = NULL;
939 }
940 if (base->th_notify_fd[0] != -1) {
941 event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
942 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
943 if (base->th_notify_fd[1] != -1)
944 EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
945 base->th_notify_fd[0] = -1;
946 base->th_notify_fd[1] = -1;
947 event_debug_unassign(&base->th_notify);
948 }
949
950 /* Replace the original evsel. */
951 base->evsel = evsel;
952
953 if (evsel->need_reinit) {
954 /* Reconstruct the backend through brute-force, so that we do
955 * not share any structures with the parent process. For some
956 * backends, this is necessary: epoll and kqueue, for
957 * instance, have events associated with a kernel
958 		 * structure. If we didn't reinitialize, we'd share that
959 * structure with the parent process, and any changes made by
960 * the parent would affect our backend's behavior (and vice
961 * versa).
962 */
963 if (base->evsel->dealloc != NULL)
964 base->evsel->dealloc(base);
965 base->evbase = evsel->init(base);
966 if (base->evbase == NULL) {
967 event_errx(1,
968 "%s: could not reinitialize event mechanism",
969 __func__);
970 res = -1;
971 goto done;
972 }
973
974 /* Empty out the changelist (if any): we are starting from a
975 * blank slate. */
976 event_changelist_freemem_(&base->changelist);
977
978 /* Tell the event maps to re-inform the backend about all
979 * pending events. This will make the signal notification
980 * event get re-created if necessary. */
981 if (evmap_reinit_(base) < 0)
982 res = -1;
983 } else {
984 if (had_signal_added)
985 res = evsig_init_(base);
986 }
987
988 /* If we were notifiable before, and nothing just exploded, become
989 * notifiable again. */
990 if (was_notifiable && res == 0)
991 res = evthread_make_base_notifiable_nolock_(base);
992
993 done:
994 EVBASE_RELEASE_LOCK(base, th_base_lock);
995 return (res);
996 }
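/* Example (sketch): after fork(), the child must not keep using the parent's
 * backend state; 'base' stands for the event_base created before forking.
 *
 *     if (fork() == 0) {
 *         event_reinit(base);
 *         ... child may now add events and run event_base_dispatch(base) ...
 *     }
 */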
997
998 /* Get the monotonic time for this event_base' timer */
999 int
1000 event_gettime_monotonic(struct event_base *base, struct timeval *tv)
1001 {
1002 int rv = -1;
1003
1004 if (base && tv) {
1005 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1006 rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
1007 EVBASE_RELEASE_LOCK(base, th_base_lock);
1008 }
1009
1010 return rv;
1011 }
1012
1013 const char **
1014 event_get_supported_methods(void)
1015 {
1016 static const char **methods = NULL;
1017 const struct eventop **method;
1018 const char **tmp;
1019 int i = 0, k;
1020
1021 /* count all methods */
1022 for (method = &eventops[0]; *method != NULL; ++method) {
1023 ++i;
1024 }
1025
1026 /* allocate one more than we need for the NULL pointer */
1027 tmp = mm_calloc((i + 1), sizeof(char *));
1028 if (tmp == NULL)
1029 return (NULL);
1030
1031 /* populate the array with the supported methods */
1032 for (k = 0, i = 0; eventops[k] != NULL; ++k) {
1033 tmp[i++] = eventops[k]->name;
1034 }
1035 tmp[i] = NULL;
1036
1037 if (methods != NULL)
1038 mm_free((char**)methods);
1039
1040 methods = tmp;
1041
1042 return (methods);
1043 }
1044
1045 struct event_config *
1046 event_config_new(void)
1047 {
1048 struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
1049
1050 if (cfg == NULL)
1051 return (NULL);
1052
1053 TAILQ_INIT(&cfg->entries);
1054 cfg->max_dispatch_interval.tv_sec = -1;
1055 cfg->max_dispatch_callbacks = INT_MAX;
1056 cfg->limit_callbacks_after_prio = 1;
1057
1058 return (cfg);
1059 }
1060
1061 static void
1062 event_config_entry_free(struct event_config_entry *entry)
1063 {
1064 if (entry->avoid_method != NULL)
1065 mm_free((char *)entry->avoid_method);
1066 mm_free(entry);
1067 }
1068
1069 void
1070 event_config_free(struct event_config *cfg)
1071 {
1072 struct event_config_entry *entry;
1073
1074 while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
1075 TAILQ_REMOVE(&cfg->entries, entry, next);
1076 event_config_entry_free(entry);
1077 }
1078 mm_free(cfg);
1079 }
1080
1081 int
1082 event_config_set_flag(struct event_config *cfg, int flag)
1083 {
1084 if (!cfg)
1085 return -1;
1086 cfg->flags |= flag;
1087 return 0;
1088 }
1089
1090 int
1091 event_config_avoid_method(struct event_config *cfg, const char *method)
1092 {
1093 struct event_config_entry *entry = mm_malloc(sizeof(*entry));
1094 if (entry == NULL)
1095 return (-1);
1096
1097 if ((entry->avoid_method = mm_strdup(method)) == NULL) {
1098 mm_free(entry);
1099 return (-1);
1100 }
1101
1102 TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
1103
1104 return (0);
1105 }
1106
1107 int
1108 event_config_require_features(struct event_config *cfg,
1109 int features)
1110 {
1111 if (!cfg)
1112 return (-1);
1113 cfg->require_features = features;
1114 return (0);
1115 }
1116
1117 int
1118 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
1119 {
1120 if (!cfg)
1121 return (-1);
1122 cfg->n_cpus_hint = cpus;
1123 return (0);
1124 }
1125
1126 int
1127 event_config_set_max_dispatch_interval(struct event_config *cfg,
1128 const struct timeval *max_interval, int max_callbacks, int min_priority)
1129 {
1130 if (max_interval)
1131 memcpy(&cfg->max_dispatch_interval, max_interval,
1132 sizeof(struct timeval));
1133 else
1134 cfg->max_dispatch_interval.tv_sec = -1;
1135 cfg->max_dispatch_callbacks =
1136 max_callbacks >= 0 ? max_callbacks : INT_MAX;
1137 if (min_priority < 0)
1138 min_priority = 0;
1139 cfg->limit_callbacks_after_prio = min_priority;
1140 return (0);
1141 }
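/* Example (sketch): typical lifetime of an event_config; the feature constant
 * is one of the EV_FEATURE_* values from event2/event.h.
 *
 *     struct event_config *cfg = event_config_new();
 *     event_config_avoid_method(cfg, "select");
 *     event_config_require_features(cfg, EV_FEATURE_O1);
 *     struct event_base *base = event_base_new_with_config(cfg);
 *     event_config_free(cfg);
 */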
1142
1143 int
1144 event_priority_init(int npriorities)
1145 {
1146 return event_base_priority_init(current_base, npriorities);
1147 }
1148
1149 int
1150 event_base_priority_init(struct event_base *base, int npriorities)
1151 {
1152 int i, r;
1153 r = -1;
1154
1155 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1156
1157 if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
1158 || npriorities >= EVENT_MAX_PRIORITIES)
1159 goto err;
1160
1161 if (npriorities == base->nactivequeues)
1162 goto ok;
1163
1164 if (base->nactivequeues) {
1165 mm_free(base->activequeues);
1166 base->nactivequeues = 0;
1167 }
1168
1169 /* Allocate our priority queues */
1170 base->activequeues = (struct evcallback_list *)
1171 mm_calloc(npriorities, sizeof(struct evcallback_list));
1172 if (base->activequeues == NULL) {
1173 event_warn("%s: calloc", __func__);
1174 goto err;
1175 }
1176 base->nactivequeues = npriorities;
1177
1178 for (i = 0; i < base->nactivequeues; ++i) {
1179 TAILQ_INIT(&base->activequeues[i]);
1180 }
1181
1182 ok:
1183 r = 0;
1184 err:
1185 EVBASE_RELEASE_LOCK(base, th_base_lock);
1186 return (r);
1187 }
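/* Example (sketch): give a base two priority levels and assign events to them;
 * 'urgent_ev' and 'bulk_ev' stand for events the caller has already created.
 * Priority 0 runs before priority 1 (see event_process_active below).
 *
 *     event_base_priority_init(base, 2);
 *     event_priority_set(urgent_ev, 0);
 *     event_priority_set(bulk_ev, 1);
 */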
1188
1189 int
1190 event_base_get_npriorities(struct event_base *base)
1191 {
1192
1193 int n;
1194 if (base == NULL)
1195 base = current_base;
1196
1197 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1198 n = base->nactivequeues;
1199 EVBASE_RELEASE_LOCK(base, th_base_lock);
1200 return (n);
1201 }
1202
1203 int
1204 event_base_get_num_events(struct event_base *base, unsigned int type)
1205 {
1206 int r = 0;
1207
1208 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1209
1210 if (type & EVENT_BASE_COUNT_ACTIVE)
1211 r += base->event_count_active;
1212
1213 if (type & EVENT_BASE_COUNT_VIRTUAL)
1214 r += base->virtual_event_count;
1215
1216 if (type & EVENT_BASE_COUNT_ADDED)
1217 r += base->event_count;
1218
1219 EVBASE_RELEASE_LOCK(base, th_base_lock);
1220
1221 return r;
1222 }
1223
1224 int
1225 event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
1226 {
1227 int r = 0;
1228
1229 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1230
1231 if (type & EVENT_BASE_COUNT_ACTIVE) {
1232 r += base->event_count_active_max;
1233 if (clear)
1234 base->event_count_active_max = 0;
1235 }
1236
1237 if (type & EVENT_BASE_COUNT_VIRTUAL) {
1238 r += base->virtual_event_count_max;
1239 if (clear)
1240 base->virtual_event_count_max = 0;
1241 }
1242
1243 if (type & EVENT_BASE_COUNT_ADDED) {
1244 r += base->event_count_max;
1245 if (clear)
1246 base->event_count_max = 0;
1247 }
1248
1249 EVBASE_RELEASE_LOCK(base, th_base_lock);
1250
1251 return r;
1252 }
1253
1254 /* Returns true iff we're currently watching any events. */
1255 static int
1256 event_haveevents(struct event_base *base)
1257 {
1258 /* Caller must hold th_base_lock */
1259 return (base->virtual_event_count > 0 || base->event_count > 0);
1260 }
1261
1262 /* "closure" function called when processing active signal events */
1263 static inline void
1264 event_signal_closure(struct event_base *base, struct event *ev)
1265 {
1266 short ncalls;
1267 int should_break;
1268
1269 /* Allows deletes to work */
1270 ncalls = ev->ev_ncalls;
1271 if (ncalls != 0)
1272 ev->ev_pncalls = &ncalls;
1273 EVBASE_RELEASE_LOCK(base, th_base_lock);
1274 while (ncalls) {
1275 ncalls--;
1276 ev->ev_ncalls = ncalls;
1277 if (ncalls == 0)
1278 ev->ev_pncalls = NULL;
1279 (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1280
1281 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1282 should_break = base->event_break;
1283 EVBASE_RELEASE_LOCK(base, th_base_lock);
1284
1285 if (should_break) {
1286 if (ncalls != 0)
1287 ev->ev_pncalls = NULL;
1288 return;
1289 }
1290 }
1291 }
1292
1293 /* Common timeouts are special timeouts that are handled as queues rather than
1294 * in the minheap. This is more efficient than the minheap if we happen to
1295  * know that we're going to get several thousand timeout events all with
1296 * the same timeout value.
1297 *
1298 * Since all our timeout handling code assumes timevals can be copied,
1299 * assigned, etc, we can't use "magic pointer" to encode these common
1300 * timeouts. Searching through a list to see if every timeout is common could
1301 * also get inefficient. Instead, we take advantage of the fact that tv_usec
1302 * is 32 bits long, but only uses 20 of those bits (since it can never be over
1303  * 999999.) We use the top bits to encode 4 bits of magic number, and 8 bits
1304  * of index into the event_base's array of common timeouts.
1305 */
1306
1307 #define MICROSECONDS_MASK COMMON_TIMEOUT_MICROSECONDS_MASK
1308 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1309 #define COMMON_TIMEOUT_IDX_SHIFT 20
1310 #define COMMON_TIMEOUT_MASK 0xf0000000
1311 #define COMMON_TIMEOUT_MAGIC 0x50000000
1312
1313 #define COMMON_TIMEOUT_IDX(tv) \
1314 (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
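/* Worked example (sketch): the third common timeout registered on a base
 * (index 2) with a duration of 5.000000 seconds is stored as
 *
 *     tv_sec  = 5
 *     tv_usec = COMMON_TIMEOUT_MAGIC | (2 << COMMON_TIMEOUT_IDX_SHIFT) | 0
 *             = 0x50200000
 *
 * so COMMON_TIMEOUT_IDX() recovers 2 and (tv_usec & MICROSECONDS_MASK)
 * recovers the real microseconds, 0. */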
1315
1316 /** Return true iff 'tv' is a common timeout in 'base' */
1317 static inline int
1318 is_common_timeout(const struct timeval *tv,
1319 const struct event_base *base)
1320 {
1321 int idx;
1322 if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1323 return 0;
1324 idx = COMMON_TIMEOUT_IDX(tv);
1325 return idx < base->n_common_timeouts;
1326 }
1327
1328 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1329 * one is a common timeout. */
1330 static inline int
1331 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1332 {
1333 return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1334 (tv2->tv_usec & ~MICROSECONDS_MASK);
1335 }
1336
1337 /** Requires that 'tv' is a common timeout. Return the corresponding
1338 * common_timeout_list. */
1339 static inline struct common_timeout_list *
1340 get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1341 {
1342 return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1343 }
1344
1345 #if 0
1346 static inline int
1347 common_timeout_ok(const struct timeval *tv,
1348 struct event_base *base)
1349 {
1350 const struct timeval *expect =
1351 &get_common_timeout_list(base, tv)->duration;
1352 return tv->tv_sec == expect->tv_sec &&
1353 tv->tv_usec == expect->tv_usec;
1354 }
1355 #endif
1356
1357 /* Add the timeout for the first event in the given common timeout list to the
1358 * event_base's minheap. */
1359 static void
1360 common_timeout_schedule(struct common_timeout_list *ctl,
1361 const struct timeval *now, struct event *head)
1362 {
1363 struct timeval timeout = head->ev_timeout;
1364 timeout.tv_usec &= MICROSECONDS_MASK;
1365 event_add_nolock_(&ctl->timeout_event, &timeout, 1);
1366 }
1367
1368 /* Callback: invoked when the timeout for a common timeout queue triggers.
1369 * This means that (at least) the first event in that queue should be run,
1370 * and the timeout should be rescheduled if there are more events. */
1371 static void
1372 common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1373 {
1374 struct timeval now;
1375 struct common_timeout_list *ctl = arg;
1376 struct event_base *base = ctl->base;
1377 struct event *ev = NULL;
1378 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1379 gettime(base, &now);
1380 while (1) {
1381 ev = TAILQ_FIRST(&ctl->events);
1382 if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1383 (ev->ev_timeout.tv_sec == now.tv_sec &&
1384 (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1385 break;
1386 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1387 event_active_nolock_(ev, EV_TIMEOUT, 1);
1388 }
1389 if (ev)
1390 common_timeout_schedule(ctl, &now, ev);
1391 EVBASE_RELEASE_LOCK(base, th_base_lock);
1392 }
1393
1394 #define MAX_COMMON_TIMEOUTS 256
1395
1396 const struct timeval *
1397 event_base_init_common_timeout(struct event_base *base,
1398 const struct timeval *duration)
1399 {
1400 int i;
1401 struct timeval tv;
1402 const struct timeval *result=NULL;
1403 struct common_timeout_list *new_ctl;
1404
1405 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1406 if (duration->tv_usec > 1000000) {
1407 memcpy(&tv, duration, sizeof(struct timeval));
1408 if (is_common_timeout(duration, base))
1409 tv.tv_usec &= MICROSECONDS_MASK;
1410 tv.tv_sec += tv.tv_usec / 1000000;
1411 tv.tv_usec %= 1000000;
1412 duration = &tv;
1413 }
1414 for (i = 0; i < base->n_common_timeouts; ++i) {
1415 const struct common_timeout_list *ctl =
1416 base->common_timeout_queues[i];
1417 if (duration->tv_sec == ctl->duration.tv_sec &&
1418 duration->tv_usec ==
1419 (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1420 EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1421 result = &ctl->duration;
1422 goto done;
1423 }
1424 }
1425 if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1426 event_warnx("%s: Too many common timeouts already in use; "
1427 "we only support %d per event_base", __func__,
1428 MAX_COMMON_TIMEOUTS);
1429 goto done;
1430 }
1431 if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1432 int n = base->n_common_timeouts < 16 ? 16 :
1433 base->n_common_timeouts*2;
1434 struct common_timeout_list **newqueues =
1435 mm_realloc(base->common_timeout_queues,
1436 			n*sizeof(struct common_timeout_list *));
1437 if (!newqueues) {
1438 event_warn("%s: realloc",__func__);
1439 goto done;
1440 }
1441 base->n_common_timeouts_allocated = n;
1442 base->common_timeout_queues = newqueues;
1443 }
1444 new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1445 if (!new_ctl) {
1446 event_warn("%s: calloc",__func__);
1447 goto done;
1448 }
1449 TAILQ_INIT(&new_ctl->events);
1450 new_ctl->duration.tv_sec = duration->tv_sec;
1451 new_ctl->duration.tv_usec =
1452 duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1453 (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1454 evtimer_assign(&new_ctl->timeout_event, base,
1455 common_timeout_callback, new_ctl);
1456 new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1457 event_priority_set(&new_ctl->timeout_event, 0);
1458 new_ctl->base = base;
1459 base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1460 result = &new_ctl->duration;
1461
1462 done:
1463 if (result)
1464 EVUTIL_ASSERT(is_common_timeout(result, base));
1465
1466 EVBASE_RELEASE_LOCK(base, th_base_lock);
1467 return result;
1468 }
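/* Example (sketch): many events sharing one 10-second timeout; 'ev1' and 'ev2'
 * stand for events already assigned on 'base'.
 *
 *     struct timeval ten_sec = { 10, 0 };
 *     const struct timeval *tv_common =
 *         event_base_init_common_timeout(base, &ten_sec);
 *     event_add(ev1, tv_common);
 *     event_add(ev2, tv_common);
 */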
1469
1470 /* Closure function invoked when we're activating a persistent event. */
1471 static inline void
1472 event_persist_closure(struct event_base *base, struct event *ev)
1473 {
1474 void (*evcb_callback)(evutil_socket_t, short, void *);
1475
1476 // Other fields of *ev that must be stored before executing
1477 evutil_socket_t evcb_fd;
1478 short evcb_res;
1479 void *evcb_arg;
1480
1481 /* reschedule the persistent event if we have a timeout. */
1482 if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1483 /* If there was a timeout, we want it to run at an interval of
1484 * ev_io_timeout after the last time it was _scheduled_ for,
1485 * not ev_io_timeout after _now_. If it fired for another
1486 * reason, though, the timeout ought to start ticking _now_. */
1487 struct timeval run_at, relative_to, delay, now;
1488 ev_uint32_t usec_mask = 0;
1489 EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1490 &ev->ev_io_timeout));
1491 gettime(base, &now);
1492 if (is_common_timeout(&ev->ev_timeout, base)) {
1493 delay = ev->ev_io_timeout;
1494 usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1495 delay.tv_usec &= MICROSECONDS_MASK;
1496 if (ev->ev_res & EV_TIMEOUT) {
1497 relative_to = ev->ev_timeout;
1498 relative_to.tv_usec &= MICROSECONDS_MASK;
1499 } else {
1500 relative_to = now;
1501 }
1502 } else {
1503 delay = ev->ev_io_timeout;
1504 if (ev->ev_res & EV_TIMEOUT) {
1505 relative_to = ev->ev_timeout;
1506 } else {
1507 relative_to = now;
1508 }
1509 }
1510 evutil_timeradd(&relative_to, &delay, &run_at);
1511 if (evutil_timercmp(&run_at, &now, <)) {
1512 /* Looks like we missed at least one invocation due to
1513 * a clock jump, not running the event loop for a
1514 * while, really slow callbacks, or
1515 * something. Reschedule relative to now.
1516 */
1517 evutil_timeradd(&now, &delay, &run_at);
1518 }
1519 run_at.tv_usec |= usec_mask;
1520 event_add_nolock_(ev, &run_at, 1);
1521 }
1522
1523 // Save our callback before we release the lock
1524 evcb_callback = ev->ev_callback;
1525 evcb_fd = ev->ev_fd;
1526 evcb_res = ev->ev_res;
1527 evcb_arg = ev->ev_arg;
1528
1529 // Release the lock
1530 EVBASE_RELEASE_LOCK(base, th_base_lock);
1531
1532 // Execute the callback
1533 (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
1534 }
1535
1536 /*
1537 Helper for event_process_active to process all the events in a single queue,
1538 releasing the lock as we go. This function requires that the lock be held
1539 when it's invoked. Returns -1 if we get a signal or an event_break that
1540 means we should stop processing any active events now. Otherwise returns
1541 the number of non-internal event_callbacks that we processed.
1542 */
1543 static int
1544 event_process_active_single_queue(struct event_base *base,
1545 struct evcallback_list *activeq,
1546 int max_to_process, const struct timeval *endtime)
1547 {
1548 struct event_callback *evcb;
1549 int count = 0;
1550
1551 EVUTIL_ASSERT(activeq != NULL);
1552
1553 for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
1554 struct event *ev=NULL;
1555 if (evcb->evcb_flags & EVLIST_INIT) {
1556 ev = event_callback_to_event(evcb);
1557
1558 if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
1559 event_queue_remove_active(base, evcb);
1560 else
1561 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1562 event_debug((
1563 "event_process_active: event: %p, %s%s%scall %p",
1564 ev,
1565 ev->ev_res & EV_READ ? "EV_READ " : " ",
1566 ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1567 ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
1568 ev->ev_callback));
1569 } else {
1570 event_queue_remove_active(base, evcb);
1571 event_debug(("event_process_active: event_callback %p, "
1572 "closure %d, call %p",
1573 evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
1574 }
1575
1576 if (!(evcb->evcb_flags & EVLIST_INTERNAL))
1577 ++count;
1578
1579
1580 base->current_event = evcb;
1581 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1582 base->current_event_waiters = 0;
1583 #endif
1584
1585 switch (evcb->evcb_closure) {
1586 case EV_CLOSURE_EVENT_SIGNAL:
1587 EVUTIL_ASSERT(ev != NULL);
1588 event_signal_closure(base, ev);
1589 break;
1590 case EV_CLOSURE_EVENT_PERSIST:
1591 EVUTIL_ASSERT(ev != NULL);
1592 event_persist_closure(base, ev);
1593 break;
1594 case EV_CLOSURE_EVENT: {
1595 void (*evcb_callback)(evutil_socket_t, short, void *);
1596 EVUTIL_ASSERT(ev != NULL);
1597 evcb_callback = *ev->ev_callback;
1598 EVBASE_RELEASE_LOCK(base, th_base_lock);
1599 evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);
1600 }
1601 break;
1602 case EV_CLOSURE_CB_SELF: {
1603 void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
1604 EVBASE_RELEASE_LOCK(base, th_base_lock);
1605 evcb_selfcb(evcb, evcb->evcb_arg);
1606 }
1607 break;
1608 case EV_CLOSURE_EVENT_FINALIZE:
1609 case EV_CLOSURE_EVENT_FINALIZE_FREE: {
1610 void (*evcb_evfinalize)(struct event *, void *);
1611 int evcb_closure = evcb->evcb_closure;
1612 EVUTIL_ASSERT(ev != NULL);
1613 base->current_event = NULL;
1614 evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
1615 EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1616 EVBASE_RELEASE_LOCK(base, th_base_lock);
1617 evcb_evfinalize(ev, ev->ev_arg);
1618 event_debug_note_teardown_(ev);
1619 if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
1620 mm_free(ev);
1621 }
1622 break;
1623 case EV_CLOSURE_CB_FINALIZE: {
1624 void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
1625 base->current_event = NULL;
1626 EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1627 EVBASE_RELEASE_LOCK(base, th_base_lock);
1628 evcb_cbfinalize(evcb, evcb->evcb_arg);
1629 }
1630 break;
1631 default:
1632 EVUTIL_ASSERT(0);
1633 }
1634
1635 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1636 base->current_event = NULL;
1637 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1638 if (base->current_event_waiters) {
1639 base->current_event_waiters = 0;
1640 EVTHREAD_COND_BROADCAST(base->current_event_cond);
1641 }
1642 #endif
1643
1644 if (base->event_break)
1645 return -1;
1646 if (count >= max_to_process)
1647 return count;
1648 if (count && endtime) {
1649 struct timeval now;
1650 update_time_cache(base);
1651 gettime(base, &now);
1652 if (evutil_timercmp(&now, endtime, >=))
1653 return count;
1654 }
1655 if (base->event_continue)
1656 break;
1657 }
1658 return count;
1659 }
1660
1661 /*
1662  * Active events are stored in priority queues. Lower-numbered priorities are
1663  * always processed before higher-numbered ones, so events at a low-numbered
1664  * (more urgent) priority can starve those at higher-numbered priorities.
1665 */
1666
1667 static int
1668 event_process_active(struct event_base *base)
1669 {
1670 /* Caller must hold th_base_lock */
1671 struct evcallback_list *activeq = NULL;
1672 int i, c = 0;
1673 const struct timeval *endtime;
1674 struct timeval tv;
1675 const int maxcb = base->max_dispatch_callbacks;
1676 const int limit_after_prio = base->limit_callbacks_after_prio;
1677 if (base->max_dispatch_time.tv_sec >= 0) {
1678 update_time_cache(base);
1679 gettime(base, &tv);
1680 evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
1681 endtime = &tv;
1682 } else {
1683 endtime = NULL;
1684 }
1685
1686 for (i = 0; i < base->nactivequeues; ++i) {
1687 if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1688 base->event_running_priority = i;
1689 activeq = &base->activequeues[i];
1690 if (i < limit_after_prio)
1691 c = event_process_active_single_queue(base, activeq,
1692 INT_MAX, NULL);
1693 else
1694 c = event_process_active_single_queue(base, activeq,
1695 maxcb, endtime);
1696 if (c < 0) {
1697 goto done;
1698 } else if (c > 0)
1699 break; /* Processed a real event; do not
1700 * consider lower-priority events */
1701 /* If we get here, all of the events we processed
1702 * were internal. Continue. */
1703 }
1704 }
1705
1706 done:
1707 base->event_running_priority = -1;
1708
1709 return c;
1710 }
1711
1712 /*
1713 * Wait continuously for events. We exit only if no events are left.
1714 */
1715
1716 int
1717 event_dispatch(void)
1718 {
1719 return (event_loop(0));
1720 }
1721
1722 int
1723 event_base_dispatch(struct event_base *event_base)
1724 {
1725 return (event_base_loop(event_base, 0));
1726 }
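/* Example (sketch): event_base_dispatch(base) is equivalent to
 * event_base_loop(base, 0); the flags below change when the loop returns.
 *
 *     event_base_loop(base, EVLOOP_ONCE);              block, run ready events, return
 *     event_base_loop(base, EVLOOP_NONBLOCK);          poll once without blocking
 *     event_base_loop(base, EVLOOP_NO_EXIT_ON_EMPTY);  keep running with no events
 */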
1727
1728 const char *
1729 event_base_get_method(const struct event_base *base)
1730 {
1731 EVUTIL_ASSERT(base);
1732 return (base->evsel->name);
1733 }
1734
1735 /** Callback: used to implement event_base_loopexit by telling the event_base
1736 * that it's time to exit its loop. */
1737 static void
1738 event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1739 {
1740 struct event_base *base = arg;
1741 base->event_gotterm = 1;
1742 }
1743
1744 int
1745 event_loopexit(const struct timeval *tv)
1746 {
1747 return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1748 current_base, tv));
1749 }
1750
1751 int
1752 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1753 {
1754 return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1755 event_base, tv));
1756 }
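/* Example (sketch): ask the loop to stop, either after a delay or as soon as
 * the currently active callbacks have finished.
 *
 *     struct timeval two_sec = { 2, 0 };
 *     event_base_loopexit(base, &two_sec);
 *     event_base_loopexit(base, NULL);
 */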
1757
1758 int
1759 event_loopbreak(void)
1760 {
1761 return (event_base_loopbreak(current_base));
1762 }
1763
1764 int
1765 event_base_loopbreak(struct event_base *event_base)
1766 {
1767 int r = 0;
1768 if (event_base == NULL)
1769 return (-1);
1770
1771 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1772 event_base->event_break = 1;
1773
1774 if (EVBASE_NEED_NOTIFY(event_base)) {
1775 r = evthread_notify_base(event_base);
1776 } else {
1777 r = (0);
1778 }
1779 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1780 return r;
1781 }
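
/* Note (informal): event_base_loopbreak() stops the loop more abruptly than
 * event_base_loopexit(): it makes event_base_loop() return as soon as the
 * currently running callback (if any) completes, without processing the rest
 * of the active queue.  A hypothetical interrupt handler might use it as:
 *
 *     static void on_sigint(evutil_socket_t sig, short what, void *arg)
 *     {
 *         struct event_base *base = arg;
 *         event_base_loopbreak(base);
 *     }
 */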
1782
1783 int
1784 event_base_loopcontinue(struct event_base *event_base)
1785 {
1786 int r = 0;
1787 if (event_base == NULL)
1788 return (-1);
1789
1790 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1791 event_base->event_continue = 1;
1792
1793 if (EVBASE_NEED_NOTIFY(event_base)) {
1794 r = evthread_notify_base(event_base);
1795 } else {
1796 r = (0);
1797 }
1798 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1799 return r;
1800 }
1801
1802 int
1803 event_base_got_break(struct event_base *event_base)
1804 {
1805 int res;
1806 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1807 res = event_base->event_break;
1808 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1809 return res;
1810 }
1811
1812 int
1813 event_base_got_exit(struct event_base *event_base)
1814 {
1815 int res;
1816 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1817 res = event_base->event_gotterm;
1818 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1819 return res;
1820 }
1821
1822 /* not thread safe */
1823
1824 int
1825 event_loop(int flags)
1826 {
1827 return event_base_loop(current_base, flags);
1828 }
1829
1830 int
1831 event_base_loop(struct event_base *base, int flags)
1832 {
1833 const struct eventop *evsel = base->evsel;
1834 struct timeval tv;
1835 struct timeval *tv_p;
1836 int res, done, retval = 0;
1837
1838 /* Grab the lock. We will release it inside evsel.dispatch, and again
1839 * as we invoke user callbacks. */
1840 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1841
1842 if (base->running_loop) {
1843 event_warnx("%s: reentrant invocation. Only one event_base_loop"
1844 " can run on each event_base at once.", __func__);
1845 EVBASE_RELEASE_LOCK(base, th_base_lock);
1846 return -1;
1847 }
1848
1849 base->running_loop = 1;
1850
1851 clear_time_cache(base);
1852
1853 if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1854 evsig_set_base_(base);
1855
1856 done = 0;
1857
1858 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1859 base->th_owner_id = EVTHREAD_GET_ID();
1860 #endif
1861
1862 base->event_gotterm = base->event_break = 0;
1863
1864 while (!done) {
1865 base->event_continue = 0;
1866 base->n_deferreds_queued = 0;
1867
1868 /* Terminate the loop if we have been asked to */
1869 if (base->event_gotterm) {
1870 break;
1871 }
1872
1873 if (base->event_break) {
1874 break;
1875 }
1876
1877 tv_p = &tv;
1878 if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1879 timeout_next(base, &tv_p);
1880 } else {
1881 /*
1882 * if we have active events, we just poll new events
1883 * without waiting.
1884 */
1885 evutil_timerclear(&tv);
1886 }
1887
1888 /* If we have no events, we just exit */
1889 if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
1890 !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
1891 event_debug(("%s: no events registered.", __func__));
1892 retval = 1;
1893 goto done;
1894 }
1895
1896 event_queue_make_later_events_active(base);
1897
1898 clear_time_cache(base);
1899
1900 res = evsel->dispatch(base, tv_p);
1901
1902 if (res == -1) {
1903 event_debug(("%s: dispatch returned unsuccessfully.",
1904 __func__));
1905 retval = -1;
1906 goto done;
1907 }
1908
1909 update_time_cache(base);
1910
1911 timeout_process(base);
1912
1913 if (N_ACTIVE_CALLBACKS(base)) {
1914 int n = event_process_active(base);
1915 if ((flags & EVLOOP_ONCE)
1916 && N_ACTIVE_CALLBACKS(base) == 0
1917 && n != 0)
1918 done = 1;
1919 } else if (flags & EVLOOP_NONBLOCK)
1920 done = 1;
1921 }
1922 event_debug(("%s: asked to terminate loop.", __func__));
1923
1924 done:
1925 clear_time_cache(base);
1926 base->running_loop = 0;
1927
1928 EVBASE_RELEASE_LOCK(base, th_base_lock);
1929
1930 return (retval);
1931 }
1932
1933 /* One-time callback to implement event_base_once: invokes the user callback,
1934 * then deletes the allocated storage */
1935 static void
1936 event_once_cb(evutil_socket_t fd, short events, void *arg)
1937 {
1938 struct event_once *eonce = arg;
1939
1940 (*eonce->cb)(fd, events, eonce->arg);
1941 EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
1942 LIST_REMOVE(eonce, next_once);
1943 EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
1944 event_debug_unassign(&eonce->ev);
1945 mm_free(eonce);
1946 }
1947
1948 /* not threadsafe, event scheduled once. */
1949 int
1950 event_once(evutil_socket_t fd, short events,
1951 void (*callback)(evutil_socket_t, short, void *),
1952 void *arg, const struct timeval *tv)
1953 {
1954 return event_base_once(current_base, fd, events, callback, arg, tv);
1955 }
1956
1957 /* Schedules an event once */
1958 int
1959 event_base_once(struct event_base *base, evutil_socket_t fd, short events,
1960 void (*callback)(evutil_socket_t, short, void *),
1961 void *arg, const struct timeval *tv)
1962 {
1963 struct event_once *eonce;
1964 int res = 0;
1965 int activate = 0;
1966
1967 /* We cannot support signals that just fire once, or persistent
1968 * events. */
1969 if (events & (EV_SIGNAL|EV_PERSIST))
1970 return (-1);
1971
1972 if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
1973 return (-1);
1974
1975 eonce->cb = callback;
1976 eonce->arg = arg;
1977
1978 if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
1979 evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
1980
1981 if (tv == NULL || ! evutil_timerisset(tv)) {
1982 /* If the event is going to become active immediately,
1983 * don't put it on the timeout queue. This is one
1984 * idiom for scheduling a callback, so let's make
1985 * it fast (and order-preserving). */
1986 activate = 1;
1987 }
1988 } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
1989 events &= EV_READ|EV_WRITE|EV_CLOSED;
1990
1991 event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
1992 } else {
1993 /* Bad event combination */
1994 mm_free(eonce);
1995 return (-1);
1996 }
1997
1998 if (res == 0) {
1999 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2000 if (activate)
2001 event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
2002 else
2003 res = event_add_nolock_(&eonce->ev, tv, 0);
2004
2005 if (res != 0) {
2006 mm_free(eonce);
2007 return (res);
2008 } else {
2009 LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
2010 }
2011 EVBASE_RELEASE_LOCK(base, th_base_lock);
2012 }
2013
2014 return (0);
2015 }
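
/* Illustrative usage sketch (not part of this file): schedule a one-shot
 * timer with event_base_once().  Libevent allocates and later frees the
 * internal event_once structure itself, so the caller never manages it.
 * 'base' and 'one_shot_cb' are hypothetical names.
 *
 *     static void one_shot_cb(evutil_socket_t fd, short what, void *arg)
 *     {
 *         puts("fired once");
 *     }
 *
 *     struct timeval half_sec = { 0, 500000 };
 *     event_base_once(base, -1, EV_TIMEOUT, one_shot_cb, NULL, &half_sec);
 */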
2016
2017 int
2018 event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
2019 {
2020 if (!base)
2021 base = current_base;
2022 if (arg == &event_self_cbarg_ptr_)
2023 arg = ev;
2024
2025 event_debug_assert_not_added_(ev);
2026
2027 ev->ev_base = base;
2028
2029 ev->ev_callback = callback;
2030 ev->ev_arg = arg;
2031 ev->ev_fd = fd;
2032 ev->ev_events = events;
2033 ev->ev_res = 0;
2034 ev->ev_flags = EVLIST_INIT;
2035 ev->ev_ncalls = 0;
2036 ev->ev_pncalls = NULL;
2037
2038 if (events & EV_SIGNAL) {
2039 if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
2040 event_warnx("%s: EV_SIGNAL is not compatible with "
2041 "EV_READ, EV_WRITE or EV_CLOSED", __func__);
2042 return -1;
2043 }
2044 ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
2045 } else {
2046 if (events & EV_PERSIST) {
2047 evutil_timerclear(&ev->ev_io_timeout);
2048 ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
2049 } else {
2050 ev->ev_closure = EV_CLOSURE_EVENT;
2051 }
2052 }
2053
2054 min_heap_elem_init_(ev);
2055
2056 if (base != NULL) {
2057 /* by default, we put new events into the middle priority */
2058 ev->ev_pri = base->nactivequeues / 2;
2059 }
2060
2061 event_debug_note_setup_(ev);
2062
2063 return 0;
2064 }
2065
2066 int
2067 event_base_set(struct event_base *base, struct event *ev)
2068 {
2069 /* Only innocent events may be assigned to a different base */
2070 if (ev->ev_flags != EVLIST_INIT)
2071 return (-1);
2072
2073 event_debug_assert_is_setup_(ev);
2074
2075 ev->ev_base = base;
2076 ev->ev_pri = base->nactivequeues/2;
2077
2078 return (0);
2079 }
2080
2081 void
2082 event_set(struct event *ev, evutil_socket_t fd, short events,
2083 void (*callback)(evutil_socket_t, short, void *), void *arg)
2084 {
2085 int r;
2086 r = event_assign(ev, current_base, fd, events, callback, arg);
2087 EVUTIL_ASSERT(r == 0);
2088 }
2089
2090 void *
2091 event_self_cbarg(void)
2092 {
2093 return &event_self_cbarg_ptr_;
2094 }
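
/* Illustrative usage sketch (not part of this file): event_self_cbarg()
 * returns a magic pointer that event_new()/event_assign() replace with the
 * event itself, which is handy when a callback needs to delete or re-add its
 * own event.  'base', 'sock', and the callback name are hypothetical.
 *
 *     static void once_then_stop(evutil_socket_t fd, short what, void *arg)
 *     {
 *         struct event *self = arg;   // the event created below
 *         event_del(self);
 *     }
 *
 *     struct event *ev = event_new(base, sock, EV_READ|EV_PERSIST,
 *         once_then_stop, event_self_cbarg());
 */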
2095
2096 struct event *
2097 event_base_get_running_event(struct event_base *base)
2098 {
2099 struct event *ev = NULL;
2100 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2101 if (EVBASE_IN_THREAD(base)) {
2102 struct event_callback *evcb = base->current_event;
2103 if (evcb->evcb_flags & EVLIST_INIT)
2104 ev = event_callback_to_event(evcb);
2105 }
2106 EVBASE_RELEASE_LOCK(base, th_base_lock);
2107 return ev;
2108 }
2109
2110 struct event *
2111 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
2112 {
2113 struct event *ev;
2114 ev = mm_malloc(sizeof(struct event));
2115 if (ev == NULL)
2116 return (NULL);
2117 if (event_assign(ev, base, fd, events, cb, arg) < 0) {
2118 mm_free(ev);
2119 return (NULL);
2120 }
2121
2122 return (ev);
2123 }
2124
2125 void
2126 event_free(struct event *ev)
2127 {
2128 	/* This assertion is disabled so that events which have been finalized
2129 	 * remain valid targets for event_free(). */
2130 // event_debug_assert_is_setup_(ev);
2131
2132 /* make sure that this event won't be coming back to haunt us. */
2133 event_del(ev);
2134 event_debug_note_teardown_(ev);
2135 mm_free(ev);
2136
2137 }
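
/* Illustrative lifecycle sketch (not part of this file): the usual pairing
 * is event_new() -> event_add() -> (the loop runs callbacks) -> event_free().
 * Since event_free() calls event_del() internally, a still-pending event may
 * be freed directly.  'base', 'sock', 'read_cb', and 'handle_error' are
 * hypothetical names supplied by the caller.
 *
 *     struct event *ev = event_new(base, sock, EV_READ|EV_PERSIST, read_cb, NULL);
 *     if (!ev || event_add(ev, NULL) < 0)
 *         handle_error();
 *     ...
 *     event_free(ev);   // removes the event first, then releases it
 */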
2138
2139 void
2140 event_debug_unassign(struct event *ev)
2141 {
2142 event_debug_assert_not_added_(ev);
2143 event_debug_note_teardown_(ev);
2144
2145 ev->ev_flags &= ~EVLIST_INIT;
2146 }
2147
2148 #define EVENT_FINALIZE_FREE_ 0x10000
2149 static int
2150 event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2151 {
2152 ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
2153 EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
2154
2155 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2156 ev->ev_closure = closure;
2157 ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
2158 event_active_nolock_(ev, EV_FINALIZE, 1);
2159 ev->ev_flags |= EVLIST_FINALIZING;
2160 return 0;
2161 }
2162
2163 static int
2164 event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2165 {
2166 int r;
2167 struct event_base *base = ev->ev_base;
2168 if (EVUTIL_FAILURE_CHECK(!base)) {
2169 event_warnx("%s: event has no event_base set.", __func__);
2170 return -1;
2171 }
2172
2173 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2174 r = event_finalize_nolock_(base, flags, ev, cb);
2175 EVBASE_RELEASE_LOCK(base, th_base_lock);
2176 return r;
2177 }
2178
2179 int
2180 event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2181 {
2182 return event_finalize_impl_(flags, ev, cb);
2183 }
2184
2185 int
2186 event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2187 {
2188 return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
2189 }
2190
2191 void
2192 event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2193 {
2194 struct event *ev = NULL;
2195 if (evcb->evcb_flags & EVLIST_INIT) {
2196 ev = event_callback_to_event(evcb);
2197 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2198 } else {
2199 event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
2200 }
2201
2202 evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
2203 evcb->evcb_cb_union.evcb_cbfinalize = cb;
2204 event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
2205 evcb->evcb_flags |= EVLIST_FINALIZING;
2206 }
2207
2208 void
2209 event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2210 {
2211 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2212 event_callback_finalize_nolock_(base, flags, evcb, cb);
2213 EVBASE_RELEASE_LOCK(base, th_base_lock);
2214 }
2215
2216 /** Internal: Finalize all of the n_cbs callbacks in evcbs. The provided
2217 * callback will be invoked on *one of them*, after they have *all* been
2218 * finalized. */
2219 int
2220 event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
2221 {
2222 int n_pending = 0, i;
2223
2224 if (base == NULL)
2225 base = current_base;
2226
2227 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2228
2229 event_debug(("%s: %d events finalizing", __func__, n_cbs));
2230
2231 /* At most one can be currently executing; the rest we just
2232 * cancel... But we always make sure that the finalize callback
2233 * runs. */
2234 for (i = 0; i < n_cbs; ++i) {
2235 struct event_callback *evcb = evcbs[i];
2236 if (evcb == base->current_event) {
2237 event_callback_finalize_nolock_(base, 0, evcb, cb);
2238 ++n_pending;
2239 } else {
2240 event_callback_cancel_nolock_(base, evcb, 0);
2241 }
2242 }
2243
2244 if (n_pending == 0) {
2245 /* Just do the first one. */
2246 event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
2247 }
2248
2249 EVBASE_RELEASE_LOCK(base, th_base_lock);
2250 return 0;
2251 }
2252
2253 /*
2254  * Sets the priority of an event: if the event is already active,
2255  * changing its priority will fail.
2256 */
2257
2258 int
2259 event_priority_set(struct event *ev, int pri)
2260 {
2261 event_debug_assert_is_setup_(ev);
2262
2263 if (ev->ev_flags & EVLIST_ACTIVE)
2264 return (-1);
2265 if (pri < 0 || pri >= ev->ev_base->nactivequeues)
2266 return (-1);
2267
2268 ev->ev_pri = pri;
2269
2270 return (0);
2271 }
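
/* Illustrative usage sketch (not part of this file): priorities must be set
 * up on the base before individual events can use them; a lower number runs
 * ahead of higher-numbered queues.  'base' and 'urgent_ev' are hypothetical.
 *
 *     event_base_priority_init(base, 2);   // priorities 0 (highest) and 1
 *     event_priority_set(urgent_ev, 0);    // run before priority-1 events
 */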
2272
2273 /*
2274 * Checks if a specific event is pending or scheduled.
2275 */
2276
2277 int
2278 event_pending(const struct event *ev, short event, struct timeval *tv)
2279 {
2280 int flags = 0;
2281
2282 if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2283 event_warnx("%s: event has no event_base set.", __func__);
2284 return 0;
2285 }
2286
2287 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2288 event_debug_assert_is_setup_(ev);
2289
2290 if (ev->ev_flags & EVLIST_INSERTED)
2291 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2292 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2293 flags |= ev->ev_res;
2294 if (ev->ev_flags & EVLIST_TIMEOUT)
2295 flags |= EV_TIMEOUT;
2296
2297 event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2298
2299 /* See if there is a timeout that we should report */
2300 if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2301 struct timeval tmp = ev->ev_timeout;
2302 tmp.tv_usec &= MICROSECONDS_MASK;
2303 		/* correctly remap to real time */
2304 evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2305 }
2306
2307 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2308
2309 return (flags & event);
2310 }
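
/* Illustrative usage sketch (not part of this file): event_pending() reports
 * which of the requested flags the event is currently pending or active on,
 * and can copy out the expiry time of a pending timeout; 'ev' is hypothetical.
 *
 *     struct timeval when;
 *     if (event_pending(ev, EV_READ|EV_TIMEOUT, &when))
 *         printf("still pending; timeout at %ld.%06ld\n",
 *             (long)when.tv_sec, (long)when.tv_usec);
 */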
2311
2312 int
2313 event_initialized(const struct event *ev)
2314 {
2315 if (!(ev->ev_flags & EVLIST_INIT))
2316 return 0;
2317
2318 return 1;
2319 }
2320
2321 void
2322 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2323 {
2324 event_debug_assert_is_setup_(event);
2325
2326 if (base_out)
2327 *base_out = event->ev_base;
2328 if (fd_out)
2329 *fd_out = event->ev_fd;
2330 if (events_out)
2331 *events_out = event->ev_events;
2332 if (callback_out)
2333 *callback_out = event->ev_callback;
2334 if (arg_out)
2335 *arg_out = event->ev_arg;
2336 }
2337
2338 size_t
2339 event_get_struct_event_size(void)
2340 {
2341 return sizeof(struct event);
2342 }
2343
2344 evutil_socket_t
2345 event_get_fd(const struct event *ev)
2346 {
2347 event_debug_assert_is_setup_(ev);
2348 return ev->ev_fd;
2349 }
2350
2351 struct event_base *
2352 event_get_base(const struct event *ev)
2353 {
2354 event_debug_assert_is_setup_(ev);
2355 return ev->ev_base;
2356 }
2357
2358 short
2359 event_get_events(const struct event *ev)
2360 {
2361 event_debug_assert_is_setup_(ev);
2362 return ev->ev_events;
2363 }
2364
2365 event_callback_fn
2366 event_get_callback(const struct event *ev)
2367 {
2368 event_debug_assert_is_setup_(ev);
2369 return ev->ev_callback;
2370 }
2371
2372 void *
2373 event_get_callback_arg(const struct event *ev)
2374 {
2375 event_debug_assert_is_setup_(ev);
2376 return ev->ev_arg;
2377 }
2378
2379 int
2380 event_get_priority(const struct event *ev)
2381 {
2382 event_debug_assert_is_setup_(ev);
2383 return ev->ev_pri;
2384 }
2385
2386 int
2387 event_add(struct event *ev, const struct timeval *tv)
2388 {
2389 int res;
2390
2391 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2392 event_warnx("%s: event has no event_base set.", __func__);
2393 return -1;
2394 }
2395
2396 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2397
2398 res = event_add_nolock_(ev, tv, 0);
2399
2400 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2401
2402 return (res);
2403 }
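
/* Illustrative usage sketch (not part of this file): event_add() with a
 * timeval arms a timeout relative to now.  A non-persistent timer fires once
 * and must be re-added; with EV_PERSIST the stored interval is re-armed
 * automatically after each callback.  'base' and 'tick_cb' are hypothetical.
 *
 *     struct timeval one_sec = { 1, 0 };
 *     struct event *tick = event_new(base, -1, EV_PERSIST, tick_cb, NULL);
 *     event_add(tick, &one_sec);   // tick_cb runs roughly once per second
 */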
2404
2405 /* Helper callback: wake an event_base from another thread. This version
2406 * works by writing a byte to one end of a socketpair, so that the event_base
2407 * listening on the other end will wake up as the corresponding event
2408 * triggers */
2409 static int
2410 evthread_notify_base_default(struct event_base *base)
2411 {
2412 char buf[1];
2413 int r;
2414 buf[0] = (char) 0;
2415 #ifdef _WIN32
2416 r = send(base->th_notify_fd[1], buf, 1, 0);
2417 #else
2418 r = write(base->th_notify_fd[1], buf, 1);
2419 #endif
2420 return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
2421 }
2422
2423 #ifdef EVENT__HAVE_EVENTFD
2424 /* Helper callback: wake an event_base from another thread. This version
2425 * assumes that you have a working eventfd() implementation. */
2426 static int
2427 evthread_notify_base_eventfd(struct event_base *base)
2428 {
2429 ev_uint64_t msg = 1;
2430 int r;
2431 do {
2432 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2433 } while (r < 0 && errno == EAGAIN);
2434
2435 return (r < 0) ? -1 : 0;
2436 }
2437 #endif
2438
2439
2440 /** Tell the thread currently running the event_loop for base (if any) that it
2441 * needs to stop waiting in its dispatch function (if it is) and process all
2442 * active callbacks. */
2443 static int
2444 evthread_notify_base(struct event_base *base)
2445 {
2446 EVENT_BASE_ASSERT_LOCKED(base);
2447 if (!base->th_notify_fn)
2448 return -1;
2449 if (base->is_notify_pending)
2450 return 0;
2451 base->is_notify_pending = 1;
2452 return base->th_notify_fn(base);
2453 }
2454
2455 /* Implementation function to remove a timeout on a currently pending event.
2456 */
2457 int
2458 event_remove_timer_nolock_(struct event *ev)
2459 {
2460 struct event_base *base = ev->ev_base;
2461
2462 EVENT_BASE_ASSERT_LOCKED(base);
2463 event_debug_assert_is_setup_(ev);
2464
2465 event_debug(("event_remove_timer_nolock: event: %p", ev));
2466
2467 /* If it's not pending on a timeout, we don't need to do anything. */
2468 if (ev->ev_flags & EVLIST_TIMEOUT) {
2469 event_queue_remove_timeout(base, ev);
2470 evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
2471 }
2472
2473 return (0);
2474 }
2475
2476 int
2477 event_remove_timer(struct event *ev)
2478 {
2479 int res;
2480
2481 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2482 event_warnx("%s: event has no event_base set.", __func__);
2483 return -1;
2484 }
2485
2486 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2487
2488 res = event_remove_timer_nolock_(ev);
2489
2490 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2491
2492 return (res);
2493 }
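
/* Illustrative usage sketch (not part of this file): event_remove_timer()
 * strips a pending timeout from an event while leaving its I/O or signal
 * registration in place, so the callback no longer fires on EV_TIMEOUT.
 * 'ev' is a hypothetical read event that was added with a timeout.
 *
 *     struct timeval five_sec = { 5, 0 };
 *     event_add(ev, &five_sec);    // read event with a 5-second timeout
 *     ...
 *     event_remove_timer(ev);      // keep waiting for EV_READ, drop the timeout
 */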
2494
2495 /* Implementation function to add an event. Works just like event_add,
2496 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
2497 * we treat tv as an absolute time, not as an interval to add to the current
2498 * time */
2499 int
2500 event_add_nolock_(struct event *ev, const struct timeval *tv,
2501 int tv_is_absolute)
2502 {
2503 struct event_base *base = ev->ev_base;
2504 int res = 0;
2505 int notify = 0;
2506
2507 EVENT_BASE_ASSERT_LOCKED(base);
2508 event_debug_assert_is_setup_(ev);
2509
2510 event_debug((
2511 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2512 ev,
2513 EV_SOCK_ARG(ev->ev_fd),
2514 ev->ev_events & EV_READ ? "EV_READ " : " ",
2515 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2516 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2517 tv ? "EV_TIMEOUT " : " ",
2518 ev->ev_callback));
2519
2520 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2521
2522 if (ev->ev_flags & EVLIST_FINALIZING) {
2523 /* XXXX debug */
2524 return (-1);
2525 }
2526
2527 /*
2528 * prepare for timeout insertion further below, if we get a
2529 * failure on any step, we should not change any state.
2530 */
2531 if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2532 if (min_heap_reserve_(&base->timeheap,
2533 1 + min_heap_size_(&base->timeheap)) == -1)
2534 return (-1); /* ENOMEM == errno */
2535 }
2536
2537 /* If the main thread is currently executing a signal event's
2538 * callback, and we are not the main thread, then we want to wait
2539 * until the callback is done before we mess with the event, or else
2540 * we can race on ev_ncalls and ev_pncalls below. */
2541 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2542 if (base->current_event == event_to_event_callback(ev) &&
2543 (ev->ev_events & EV_SIGNAL)
2544 && !EVBASE_IN_THREAD(base)) {
2545 ++base->current_event_waiters;
2546 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2547 }
2548 #endif
2549
2550 if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2551 !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2552 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2553 res = evmap_io_add_(base, ev->ev_fd, ev);
2554 else if (ev->ev_events & EV_SIGNAL)
2555 res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2556 if (res != -1)
2557 event_queue_insert_inserted(base, ev);
2558 if (res == 1) {
2559 /* evmap says we need to notify the main thread. */
2560 notify = 1;
2561 res = 0;
2562 }
2563 }
2564
2565 /*
2566 * we should change the timeout state only if the previous event
2567 * addition succeeded.
2568 */
2569 if (res != -1 && tv != NULL) {
2570 struct timeval now;
2571 int common_timeout;
2572 #ifdef USE_REINSERT_TIMEOUT
2573 int was_common;
2574 int old_timeout_idx;
2575 #endif
2576
2577 /*
2578 * for persistent timeout events, we remember the
2579 * timeout value and re-add the event.
2580 *
2581 * If tv_is_absolute, this was already set.
2582 */
2583 if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2584 ev->ev_io_timeout = *tv;
2585
2586 #ifndef USE_REINSERT_TIMEOUT
2587 if (ev->ev_flags & EVLIST_TIMEOUT) {
2588 event_queue_remove_timeout(base, ev);
2589 }
2590 #endif
2591
2592 /* Check if it is active due to a timeout. Rescheduling
2593 * this timeout before the callback can be executed
2594 * removes it from the active list. */
2595 if ((ev->ev_flags & EVLIST_ACTIVE) &&
2596 (ev->ev_res & EV_TIMEOUT)) {
2597 if (ev->ev_events & EV_SIGNAL) {
2598 /* See if we are just active executing
2599 * this event in a loop
2600 */
2601 if (ev->ev_ncalls && ev->ev_pncalls) {
2602 /* Abort loop */
2603 *ev->ev_pncalls = 0;
2604 }
2605 }
2606
2607 event_queue_remove_active(base, event_to_event_callback(ev));
2608 }
2609
2610 gettime(base, &now);
2611
2612 common_timeout = is_common_timeout(tv, base);
2613 #ifdef USE_REINSERT_TIMEOUT
2614 was_common = is_common_timeout(&ev->ev_timeout, base);
2615 old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2616 #endif
2617
2618 if (tv_is_absolute) {
2619 ev->ev_timeout = *tv;
2620 } else if (common_timeout) {
2621 struct timeval tmp = *tv;
2622 tmp.tv_usec &= MICROSECONDS_MASK;
2623 evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2624 ev->ev_timeout.tv_usec |=
2625 (tv->tv_usec & ~MICROSECONDS_MASK);
2626 } else {
2627 evutil_timeradd(&now, tv, &ev->ev_timeout);
2628 }
2629
2630 event_debug((
2631 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2632 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
2633
2634 #ifdef USE_REINSERT_TIMEOUT
2635 event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2636 #else
2637 event_queue_insert_timeout(base, ev);
2638 #endif
2639
2640 if (common_timeout) {
2641 struct common_timeout_list *ctl =
2642 get_common_timeout_list(base, &ev->ev_timeout);
2643 if (ev == TAILQ_FIRST(&ctl->events)) {
2644 common_timeout_schedule(ctl, &now, ev);
2645 }
2646 } else {
2647 struct event* top = NULL;
2648 /* See if the earliest timeout is now earlier than it
2649 * was before: if so, we will need to tell the main
2650 * thread to wake up earlier than it would otherwise.
2651 * We double check the timeout of the top element to
2652 * handle time distortions due to system suspension.
2653 */
2654 if (min_heap_elt_is_top_(ev))
2655 notify = 1;
2656 else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2657 evutil_timercmp(&top->ev_timeout, &now, <))
2658 notify = 1;
2659 }
2660 }
2661
2662 /* if we are not in the right thread, we need to wake up the loop */
2663 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2664 evthread_notify_base(base);
2665
2666 event_debug_note_add_(ev);
2667
2668 return (res);
2669 }
2670
2671 static int
2672 event_del_(struct event *ev, int blocking)
2673 {
2674 int res;
2675
2676 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2677 event_warnx("%s: event has no event_base set.", __func__);
2678 return -1;
2679 }
2680
2681 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2682
2683 res = event_del_nolock_(ev, blocking);
2684
2685 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2686
2687 return (res);
2688 }
2689
2690 int
2691 event_del(struct event *ev)
2692 {
2693 return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2694 }
2695
2696 int
2697 event_del_block(struct event *ev)
2698 {
2699 return event_del_(ev, EVENT_DEL_BLOCK);
2700 }
2701
2702 int
2703 event_del_noblock(struct event *ev)
2704 {
2705 return event_del_(ev, EVENT_DEL_NOBLOCK);
2706 }
2707
2708 /** Helper for event_del: always called with th_base_lock held.
2709 *
2710 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2711 * EVEN_IF_FINALIZING} values. See those for more information.
2712 */
2713 int
2714 event_del_nolock_(struct event *ev, int blocking)
2715 {
2716 struct event_base *base;
2717 int res = 0, notify = 0;
2718
2719 event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2720 ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2721
2722 /* An event without a base has not been added */
2723 if (ev->ev_base == NULL)
2724 return (-1);
2725
2726 EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2727
2728 if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2729 if (ev->ev_flags & EVLIST_FINALIZING) {
2730 /* XXXX Debug */
2731 return 0;
2732 }
2733 }
2734
2735 /* If the main thread is currently executing this event's callback,
2736 * and we are not the main thread, then we want to wait until the
2737 * callback is done before we start removing the event. That way,
2738 * when this function returns, it will be safe to free the
2739 * user-supplied argument. */
2740 base = ev->ev_base;
2741 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2742 if (blocking != EVENT_DEL_NOBLOCK &&
2743 base->current_event == event_to_event_callback(ev) &&
2744 !EVBASE_IN_THREAD(base) &&
2745 (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
2746 ++base->current_event_waiters;
2747 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2748 }
2749 #endif
2750
2751 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2752
2753 /* See if we are just active executing this event in a loop */
2754 if (ev->ev_events & EV_SIGNAL) {
2755 if (ev->ev_ncalls && ev->ev_pncalls) {
2756 /* Abort loop */
2757 *ev->ev_pncalls = 0;
2758 }
2759 }
2760
2761 if (ev->ev_flags & EVLIST_TIMEOUT) {
2762 /* NOTE: We never need to notify the main thread because of a
2763 * deleted timeout event: all that could happen if we don't is
2764 * that the dispatch loop might wake up too early. But the
2765 * point of notifying the main thread _is_ to wake up the
2766 * dispatch loop early anyway, so we wouldn't gain anything by
2767 * doing it.
2768 */
2769 event_queue_remove_timeout(base, ev);
2770 }
2771
2772 if (ev->ev_flags & EVLIST_ACTIVE)
2773 event_queue_remove_active(base, event_to_event_callback(ev));
2774 else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2775 event_queue_remove_active_later(base, event_to_event_callback(ev));
2776
2777 if (ev->ev_flags & EVLIST_INSERTED) {
2778 event_queue_remove_inserted(base, ev);
2779 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2780 res = evmap_io_del_(base, ev->ev_fd, ev);
2781 else
2782 res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2783 if (res == 1) {
2784 /* evmap says we need to notify the main thread. */
2785 notify = 1;
2786 res = 0;
2787 }
2788 }
2789
2790 /* if we are not in the right thread, we need to wake up the loop */
2791 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2792 evthread_notify_base(base);
2793
2794 event_debug_note_del_(ev);
2795
2796 return (res);
2797 }
2798
2799 void
2800 event_active(struct event *ev, int res, short ncalls)
2801 {
2802 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2803 event_warnx("%s: event has no event_base set.", __func__);
2804 return;
2805 }
2806
2807 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2808
2809 event_debug_assert_is_setup_(ev);
2810
2811 event_active_nolock_(ev, res, ncalls);
2812
2813 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2814 }
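
/* Illustrative usage sketch (not part of this file): event_active() marks an
 * event active "by hand", so its callback runs on the next pass through the
 * loop even though no I/O or timeout occurred.  With locking enabled (e.g.
 * via evthread_use_pthreads()), this is one common way to poke a callback
 * from another thread; 'ev' is hypothetical.
 *
 *     event_active(ev, EV_READ, 0);   // run ev's callback with res = EV_READ
 */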
2815
2816
2817 void
2818 event_active_nolock_(struct event *ev, int res, short ncalls)
2819 {
2820 struct event_base *base;
2821
2822 event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2823 ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2824
2825 base = ev->ev_base;
2826 EVENT_BASE_ASSERT_LOCKED(base);
2827
2828 if (ev->ev_flags & EVLIST_FINALIZING) {
2829 /* XXXX debug */
2830 return;
2831 }
2832
2833 switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2834 default:
2835 case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2836 EVUTIL_ASSERT(0);
2837 break;
2838 case EVLIST_ACTIVE:
2839 /* We get different kinds of events, add them together */
2840 ev->ev_res |= res;
2841 return;
2842 case EVLIST_ACTIVE_LATER:
2843 ev->ev_res |= res;
2844 break;
2845 case 0:
2846 ev->ev_res = res;
2847 break;
2848 }
2849
2850 if (ev->ev_pri < base->event_running_priority)
2851 base->event_continue = 1;
2852
2853 if (ev->ev_events & EV_SIGNAL) {
2854 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2855 if (base->current_event == event_to_event_callback(ev) &&
2856 !EVBASE_IN_THREAD(base)) {
2857 ++base->current_event_waiters;
2858 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2859 }
2860 #endif
2861 ev->ev_ncalls = ncalls;
2862 ev->ev_pncalls = NULL;
2863 }
2864
2865 event_callback_activate_nolock_(base, event_to_event_callback(ev));
2866 }
2867
2868 void
2869 event_active_later_(struct event *ev, int res)
2870 {
2871 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2872 event_active_later_nolock_(ev, res);
2873 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2874 }
2875
2876 void
2877 event_active_later_nolock_(struct event *ev, int res)
2878 {
2879 struct event_base *base = ev->ev_base;
2880 EVENT_BASE_ASSERT_LOCKED(base);
2881
2882 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2883 /* We get different kinds of events, add them together */
2884 ev->ev_res |= res;
2885 return;
2886 }
2887
2888 ev->ev_res = res;
2889
2890 event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
2891 }
2892
2893 int
2894 event_callback_activate_(struct event_base *base,
2895 struct event_callback *evcb)
2896 {
2897 int r;
2898 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2899 r = event_callback_activate_nolock_(base, evcb);
2900 EVBASE_RELEASE_LOCK(base, th_base_lock);
2901 return r;
2902 }
2903
2904 int
2905 event_callback_activate_nolock_(struct event_base *base,
2906 struct event_callback *evcb)
2907 {
2908 int r = 1;
2909
2910 if (evcb->evcb_flags & EVLIST_FINALIZING)
2911 return 0;
2912
2913 switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2914 default:
2915 EVUTIL_ASSERT(0);
2916 case EVLIST_ACTIVE_LATER:
2917 event_queue_remove_active_later(base, evcb);
2918 r = 0;
2919 break;
2920 case EVLIST_ACTIVE:
2921 return 0;
2922 case 0:
2923 break;
2924 }
2925
2926 event_queue_insert_active(base, evcb);
2927
2928 if (EVBASE_NEED_NOTIFY(base))
2929 evthread_notify_base(base);
2930
2931 return r;
2932 }
2933
2934 void
2935 event_callback_activate_later_nolock_(struct event_base *base,
2936 struct event_callback *evcb)
2937 {
2938 if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2939 return;
2940
2941 event_queue_insert_active_later(base, evcb);
2942 if (EVBASE_NEED_NOTIFY(base))
2943 evthread_notify_base(base);
2944 }
2945
2946 void
2947 event_callback_init_(struct event_base *base,
2948 struct event_callback *cb)
2949 {
2950 memset(cb, 0, sizeof(*cb));
2951 cb->evcb_pri = base->nactivequeues - 1;
2952 }
2953
2954 int
2955 event_callback_cancel_(struct event_base *base,
2956 struct event_callback *evcb)
2957 {
2958 int r;
2959 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2960 r = event_callback_cancel_nolock_(base, evcb, 0);
2961 EVBASE_RELEASE_LOCK(base, th_base_lock);
2962 return r;
2963 }
2964
2965 int
2966 event_callback_cancel_nolock_(struct event_base *base,
2967 struct event_callback *evcb, int even_if_finalizing)
2968 {
2969 if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
2970 return 0;
2971
2972 if (evcb->evcb_flags & EVLIST_INIT)
2973 return event_del_nolock_(event_callback_to_event(evcb),
2974 even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
2975
2976 switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2977 default:
2978 case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2979 EVUTIL_ASSERT(0);
2980 break;
2981 case EVLIST_ACTIVE:
2982 /* We get different kinds of events, add them together */
2983 event_queue_remove_active(base, evcb);
2984 return 0;
2985 case EVLIST_ACTIVE_LATER:
2986 event_queue_remove_active_later(base, evcb);
2987 break;
2988 case 0:
2989 break;
2990 }
2991
2992 return 0;
2993 }
2994
2995 void
2996 event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
2997 {
2998 memset(cb, 0, sizeof(*cb));
2999 cb->evcb_cb_union.evcb_selfcb = fn;
3000 cb->evcb_arg = arg;
3001 cb->evcb_pri = priority;
3002 cb->evcb_closure = EV_CLOSURE_CB_SELF;
3003 }
3004
3005 void
3006 event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
3007 {
3008 cb->evcb_pri = priority;
3009 }
3010
3011 void
3012 event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3013 {
3014 if (!base)
3015 base = current_base;
3016 event_callback_cancel_(base, cb);
3017 }
3018
3019 #define MAX_DEFERREDS_QUEUED 32
3020 int
3021 event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3022 {
3023 int r = 1;
3024 if (!base)
3025 base = current_base;
3026 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3027 if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3028 event_callback_activate_later_nolock_(base, cb);
3029 } else {
3030 ++base->n_deferreds_queued;
3031 r = event_callback_activate_nolock_(base, cb);
3032 }
3033 EVBASE_RELEASE_LOCK(base, th_base_lock);
3034 return r;
3035 }
3036
3037 static int
3038 timeout_next(struct event_base *base, struct timeval **tv_p)
3039 {
3040 /* Caller must hold th_base_lock */
3041 struct timeval now;
3042 struct event *ev;
3043 struct timeval *tv = *tv_p;
3044 int res = 0;
3045
3046 ev = min_heap_top_(&base->timeheap);
3047
3048 if (ev == NULL) {
3049 /* if no time-based events are active wait for I/O */
3050 *tv_p = NULL;
3051 goto out;
3052 }
3053
3054 if (gettime(base, &now) == -1) {
3055 res = -1;
3056 goto out;
3057 }
3058
3059 if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3060 evutil_timerclear(tv);
3061 goto out;
3062 }
3063
3064 evutil_timersub(&ev->ev_timeout, &now, tv);
3065
3066 EVUTIL_ASSERT(tv->tv_sec >= 0);
3067 EVUTIL_ASSERT(tv->tv_usec >= 0);
3068 event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
3069
3070 out:
3071 return (res);
3072 }
3073
3074 /* Activate every event whose timeout has elapsed. */
3075 static void
3076 timeout_process(struct event_base *base)
3077 {
3078 /* Caller must hold lock. */
3079 struct timeval now;
3080 struct event *ev;
3081
3082 if (min_heap_empty_(&base->timeheap)) {
3083 return;
3084 }
3085
3086 gettime(base, &now);
3087
3088 while ((ev = min_heap_top_(&base->timeheap))) {
3089 if (evutil_timercmp(&ev->ev_timeout, &now, >))
3090 break;
3091
3092 /* delete this event from the I/O queues */
3093 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3094
3095 event_debug(("timeout_process: event: %p, call %p",
3096 ev, ev->ev_callback));
3097 event_active_nolock_(ev, EV_TIMEOUT, 1);
3098 }
3099 }
3100
3101 #if (EVLIST_INTERNAL >> 4) != 1
3102 #error "Mismatch for value of EVLIST_INTERNAL"
3103 #endif
3104
3105 #ifndef MAX
3106 #define MAX(a,b) (((a)>(b))?(a):(b))
3107 #endif
3108
3109 #define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3110
3111 /* These are a fancy way to spell
3112     if (~flags & EVLIST_INTERNAL)
3113 base->event_count--/++;
3114 */
3115 #define DECR_EVENT_COUNT(base,flags) \
3116 ((base)->event_count -= (~((flags) >> 4) & 1))
3117 #define INCR_EVENT_COUNT(base,flags) do { \
3118 ((base)->event_count += (~((flags) >> 4) & 1)); \
3119 MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count); \
3120 } while (0)
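
/* Worked example of the bit trick above (informal): EVLIST_INTERNAL is 0x10,
 * so ((flags) >> 4) & 1 is 1 for internal events and 0 otherwise, and
 * ~((flags) >> 4) & 1 inverts that.  For an internal event the macros
 * therefore add or subtract 0 (the count is untouched); for an ordinary
 * event they add or subtract 1, which is exactly the conditional spelled
 * out in the comment. */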
3121
3122 static void
3123 event_queue_remove_inserted(struct event_base *base, struct event *ev)
3124 {
3125 EVENT_BASE_ASSERT_LOCKED(base);
3126 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3127 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3128 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3129 return;
3130 }
3131 DECR_EVENT_COUNT(base, ev->ev_flags);
3132 ev->ev_flags &= ~EVLIST_INSERTED;
3133 }
3134 static void
3135 event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3136 {
3137 EVENT_BASE_ASSERT_LOCKED(base);
3138 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3139 event_errx(1, "%s: %p not on queue %x", __func__,
3140 evcb, EVLIST_ACTIVE);
3141 return;
3142 }
3143 DECR_EVENT_COUNT(base, evcb->evcb_flags);
3144 evcb->evcb_flags &= ~EVLIST_ACTIVE;
3145 base->event_count_active--;
3146
3147 TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3148 evcb, evcb_active_next);
3149 }
3150 static void
3151 event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3152 {
3153 EVENT_BASE_ASSERT_LOCKED(base);
3154 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3155 event_errx(1, "%s: %p not on queue %x", __func__,
3156 evcb, EVLIST_ACTIVE_LATER);
3157 return;
3158 }
3159 DECR_EVENT_COUNT(base, evcb->evcb_flags);
3160 evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3161 base->event_count_active--;
3162
3163 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3164 }
3165 static void
3166 event_queue_remove_timeout(struct event_base *base, struct event *ev)
3167 {
3168 EVENT_BASE_ASSERT_LOCKED(base);
3169 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3170 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3171 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3172 return;
3173 }
3174 DECR_EVENT_COUNT(base, ev->ev_flags);
3175 ev->ev_flags &= ~EVLIST_TIMEOUT;
3176
3177 if (is_common_timeout(&ev->ev_timeout, base)) {
3178 struct common_timeout_list *ctl =
3179 get_common_timeout_list(base, &ev->ev_timeout);
3180 TAILQ_REMOVE(&ctl->events, ev,
3181 ev_timeout_pos.ev_next_with_common_timeout);
3182 } else {
3183 min_heap_erase_(&base->timeheap, ev);
3184 }
3185 }
3186
3187 #ifdef USE_REINSERT_TIMEOUT
3188 /* Remove and reinsert 'ev' into the timeout queue. */
3189 static void
3190 event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3191 int was_common, int is_common, int old_timeout_idx)
3192 {
3193 struct common_timeout_list *ctl;
3194 if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3195 event_queue_insert_timeout(base, ev);
3196 return;
3197 }
3198
3199 switch ((was_common<<1) | is_common) {
3200 case 3: /* Changing from one common timeout to another */
3201 ctl = base->common_timeout_queues[old_timeout_idx];
3202 TAILQ_REMOVE(&ctl->events, ev,
3203 ev_timeout_pos.ev_next_with_common_timeout);
3204 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3205 insert_common_timeout_inorder(ctl, ev);
3206 break;
3207 case 2: /* Was common; is no longer common */
3208 ctl = base->common_timeout_queues[old_timeout_idx];
3209 TAILQ_REMOVE(&ctl->events, ev,
3210 ev_timeout_pos.ev_next_with_common_timeout);
3211 min_heap_push_(&base->timeheap, ev);
3212 break;
3213 case 1: /* Wasn't common; has become common. */
3214 min_heap_erase_(&base->timeheap, ev);
3215 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3216 insert_common_timeout_inorder(ctl, ev);
3217 break;
3218 case 0: /* was in heap; is still on heap. */
3219 min_heap_adjust_(&base->timeheap, ev);
3220 break;
3221 default:
3222 EVUTIL_ASSERT(0); /* unreachable */
3223 break;
3224 }
3225 }
3226 #endif
3227
3228 /* Add 'ev' to the common timeout list in 'ev'. */
3229 static void
3230 insert_common_timeout_inorder(struct common_timeout_list *ctl,
3231 struct event *ev)
3232 {
3233 struct event *e;
3234 /* By all logic, we should just be able to append 'ev' to the end of
3235 * ctl->events, since the timeout on each 'ev' is set to {the common
3236 * timeout} + {the time when we add the event}, and so the events
3237 	 * should arrive in order of their timeouts. But just in case
3238 	 * there's some wacky threading issue going on, we do a search from
3239 	 * the end of 'ctl->events' to find the right insertion point.
3240 */
3241 TAILQ_FOREACH_REVERSE(e, &ctl->events,
3242 event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3243 /* This timercmp is a little sneaky, since both ev and e have
3244 * magic values in tv_usec. Fortunately, they ought to have
3245 * the _same_ magic values in tv_usec. Let's assert for that.
3246 */
3247 EVUTIL_ASSERT(
3248 is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3249 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3250 TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3251 ev_timeout_pos.ev_next_with_common_timeout);
3252 return;
3253 }
3254 }
3255 TAILQ_INSERT_HEAD(&ctl->events, ev,
3256 ev_timeout_pos.ev_next_with_common_timeout);
3257 }
3258
3259 static void
3260 event_queue_insert_inserted(struct event_base *base, struct event *ev)
3261 {
3262 EVENT_BASE_ASSERT_LOCKED(base);
3263
3264 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3265 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3266 ev, EV_SOCK_ARG(ev->ev_fd));
3267 return;
3268 }
3269
3270 INCR_EVENT_COUNT(base, ev->ev_flags);
3271
3272 ev->ev_flags |= EVLIST_INSERTED;
3273 }
3274
3275 static void
3276 event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3277 {
3278 EVENT_BASE_ASSERT_LOCKED(base);
3279
3280 if (evcb->evcb_flags & EVLIST_ACTIVE) {
3281 /* Double insertion is possible for active events */
3282 return;
3283 }
3284
3285 INCR_EVENT_COUNT(base, evcb->evcb_flags);
3286
3287 evcb->evcb_flags |= EVLIST_ACTIVE;
3288
3289 base->event_count_active++;
3290 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3291 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3292 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3293 evcb, evcb_active_next);
3294 }
3295
3296 static void
3297 event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3298 {
3299 EVENT_BASE_ASSERT_LOCKED(base);
3300 if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3301 /* Double insertion is possible */
3302 return;
3303 }
3304
3305 INCR_EVENT_COUNT(base, evcb->evcb_flags);
3306 evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3307 base->event_count_active++;
3308 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3309 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3310 TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3311 }
3312
3313 static void
3314 event_queue_insert_timeout(struct event_base *base, struct event *ev)
3315 {
3316 EVENT_BASE_ASSERT_LOCKED(base);
3317
3318 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3319 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3320 ev, EV_SOCK_ARG(ev->ev_fd));
3321 return;
3322 }
3323
3324 INCR_EVENT_COUNT(base, ev->ev_flags);
3325
3326 ev->ev_flags |= EVLIST_TIMEOUT;
3327
3328 if (is_common_timeout(&ev->ev_timeout, base)) {
3329 struct common_timeout_list *ctl =
3330 get_common_timeout_list(base, &ev->ev_timeout);
3331 insert_common_timeout_inorder(ctl, ev);
3332 } else {
3333 min_heap_push_(&base->timeheap, ev);
3334 }
3335 }
3336
3337 static void
3338 event_queue_make_later_events_active(struct event_base *base)
3339 {
3340 struct event_callback *evcb;
3341 EVENT_BASE_ASSERT_LOCKED(base);
3342
3343 while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3344 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3345 evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3346 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3347 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3348 base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3349 }
3350 }
3351
3352 /* Functions for debugging */
3353
3354 const char *
3355 event_get_version(void)
3356 {
3357 return (EVENT__VERSION);
3358 }
3359
3360 ev_uint32_t
3361 event_get_version_number(void)
3362 {
3363 return (EVENT__NUMERIC_VERSION);
3364 }
3365
3366 /*
3367 * No thread-safe interface needed - the information should be the same
3368 * for all threads.
3369 */
3370
3371 const char *
3372 event_get_method(void)
3373 {
3374 return (current_base->evsel->name);
3375 }
3376
3377 #ifndef EVENT__DISABLE_MM_REPLACEMENT
3378 static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3379 static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3380 static void (*mm_free_fn_)(void *p) = NULL;
3381
3382 void *
3383 event_mm_malloc_(size_t sz)
3384 {
3385 if (sz == 0)
3386 return NULL;
3387
3388 if (mm_malloc_fn_)
3389 return mm_malloc_fn_(sz);
3390 else
3391 return malloc(sz);
3392 }
3393
3394 void *
3395 event_mm_calloc_(size_t count, size_t size)
3396 {
3397 if (count == 0 || size == 0)
3398 return NULL;
3399
3400 if (mm_malloc_fn_) {
3401 size_t sz = count * size;
3402 void *p = NULL;
3403 if (count > EV_SIZE_MAX / size)
3404 goto error;
3405 p = mm_malloc_fn_(sz);
3406 if (p)
3407 return memset(p, 0, sz);
3408 } else {
3409 void *p = calloc(count, size);
3410 #ifdef _WIN32
3411 /* Windows calloc doesn't reliably set ENOMEM */
3412 if (p == NULL)
3413 goto error;
3414 #endif
3415 return p;
3416 }
3417
3418 error:
3419 errno = ENOMEM;
3420 return NULL;
3421 }
3422
3423 char *
3424 event_mm_strdup_(const char *str)
3425 {
3426 if (!str) {
3427 errno = EINVAL;
3428 return NULL;
3429 }
3430
3431 if (mm_malloc_fn_) {
3432 size_t ln = strlen(str);
3433 void *p = NULL;
3434 if (ln == EV_SIZE_MAX)
3435 goto error;
3436 p = mm_malloc_fn_(ln+1);
3437 if (p)
3438 return memcpy(p, str, ln+1);
3439 } else
3440 #ifdef _WIN32
3441 return _strdup(str);
3442 #else
3443 return strdup(str);
3444 #endif
3445
3446 error:
3447 errno = ENOMEM;
3448 return NULL;
3449 }
3450
3451 void *
3452 event_mm_realloc_(void *ptr, size_t sz)
3453 {
3454 if (mm_realloc_fn_)
3455 return mm_realloc_fn_(ptr, sz);
3456 else
3457 return realloc(ptr, sz);
3458 }
3459
3460 void
3461 event_mm_free_(void *ptr)
3462 {
3463 if (mm_free_fn_)
3464 mm_free_fn_(ptr);
3465 else
3466 free(ptr);
3467 }
3468
3469 void
3470 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3471 void *(*realloc_fn)(void *ptr, size_t sz),
3472 void (*free_fn)(void *ptr))
3473 {
3474 mm_malloc_fn_ = malloc_fn;
3475 mm_realloc_fn_ = realloc_fn;
3476 mm_free_fn_ = free_fn;
3477 }
3478 #endif
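
/* Illustrative usage sketch (not part of this file): plugging in a custom
 * allocator, e.g. one that aborts on failure.  This should happen before any
 * other libevent call so that every allocation and free go through the same
 * functions; the xmalloc/xrealloc/xfree names are hypothetical.
 *
 *     event_set_mem_functions(xmalloc, xrealloc, xfree);
 *     struct event_base *base = event_base_new();
 */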
3479
3480 #ifdef EVENT__HAVE_EVENTFD
3481 static void
3482 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3483 {
3484 ev_uint64_t msg;
3485 ev_ssize_t r;
3486 struct event_base *base = arg;
3487
3488 r = read(fd, (void*) &msg, sizeof(msg));
3489 if (r<0 && errno != EAGAIN) {
3490 event_sock_warn(fd, "Error reading from eventfd");
3491 }
3492 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3493 base->is_notify_pending = 0;
3494 EVBASE_RELEASE_LOCK(base, th_base_lock);
3495 }
3496 #endif
3497
3498 static void
3499 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3500 {
3501 unsigned char buf[1024];
3502 struct event_base *base = arg;
3503 #ifdef _WIN32
3504 while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3505 ;
3506 #else
3507 while (read(fd, (char*)buf, sizeof(buf)) > 0)
3508 ;
3509 #endif
3510
3511 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3512 base->is_notify_pending = 0;
3513 EVBASE_RELEASE_LOCK(base, th_base_lock);
3514 }
3515
3516 int
3517 evthread_make_base_notifiable(struct event_base *base)
3518 {
3519 int r;
3520 if (!base)
3521 return -1;
3522
3523 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3524 r = evthread_make_base_notifiable_nolock_(base);
3525 EVBASE_RELEASE_LOCK(base, th_base_lock);
3526 return r;
3527 }
3528
3529 static int
3530 evthread_make_base_notifiable_nolock_(struct event_base *base)
3531 {
3532 void (*cb)(evutil_socket_t, short, void *);
3533 int (*notify)(struct event_base *);
3534
3535 if (base->th_notify_fn != NULL) {
3536 /* The base is already notifiable: we're doing fine. */
3537 return 0;
3538 }
3539
3540 #if defined(EVENT__HAVE_WORKING_KQUEUE)
3541 if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3542 base->th_notify_fn = event_kq_notify_base_;
3543 /* No need to add an event here; the backend can wake
3544 * itself up just fine. */
3545 return 0;
3546 }
3547 #endif
3548
3549 #ifdef EVENT__HAVE_EVENTFD
3550 base->th_notify_fd[0] = evutil_eventfd_(0,
3551 EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3552 if (base->th_notify_fd[0] >= 0) {
3553 base->th_notify_fd[1] = -1;
3554 notify = evthread_notify_base_eventfd;
3555 cb = evthread_notify_drain_eventfd;
3556 } else
3557 #endif
3558 if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3559 notify = evthread_notify_base_default;
3560 cb = evthread_notify_drain_default;
3561 } else {
3562 return -1;
3563 }
3564
3565 base->th_notify_fn = notify;
3566
3567 /* prepare an event that we can use for wakeup */
3568 event_assign(&base->th_notify, base, base->th_notify_fd[0],
3569 EV_READ|EV_PERSIST, cb, base);
3570
3571 /* we need to mark this as internal event */
3572 base->th_notify.ev_flags |= EVLIST_INTERNAL;
3573 event_priority_set(&base->th_notify, 0);
3574
3575 return event_add_nolock_(&base->th_notify, NULL, 0);
3576 }

int
event_base_foreach_event_nolock_(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	int r, i;
	unsigned u;
	struct event *ev;

	/* Start out with all the EVLIST_INSERTED events. */
	if ((r = evmap_foreach_event_(base, fn, arg)))
		return r;

	/* Okay, now we deal with those events that have timeouts and are in
	 * the min-heap. */
	for (u = 0; u < base->timeheap.n; ++u) {
		ev = base->timeheap.p[u];
		if (ev->ev_flags & EVLIST_INSERTED) {
			/* we already processed this one */
			continue;
		}
		if ((r = fn(base, ev, arg)))
			return r;
	}

	/* Now for the events in one of the common-timeout queues. */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		TAILQ_FOREACH(ev, &ctl->events,
		    ev_timeout_pos.ev_next_with_common_timeout) {
			if (ev->ev_flags & EVLIST_INSERTED) {
				/* we already processed this one */
				continue;
			}
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	/* Finally, we deal with all the active events that we haven't
	 * touched yet. */
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
				/* This isn't an event (EVLIST_INIT is clear),
				 * or we already processed it (EVLIST_INSERTED
				 * or EVLIST_TIMEOUT is set). */
				continue;
			}
			ev = event_callback_to_event(evcb);
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	return 0;
}

/* Helper for event_base_dump_events: called on each event in the event base;
 * dumps only the inserted events. */
static int
dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
		return 0;

	fprintf(output, " %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s",
	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
	    (e->ev_events&EV_READ)?" Read":"",
	    (e->ev_events&EV_WRITE)?" Write":"",
	    (e->ev_events&EV_CLOSED)?" EOF":"",
	    (e->ev_events&EV_SIGNAL)?" Signal":"",
	    (e->ev_events&EV_PERSIST)?" Persist":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
	if (e->ev_flags & EVLIST_TIMEOUT) {
		struct timeval tv;
		tv.tv_sec = e->ev_timeout.tv_sec;
		tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
		evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
		fprintf(output, " Timeout=%ld.%06d",
		    (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
	}
	fputc('\n', output);

	return 0;
}

/* Helper for event_base_dump_events: called on each event in the event base;
 * dumps only the active events. */
static int
dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
		return 0;

	fprintf(output, " %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
	    (e->ev_res&EV_READ)?" Read":"",
	    (e->ev_res&EV_WRITE)?" Write":"",
	    (e->ev_res&EV_CLOSED)?" EOF":"",
	    (e->ev_res&EV_SIGNAL)?" Signal":"",
	    (e->ev_res&EV_TIMEOUT)?" Timeout":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
	    (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");

	return 0;
}

int
event_base_foreach_event(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	int r;
	if ((!fn) || (!base)) {
		return -1;
	}
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	r = event_base_foreach_event_nolock_(base, fn, arg);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
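/*
 * A minimal sketch of using the public iterator above: the callback runs
 * once per event while the base lock is held, so it must not modify the
 * base; returning non-zero stops the iteration.  count_event_fn is a
 * hypothetical callback:
 *
 *	static int
 *	count_event_fn(const struct event_base *base, const struct event *ev,
 *	    void *arg)
 *	{
 *		int *np = arg;
 *		++*np;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	event_base_foreach_event(base, count_event_fn, &n);
 */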


void
event_base_dump_events(struct event_base *base, FILE *output)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	fprintf(output, "Inserted events:\n");
	event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);

	fprintf(output, "Active events:\n");
	event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
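/*
 * Usage sketch: event_base_dump_events() is handy when debugging; a caller
 * can dump the current state of a base to any stdio stream, e.g.:
 *
 *	event_base_dump_events(base, stderr);
 */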

void
event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_active_by_signal(struct event_base *base, int sig)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	evmap_signal_active_(base, sig, 1);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
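/*
 * Sketch: the two entry points above let a caller activate events as if the
 * kernel had reported readiness, which is useful in tests or when readiness
 * is learned out of band.  For example, to run every read event registered
 * on fd as though it had become readable:
 *
 *	event_base_active_by_fd(base, fd, EV_READ);
 */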


/* "Virtual" events have no file descriptor or signal of their own; they only
 * bump the base's event count so that the loop keeps running while some
 * external operation (for example, an IOCP request on Windows) is still
 * outstanding. */
void
event_base_add_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->virtual_event_count++;
	MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_del_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	EVUTIL_ASSERT(base->virtual_event_count > 0);
	base->virtual_event_count--;
	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

static void
event_free_debug_globals_locks(void)
{
#ifndef EVENT__DISABLE_THREAD_SUPPORT
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_map_lock_ != NULL) {
		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
		event_debug_map_lock_ = NULL;
		evthreadimpl_disable_lock_debugging_();
	}
#endif /* EVENT__DISABLE_DEBUG_MODE */
#endif /* EVENT__DISABLE_THREAD_SUPPORT */
	return;
}

static void
event_free_debug_globals(void)
{
	event_free_debug_globals_locks();
}

static void
event_free_evsig_globals(void)
{
	evsig_free_globals_();
}

static void
event_free_evutil_globals(void)
{
	evutil_free_globals_();
}

static void
event_free_globals(void)
{
	event_free_debug_globals();
	event_free_evsig_globals();
	event_free_evutil_globals();
}

void
libevent_global_shutdown(void)
{
	event_disable_debug_mode();
	event_free_globals();
}
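/*
 * Usage sketch: libevent_global_shutdown() releases global state (debug
 * structures, signal and evutil globals).  It should only be called once the
 * process is entirely done with libevent, after every event_base has been
 * freed, typically at the very end of main():
 *
 *	event_base_free(base);
 *	libevent_global_shutdown();
 */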

#ifndef EVENT__DISABLE_THREAD_SUPPORT
int
event_global_setup_locks_(const int enable_locks)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
#endif
	if (evsig_global_setup_locks_(enable_locks) < 0)
		return -1;
	if (evutil_global_setup_locks_(enable_locks) < 0)
		return -1;
	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
		return -1;
	return 0;
}
#endif

void
event_base_assert_ok_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	event_base_assert_ok_nolock_(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_assert_ok_nolock_(struct event_base *base)
{
	int i;
	int count;

	/* First do checks on the per-fd and per-signal lists */
	evmap_check_integrity_(base);

	/* Check the heap property */
	for (i = 1; i < (int)base->timeheap.n; ++i) {
		int parent = (i - 1) / 2;
		struct event *ev, *p_ev;
		ev = base->timeheap.p[i];
		p_ev = base->timeheap.p[parent];
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
	}

	/* Check that the common timeouts are fine */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl = base->common_timeout_queues[i];
		struct event *last=NULL, *ev;

		EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);

		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
			if (last)
				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
			last = ev;
		}
	}

	/* Check the active queues. */
	count = 0;
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
			EVUTIL_ASSERT(evcb->evcb_pri == i);
			++count;
		}
	}

	{
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
			++count;
		}
	}
	EVUTIL_ASSERT(count == base->event_count_active);
}