xref: /minix/external/bsd/libevent/dist/evmap.c (revision e3b78ef1)
1 /*	$NetBSD: evmap.c,v 1.2 2013/04/11 16:56:41 christos Exp $	*/
2 /*
3  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 #include "event2/event-config.h"
28 #include <sys/cdefs.h>
29 __RCSID("$NetBSD: evmap.c,v 1.2 2013/04/11 16:56:41 christos Exp $");
30 
31 #ifdef WIN32
32 #include <winsock2.h>
33 #define WIN32_LEAN_AND_MEAN
34 #include <windows.h>
35 #undef WIN32_LEAN_AND_MEAN
36 #endif
37 #include <sys/types.h>
38 #if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
39 #include <sys/time.h>
40 #endif
41 #include <sys/queue.h>
42 #include <stdio.h>
43 #include <stdlib.h>
44 #ifndef WIN32
45 #include <unistd.h>
46 #endif
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <string.h>
#include <time.h>
51 
52 #include "event-internal.h"
53 #include "evmap-internal.h"
54 #include "mm-internal.h"
55 #include "changelist-internal.h"
56 
/** An entry for an evmap_io list: notes all the events that want to read or
	write on a given fd, and the number of each.
  */
struct evmap_io {
	struct event_list events;	/* every event watching this fd */
	ev_uint16_t nread;		/* how many of them want EV_READ */
	ev_uint16_t nwrite;		/* how many of them want EV_WRITE */
};
65 
/* An entry for an evmap_signal list: notes all the events that want to know
   when a signal triggers. */
struct evmap_signal {
	struct event_list events;	/* every event watching this signal */
};
71 
72 /* On some platforms, fds start at 0 and increment by 1 as they are
73    allocated, and old numbers get used.  For these platforms, we
74    implement io maps just like signal maps: as an array of pointers to
75    struct evmap_io.  But on other platforms (windows), sockets are not
76    0-indexed, not necessarily consecutive, and not necessarily reused.
77    There, we use a hashtable to implement evmap_io.
78 */
79 #ifdef EVMAP_USE_HT
/* One hashtable bucket entry mapping a socket to its evmap_io.  Any
 * backend-requested fdinfo bytes are allocated immediately after this
 * struct (see GET_IO_SLOT_AND_CTOR). */
struct event_map_entry {
	HT_ENTRY(event_map_entry) map_node;	/* hashtable linkage */
	evutil_socket_t fd;			/* hash/equality key */
	union { /* This is a union in case we need to make more things that can
			   be in the hashtable. */
		struct evmap_io evmap_io;
	} ent;
};
88 
89 /* Helper used by the event_io_map hashtable code; tries to return a good hash
90  * of the fd in e->fd. */
91 static inline unsigned
92 hashsocket(struct event_map_entry *e)
93 {
94 	/* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
95 	 * matter.  Our hashtable implementation really likes low-order bits,
96 	 * though, so let's do the rotate-and-add trick. */
97 	unsigned h = (unsigned) e->fd;
98 	h += (h >> 2) | (h << 30);
99 	return h;
100 }
101 
102 /* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
103  * have the same e->fd. */
104 static inline int
105 eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
106 {
107 	return e1->fd == e2->fd;
108 }
109 
/* Instantiate the fd -> event_map_entry hashtable with a 0.5 load factor,
 * using libevent's mm_* allocators. */
HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
			0.5, mm_malloc, mm_realloc, mm_free)
113 
/* Set 'x' to the evmap_io stored for 'slot' in 'map', or to NULL if 'map'
 * has no entry for 'slot'.  Hashtable analogue of GET_SIGNAL_SLOT below.
 * Note: no trailing semicolon after while(0) — the stray ';' in the old
 * definition defeated the do/while(0) idiom (the invocation expanded to two
 * statements, which breaks an unbraced if/else around the macro). */
#define GET_IO_SLOT(x, map, slot, type)					\
	do {								\
		struct event_map_entry _key, *_ent;			\
		_key.fd = slot;						\
		_ent = HT_FIND(event_io_map, map, &_key);		\
		(x) = _ent ? &_ent->ent.type : NULL;			\
	} while (/*CONSTCOND*/0)
121 
/* As GET_IO_SLOT, but if no entry exists for 'slot', allocate one (with
 * 'fdinfo_len' extra trailing bytes for the backend's per-fd data), run
 * 'ctor' on its embedded 'type' member, and insert it into the hashtable.
 * Makes the *enclosing function* return -1 on allocation failure. */
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		struct event_map_entry _key, *_ent;			\
		_key.fd = slot;						\
		_HT_FIND_OR_INSERT(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &_key, ptr,			\
		    {							\
			    _ent = *ptr;				\
		    },							\
		    {							\
			    _ent = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    if (EVUTIL_UNLIKELY(_ent == NULL))		\
				    return (-1);			\
			    _ent->fd = slot;				\
			    (ctor)(&_ent->ent.type);			\
			    _HT_FOI_INSERT(map_node, map, &_key, _ent, ptr) \
				});					\
		(x) = &_ent->ent.type;					\
	} while (/*CONSTCOND*/0)
141 
142 void evmap_io_initmap(struct event_io_map *ctx)
143 {
144 	HT_INIT(event_io_map, ctx);
145 }
146 
147 void evmap_io_clear(struct event_io_map *ctx)
148 {
149 	struct event_map_entry **ent, **next, *this;
150 	for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
151 		this = *ent;
152 		next = HT_NEXT_RMV(event_io_map, ctx, ent);
153 		mm_free(this);
154 	}
155 	HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
156 }
157 #endif
158 
/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type)			\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SIGNAL_SLOT, but construct the entry for 'slot' if it is not present,
   by allocating enough memory for a 'struct type' plus 'fdinfo_len' trailing
   bytes of backend per-fd data, and initializing the new value by calling the
   function 'ctor' on it.  Makes the *enclosing function* return -1 on
   allocation failure.
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		if ((map)->entries[slot] == NULL) {			\
			(map)->entries[slot] =				\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
				return (-1);				\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}							\
		(x) = (struct type *)((map)->entries[slot]);		\
	} while (/*CONSTCOND*/0)
180 
/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions: fds index the array directly,
   exactly like signal numbers. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
/* Offset of the backend's per-fd data past a struct evmap_io. */
#define FDINFO_OFFSET sizeof(struct evmap_io)
/* Without hashtables, an io map is laid out exactly like a signal map, so
 * simply delegate. */
void
evmap_io_initmap(struct event_io_map* ctx)
{
	evmap_signal_initmap(ctx);
}
/* Non-hashtable flavor: clearing an io map is clearing a signal map. */
void
evmap_io_clear(struct event_io_map* ctx)
{
	evmap_signal_clear(ctx);
}
198 #endif
199 
200 
201 /** Expand 'map' with new entries of width 'msize' until it is big enough
202 	to store a value in 'slot'.
203  */
204 static int
205 evmap_make_space(struct event_signal_map *map, int slot, int msize)
206 {
207 	if (map->nentries <= slot) {
208 		int nentries = map->nentries ? map->nentries : 32;
209 		void **tmp;
210 
211 		while (nentries <= slot)
212 			nentries <<= 1;
213 
214 		tmp = (void **)mm_realloc(map->entries, nentries * msize);
215 		if (tmp == NULL)
216 			return (-1);
217 
218 		memset(&tmp[map->nentries], 0,
219 		    (nentries - map->nentries) * msize);
220 
221 		map->nentries = nentries;
222 		map->entries = tmp;
223 	}
224 
225 	return (0);
226 }
227 
228 void
229 evmap_signal_initmap(struct event_signal_map *ctx)
230 {
231 	ctx->nentries = 0;
232 	ctx->entries = NULL;
233 }
234 
235 void
236 evmap_signal_clear(struct event_signal_map *ctx)
237 {
238 	if (ctx->entries != NULL) {
239 		int i;
240 		for (i = 0; i < ctx->nentries; ++i) {
241 			if (ctx->entries[i] != NULL)
242 				mm_free(ctx->entries[i]);
243 		}
244 		mm_free(ctx->entries);
245 		ctx->entries = NULL;
246 	}
247 	ctx->nentries = 0;
248 }
249 
250 
251 /* code specific to file descriptors */
252 
253 /** Constructor for struct evmap_io */
254 static void
255 evmap_io_init(struct evmap_io *entry)
256 {
257 	TAILQ_INIT(&entry->events);
258 	entry->nread = 0;
259 	entry->nwrite = 0;
260 }
261 
262 
/* Add 'ev' to the set of events watching 'fd' in 'base', telling the backend
 * to start watching for reading/writing when the respective count of
 * interested events goes from 0 to 1.
 * Return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_add(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx = NULL;
	int nread, nwrite, retval = 0;
	short res = 0, old = 0;
	struct event *old_ev;

	EVUTIL_ASSERT(fd == ev->ev_fd);

	if (fd < 0)
		return 0;

#ifndef EVMAP_USE_HT
	/* The array flavor of the io map must be grown explicitly; the
	 * hashtable flavor grows on demand inside GET_IO_SLOT_AND_CTOR. */
	if (fd >= io->nentries) {
		if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
			return (-1);
	}
#endif
	GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
						 evsel->fdinfo_len);

	nread = ctx->nread;
	nwrite = ctx->nwrite;

	/* 'old' is what the backend is currently watching for this fd. */
	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;

	/* 'res' collects the conditions that become newly watched because of
	 * this event (their refcount goes 0 -> 1). */
	if (ev->ev_events & EV_READ) {
		if (++nread == 1)
			res |= EV_READ;
	}
	if (ev->ev_events & EV_WRITE) {
		if (++nwrite == 1)
			res |= EV_WRITE;
	}
	/* The counters are stored in 16-bit fields; refuse to overflow them. */
	if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff)) {
		event_warnx("Too many events reading or writing on fd %d",
		    (int)fd);
		return -1;
	}
	if (EVENT_DEBUG_MODE_IS_ON() &&
	    (old_ev = TAILQ_FIRST(&ctx->events)) &&
	    (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
		event_warnx("Tried to mix edge-triggered and non-edge-triggered"
		    " events on fd %d", (int)fd);
		return -1;
	}

	if (res) {
		/* The backend's per-fd data lives right after the evmap_io. */
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		/* XXX(niels): we cannot mix edge-triggered and
		 * level-triggered, we should probably assert on
		 * this. */
		if (evsel->add(base, ev->ev_fd,
			old, (ev->ev_events & EV_ET) | res, extra) == -1)
			return (-1);
		retval = 1;
	}

	/* Commit the counters only after the backend call succeeded. */
	ctx->nread = (ev_uint16_t) nread;
	ctx->nwrite = (ev_uint16_t) nwrite;
	TAILQ_INSERT_TAIL(&ctx->events, ev, ev_io_next);

	return (retval);
}
335 
336 /* return -1 on error, 0 on success if nothing changed in the event backend,
337  * and 1 on success if something did. */
338 int
339 evmap_io_del(struct event_base *base, evutil_socket_t fd, struct event *ev)
340 {
341 	const struct eventop *evsel = base->evsel;
342 	struct event_io_map *io = &base->io;
343 	struct evmap_io *ctx;
344 	int nread, nwrite, retval = 0;
345 	short res = 0, old = 0;
346 
347 	if (fd < 0)
348 		return 0;
349 
350 	EVUTIL_ASSERT(fd == ev->ev_fd);
351 
352 #ifndef EVMAP_USE_HT
353 	if (fd >= io->nentries)
354 		return (-1);
355 #endif
356 
357 	GET_IO_SLOT(ctx, io, fd, evmap_io);
358 
359 	nread = ctx->nread;
360 	nwrite = ctx->nwrite;
361 
362 	if (nread)
363 		old |= EV_READ;
364 	if (nwrite)
365 		old |= EV_WRITE;
366 
367 	if (ev->ev_events & EV_READ) {
368 		if (--nread == 0)
369 			res |= EV_READ;
370 		EVUTIL_ASSERT(nread >= 0);
371 	}
372 	if (ev->ev_events & EV_WRITE) {
373 		if (--nwrite == 0)
374 			res |= EV_WRITE;
375 		EVUTIL_ASSERT(nwrite >= 0);
376 	}
377 
378 	if (res) {
379 		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
380 		if (evsel->del(base, ev->ev_fd, old, res, extra) == -1)
381 			return (-1);
382 		retval = 1;
383 	}
384 
385 	ctx->nread = nread;
386 	ctx->nwrite = nwrite;
387 	TAILQ_REMOVE(&ctx->events, ev, ev_io_next);
388 
389 	return (retval);
390 }
391 
/* Make every event watching 'fd' for one of the conditions in 'events'
 * active, scheduling one callback invocation per event. */
void
evmap_io_active(struct event_base *base, evutil_socket_t fd, short events)
{
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	struct event *ev;

#ifndef EVMAP_USE_HT
	EVUTIL_ASSERT(fd < io->nentries);
#endif
	GET_IO_SLOT(ctx, io, fd, evmap_io);

	/* The backend reported activity on fd, so an entry must exist. */
	EVUTIL_ASSERT(ctx);
	TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
		/* Activate only for the conditions this event asked for. */
		if (ev->ev_events & events)
			event_active_nolock(ev, ev->ev_events & events, 1);
	}
}
410 
411 /* code specific to signals */
412 
413 static void
414 evmap_signal_init(struct evmap_signal *entry)
415 {
416 	TAILQ_INIT(&entry->events);
417 }
418 
419 
/* Add 'ev' to the set of events waiting on signal 'sig', telling the signal
 * backend to start catching 'sig' when this is the first such event.
 * Returns -1 on failure, 1 on success. */
int
evmap_signal_add(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx = NULL;

	if (sig >= map->nentries) {
		if (evmap_make_space(
			map, sig, sizeof(struct evmap_signal *)) == -1)
			return (-1);
	}
	GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
	    base->evsigsel->fdinfo_len);

	/* Only the first watcher for a signal needs a backend add; for signal
	 * events, ev->ev_fd holds the signal number. */
	if (TAILQ_EMPTY(&ctx->events)) {
		if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
		    == -1)
			return (-1);
	}

	TAILQ_INSERT_TAIL(&ctx->events, ev, ev_signal_next);

	return (1);
}
445 
/* Remove 'ev' from the set of events waiting on signal 'sig', telling the
 * signal backend to stop catching 'sig' when 'ev' is the last such event.
 * Returns -1 on failure, 1 on success. */
int
evmap_signal_del(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;

	if (sig >= map->nentries)
		return (-1);

	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	/* first == last means the list holds exactly one event (this one),
	 * so the backend should stop watching the signal. */
	if (TAILQ_FIRST(&ctx->events) == TAILQ_LAST(&ctx->events, event_list)) {
		if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
			return (-1);
	}

	TAILQ_REMOVE(&ctx->events, ev, ev_signal_next);

	return (1);
}
467 
/* Make every event waiting on signal 'sig' active, scheduling 'ncalls'
 * callback invocations per event (one per time the signal was caught). */
void
evmap_signal_active(struct event_base *base, evutil_socket_t sig, int ncalls)
{
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;
	struct event *ev;

	EVUTIL_ASSERT(sig < map->nentries);
	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	TAILQ_FOREACH(ev, &ctx->events, ev_signal_next)
		event_active_nolock(ev, EV_SIGNAL, ncalls);
}
481 
482 void *
483 evmap_io_get_fdinfo(struct event_io_map *map, evutil_socket_t fd)
484 {
485 	struct evmap_io *ctx;
486 	GET_IO_SLOT(ctx, map, fd, evmap_io);
487 	if (ctx)
488 		return ((char*)ctx) + sizeof(struct evmap_io);
489 	else
490 		return NULL;
491 }
492 
/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 * Stored in the fdinfo bytes that trail each evmap_io/evmap_signal entry.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};
500 
501 void
502 event_changelist_init(struct event_changelist *changelist)
503 {
504 	changelist->changes = NULL;
505 	changelist->changes_size = 0;
506 	changelist->n_changes = 0;
507 }
508 
/** Helper: return the changelist_fdinfo corresponding to a given change.
 * The fdinfo lives in the trailing bytes allocated right after the
 * evmap_signal (for signal changes) or evmap_io (for io changes) entry of
 * the change's fd/signal number. */
static inline struct event_changelist_fdinfo *
event_change_get_fdinfo(struct event_base *base,
    const struct event_change *change)
{
	char *ptr;
	if (change->read_change & EV_CHANGE_SIGNAL) {
		struct evmap_signal *ctx;
		GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
		ptr = ((char*)ctx) + sizeof(struct evmap_signal);
	} else {
		struct evmap_io *ctx;
		GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
		ptr = ((char*)ctx) + sizeof(struct evmap_io);
	}
	return (void*)ptr;
}
526 
527 #ifdef DEBUG_CHANGELIST
/** Make sure that the changelist is consistent with the evmap structures:
 * each change's fdinfo points back at it, and each fdinfo with a recorded
 * index refers to a change for the right fd. */
static void
event_changelist_check(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);

	/* Forward direction: change -> fdinfo -> same index. */
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	/* Reverse direction: fdinfo -> change -> same fd.  Only the array
	 * (non-hashtable) io map layout is checked here. */
	for (i = 0; i < base->io.nentries; ++i) {
		struct evmap_io *io = base->io.entries[i];
		struct event_changelist_fdinfo *f;
		if (!io)
			continue;
		f = (void*)
		    ( ((char*)io) + sizeof(struct evmap_io) );
		if (f->idxplus1) {
			struct event_change *c = &changelist->changes[f->idxplus1 - 1];
			EVUTIL_ASSERT(c->fd == i);
		}
	}
}
558 #else
559 #define event_changelist_check(base)  ((void)0)
560 #endif
561 
562 void
563 event_changelist_remove_all(struct event_changelist *changelist,
564     struct event_base *base)
565 {
566 	int i;
567 
568 	event_changelist_check(base);
569 
570 	for (i = 0; i < changelist->n_changes; ++i) {
571 		struct event_change *ch = &changelist->changes[i];
572 		struct event_changelist_fdinfo *fdinfo =
573 		    event_change_get_fdinfo(base, ch);
574 		EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
575 		fdinfo->idxplus1 = 0;
576 	}
577 
578 	changelist->n_changes = 0;
579 
580 	event_changelist_check(base);
581 }
582 
583 void
584 event_changelist_freemem(struct event_changelist *changelist)
585 {
586 	if (changelist->changes)
587 		mm_free(changelist->changes);
588 	event_changelist_init(changelist); /* zero it all out. */
589 }
590 
591 /** Increase the size of 'changelist' to hold more changes. */
592 static int
593 event_changelist_grow(struct event_changelist *changelist)
594 {
595 	int new_size;
596 	struct event_change *new_changes;
597 	if (changelist->changes_size < 64)
598 		new_size = 64;
599 	else
600 		new_size = changelist->changes_size * 2;
601 
602 	new_changes = mm_realloc(changelist->changes,
603 	    new_size * sizeof(struct event_change));
604 
605 	if (EVUTIL_UNLIKELY(new_changes == NULL))
606 		return (-1);
607 
608 	changelist->changes = new_changes;
609 	changelist->changes_size = new_size;
610 
611 	return (0);
612 }
613 
614 /** Return a pointer to the changelist entry for the file descriptor or signal
615  * 'fd', whose fdinfo is 'fdinfo'.  If none exists, construct it, setting its
616  * old_events field to old_events.
617  */
static struct event_change *
event_changelist_get_or_construct(struct event_changelist *changelist,
    evutil_socket_t fd,
    short old_events,
    struct event_changelist_fdinfo *fdinfo)
{
	struct event_change *change;

	if (fdinfo->idxplus1 == 0) {
		/* No pending change for this fd yet: append a zeroed entry
		 * and record its position in the fdinfo (index + 1, so that
		 * 0 can mean "no entry"). */
		int idx;
		EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);

		if (changelist->n_changes == changelist->changes_size) {
			if (event_changelist_grow(changelist) < 0)
				return NULL;
		}

		idx = changelist->n_changes++;
		change = &changelist->changes[idx];
		fdinfo->idxplus1 = idx + 1;

		memset(change, 0, sizeof(struct event_change));
		change->fd = fd;
		change->old_events = old_events;
	} else {
		/* Reuse the change already pending for this fd. */
		change = &changelist->changes[fdinfo->idxplus1 - 1];
		EVUTIL_ASSERT(change->fd == fd);
	}
	return change;
}
648 
/* Changelist implementation of the eventop 'add' hook: record that 'events'
 * should be added for 'fd', merging with any change still pending for it.
 * 'p' is the fd's event_changelist_fdinfo.  Returns 0 on success, -1 on
 * allocation failure. */
int
event_changelist_add(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);

	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	if (!change)
		return -1;

	/* An add replaces any previous delete, but doesn't result in a no-op,
	 * since the delete might fail (because the fd had been closed since
	 * the last add, for instance. */

	if (events & (EV_READ|EV_SIGNAL)) {
		change->read_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}
	if (events & EV_WRITE) {
		change->write_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}

	event_changelist_check(base);
	return (0);
}
679 
/* Changelist implementation of the eventop 'del' hook: record that 'events'
 * should be deleted for 'fd', cancelling a pending add where possible.
 * 'p' is the fd's event_changelist_fdinfo.  Returns 0 on success, -1 on
 * allocation failure. */
int
event_changelist_del(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);
	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	event_changelist_check(base);
	if (!change)
		return -1;

	/* A delete removes any previous add, rather than replacing it:
	   on those platforms where "add, delete, dispatch" is not the same
	   as "no-op, dispatch", we want the no-op behavior.

	   As well as checking the current operation we should also check
	   the original set of events to make sure we're not ignoring
	   the case where the add operation is present on an event that
	   was already set.

	   If we have a no-op item, we could remove it from the list
	   entirely, but really there's not much point: skipping the no-op
	   change when we do the dispatch later is far cheaper than rejuggling
	   the array now.

	   As this stands, it also lets through deletions of events that are
	   not currently set.
	 */

	if (events & (EV_READ|EV_SIGNAL)) {
		/* If the event wasn't set before and a pending add exists,
		 * the two cancel out into a no-op. */
		if (!(change->old_events & (EV_READ | EV_SIGNAL)) &&
		    (change->read_change & EV_CHANGE_ADD))
			change->read_change = 0;
		else
			change->read_change = EV_CHANGE_DEL;
	}
	if (events & EV_WRITE) {
		if (!(change->old_events & EV_WRITE) &&
		    (change->write_change & EV_CHANGE_ADD))
			change->write_change = 0;
		else
			change->write_change = EV_CHANGE_DEL;
	}

	event_changelist_check(base);
	return (0);
}
730 
/* Debugging aid: assert that every inserted event appears exactly once in
 * the io/signal maps, and that the maps contain no events that are not in
 * the base's event queue.  Aborts (via EVUTIL_ASSERT) on inconsistency. */
void
evmap_check_integrity(struct event_base *base)
{
/* Scratch flags, temporarily set on ev_flags to detect duplicates. */
#define EVLIST_X_SIGFOUND 0x1000
#define EVLIST_X_IOFOUND 0x2000

	evutil_socket_t i;
	struct event *ev;
	struct event_io_map *io = &base->io;
	struct event_signal_map *sigmap = &base->sigmap;
#ifdef EVMAP_USE_HT
	struct event_map_entry **mapent;
#endif
	int nsignals, ntimers, nio;
	nsignals = ntimers = nio = 0;

	/* Pass 1: clear the scratch flags on every inserted event. */
	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INIT);
		ev->ev_flags &= ~(EVLIST_X_SIGFOUND|EVLIST_X_IOFOUND);
	}

	/* Pass 2: walk the io map.  NOTE: the two #ifdef branches each open
	 * the loop whose shared body follows the #endif; do not reindent or
	 * restructure this without keeping both headers balanced. */
#ifdef EVMAP_USE_HT
	HT_FOREACH(mapent, event_io_map, io) {
		struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
		i = (*mapent)->fd;
#else
	for (i = 0; i < io->nentries; ++i) {
		struct evmap_io *ctx = io->entries[i];

		if (!ctx)
			continue;
#endif

		TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
			/* Each event may appear in at most one io slot. */
			EVUTIL_ASSERT(!(ev->ev_flags & EVLIST_X_IOFOUND));
			EVUTIL_ASSERT(ev->ev_fd == i);
			ev->ev_flags |= EVLIST_X_IOFOUND;
			nio++;
		}
	}

	/* Pass 3: walk the signal map the same way. */
	for (i = 0; i < sigmap->nentries; ++i) {
		struct evmap_signal *ctx = sigmap->entries[i];
		if (!ctx)
			continue;

		TAILQ_FOREACH(ev, &ctx->events, ev_signal_next) {
			EVUTIL_ASSERT(!(ev->ev_flags & EVLIST_X_SIGFOUND));
			EVUTIL_ASSERT(ev->ev_fd == i);
			ev->ev_flags |= EVLIST_X_SIGFOUND;
			nsignals++;
		}
	}

	/* Pass 4: every queued io/signal event must have been found above;
	 * decrement the counters so leftovers indicate map-only events. */
	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (ev->ev_events & (EV_READ|EV_WRITE)) {
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_X_IOFOUND);
			--nio;
		}
		if (ev->ev_events & EV_SIGNAL) {
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_X_SIGFOUND);
			--nsignals;
		}
	}

	EVUTIL_ASSERT(nio == 0);
	EVUTIL_ASSERT(nsignals == 0);
	/* There is no "EVUTIL_ASSERT(ntimers == 0)": eventqueue is only for
	 * pending signals and io events.
	 */
}
803