1 /*
2  * Copyright (c) 2003-2007 Niels Provos <provos@citi.umich.edu>
3  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 #include "util-internal.h"
28 
29 #ifdef _WIN32
30 #include <winsock2.h>
31 #include <windows.h>
32 #endif
33 
34 #ifdef EVENT__HAVE_PTHREADS
35 #include <pthread.h>
36 #endif
37 
38 #include "event2/event-config.h"
39 
40 #include <sys/types.h>
41 #include <sys/stat.h>
42 #ifdef EVENT__HAVE_SYS_TIME_H
43 #include <sys/time.h>
44 #endif
45 #include <sys/queue.h>
46 #ifndef _WIN32
47 #include <sys/socket.h>
48 #include <sys/wait.h>
49 #include <signal.h>
50 #include <unistd.h>
51 #include <netdb.h>
52 #endif
53 #include <fcntl.h>
54 #include <signal.h>
55 #include <stdlib.h>
56 #include <stdio.h>
57 #include <string.h>
58 #include <errno.h>
59 #include <assert.h>
60 #include <ctype.h>
61 
62 #include "event2/event.h"
63 #include "event2/event_struct.h"
64 #include "event2/event_compat.h"
65 #include "event2/tag.h"
66 #include "event2/buffer.h"
67 #include "event2/buffer_compat.h"
68 #include "event2/util.h"
69 #include "event-internal.h"
70 #include "evthread-internal.h"
71 #include "log-internal.h"
72 #include "time-internal.h"
73 
74 #include "regress.h"
75 
76 #ifndef _WIN32
77 #include "regress.gen.h"
78 #endif
79 
80 evutil_socket_t pair[2];
81 int test_ok;
82 int called;
83 struct event_base *global_base;
84 
85 static char wbuf[4096];
86 static char rbuf[4096];
87 static int woff;
88 static int roff;
89 static int usepersist;
90 static struct timeval tset;
91 static struct timeval tcalled;
92 
93 
94 #define TEST1	"this is a test"
95 
96 #ifdef _WIN32
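/* On Windows a socket handle is not a file descriptor, so route the tests'
 * read()/write() calls through recv()/send(). */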
97 #define write(fd,buf,len) send((fd),(buf),(int)(len),0)
98 #define read(fd,buf,len) recv((fd),(buf),(int)(len),0)
99 #endif
100 
101 struct basic_cb_args
102 {
103 	struct event_base *eb;
104 	struct event *ev;
105 	unsigned int callcount;
106 };
107 
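/* Reads whatever is available from the socket.  On the first call it re-adds
 * the (non-persistent) event via its own `struct event *` argument; when the
 * second call sees EOF (len == 0) the test is marked as passed. */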
108 static void
109 simple_read_cb(evutil_socket_t fd, short event, void *arg)
110 {
111 	char buf[256];
112 	int len;
113 
114 	len = read(fd, buf, sizeof(buf));
115 
116 	if (len) {
117 		if (!called) {
118 			if (event_add(arg, NULL) == -1)
119 				exit(1);
120 		}
121 	} else if (called == 1)
122 		test_ok = 1;
123 
124 	called++;
125 }
126 
127 static void
128 basic_read_cb(evutil_socket_t fd, short event, void *data)
129 {
130 	char buf[256];
131 	int len;
132 	struct basic_cb_args *arg = data;
133 
134 	len = read(fd, buf, sizeof(buf));
135 
136 	if (len < 0) {
137 		tt_fail_perror("read (callback)");
138 	} else {
139 		switch (arg->callcount++) {
140 		case 0:	 /* first call: expect to read data; cycle */
141 			if (len > 0)
142 				return;
143 
144 			tt_fail_msg("EOF before data read");
145 			break;
146 
147 		case 1:	 /* second call: expect EOF; stop */
148 			if (len > 0)
149 				tt_fail_msg("not all data read on first cycle");
150 			break;
151 
152 		default:  /* third call: should not happen */
153 			tt_fail_msg("too many cycles");
154 		}
155 	}
156 
157 	event_del(arg->ev);
158 	event_base_loopexit(arg->eb, NULL);
159 }
160 
161 static void
162 dummy_read_cb(evutil_socket_t fd, short event, void *arg)
163 {
164 }
165 
166 static void
167 simple_write_cb(evutil_socket_t fd, short event, void *arg)
168 {
169 	int len;
170 
171 	len = write(fd, TEST1, strlen(TEST1) + 1);
172 	if (len == -1)
173 		test_ok = 0;
174 	else
175 		test_ok = 1;
176 }
177 
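/* Writes wbuf to the socket in chunks of at most 128 bytes.  Once the whole
 * buffer has been sent it shuts down the write side; otherwise it re-adds
 * itself unless the test is running in persistent mode. */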
178 static void
179 multiple_write_cb(evutil_socket_t fd, short event, void *arg)
180 {
181 	struct event *ev = arg;
182 	int len;
183 
184 	len = 128;
185 	if (woff + len >= (int)sizeof(wbuf))
186 		len = sizeof(wbuf) - woff;
187 
188 	len = write(fd, wbuf + woff, len);
189 	if (len == -1) {
190 		fprintf(stderr, "%s: write\n", __func__);
191 		if (usepersist)
192 			event_del(ev);
193 		return;
194 	}
195 
196 	woff += len;
197 
198 	if (woff >= (int)sizeof(wbuf)) {
199 		shutdown(fd, EVUTIL_SHUT_WR);
200 		if (usepersist)
201 			event_del(ev);
202 		return;
203 	}
204 
205 	if (!usepersist) {
206 		if (event_add(ev, NULL) == -1)
207 			exit(1);
208 	}
209 }
210 
211 static void
212 multiple_read_cb(evutil_socket_t fd, short event, void *arg)
213 {
214 	struct event *ev = arg;
215 	int len;
216 
217 	len = read(fd, rbuf + roff, sizeof(rbuf) - roff);
218 	if (len == -1)
219 		fprintf(stderr, "%s: read\n", __func__);
220 	if (len <= 0) {
221 		if (usepersist)
222 			event_del(ev);
223 		return;
224 	}
225 
226 	roff += len;
227 	if (!usepersist) {
228 		if (event_add(ev, NULL) == -1)
229 			exit(1);
230 	}
231 }
232 
233 static void
234 timeout_cb(evutil_socket_t fd, short event, void *arg)
235 {
236 	evutil_gettimeofday(&tcalled, NULL);
237 }
238 
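/* State for the combined test: both ends of the socket pair read and write.
 * The write callback sends `nread` bytes of 'q' in 128-byte chunks, and the
 * read callback counts what arrives, so each side should receive exactly the
 * byte budget the peer started with. */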
239 struct both {
240 	struct event ev;
241 	int nread;
242 };
243 
244 static void
245 combined_read_cb(evutil_socket_t fd, short event, void *arg)
246 {
247 	struct both *both = arg;
248 	char buf[128];
249 	int len;
250 
251 	len = read(fd, buf, sizeof(buf));
252 	if (len == -1)
253 		fprintf(stderr, "%s: read\n", __func__);
254 	if (len <= 0)
255 		return;
256 
257 	both->nread += len;
258 	if (event_add(&both->ev, NULL) == -1)
259 		exit(1);
260 }
261 
262 static void
263 combined_write_cb(evutil_socket_t fd, short event, void *arg)
264 {
265 	struct both *both = arg;
266 	char buf[128];
267 	int len;
268 
269 	len = sizeof(buf);
270 	if (len > both->nread)
271 		len = both->nread;
272 
273 	memset(buf, 'q', len);
274 
275 	len = write(fd, buf, len);
276 	if (len == -1)
277 		fprintf(stderr, "%s: write\n", __func__);
278 	if (len <= 0) {
279 		shutdown(fd, EVUTIL_SHUT_WR);
280 		return;
281 	}
282 
283 	both->nread -= len;
284 	if (event_add(&both->ev, NULL) == -1)
285 		exit(1);
286 }
287 
288 /* These macros used to replicate the work of the legacy test wrapper code */
289 #define setup_test(x) do {						\
290 	if (!in_legacy_test_wrapper) {					\
291 		TT_FAIL(("Legacy test %s not wrapped properly", x));	\
292 		return;							\
293 	}								\
294 	} while (0)
295 #define cleanup_test() setup_test("cleanup")
296 
297 static void
298 test_simpleread(void)
299 {
300 	struct event ev;
301 
302 	/* Very simple read test */
303 	setup_test("Simple read: ");
304 
305 	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
306 		tt_fail_perror("write");
307 	}
308 
309 	shutdown(pair[0], EVUTIL_SHUT_WR);
310 
311 	event_set(&ev, pair[1], EV_READ, simple_read_cb, &ev);
312 	if (event_add(&ev, NULL) == -1)
313 		exit(1);
314 	event_dispatch();
315 
316 	cleanup_test();
317 }
318 
319 static void
320 test_simplewrite(void)
321 {
322 	struct event ev;
323 
324 	/* Very simple write test */
325 	setup_test("Simple write: ");
326 
327 	event_set(&ev, pair[0], EV_WRITE, simple_write_cb, &ev);
328 	if (event_add(&ev, NULL) == -1)
329 		exit(1);
330 	event_dispatch();
331 
332 	cleanup_test();
333 }
334 
335 static void
336 simpleread_multiple_cb(evutil_socket_t fd, short event, void *arg)
337 {
338 	if (++called == 2)
339 		test_ok = 1;
340 }
341 
342 static void
343 test_simpleread_multiple(void)
344 {
345 	struct event one, two;
346 
347 	/* Very simple read test */
348 	setup_test("Simple read to multiple events: ");
349 
350 	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
351 		tt_fail_perror("write");
352 	}
353 
354 	shutdown(pair[0], EVUTIL_SHUT_WR);
355 
356 	event_set(&one, pair[1], EV_READ, simpleread_multiple_cb, NULL);
357 	if (event_add(&one, NULL) == -1)
358 		exit(1);
359 	event_set(&two, pair[1], EV_READ, simpleread_multiple_cb, NULL);
360 	if (event_add(&two, NULL) == -1)
361 		exit(1);
362 	event_dispatch();
363 
364 	cleanup_test();
365 }
366 
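/* Bookkeeping for test_simpleclose: record_event_cb() uses these to detect
 * read/write events that fire before the timer callback has actually closed
 * the descriptors. */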
367 static int have_closed = 0;
368 static int premature_event = 0;
369 static void
370 simpleclose_close_fd_cb(evutil_socket_t s, short what, void *ptr)
371 {
372 	evutil_socket_t **fds = ptr;
373 	TT_BLATHER(("Closing"));
374 	evutil_closesocket(*fds[0]);
375 	evutil_closesocket(*fds[1]);
376 	*fds[0] = -1;
377 	*fds[1] = -1;
378 	have_closed = 1;
379 }
380 
381 static void
382 record_event_cb(evutil_socket_t s, short what, void *ptr)
383 {
384 	short *whatp = ptr;
385 	if (!have_closed)
386 		premature_event = 1;
387 	*whatp = what;
388 	TT_BLATHER(("Recorded %d on socket %d", (int)what, (int)s));
389 }
390 
391 static void
392 test_simpleclose(void *ptr)
393 {
394 	/* Test that a close of FD is detected as a read and as a write. */
395 	struct event_base *base = event_base_new();
396 	evutil_socket_t pair1[2]={-1,-1}, pair2[2] = {-1, -1};
397 	evutil_socket_t *to_close[2];
398 	struct event *rev=NULL, *wev=NULL, *closeev=NULL;
399 	struct timeval tv;
400 	short got_read_on_close = 0, got_write_on_close = 0;
401 	char buf[1024];
402 	memset(buf, 99, sizeof(buf));
403 #ifdef _WIN32
404 #define LOCAL_SOCKETPAIR_AF AF_INET
405 #else
406 #define LOCAL_SOCKETPAIR_AF AF_UNIX
407 #endif
408 	if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair1)<0)
409 		TT_DIE(("socketpair: %s", strerror(errno)));
410 	if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair2)<0)
411 		TT_DIE(("socketpair: %s", strerror(errno)));
412 	if (evutil_make_socket_nonblocking(pair1[1]) < 0)
413 		TT_DIE(("make_socket_nonblocking"));
414 	if (evutil_make_socket_nonblocking(pair2[1]) < 0)
415 		TT_DIE(("make_socket_nonblocking"));
416 
417 	/** Stuff pair2[1] full of data, until write fails */
418 	while (1) {
419 		int r = write(pair2[1], buf, sizeof(buf));
420 		if (r<0) {
421 			int err = evutil_socket_geterror(pair2[1]);
422 			if (! EVUTIL_ERR_RW_RETRIABLE(err))
423 				TT_DIE(("write failed strangely: %s",
424 					evutil_socket_error_to_string(err)));
425 			break;
426 		}
427 	}
428 	to_close[0] = &pair1[0];
429 	to_close[1] = &pair2[0];
430 
431 	closeev = event_new(base, -1, EV_TIMEOUT, simpleclose_close_fd_cb,
432 	    to_close);
433 	rev = event_new(base, pair1[1], EV_READ, record_event_cb,
434 	    &got_read_on_close);
435 	TT_BLATHER(("Waiting for read on %d", (int)pair1[1]));
436 	wev = event_new(base, pair2[1], EV_WRITE, record_event_cb,
437 	    &got_write_on_close);
438 	TT_BLATHER(("Waiting for write on %d", (int)pair2[1]));
439 	tv.tv_sec = 0;
440 	tv.tv_usec = 100*1000; /* Close pair1[0] after a little while, and make
441 			       * sure we get a read event. */
442 	event_add(closeev, &tv);
443 	event_add(rev, NULL);
444 	event_add(wev, NULL);
445 	/* Don't let the test go on too long. */
446 	tv.tv_sec = 0;
447 	tv.tv_usec = 200*1000;
448 	event_base_loopexit(base, &tv);
449 	event_base_loop(base, 0);
450 
451 	tt_int_op(got_read_on_close, ==, EV_READ);
452 	tt_int_op(got_write_on_close, ==, EV_WRITE);
453 	tt_int_op(premature_event, ==, 0);
454 
455 end:
456 	if (pair1[0] >= 0)
457 		evutil_closesocket(pair1[0]);
458 	if (pair1[1] >= 0)
459 		evutil_closesocket(pair1[1]);
460 	if (pair2[0] >= 0)
461 		evutil_closesocket(pair2[0]);
462 	if (pair2[1] >= 0)
463 		evutil_closesocket(pair2[1]);
464 	if (rev)
465 		event_free(rev);
466 	if (wev)
467 		event_free(wev);
468 	if (closeev)
469 		event_free(closeev);
470 	if (base)
471 		event_base_free(base);
472 }
473 
474 
475 static void
476 test_multiple(void)
477 {
478 	struct event ev, ev2;
479 	int i;
480 
481 	/* Multiple read and write test */
482 	setup_test("Multiple read/write: ");
483 	memset(rbuf, 0, sizeof(rbuf));
484 	for (i = 0; i < (int)sizeof(wbuf); i++)
485 		wbuf[i] = i;
486 
487 	roff = woff = 0;
488 	usepersist = 0;
489 
490 	event_set(&ev, pair[0], EV_WRITE, multiple_write_cb, &ev);
491 	if (event_add(&ev, NULL) == -1)
492 		exit(1);
493 	event_set(&ev2, pair[1], EV_READ, multiple_read_cb, &ev2);
494 	if (event_add(&ev2, NULL) == -1)
495 		exit(1);
496 	event_dispatch();
497 
498 	if (roff == woff)
499 		test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0;
500 
501 	cleanup_test();
502 }
503 
504 static void
505 test_persistent(void)
506 {
507 	struct event ev, ev2;
508 	int i;
509 
510 	/* Multiple read and write test with persist */
511 	setup_test("Persist read/write: ");
512 	memset(rbuf, 0, sizeof(rbuf));
513 	for (i = 0; i < (int)sizeof(wbuf); i++)
514 		wbuf[i] = i;
515 
516 	roff = woff = 0;
517 	usepersist = 1;
518 
519 	event_set(&ev, pair[0], EV_WRITE|EV_PERSIST, multiple_write_cb, &ev);
520 	if (event_add(&ev, NULL) == -1)
521 		exit(1);
522 	event_set(&ev2, pair[1], EV_READ|EV_PERSIST, multiple_read_cb, &ev2);
523 	if (event_add(&ev2, NULL) == -1)
524 		exit(1);
525 	event_dispatch();
526 
527 	if (roff == woff)
528 		test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0;
529 
530 	cleanup_test();
531 }
532 
533 static void
534 test_combined(void)
535 {
536 	struct both r1, r2, w1, w2;
537 
538 	setup_test("Combined read/write: ");
539 	memset(&r1, 0, sizeof(r1));
540 	memset(&r2, 0, sizeof(r2));
541 	memset(&w1, 0, sizeof(w1));
542 	memset(&w2, 0, sizeof(w2));
543 
544 	w1.nread = 4096;
545 	w2.nread = 8192;
546 
547 	event_set(&r1.ev, pair[0], EV_READ, combined_read_cb, &r1);
548 	event_set(&w1.ev, pair[0], EV_WRITE, combined_write_cb, &w1);
549 	event_set(&r2.ev, pair[1], EV_READ, combined_read_cb, &r2);
550 	event_set(&w2.ev, pair[1], EV_WRITE, combined_write_cb, &w2);
551 	tt_assert(event_add(&r1.ev, NULL) != -1);
552 	tt_assert(!event_add(&w1.ev, NULL));
553 	tt_assert(!event_add(&r2.ev, NULL));
554 	tt_assert(!event_add(&w2.ev, NULL));
555 	event_dispatch();
556 
557 	if (r1.nread == 8192 && r2.nread == 4096)
558 		test_ok = 1;
559 
560 end:
561 	cleanup_test();
562 }
563 
564 static void
565 test_simpletimeout(void)
566 {
567 	struct timeval tv;
568 	struct event ev;
569 
570 	setup_test("Simple timeout: ");
571 
572 	tv.tv_usec = 200*1000;
573 	tv.tv_sec = 0;
574 	evutil_timerclear(&tcalled);
575 	evtimer_set(&ev, timeout_cb, NULL);
576 	evtimer_add(&ev, &tv);
577 
578 	evutil_gettimeofday(&tset, NULL);
579 	event_dispatch();
580 	test_timeval_diff_eq(&tset, &tcalled, 200);
581 
582 	test_ok = 1;
583 end:
584 	cleanup_test();
585 }
586 
587 static void
588 periodic_timeout_cb(evutil_socket_t fd, short event, void *arg)
589 {
590 	int *count = arg;
591 
592 	(*count)++;
593 	if (*count == 6) {
594 		/* call loopexit only once - on slow machines(?), it is
595 		 * apparently possible for this to get called twice. */
596 		test_ok = 1;
597 		event_base_loopexit(global_base, NULL);
598 	}
599 }
600 
601 static void
602 test_persistent_timeout(void)
603 {
604 	struct timeval tv;
605 	struct event ev;
606 	int count = 0;
607 
608 	evutil_timerclear(&tv);
609 	tv.tv_usec = 10000;
610 
611 	event_assign(&ev, global_base, -1, EV_TIMEOUT|EV_PERSIST,
612 	    periodic_timeout_cb, &count);
613 	event_add(&ev, &tv);
614 
615 	event_dispatch();
616 
617 	event_del(&ev);
618 }
619 
620 static void
621 test_persistent_timeout_jump(void *ptr)
622 {
623 	struct basic_test_data *data = ptr;
624 	struct event ev;
625 	int count = 0;
626 	struct timeval msec100 = { 0, 100 * 1000 };
627 	struct timeval msec50 = { 0, 50 * 1000 };
628 	struct timeval msec300 = { 0, 300 * 1000 };
629 
630 	event_assign(&ev, data->base, -1, EV_PERSIST, periodic_timeout_cb, &count);
631 	event_add(&ev, &msec100);
632 	/* Wait for a bit */
633 	evutil_usleep_(&msec300);
634 	event_base_loopexit(data->base, &msec50);
635 	event_base_dispatch(data->base);
636 	tt_int_op(count, ==, 1);
637 
638 end:
639 	event_del(&ev);
640 }
641 
642 struct persist_active_timeout_called {
643 	int n;
644 	short events[16];
645 	struct timeval tvs[16];
646 };
647 
648 static void
649 activate_cb(evutil_socket_t fd, short event, void *arg)
650 {
651 	struct event *ev = arg;
652 	event_active(ev, EV_READ, 1);
653 }
654 
655 static void
656 persist_active_timeout_cb(evutil_socket_t fd, short event, void *arg)
657 {
658 	struct persist_active_timeout_called *c = arg;
659 	if (c->n < 15) {
660 		c->events[c->n] = event;
661 		evutil_gettimeofday(&c->tvs[c->n], NULL);
662 		++c->n;
663 	}
664 }
665 
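/* A persistent 200 ms timer is manually activated with EV_READ after roughly
 * 100 ms.  The assertions below verify that the manual activation is
 * delivered first and that the persistent timeout is rescheduled relative to
 * that activation, firing again at about 300 ms and 500 ms. */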
666 static void
667 test_persistent_active_timeout(void *ptr)
668 {
669 	struct timeval tv, tv2, tv_exit, start;
670 	struct event ev;
671 	struct persist_active_timeout_called res;
672 
673 	struct basic_test_data *data = ptr;
674 	struct event_base *base = data->base;
675 
676 	memset(&res, 0, sizeof(res));
677 
678 	tv.tv_sec = 0;
679 	tv.tv_usec = 200 * 1000;
680 	event_assign(&ev, base, -1, EV_TIMEOUT|EV_PERSIST,
681 	    persist_active_timeout_cb, &res);
682 	event_add(&ev, &tv);
683 
684 	tv2.tv_sec = 0;
685 	tv2.tv_usec = 100 * 1000;
686 	event_base_once(base, -1, EV_TIMEOUT, activate_cb, &ev, &tv2);
687 
688 	tv_exit.tv_sec = 0;
689 	tv_exit.tv_usec = 600 * 1000;
690 	event_base_loopexit(base, &tv_exit);
691 
692 	event_base_assert_ok_(base);
693 	evutil_gettimeofday(&start, NULL);
694 
695 	event_base_dispatch(base);
696 	event_base_assert_ok_(base);
697 
698 	tt_int_op(res.n, ==, 3);
699 	tt_int_op(res.events[0], ==, EV_READ);
700 	tt_int_op(res.events[1], ==, EV_TIMEOUT);
701 	tt_int_op(res.events[2], ==, EV_TIMEOUT);
702 	test_timeval_diff_eq(&start, &res.tvs[0], 100);
703 	test_timeval_diff_eq(&start, &res.tvs[1], 300);
704 	test_timeval_diff_eq(&start, &res.tvs[2], 500);
705 end:
706 	event_del(&ev);
707 }
708 
709 struct common_timeout_info {
710 	struct event ev;
711 	struct timeval called_at;
712 	int which;
713 	int count;
714 };
715 
716 static void
717 common_timeout_cb(evutil_socket_t fd, short event, void *arg)
718 {
719 	struct common_timeout_info *ti = arg;
720 	++ti->count;
721 	evutil_gettimeofday(&ti->called_at, NULL);
722 	if (ti->count >= 4)
723 		event_del(&ti->ev);
724 }
725 
726 static void
727 test_common_timeout(void *ptr)
728 {
729 	struct basic_test_data *data = ptr;
730 
731 	struct event_base *base = data->base;
732 	int i;
733 	struct common_timeout_info info[100];
734 
735 	struct timeval start;
736 	struct timeval tmp_100_ms = { 0, 100*1000 };
737 	struct timeval tmp_200_ms = { 0, 200*1000 };
738 	struct timeval tmp_5_sec = { 5, 0 };
739 	struct timeval tmp_5M_usec = { 0, 5*1000*1000 };
740 
741 	const struct timeval *ms_100, *ms_200, *sec_5;
742 
743 	ms_100 = event_base_init_common_timeout(base, &tmp_100_ms);
744 	ms_200 = event_base_init_common_timeout(base, &tmp_200_ms);
745 	sec_5 = event_base_init_common_timeout(base, &tmp_5_sec);
746 	tt_assert(ms_100);
747 	tt_assert(ms_200);
748 	tt_assert(sec_5);
749 	tt_ptr_op(event_base_init_common_timeout(base, &tmp_200_ms),
750 	    ==, ms_200);
751 	tt_ptr_op(event_base_init_common_timeout(base, ms_200), ==, ms_200);
752 	tt_ptr_op(event_base_init_common_timeout(base, &tmp_5M_usec), ==, sec_5);
753 	tt_int_op(ms_100->tv_sec, ==, 0);
754 	tt_int_op(ms_200->tv_sec, ==, 0);
755 	tt_int_op(sec_5->tv_sec, ==, 5);
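	/* Glass-box check: a common timeout is flagged by magic bits in the
	 * upper part of tv_usec, with the common-timeout index encoded beside
	 * them, while the low bits still hold the microsecond value. */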
756 	tt_int_op(ms_100->tv_usec, ==, 100000|0x50000000);
757 	tt_int_op(ms_200->tv_usec, ==, 200000|0x50100000);
758 	tt_int_op(sec_5->tv_usec, ==, 0|0x50200000);
759 
760 	memset(info, 0, sizeof(info));
761 
762 	for (i=0; i<100; ++i) {
763 		info[i].which = i;
764 		event_assign(&info[i].ev, base, -1, EV_TIMEOUT|EV_PERSIST,
765 		    common_timeout_cb, &info[i]);
766 		if (i % 2) {
767 			if ((i%20)==1) {
768 				/* Glass-box test: Make sure we survive the
769 				 * transition to non-common timeouts. It's
770 				 * a little tricky. */
771 				event_add(&info[i].ev, ms_200);
772 				event_add(&info[i].ev, &tmp_100_ms);
773 			} else if ((i%20)==3) {
774 				/* Check heap-to-common too. */
775 				event_add(&info[i].ev, &tmp_200_ms);
776 				event_add(&info[i].ev, ms_100);
777 			} else if ((i%20)==5) {
778 				/* Also check common-to-common. */
779 				event_add(&info[i].ev, ms_200);
780 				event_add(&info[i].ev, ms_100);
781 			} else {
782 				event_add(&info[i].ev, ms_100);
783 			}
784 		} else {
785 			event_add(&info[i].ev, ms_200);
786 		}
787 	}
788 
789 	event_base_assert_ok_(base);
790 	evutil_gettimeofday(&start, NULL);
791 	event_base_dispatch(base);
792 
793 	event_base_assert_ok_(base);
794 
795 	for (i=0; i<10; ++i) {
796 		tt_int_op(info[i].count, ==, 4);
797 		if (i % 2) {
798 			test_timeval_diff_eq(&start, &info[i].called_at, 400);
799 		} else {
800 			test_timeval_diff_eq(&start, &info[i].called_at, 800);
801 		}
802 	}
803 
804 	/* Make sure we can free the base with some events in. */
805 	for (i=0; i<100; ++i) {
806 		if (i % 2) {
807 			event_add(&info[i].ev, ms_100);
808 		} else {
809 			event_add(&info[i].ev, ms_200);
810 		}
811 	}
812 
813 end:
814 	event_base_free(data->base); /* need to do this here before info is
815 				      * out-of-scope */
816 	data->base = NULL;
817 }
818 
819 #ifndef _WIN32
820 
821 #define current_base event_global_current_base_
822 extern struct event_base *current_base;
823 
824 static void
825 fork_signal_cb(evutil_socket_t fd, short events, void *arg)
826 {
827 	event_del(arg);
828 }
829 
830 int child_pair[2] = { -1, -1 };
831 static void
832 simple_child_read_cb(evutil_socket_t fd, short event, void *arg)
833 {
834 	char buf[256];
835 	int len;
836 
837 	len = read(fd, buf, sizeof(buf));
838 	if (write(child_pair[0], "", 1) < 0)
839 		tt_fail_perror("write");
840 
841 	if (len) {
842 		if (!called) {
843 			if (event_add(arg, NULL) == -1)
844 				exit(1);
845 		}
846 	} else if (called == 1)
847 		test_ok = 1;
848 
849 	called++;
850 }
851 static void
852 test_fork(void)
853 {
854 	char c;
855 	int status;
856 	struct event ev, sig_ev, usr_ev, existing_ev;
857 	pid_t pid;
858 	int wait_flags = 0;
859 
860 #ifdef EVENT__HAVE_WAITPID_WITH_WNOWAIT
861 	wait_flags |= WNOWAIT;
862 #endif
863 
864 	setup_test("After fork: ");
865 
866 	{
867 		if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, child_pair) == -1) {
868 			fprintf(stderr, "%s: socketpair\n", __func__);
869 			exit(1);
870 		}
871 
872 		if (evutil_make_socket_nonblocking(child_pair[0]) == -1) {
873 			fprintf(stderr, "fcntl(O_NONBLOCK)\n");
874 			exit(1);
875 		}
876 	}
877 
878 	tt_assert(current_base);
879 	evthread_make_base_notifiable(current_base);
880 
881 	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
882 		tt_fail_perror("write");
883 	}
884 
885 	event_set(&ev, pair[1], EV_READ, simple_child_read_cb, &ev);
886 	if (event_add(&ev, NULL) == -1)
887 		exit(1);
888 
889 	evsignal_set(&sig_ev, SIGCHLD, fork_signal_cb, &sig_ev);
890 	evsignal_add(&sig_ev, NULL);
891 
892 	evsignal_set(&existing_ev, SIGUSR2, fork_signal_cb, &existing_ev);
893 	evsignal_add(&existing_ev, NULL);
894 
895 	event_base_assert_ok_(current_base);
896 	TT_BLATHER(("Before fork"));
897 	if ((pid = regress_fork()) == 0) {
898 		/* in the child */
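		/* The child must call event_reinit() so the base rebuilds its
		 * backend state (e.g. notification/signal pipes) rather than
		 * sharing kernel objects with the parent. */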
899 		TT_BLATHER(("In child, before reinit"));
900 		event_base_assert_ok_(current_base);
901 		if (event_reinit(current_base) == -1) {
902 			fprintf(stdout, "FAILED (reinit)\n");
903 			exit(1);
904 		}
905 		TT_BLATHER(("After reinit"));
906 		event_base_assert_ok_(current_base);
907 		TT_BLATHER(("After assert-ok"));
908 
909 		evsignal_del(&sig_ev);
910 
911 		evsignal_set(&usr_ev, SIGUSR1, fork_signal_cb, &usr_ev);
912 		evsignal_add(&usr_ev, NULL);
913 		raise(SIGUSR1);
914 		raise(SIGUSR2);
915 
916 		called = 0;
917 
918 		event_dispatch();
919 
920 		event_base_free(current_base);
921 
922 		/* we do not send an EOF; simple_child_read_cb requires an
923 		 * EOF to set test_ok.  we just verify that the callback
924 		 * was called. */
925 		exit(test_ok != 0 || called != 2 ? -2 : 76);
926 	}
927 
928 	/** wait until the child has read the first message */
929 	if (read(child_pair[1], &c, 1) < 0) {
930 		tt_fail_perror("read");
931 	}
932 	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
933 		tt_fail_perror("write");
934 	}
935 
936 	TT_BLATHER(("Before waitpid"));
937 	if (waitpid(pid, &status, wait_flags) == -1) {
938 		perror("waitpid");
939 		exit(1);
940 	}
941 	TT_BLATHER(("After waitpid"));
942 
943 	if (WEXITSTATUS(status) != 76) {
944 		fprintf(stdout, "FAILED (exit): %d\n", WEXITSTATUS(status));
945 		exit(1);
946 	}
947 
948 	/* test that the current event loop still works */
949 	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
950 		fprintf(stderr, "%s: write\n", __func__);
951 	}
952 
953 	shutdown(pair[0], EVUTIL_SHUT_WR);
954 
955 	evsignal_set(&usr_ev, SIGUSR1, fork_signal_cb, &usr_ev);
956 	evsignal_add(&usr_ev, NULL);
957 	raise(SIGUSR1);
958 	raise(SIGUSR2);
959 
960 	event_dispatch();
961 
962 	evsignal_del(&sig_ev);
963 	tt_int_op(test_ok, ==, 1);
964 
965 	end:
966 	cleanup_test();
967 	if (child_pair[0] != -1)
968 		evutil_closesocket(child_pair[0]);
969 	if (child_pair[1] != -1)
970 		evutil_closesocket(child_pair[1]);
971 }
972 
973 #ifdef EVENT__HAVE_PTHREADS
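/* Runs the dispatch loop in a second thread so the main thread can call
 * event_del() while del_wait_cb() is still sleeping inside the loop; the
 * elapsed-time checks confirm that event_del() blocked until the callback
 * returned. */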
974 static void* del_wait_thread(void *arg)
975 {
976 	struct timeval tv_start, tv_end;
977 
978 	evutil_gettimeofday(&tv_start, NULL);
979 	event_dispatch();
980 	evutil_gettimeofday(&tv_end, NULL);
981 
982 	test_timeval_diff_eq(&tv_start, &tv_end, 300);
983 
984 	end:
985 	return NULL;
986 }
987 
988 static void
989 del_wait_cb(evutil_socket_t fd, short event, void *arg)
990 {
991 	struct timeval delay = { 0, 300*1000 };
992 	TT_BLATHER(("Sleeping"));
993 	evutil_usleep_(&delay);
994 	test_ok = 1;
995 }
996 
997 static void
998 test_del_wait(void)
999 {
1000 	struct event ev;
1001 	pthread_t thread;
1002 
1003 	setup_test("event_del will wait: ");
1004 
1005 	event_set(&ev, pair[1], EV_READ, del_wait_cb, &ev);
1006 	event_add(&ev, NULL);
1007 
1008 	pthread_create(&thread, NULL, del_wait_thread, NULL);
1009 
1010 	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
1011 		tt_fail_perror("write");
1012 	}
1013 
1014 	{
1015 		struct timeval delay = { 0, 30*1000 };
1016 		evutil_usleep_(&delay);
1017 	}
1018 
1019 	{
1020 		struct timeval tv_start, tv_end;
1021 		evutil_gettimeofday(&tv_start, NULL);
1022 		event_del(&ev);
1023 		evutil_gettimeofday(&tv_end, NULL);
1024 		test_timeval_diff_eq(&tv_start, &tv_end, 270);
1025 	}
1026 
1027 	pthread_join(thread, NULL);
1028 
1029 	end:
1030 	;
1031 }
1032 #endif
1033 
1034 static void
1035 signal_cb_sa(int sig)
1036 {
1037 	test_ok = 2;
1038 }
1039 
1040 static void
1041 signal_cb(evutil_socket_t fd, short event, void *arg)
1042 {
1043 	struct event *ev = arg;
1044 
1045 	evsignal_del(ev);
1046 	test_ok = 1;
1047 }
1048 
1049 static void
1050 test_simplesignal_impl(int find_reorder)
1051 {
1052 	struct event ev;
1053 	struct itimerval itv;
1054 
1055 	evsignal_set(&ev, SIGALRM, signal_cb, &ev);
1056 	evsignal_add(&ev, NULL);
1057 	/* find bugs in which operations are re-ordered */
1058 	if (find_reorder) {
1059 		evsignal_del(&ev);
1060 		evsignal_add(&ev, NULL);
1061 	}
1062 
1063 	memset(&itv, 0, sizeof(itv));
1064 	itv.it_value.tv_sec = 0;
1065 	itv.it_value.tv_usec = 100000;
1066 	if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
1067 		goto skip_simplesignal;
1068 
1069 	event_dispatch();
1070  skip_simplesignal:
1071 	if (evsignal_del(&ev) == -1)
1072 		test_ok = 0;
1073 
1074 	cleanup_test();
1075 }
1076 
1077 static void
1078 test_simplestsignal(void)
1079 {
1080 	setup_test("Simplest one signal: ");
1081 	test_simplesignal_impl(0);
1082 }
1083 
1084 static void
1085 test_simplesignal(void)
1086 {
1087 	setup_test("Simple signal: ");
1088 	test_simplesignal_impl(1);
1089 }
1090 
1091 static void
1092 test_multiplesignal(void)
1093 {
1094 	struct event ev_one, ev_two;
1095 	struct itimerval itv;
1096 
1097 	setup_test("Multiple signal: ");
1098 
1099 	evsignal_set(&ev_one, SIGALRM, signal_cb, &ev_one);
1100 	evsignal_add(&ev_one, NULL);
1101 
1102 	evsignal_set(&ev_two, SIGALRM, signal_cb, &ev_two);
1103 	evsignal_add(&ev_two, NULL);
1104 
1105 	memset(&itv, 0, sizeof(itv));
1106 	itv.it_value.tv_sec = 0;
1107 	itv.it_value.tv_usec = 100000;
1108 	if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
1109 		goto skip_simplesignal;
1110 
1111 	event_dispatch();
1112 
1113  skip_simplesignal:
1114 	if (evsignal_del(&ev_one) == -1)
1115 		test_ok = 0;
1116 	if (evsignal_del(&ev_two) == -1)
1117 		test_ok = 0;
1118 
1119 	cleanup_test();
1120 }
1121 
1122 static void
1123 test_immediatesignal(void)
1124 {
1125 	struct event ev;
1126 
1127 	test_ok = 0;
1128 	evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
1129 	evsignal_add(&ev, NULL);
1130 	raise(SIGUSR1);
1131 	event_loop(EVLOOP_NONBLOCK);
1132 	evsignal_del(&ev);
1133 	cleanup_test();
1134 }
1135 
1136 static void
1137 test_signal_dealloc(void)
1138 {
1139 	/* make sure that evsignal_event is event_del'ed and pipe closed */
1140 	struct event ev;
1141 	struct event_base *base = event_init();
1142 	evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
1143 	evsignal_add(&ev, NULL);
1144 	evsignal_del(&ev);
1145 	event_base_free(base);
1146 	/* If we got here without asserting, we're fine. */
1147 	test_ok = 1;
1148 	cleanup_test();
1149 }
1150 
1151 static void
1152 test_signal_pipeloss(void)
1153 {
1154 	/* make sure that the base1 pipe is closed correctly. */
1155 	struct event_base *base1, *base2;
1156 	int pipe1;
1157 	test_ok = 0;
1158 	base1 = event_init();
1159 	pipe1 = base1->sig.ev_signal_pair[0];
1160 	base2 = event_init();
1161 	event_base_free(base2);
1162 	event_base_free(base1);
1163 	if (close(pipe1) != -1 || errno!=EBADF) {
1164 		/* fd must be closed, so second close gives -1, EBADF */
1165 		printf("signal pipe not closed. ");
1166 		test_ok = 0;
1167 	} else {
1168 		test_ok = 1;
1169 	}
1170 	cleanup_test();
1171 }
1172 
1173 /*
1174  * make two bases to catch signals, use both of them.  this only works
1175  * for event mechanisms that use our signal pipe trick.	 kqueue handles
1176  * signals internally, and all interested kqueues get all the signals.
1177  */
1178 static void
1179 test_signal_switchbase(void)
1180 {
1181 	struct event ev1, ev2;
1182 	struct event_base *base1, *base2;
1183 	int is_kqueue;
1184 	test_ok = 0;
1185 	base1 = event_init();
1186 	base2 = event_init();
1187 	is_kqueue = !strcmp(event_get_method(),"kqueue");
1188 	evsignal_set(&ev1, SIGUSR1, signal_cb, &ev1);
1189 	evsignal_set(&ev2, SIGUSR1, signal_cb, &ev2);
1190 	if (event_base_set(base1, &ev1) ||
1191 	    event_base_set(base2, &ev2) ||
1192 	    event_add(&ev1, NULL) ||
1193 	    event_add(&ev2, NULL)) {
1194 		fprintf(stderr, "%s: cannot set base, add\n", __func__);
1195 		exit(1);
1196 	}
1197 
1198 	tt_ptr_op(event_get_base(&ev1), ==, base1);
1199 	tt_ptr_op(event_get_base(&ev2), ==, base2);
1200 
1201 	test_ok = 0;
1202 	/* can handle signal before loop is called */
1203 	raise(SIGUSR1);
1204 	event_base_loop(base2, EVLOOP_NONBLOCK);
1205 	if (is_kqueue) {
1206 		if (!test_ok)
1207 			goto end;
1208 		test_ok = 0;
1209 	}
1210 	event_base_loop(base1, EVLOOP_NONBLOCK);
1211 	if (test_ok && !is_kqueue) {
1212 		test_ok = 0;
1213 
1214 		/* set base1 to handle signals */
1215 		event_base_loop(base1, EVLOOP_NONBLOCK);
1216 		raise(SIGUSR1);
1217 		event_base_loop(base1, EVLOOP_NONBLOCK);
1218 		event_base_loop(base2, EVLOOP_NONBLOCK);
1219 	}
1220 end:
1221 	event_base_free(base1);
1222 	event_base_free(base2);
1223 	cleanup_test();
1224 }
1225 
1226 /*
1227  * assert that a signal event removed from the event queue really is
1228  * removed - with no possibility of its parent handler being fired.
1229  */
1230 static void
1231 test_signal_assert(void)
1232 {
1233 	struct event ev;
1234 	struct event_base *base = event_init();
1235 	test_ok = 0;
1236 	/* use SIGCONT so we don't kill ourselves when we signal to nowhere */
1237 	evsignal_set(&ev, SIGCONT, signal_cb, &ev);
1238 	evsignal_add(&ev, NULL);
1239 	/*
1240 	 * if evsignal_del() fails to reset the handler, its current handler
1241 	 * will still point to evsig_handler().
1242 	 */
1243 	evsignal_del(&ev);
1244 
1245 	raise(SIGCONT);
1246 #if 0
1247 	/* only way to verify we were in evsig_handler() */
1248 	/* XXXX Now there's no longer a good way. */
1249 	if (base->sig.evsig_caught)
1250 		test_ok = 0;
1251 	else
1252 		test_ok = 1;
1253 #else
1254 	test_ok = 1;
1255 #endif
1256 
1257 	event_base_free(base);
1258 	cleanup_test();
1259 	return;
1260 }
1261 
1262 /*
1263  * assert that we restore our previous signal handler properly.
1264  */
1265 static void
1266 test_signal_restore(void)
1267 {
1268 	struct event ev;
1269 	struct event_base *base = event_init();
1270 #ifdef EVENT__HAVE_SIGACTION
1271 	struct sigaction sa;
1272 #endif
1273 
1274 	test_ok = 0;
1275 #ifdef EVENT__HAVE_SIGACTION
1276 	sa.sa_handler = signal_cb_sa;
1277 	sa.sa_flags = 0x0;
1278 	sigemptyset(&sa.sa_mask);
1279 	if (sigaction(SIGUSR1, &sa, NULL) == -1)
1280 		goto out;
1281 #else
1282 	if (signal(SIGUSR1, signal_cb_sa) == SIG_ERR)
1283 		goto out;
1284 #endif
1285 	evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
1286 	evsignal_add(&ev, NULL);
1287 	evsignal_del(&ev);
1288 
1289 	raise(SIGUSR1);
1290 	/* 1 == signal_cb, 2 == signal_cb_sa, we want our previous handler */
1291 	if (test_ok != 2)
1292 		test_ok = 0;
1293 out:
1294 	event_base_free(base);
1295 	cleanup_test();
1296 	return;
1297 }
1298 
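/* Helpers for test_signal_while_processing: signal_cb_swp() re-raises
 * SIGUSR1 from inside the signal callback until it has run five times, and
 * timeout_cb_swp() arms a long 5-second fallback timer whose firing would
 * mean the re-raised signals stopped being delivered (a failure). */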
1299 static void
1300 signal_cb_swp(int sig, short event, void *arg)
1301 {
1302 	called++;
1303 	if (called < 5)
1304 		raise(sig);
1305 	else
1306 		event_loopexit(NULL);
1307 }
1308 static void
1309 timeout_cb_swp(evutil_socket_t fd, short event, void *arg)
1310 {
1311 	if (called == -1) {
1312 		struct timeval tv = {5, 0};
1313 
1314 		called = 0;
1315 		evtimer_add((struct event *)arg, &tv);
1316 		raise(SIGUSR1);
1317 		return;
1318 	}
1319 	test_ok = 0;
1320 	event_loopexit(NULL);
1321 }
1322 
1323 static void
1324 test_signal_while_processing(void)
1325 {
1326 	struct event_base *base = event_init();
1327 	struct event ev, ev_timer;
1328 	struct timeval tv = {0, 0};
1329 
1330 	setup_test("Receiving a signal while processing other signal: ");
1331 
1332 	called = -1;
1333 	test_ok = 1;
1334 	signal_set(&ev, SIGUSR1, signal_cb_swp, NULL);
1335 	signal_add(&ev, NULL);
1336 	evtimer_set(&ev_timer, timeout_cb_swp, &ev_timer);
1337 	evtimer_add(&ev_timer, &tv);
1338 	event_dispatch();
1339 
1340 	event_base_free(base);
1341 	cleanup_test();
1342 	return;
1343 }
1344 #endif
1345 
1346 static void
1347 test_free_active_base(void *ptr)
1348 {
1349 	struct basic_test_data *data = ptr;
1350 	struct event_base *base1;
1351 	struct event ev1;
1352 
1353 	base1 = event_init();
1354 	if (base1) {
1355 		event_assign(&ev1, base1, data->pair[1], EV_READ,
1356 			     dummy_read_cb, NULL);
1357 		event_add(&ev1, NULL);
1358 		event_base_free(base1);	 /* should not crash */
1359 	} else {
1360 		tt_fail_msg("failed to create event_base for test");
1361 	}
1362 
1363 	base1 = event_init();
1364 	tt_assert(base1);
1365 	event_assign(&ev1, base1, 0, 0, dummy_read_cb, NULL);
1366 	event_active(&ev1, EV_READ, 1);
1367 	event_base_free(base1);
1368 end:
1369 	;
1370 }
1371 
1372 static void
1373 test_manipulate_active_events(void *ptr)
1374 {
1375 	struct basic_test_data *data = ptr;
1376 	struct event_base *base = data->base;
1377 	struct event ev1;
1378 
1379 	event_assign(&ev1, base, -1, EV_TIMEOUT, dummy_read_cb, NULL);
1380 
1381 	/* Make sure an active event is pending. */
1382 	event_active(&ev1, EV_READ, 1);
1383 	tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL),
1384 	    ==, EV_READ);
1385 
1386 	/* Make sure that activating an event twice works. */
1387 	event_active(&ev1, EV_WRITE, 1);
1388 	tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL),
1389 	    ==, EV_READ|EV_WRITE);
1390 
1391 end:
1392 	event_del(&ev1);
1393 }
1394 
1395 static void
1396 event_selfarg_cb(evutil_socket_t fd, short event, void *arg)
1397 {
1398 	struct event *ev = arg;
1399 	struct event_base *base = event_get_base(ev);
1400 	event_base_assert_ok_(base);
1401 	event_base_loopexit(base, NULL);
1402 	tt_want(ev == event_base_get_running_event(base));
1403 }
1404 
1405 static void
1406 test_event_new_selfarg(void *ptr)
1407 {
1408 	struct basic_test_data *data = ptr;
1409 	struct event_base *base = data->base;
1410 	struct event *ev = event_new(base, -1, EV_READ, event_selfarg_cb,
1411                                      event_self_cbarg());
1412 
1413 	event_active(ev, EV_READ, 1);
1414 	event_base_dispatch(base);
1415 
1416 	event_free(ev);
1417 }
1418 
1419 static void
1420 test_event_assign_selfarg(void *ptr)
1421 {
1422 	struct basic_test_data *data = ptr;
1423 	struct event_base *base = data->base;
1424 	struct event ev;
1425 
1426 	event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
1427                      event_self_cbarg());
1428 	event_active(&ev, EV_READ, 1);
1429 	event_base_dispatch(base);
1430 }
1431 
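/* EVENT_BASE_COUNT_ACTIVE, EVENT_BASE_COUNT_ADDED and EVENT_BASE_COUNT_VIRTUAL
 * may be OR'd together, in which case event_base_get_num_events() returns the
 * sum of the requested counters; the assertions below rely on that. */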
1432 static void
1433 test_event_base_get_num_events(void *ptr)
1434 {
1435 	struct basic_test_data *data = ptr;
1436 	struct event_base *base = data->base;
1437 	struct event ev;
1438 	int event_count_active;
1439 	int event_count_virtual;
1440 	int event_count_added;
1441 	int event_count_active_virtual;
1442 	int event_count_active_added;
1443 	int event_count_virtual_added;
1444 	int event_count_active_added_virtual;
1445 
1446 	struct timeval qsec = {0, 100000};
1447 
1448 	event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
1449 	    event_self_cbarg());
1450 
1451 	event_add(&ev, &qsec);
1452 	event_count_active = event_base_get_num_events(base,
1453 	    EVENT_BASE_COUNT_ACTIVE);
1454 	event_count_virtual = event_base_get_num_events(base,
1455 	    EVENT_BASE_COUNT_VIRTUAL);
1456 	event_count_added = event_base_get_num_events(base,
1457 	    EVENT_BASE_COUNT_ADDED);
1458 	event_count_active_virtual = event_base_get_num_events(base,
1459 	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
1460 	event_count_active_added = event_base_get_num_events(base,
1461 	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
1462 	event_count_virtual_added = event_base_get_num_events(base,
1463 	    EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
1464 	event_count_active_added_virtual = event_base_get_num_events(base,
1465 	    EVENT_BASE_COUNT_ACTIVE|
1466 	    EVENT_BASE_COUNT_ADDED|
1467 	    EVENT_BASE_COUNT_VIRTUAL);
1468 	tt_int_op(event_count_active, ==, 0);
1469 	tt_int_op(event_count_virtual, ==, 0);
1470 	/* libevent itself adds a timeout event, so the event_count is 2 here */
1471 	tt_int_op(event_count_added, ==, 2);
1472 	tt_int_op(event_count_active_virtual, ==, 0);
1473 	tt_int_op(event_count_active_added, ==, 2);
1474 	tt_int_op(event_count_virtual_added, ==, 2);
1475 	tt_int_op(event_count_active_added_virtual, ==, 2);
1476 
1477 	event_active(&ev, EV_READ, 1);
1478 	event_count_active = event_base_get_num_events(base,
1479 	    EVENT_BASE_COUNT_ACTIVE);
1480 	event_count_virtual = event_base_get_num_events(base,
1481 	    EVENT_BASE_COUNT_VIRTUAL);
1482 	event_count_added = event_base_get_num_events(base,
1483 	    EVENT_BASE_COUNT_ADDED);
1484 	event_count_active_virtual = event_base_get_num_events(base,
1485 	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
1486 	event_count_active_added = event_base_get_num_events(base,
1487 	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
1488 	event_count_virtual_added = event_base_get_num_events(base,
1489 	    EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
1490 	event_count_active_added_virtual = event_base_get_num_events(base,
1491 	    EVENT_BASE_COUNT_ACTIVE|
1492 	    EVENT_BASE_COUNT_ADDED|
1493 	    EVENT_BASE_COUNT_VIRTUAL);
1494 	tt_int_op(event_count_active, ==, 1);
1495 	tt_int_op(event_count_virtual, ==, 0);
1496 	tt_int_op(event_count_added, ==, 3);
1497 	tt_int_op(event_count_active_virtual, ==, 1);
1498 	tt_int_op(event_count_active_added, ==, 4);
1499 	tt_int_op(event_count_virtual_added, ==, 3);
1500 	tt_int_op(event_count_active_added_virtual, ==, 4);
1501 
1502        event_base_loop(base, 0);
1503        event_count_active = event_base_get_num_events(base,
1504 	   EVENT_BASE_COUNT_ACTIVE);
1505        event_count_virtual = event_base_get_num_events(base,
1506 	   EVENT_BASE_COUNT_VIRTUAL);
1507        event_count_added = event_base_get_num_events(base,
1508 	   EVENT_BASE_COUNT_ADDED);
1509        event_count_active_virtual = event_base_get_num_events(base,
1510 	   EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
1511        event_count_active_added = event_base_get_num_events(base,
1512 	   EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
1513        event_count_virtual_added = event_base_get_num_events(base,
1514 	   EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
1515        event_count_active_added_virtual = event_base_get_num_events(base,
1516 	   EVENT_BASE_COUNT_ACTIVE|
1517 	   EVENT_BASE_COUNT_ADDED|
1518 	   EVENT_BASE_COUNT_VIRTUAL);
1519        tt_int_op(event_count_active, ==, 0);
1520        tt_int_op(event_count_virtual, ==, 0);
1521        tt_int_op(event_count_added, ==, 0);
1522        tt_int_op(event_count_active_virtual, ==, 0);
1523        tt_int_op(event_count_active_added, ==, 0);
1524        tt_int_op(event_count_virtual_added, ==, 0);
1525        tt_int_op(event_count_active_added_virtual, ==, 0);
1526 
1527        event_base_add_virtual_(base);
1528        event_count_active = event_base_get_num_events(base,
1529 	   EVENT_BASE_COUNT_ACTIVE);
1530        event_count_virtual = event_base_get_num_events(base,
1531 	   EVENT_BASE_COUNT_VIRTUAL);
1532        event_count_added = event_base_get_num_events(base,
1533 	   EVENT_BASE_COUNT_ADDED);
1534        event_count_active_virtual = event_base_get_num_events(base,
1535 	   EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
1536        event_count_active_added = event_base_get_num_events(base,
1537 	   EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
1538        event_count_virtual_added = event_base_get_num_events(base,
1539 	   EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
1540        event_count_active_added_virtual = event_base_get_num_events(base,
1541 	   EVENT_BASE_COUNT_ACTIVE|
1542 	   EVENT_BASE_COUNT_ADDED|
1543 	   EVENT_BASE_COUNT_VIRTUAL);
1544        tt_int_op(event_count_active, ==, 0);
1545        tt_int_op(event_count_virtual, ==, 1);
1546        tt_int_op(event_count_added, ==, 0);
1547        tt_int_op(event_count_active_virtual, ==, 1);
1548        tt_int_op(event_count_active_added, ==, 0);
1549        tt_int_op(event_count_virtual_added, ==, 1);
1550        tt_int_op(event_count_active_added_virtual, ==, 1);
1551 
1552 end:
1553        ;
1554 }
1555 
1556 static void
1557 test_event_base_get_max_events(void *ptr)
1558 {
1559 	struct basic_test_data *data = ptr;
1560 	struct event_base *base = data->base;
1561 	struct event ev;
1562 	struct event ev2;
1563 	int event_count_active;
1564 	int event_count_virtual;
1565 	int event_count_added;
1566 	int event_count_active_virtual;
1567 	int event_count_active_added;
1568 	int event_count_virtual_added;
1569 	int event_count_active_added_virtual;
1570 
1571 	struct timeval qsec = {0, 100000};
1572 
1573 	event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
1574 	    event_self_cbarg());
1575 	event_assign(&ev2, base, -1, EV_READ, event_selfarg_cb,
1576 	    event_self_cbarg());
1577 
1578 	event_add(&ev, &qsec);
1579 	event_add(&ev2, &qsec);
1580 	event_del(&ev2);
1581 
1582 	event_count_active = event_base_get_max_events(base,
1583 	    EVENT_BASE_COUNT_ACTIVE, 0);
1584 	event_count_virtual = event_base_get_max_events(base,
1585 	    EVENT_BASE_COUNT_VIRTUAL, 0);
1586 	event_count_added = event_base_get_max_events(base,
1587 	    EVENT_BASE_COUNT_ADDED, 0);
1588 	event_count_active_virtual = event_base_get_max_events(base,
1589 	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
1590 	event_count_active_added = event_base_get_max_events(base,
1591 	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
1592 	event_count_virtual_added = event_base_get_max_events(base,
1593 	    EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
1594 	event_count_active_added_virtual = event_base_get_max_events(base,
1595 	    EVENT_BASE_COUNT_ACTIVE |
1596 	    EVENT_BASE_COUNT_ADDED |
1597 	    EVENT_BASE_COUNT_VIRTUAL, 0);
1598 
1599 	tt_int_op(event_count_active, ==, 0);
1600 	tt_int_op(event_count_virtual, ==, 0);
1601 	/* libevent itself adds a timeout event, so the event_count is 4 here */
1602 	tt_int_op(event_count_added, ==, 4);
1603 	tt_int_op(event_count_active_virtual, ==, 0);
1604 	tt_int_op(event_count_active_added, ==, 4);
1605 	tt_int_op(event_count_virtual_added, ==, 4);
1606 	tt_int_op(event_count_active_added_virtual, ==, 4);
1607 
1608 	event_active(&ev, EV_READ, 1);
1609 	event_count_active = event_base_get_max_events(base,
1610 	    EVENT_BASE_COUNT_ACTIVE, 0);
1611 	event_count_virtual = event_base_get_max_events(base,
1612 	    EVENT_BASE_COUNT_VIRTUAL, 0);
1613 	event_count_added = event_base_get_max_events(base,
1614 	    EVENT_BASE_COUNT_ADDED, 0);
1615 	event_count_active_virtual = event_base_get_max_events(base,
1616 	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
1617 	event_count_active_added = event_base_get_max_events(base,
1618 	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
1619 	event_count_virtual_added = event_base_get_max_events(base,
1620 	    EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
1621 	event_count_active_added_virtual = event_base_get_max_events(base,
1622 	    EVENT_BASE_COUNT_ACTIVE |
1623 	    EVENT_BASE_COUNT_ADDED |
1624 	    EVENT_BASE_COUNT_VIRTUAL, 0);
1625 
1626 	tt_int_op(event_count_active, ==, 1);
1627 	tt_int_op(event_count_virtual, ==, 0);
1628 	tt_int_op(event_count_added, ==, 4);
1629 	tt_int_op(event_count_active_virtual, ==, 1);
1630 	tt_int_op(event_count_active_added, ==, 5);
1631 	tt_int_op(event_count_virtual_added, ==, 4);
1632 	tt_int_op(event_count_active_added_virtual, ==, 5);
1633 
1634 	event_base_loop(base, 0);
1635 	event_count_active = event_base_get_max_events(base,
1636 	    EVENT_BASE_COUNT_ACTIVE, 1);
1637 	event_count_virtual = event_base_get_max_events(base,
1638 	    EVENT_BASE_COUNT_VIRTUAL, 1);
1639 	event_count_added = event_base_get_max_events(base,
1640 	    EVENT_BASE_COUNT_ADDED, 1);
1641 	event_count_active_virtual = event_base_get_max_events(base,
1642 	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
1643 	event_count_active_added = event_base_get_max_events(base,
1644 	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
1645 	event_count_virtual_added = event_base_get_max_events(base,
1646 	    EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
1647 	event_count_active_added_virtual = event_base_get_max_events(base,
1648 	    EVENT_BASE_COUNT_ACTIVE |
1649 	    EVENT_BASE_COUNT_ADDED |
1650 	    EVENT_BASE_COUNT_VIRTUAL, 1);
1651 
1652 	tt_int_op(event_count_active, ==, 1);
1653 	tt_int_op(event_count_virtual, ==, 0);
1654 	tt_int_op(event_count_added, ==, 4);
1655 	tt_int_op(event_count_active_virtual, ==, 0);
1656 	tt_int_op(event_count_active_added, ==, 0);
1657 	tt_int_op(event_count_virtual_added, ==, 0);
1658 	tt_int_op(event_count_active_added_virtual, ==, 0);
1659 
1660 	event_count_active = event_base_get_max_events(base,
1661 	    EVENT_BASE_COUNT_ACTIVE, 0);
1662 	event_count_virtual = event_base_get_max_events(base,
1663 	    EVENT_BASE_COUNT_VIRTUAL, 0);
1664 	event_count_added = event_base_get_max_events(base,
1665 	    EVENT_BASE_COUNT_ADDED, 0);
1666 	tt_int_op(event_count_active, ==, 0);
1667 	tt_int_op(event_count_virtual, ==, 0);
1668 	tt_int_op(event_count_added, ==, 0);
1669 
1670 	event_base_add_virtual_(base);
1671 	event_count_active = event_base_get_max_events(base,
1672 	    EVENT_BASE_COUNT_ACTIVE, 0);
1673 	event_count_virtual = event_base_get_max_events(base,
1674 	    EVENT_BASE_COUNT_VIRTUAL, 0);
1675 	event_count_added = event_base_get_max_events(base,
1676 	    EVENT_BASE_COUNT_ADDED, 0);
1677 	event_count_active_virtual = event_base_get_max_events(base,
1678 	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
1679 	event_count_active_added = event_base_get_max_events(base,
1680 	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
1681 	event_count_virtual_added = event_base_get_max_events(base,
1682 	    EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
1683 	event_count_active_added_virtual = event_base_get_max_events(base,
1684 	    EVENT_BASE_COUNT_ACTIVE |
1685 	    EVENT_BASE_COUNT_ADDED |
1686 	    EVENT_BASE_COUNT_VIRTUAL, 0);
1687 
1688 	tt_int_op(event_count_active, ==, 0);
1689 	tt_int_op(event_count_virtual, ==, 1);
1690 	tt_int_op(event_count_added, ==, 0);
1691 	tt_int_op(event_count_active_virtual, ==, 1);
1692 	tt_int_op(event_count_active_added, ==, 0);
1693 	tt_int_op(event_count_virtual_added, ==, 1);
1694 	tt_int_op(event_count_active_added_virtual, ==, 1);
1695 
1696 end:
1697        ;
1698 }
1699 
1700 static void
1701 test_bad_assign(void *ptr)
1702 {
1703 	struct event ev;
1704 	int r;
1705 	/* READ|SIGNAL is not allowed */
1706 	r = event_assign(&ev, NULL, -1, EV_SIGNAL|EV_READ, dummy_read_cb, NULL);
1707 	tt_int_op(r,==,-1);
1708 
1709 end:
1710 	;
1711 }
1712 
1713 static int reentrant_cb_run = 0;
1714 
1715 static void
1716 bad_reentrant_run_loop_cb(evutil_socket_t fd, short what, void *ptr)
1717 {
1718 	struct event_base *base = ptr;
1719 	int r;
1720 	reentrant_cb_run = 1;
1721 	/* This reentrant call to event_base_loop should be detected and
1722 	 * should fail */
1723 	r = event_base_loop(base, 0);
1724 	tt_int_op(r, ==, -1);
1725 end:
1726 	;
1727 }
1728 
1729 static void
1730 test_bad_reentrant(void *ptr)
1731 {
1732 	struct basic_test_data *data = ptr;
1733 	struct event_base *base = data->base;
1734 	struct event ev;
1735 	int r;
1736 	event_assign(&ev, base, -1,
1737 	    0, bad_reentrant_run_loop_cb, base);
1738 
1739 	event_active(&ev, EV_WRITE, 1);
1740 	r = event_base_loop(base, 0);
1741 	tt_int_op(r, ==, 1);
1742 	tt_int_op(reentrant_cb_run, ==, 1);
1743 end:
1744 	;
1745 }
1746 
1747 static int n_write_a_byte_cb=0;
1748 static int n_read_and_drain_cb=0;
1749 static int n_activate_other_event_cb=0;
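/* Helpers for test_active_later: ev3 and ev4 re-activate each other through
 * event_active_later_() while a socket reader/writer pair runs alongside
 * them, exercising the deferred "active later" queue under load. */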
1750 static void
1751 write_a_byte_cb(evutil_socket_t fd, short what, void *arg)
1752 {
1753 	char buf[] = "x";
1754 	if (write(fd, buf, 1) == 1)
1755 		++n_write_a_byte_cb;
1756 }
1757 static void
1758 read_and_drain_cb(evutil_socket_t fd, short what, void *arg)
1759 {
1760 	char buf[128];
1761 	int n;
1762 	++n_read_and_drain_cb;
1763 	while ((n = read(fd, buf, sizeof(buf))) > 0)
1764 		;
1765 }
1766 
1767 static void
1768 activate_other_event_cb(evutil_socket_t fd, short what, void *other_)
1769 {
1770 	struct event *ev_activate = other_;
1771 	++n_activate_other_event_cb;
1772 	event_active_later_(ev_activate, EV_READ);
1773 }
1774 
1775 static void
1776 test_active_later(void *ptr)
1777 {
1778 	struct basic_test_data *data = ptr;
1779 	struct event *ev1 = NULL, *ev2 = NULL;
1780 	struct event ev3, ev4;
1781 	struct timeval qsec = {0, 100000};
1782 	ev1 = event_new(data->base, data->pair[0], EV_READ|EV_PERSIST, read_and_drain_cb, NULL);
1783 	ev2 = event_new(data->base, data->pair[1], EV_WRITE|EV_PERSIST, write_a_byte_cb, NULL);
1784 	event_assign(&ev3, data->base, -1, 0, activate_other_event_cb, &ev4);
1785 	event_assign(&ev4, data->base, -1, 0, activate_other_event_cb, &ev3);
1786 	event_add(ev1, NULL);
1787 	event_add(ev2, NULL);
1788 	event_active_later_(&ev3, EV_READ);
1789 
1790 	event_base_loopexit(data->base, &qsec);
1791 
1792 	event_base_loop(data->base, 0);
1793 
1794 	TT_BLATHER(("%d write calls, %d read calls, %d activate-other calls.",
1795 		n_write_a_byte_cb, n_read_and_drain_cb, n_activate_other_event_cb));
1796 	event_del(&ev3);
1797 	event_del(&ev4);
1798 
1799 	tt_int_op(n_write_a_byte_cb, ==, n_activate_other_event_cb);
1800 	tt_int_op(n_write_a_byte_cb, >, 100);
1801 	tt_int_op(n_read_and_drain_cb, >, 100);
1802 	tt_int_op(n_activate_other_event_cb, >, 100);
1803 
1804 	event_active_later_(&ev4, EV_READ);
1805 	event_active(&ev4, EV_READ, 1); /* This should make the event
1806 					   active immediately. */
1807 	tt_assert((ev4.ev_flags & EVLIST_ACTIVE) != 0);
1808 	tt_assert((ev4.ev_flags & EVLIST_ACTIVE_LATER) == 0);
1809 
1810 	/* Now leave this one around, so that event_free sees it and removes
1811 	 * it. */
1812 	event_active_later_(&ev3, EV_READ);
1813 	event_base_assert_ok_(data->base);
1814 
1815 end:
1816 	if (ev1)
1817 		event_free(ev1);
1818 	if (ev2)
1819 		event_free(ev2);
1820 
1821 	event_base_free(data->base);
1822 	data->base = NULL;
1823 }
1824 
1825 
1826 static void incr_arg_cb(evutil_socket_t fd, short what, void *arg)
1827 {
1828 	int *intptr = arg;
1829 	(void) fd; (void) what;
1830 	++*intptr;
1831 }
1832 static void remove_timers_cb(evutil_socket_t fd, short what, void *arg)
1833 {
1834 	struct event **ep = arg;
1835 	(void) fd; (void) what;
1836 	event_remove_timer(ep[0]);
1837 	event_remove_timer(ep[1]);
1838 }
1839 static void send_a_byte_cb(evutil_socket_t fd, short what, void *arg)
1840 {
1841 	evutil_socket_t *sockp = arg;
1842 	(void) fd; (void) what;
1843 	(void) write(*sockp, "A", 1);
1844 }
1845 struct read_not_timeout_param
1846 {
1847 	struct event **ev;
1848 	int events;
1849 	int count;
1850 };
1851 static void read_not_timeout_cb(evutil_socket_t fd, short what, void *arg)
1852 {
1853 	struct read_not_timeout_param *rntp = arg;
1854 	char c;
1855 	ev_ssize_t n;
1856 	(void) fd; (void) what;
1857 	n = read(fd, &c, 1);
1858 	tt_int_op(n, ==, 1);
1859 	rntp->events |= what;
1860 	++rntp->count;
1861 	if (2 == rntp->count) event_del(rntp->ev[0]);
1862 end:
1863 	;
1864 }
1865 
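/* test_event_remove_timeout: event_remove_timer() should strip only the
 * timeout from a pending event.  At 25 msec ev[2] removes the timers from
 * ev[0] (a persistent reader with a 75 msec timeout) and ev[1] (a plain
 * timer).  Bytes written at 40 and 125 msec should make ev[0] fire with
 * EV_READ only, ev[1] should never fire at all, and after the second read
 * ev[0] deletes itself so the dispatch loop can finish. */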
1866 static void
1867 test_event_remove_timeout(void *ptr)
1868 {
1869 	struct basic_test_data *data = ptr;
1870 	struct event_base *base = data->base;
1871 	struct event *ev[5];
1872 	int ev1_fired=0;
1873 	struct timeval ms25 = { 0, 25*1000 },
1874 		ms40 = { 0, 40*1000 },
1875 		ms75 = { 0, 75*1000 },
1876 		ms125 = { 0, 125*1000 };
1877 	struct read_not_timeout_param rntp = { ev, 0, 0 };
1878 
1879 	event_base_assert_ok_(base);
1880 
1881 	ev[0] = event_new(base, data->pair[0], EV_READ|EV_PERSIST,
1882 	    read_not_timeout_cb, &rntp);
1883 	ev[1] = evtimer_new(base, incr_arg_cb, &ev1_fired);
1884 	ev[2] = evtimer_new(base, remove_timers_cb, ev);
1885 	ev[3] = evtimer_new(base, send_a_byte_cb, &data->pair[1]);
1886 	ev[4] = evtimer_new(base, send_a_byte_cb, &data->pair[1]);
1887 	tt_assert(base);
1888 	event_add(ev[2], &ms25); /* remove timers */
1889 	event_add(ev[4], &ms40); /* write to test if timer re-activates */
1890 	event_add(ev[0], &ms75); /* read */
1891 	event_add(ev[1], &ms75); /* timer */
1892 	event_add(ev[3], &ms125); /* timeout. */
1893 	event_base_assert_ok_(base);
1894 
1895 	event_base_dispatch(base);
1896 
1897 	tt_int_op(ev1_fired, ==, 0);
1898 	tt_int_op(rntp.events, ==, EV_READ);
1899 
1900 	event_base_assert_ok_(base);
1901 end:
1902 	event_free(ev[0]);
1903 	event_free(ev[1]);
1904 	event_free(ev[2]);
1905 	event_free(ev[3]);
1906 	event_free(ev[4]);
1907 }
1908 
1909 static void
1910 test_event_base_new(void *ptr)
1911 {
1912 	struct basic_test_data *data = ptr;
1913 	struct event_base *base = 0;
1914 	struct event ev1;
1915 	struct basic_cb_args args;
1916 
1917 	int towrite = (int)strlen(TEST1)+1;
1918 	int len = write(data->pair[0], TEST1, towrite);
1919 
1920 	if (len < 0)
1921 		tt_abort_perror("initial write");
1922 	else if (len != towrite)
1923 		tt_abort_printf(("initial write fell short (%d of %d bytes)",
1924 				 len, towrite));
1925 
1926 	if (shutdown(data->pair[0], EVUTIL_SHUT_WR))
1927 		tt_abort_perror("initial write shutdown");
1928 
1929 	base = event_base_new();
1930 	if (!base)
1931 		tt_abort_msg("failed to create event base");
1932 
1933 	args.eb = base;
1934 	args.ev = &ev1;
1935 	args.callcount = 0;
1936 	event_assign(&ev1, base, data->pair[1],
1937 		     EV_READ|EV_PERSIST, basic_read_cb, &args);
1938 
1939 	if (event_add(&ev1, NULL))
1940 		tt_abort_perror("initial event_add");
1941 
1942 	if (event_base_loop(base, 0))
1943 		tt_abort_msg("unsuccessful exit from event loop");
1944 
1945 end:
1946 	if (base)
1947 		event_base_free(base);
1948 }
1949 
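/* test_loopexit: a 24-hour timer keeps the loop busy while event_loopexit()
 * asks it to stop after 300 msec.  The test measures elapsed wall-clock time
 * and expects the base to report "got exit" but not "got break". */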
1950 static void
1951 test_loopexit(void)
1952 {
1953 	struct timeval tv, tv_start, tv_end;
1954 	struct event ev;
1955 
1956 	setup_test("Loop exit: ");
1957 
1958 	tv.tv_usec = 0;
1959 	tv.tv_sec = 60*60*24;
1960 	evtimer_set(&ev, timeout_cb, NULL);
1961 	evtimer_add(&ev, &tv);
1962 
1963 	tv.tv_usec = 300*1000;
1964 	tv.tv_sec = 0;
1965 	event_loopexit(&tv);
1966 
1967 	evutil_gettimeofday(&tv_start, NULL);
1968 	event_dispatch();
1969 	evutil_gettimeofday(&tv_end, NULL);
1970 
1971 	evtimer_del(&ev);
1972 
1973 	tt_assert(event_base_got_exit(global_base));
1974 	tt_assert(!event_base_got_break(global_base));
1975 
1976 	test_timeval_diff_eq(&tv_start, &tv_end, 300);
1977 
1978 	test_ok = 1;
1979 end:
1980 	cleanup_test();
1981 }
1982 
1983 static void
1984 test_loopexit_multiple(void)
1985 {
1986 	struct timeval tv, tv_start, tv_end;
1987 	struct event_base *base;
1988 
1989 	setup_test("Loop Multiple exit: ");
1990 
1991 	base = event_base_new();
1992 
1993 	tv.tv_usec = 200*1000;
1994 	tv.tv_sec = 0;
1995 	event_base_loopexit(base, &tv);
1996 
1997 	tv.tv_usec = 0;
1998 	tv.tv_sec = 3;
1999 	event_base_loopexit(base, &tv);
2000 
2001 	evutil_gettimeofday(&tv_start, NULL);
2002 	event_base_dispatch(base);
2003 	evutil_gettimeofday(&tv_end, NULL);
2004 
2005 	tt_assert(event_base_got_exit(base));
2006 	tt_assert(!event_base_got_break(base));
2007 
2008 	event_base_free(base);
2009 
2010 	test_timeval_diff_eq(&tv_start, &tv_end, 200);
2011 
2012 	test_ok = 1;
2013 
2014 end:
2015 	cleanup_test();
2016 }
2017 
2018 static void
2019 break_cb(evutil_socket_t fd, short events, void *arg)
2020 {
2021 	test_ok = 1;
2022 	event_loopbreak();
2023 }
2024 
2025 static void
2026 fail_cb(evutil_socket_t fd, short events, void *arg)
2027 {
2028 	test_ok = 0;
2029 }
2030 
2031 static void
2032 test_loopbreak(void)
2033 {
2034 	struct event ev1, ev2;
2035 	struct timeval tv;
2036 
2037 	setup_test("Loop break: ");
2038 
2039 	tv.tv_sec = 0;
2040 	tv.tv_usec = 0;
2041 	evtimer_set(&ev1, break_cb, NULL);
2042 	evtimer_add(&ev1, &tv);
2043 	evtimer_set(&ev2, fail_cb, NULL);
2044 	evtimer_add(&ev2, &tv);
2045 
2046 	event_dispatch();
2047 
2048 	tt_assert(!event_base_got_exit(global_base));
2049 	tt_assert(event_base_got_break(global_base));
2050 
2051 	evtimer_del(&ev1);
2052 	evtimer_del(&ev2);
2053 
2054 end:
2055 	cleanup_test();
2056 }
2057 
2058 static struct event *readd_test_event_last_added = NULL;
2059 static void
2060 re_add_read_cb(evutil_socket_t fd, short event, void *arg)
2061 {
2062 	char buf[256];
2063 	struct event *ev_other = arg;
2064 	ev_ssize_t n_read;
2065 
2066 	readd_test_event_last_added = ev_other;
2067 
2068 	n_read = read(fd, buf, sizeof(buf));
2069 
2070 	if (n_read < 0) {
2071 		tt_fail_perror("read");
2072 		event_base_loopbreak(event_get_base(ev_other));
2073 		return;
2074 	} else {
2075 		event_add(ev_other, NULL);
2076 		++test_ok;
2077 	}
2078 }
2079 
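/* test_nonpersist_readd: two one-shot read events whose callbacks re-add
 * each other.  Both sockets already have data, so a single EVLOOP_ONCE pass
 * should run both callbacks; afterwards only the event that was re-added
 * last should still be pending, which is verified for either ordering. */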
2080 static void
2081 test_nonpersist_readd(void)
2082 {
2083 	struct event ev1, ev2;
2084 
2085 	setup_test("Re-add nonpersistent events: ");
2086 	event_set(&ev1, pair[0], EV_READ, re_add_read_cb, &ev2);
2087 	event_set(&ev2, pair[1], EV_READ, re_add_read_cb, &ev1);
2088 
2089 	if (write(pair[0], "Hello", 5) < 0) {
2090 		tt_fail_perror("write(pair[0])");
2091 	}
2092 
2093 	if (write(pair[1], "Hello", 5) < 0) {
2094 		tt_fail_perror("write(pair[1])\n");
2095 	}
2096 
2097 	if (event_add(&ev1, NULL) == -1 ||
2098 	    event_add(&ev2, NULL) == -1) {
2099 		test_ok = 0;
2100 	}
2101 	if (test_ok != 0)
2102 		exit(1);
2103 	event_loop(EVLOOP_ONCE);
2104 	if (test_ok != 2)
2105 		exit(1);
2106 	/* At this point, we executed both callbacks.  Whichever one got
2107 	 * called first added the second, but the second then immediately got
2108 	 * deleted before its callback was called.  At this point, though, it
2109 	 * re-added the first.
2110 	 */
2111 	if (!readd_test_event_last_added) {
2112 		test_ok = 0;
2113 	} else if (readd_test_event_last_added == &ev1) {
2114 		if (!event_pending(&ev1, EV_READ, NULL) ||
2115 		    event_pending(&ev2, EV_READ, NULL))
2116 			test_ok = 0;
2117 	} else {
2118 		if (event_pending(&ev1, EV_READ, NULL) ||
2119 		    !event_pending(&ev2, EV_READ, NULL))
2120 			test_ok = 0;
2121 	}
2122 
2123 	event_del(&ev1);
2124 	event_del(&ev2);
2125 
2126 	cleanup_test();
2127 }
2128 
2129 struct test_pri_event {
2130 	struct event ev;
2131 	int count;
2132 };
2133 
2134 static void
2135 test_priorities_cb(evutil_socket_t fd, short what, void *arg)
2136 {
2137 	struct test_pri_event *pri = arg;
2138 	struct timeval tv;
2139 
2140 	if (pri->count == 3) {
2141 		event_loopexit(NULL);
2142 		return;
2143 	}
2144 
2145 	pri->count++;
2146 
2147 	evutil_timerclear(&tv);
2148 	event_add(&pri->ev, &tv);
2149 }
2150 
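/* test_priorities_impl: "one" and "two" are zero-timeout timers that re-add
 * themselves, with "one" at priority 0 and "two" at the lowest priority
 * (npriorities - 1).  Once an event has run three times its next invocation
 * calls event_loopexit() instead of re-adding, so the final counts show how
 * strongly the higher priority starved the lower one; the expected values
 * per npriorities are checked at the end of the function. */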
2151 static void
2152 test_priorities_impl(int npriorities)
2153 {
2154 	struct test_pri_event one, two;
2155 	struct timeval tv;
2156 
2157 	TT_BLATHER(("Testing Priorities %d: ", npriorities));
2158 
2159 	event_base_priority_init(global_base, npriorities);
2160 
2161 	memset(&one, 0, sizeof(one));
2162 	memset(&two, 0, sizeof(two));
2163 
2164 	timeout_set(&one.ev, test_priorities_cb, &one);
2165 	if (event_priority_set(&one.ev, 0) == -1) {
2166 		fprintf(stderr, "%s: failed to set priority", __func__);
2167 		exit(1);
2168 	}
2169 
2170 	timeout_set(&two.ev, test_priorities_cb, &two);
2171 	if (event_priority_set(&two.ev, npriorities - 1) == -1) {
2172 		fprintf(stderr, "%s: failed to set priority", __func__);
2173 		exit(1);
2174 	}
2175 
2176 	evutil_timerclear(&tv);
2177 
2178 	if (event_add(&one.ev, &tv) == -1)
2179 		exit(1);
2180 	if (event_add(&two.ev, &tv) == -1)
2181 		exit(1);
2182 
2183 	event_dispatch();
2184 
2185 	event_del(&one.ev);
2186 	event_del(&two.ev);
2187 
2188 	if (npriorities == 1) {
2189 		if (one.count == 3 && two.count == 3)
2190 			test_ok = 1;
2191 	} else if (npriorities == 2) {
2192 		/* Two is called once because event_loopexit is priority 1 */
2193 		if (one.count == 3 && two.count == 1)
2194 			test_ok = 1;
2195 	} else {
2196 		if (one.count == 3 && two.count == 0)
2197 			test_ok = 1;
2198 	}
2199 }
2200 
2201 static void
2202 test_priorities(void)
2203 {
2204 	test_priorities_impl(1);
2205 	if (test_ok)
2206 		test_priorities_impl(2);
2207 	if (test_ok)
2208 		test_priorities_impl(3);
2209 }
2210 
2211 /* priority-active-inversion: activate a higher-priority event, and make sure
2212  * it keeps us from running a lower-priority event first. */
2213 static int n_pai_calls = 0;
2214 static struct event pai_events[3];
2215 
2216 static void
2217 prio_active_inversion_cb(evutil_socket_t fd, short what, void *arg)
2218 {
2219 	int *call_order = arg;
2220 	*call_order = n_pai_calls++;
2221 	if (n_pai_calls == 1) {
2222 		/* This should activate later, even though it shares a
2223 		   priority with us. */
2224 		event_active(&pai_events[1], EV_READ, 1);
2225 		/* This should activate next, since its priority is higher,
2226 		   even though we activated it second. */
2227 		event_active(&pai_events[2], EV_TIMEOUT, 1);
2228 	}
2229 }
2230 
2231 static void
2232 test_priority_active_inversion(void *data_)
2233 {
2234 	struct basic_test_data *data = data_;
2235 	struct event_base *base = data->base;
2236 	int call_order[3];
2237 	int i;
2238 	tt_int_op(event_base_priority_init(base, 8), ==, 0);
2239 
2240 	n_pai_calls = 0;
2241 	memset(call_order, 0, sizeof(call_order));
2242 
2243 	for (i=0;i<3;++i) {
2244 		event_assign(&pai_events[i], data->base, -1, 0,
2245 		    prio_active_inversion_cb, &call_order[i]);
2246 	}
2247 
2248 	event_priority_set(&pai_events[0], 4);
2249 	event_priority_set(&pai_events[1], 4);
2250 	event_priority_set(&pai_events[2], 0);
2251 
2252 	event_active(&pai_events[0], EV_WRITE, 1);
2253 
2254 	event_base_dispatch(base);
2255 	tt_int_op(n_pai_calls, ==, 3);
2256 	tt_int_op(call_order[0], ==, 0);
2257 	tt_int_op(call_order[1], ==, 2);
2258 	tt_int_op(call_order[2], ==, 1);
2259 end:
2260 	;
2261 }
2262 
2263 
2264 static void
2265 test_multiple_cb(evutil_socket_t fd, short event, void *arg)
2266 {
2267 	if (event & EV_READ)
2268 		test_ok |= 1;
2269 	else if (event & EV_WRITE)
2270 		test_ok |= 2;
2271 }
2272 
2273 static void
2274 test_multiple_events_for_same_fd(void)
2275 {
2276    struct event e1, e2;
2277 
2278    setup_test("Multiple events for same fd: ");
2279 
2280    event_set(&e1, pair[0], EV_READ, test_multiple_cb, NULL);
2281    event_add(&e1, NULL);
2282    event_set(&e2, pair[0], EV_WRITE, test_multiple_cb, NULL);
2283    event_add(&e2, NULL);
2284    event_loop(EVLOOP_ONCE);
2285    event_del(&e2);
2286 
2287    if (write(pair[1], TEST1, strlen(TEST1)+1) < 0) {
2288 	   tt_fail_perror("write");
2289    }
2290 
2291    event_loop(EVLOOP_ONCE);
2292    event_del(&e1);
2293 
2294    if (test_ok != 3)
2295 	   test_ok = 0;
2296 
2297    cleanup_test();
2298 }
2299 
2300 int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf);
2301 int evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf);
2302 int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t number);
2303 int evtag_decode_tag(ev_uint32_t *pnumber, struct evbuffer *evbuf);
2304 
2305 static void
2306 read_once_cb(evutil_socket_t fd, short event, void *arg)
2307 {
2308 	char buf[256];
2309 	int len;
2310 
2311 	len = read(fd, buf, sizeof(buf));
2312 
2313 	if (called) {
2314 		test_ok = 0;
2315 	} else if (len) {
2316 		/* Assumes global pair[0] can be used for writing */
2317 		if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
2318 			tt_fail_perror("write");
2319 			test_ok = 0;
2320 		} else {
2321 			test_ok = 1;
2322 		}
2323 	}
2324 
2325 	called++;
2326 }
2327 
2328 static void
2329 test_want_only_once(void)
2330 {
2331 	struct event ev;
2332 	struct timeval tv;
2333 
2334 	/* Very simple read test */
2335 	setup_test("Want read only once: ");
2336 
2337 	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
2338 		tt_fail_perror("write");
2339 	}
2340 
2341 	/* Setup the loop termination */
2342 	evutil_timerclear(&tv);
2343 	tv.tv_usec = 300*1000;
2344 	event_loopexit(&tv);
2345 
2346 	event_set(&ev, pair[1], EV_READ, read_once_cb, &ev);
2347 	if (event_add(&ev, NULL) == -1)
2348 		exit(1);
2349 	event_dispatch();
2350 
2351 	cleanup_test();
2352 }
2353 
2354 #define TEST_MAX_INT	6
2355 
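/* evtag_int_test: round-trip check for the evtag integer encoders.  Each
 * 32-bit test value is encoded with evtag_encode_int(), and the same value
 * scaled by 10^9 is encoded with evtag_encode_int64(); decoding must
 * reproduce both and leave the buffer empty. */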
2356 static void
2357 evtag_int_test(void *ptr)
2358 {
2359 	struct evbuffer *tmp = evbuffer_new();
2360 	ev_uint32_t integers[TEST_MAX_INT] = {
2361 		0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000
2362 	};
2363 	ev_uint32_t integer;
2364 	ev_uint64_t big_int;
2365 	int i;
2366 
2367 	evtag_init();
2368 
2369 	for (i = 0; i < TEST_MAX_INT; i++) {
2370 		int oldlen, newlen;
2371 		oldlen = (int)EVBUFFER_LENGTH(tmp);
2372 		evtag_encode_int(tmp, integers[i]);
2373 		newlen = (int)EVBUFFER_LENGTH(tmp);
2374 		TT_BLATHER(("encoded 0x%08x with %d bytes",
2375 			(unsigned)integers[i], newlen - oldlen));
2376 		big_int = integers[i];
2377 		big_int *= 1000000000; /* 1 billion */
2378 		evtag_encode_int64(tmp, big_int);
2379 	}
2380 
2381 	for (i = 0; i < TEST_MAX_INT; i++) {
2382 		tt_int_op(evtag_decode_int(&integer, tmp), !=, -1);
2383 		tt_uint_op(integer, ==, integers[i]);
2384 		tt_int_op(evtag_decode_int64(&big_int, tmp), !=, -1);
2385 		tt_assert((big_int / 1000000000) == integers[i]);
2386 	}
2387 
2388 	tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0);
2389 end:
2390 	evbuffer_free(tmp);
2391 }
2392 
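/* evtag_fuzz: feeds 100 buffers of 4096 random bytes to
 * evtag_unmarshal_timeval() and expects nearly all of them to be rejected,
 * then corrupts the length byte of a correctly marshaled timeval and
 * requires that decode to fail as well. */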
2393 static void
2394 evtag_fuzz(void *ptr)
2395 {
2396 	unsigned char buffer[4096];
2397 	struct evbuffer *tmp = evbuffer_new();
2398 	struct timeval tv;
2399 	int i, j;
2400 
2401 	int not_failed = 0;
2402 
2403 	evtag_init();
2404 
2405 	for (j = 0; j < 100; j++) {
2406 		for (i = 0; i < (int)sizeof(buffer); i++)
2407 			buffer[i] = test_weakrand();
2408 		evbuffer_drain(tmp, -1);
2409 		evbuffer_add(tmp, buffer, sizeof(buffer));
2410 
2411 		if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1)
2412 			not_failed++;
2413 	}
2414 
2415 	/* The majority of decodes should fail */
2416 	tt_int_op(not_failed, <, 10);
2417 
2418 	/* Now insert some corruption into the tag length field */
2419 	evbuffer_drain(tmp, -1);
2420 	evutil_timerclear(&tv);
2421 	tv.tv_sec = 1;
2422 	evtag_marshal_timeval(tmp, 0, &tv);
2423 	evbuffer_add(tmp, buffer, sizeof(buffer));
2424 
2425 	((char *)EVBUFFER_DATA(tmp))[1] = '\xff';
2426 	if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1) {
2427 		tt_abort_msg("evtag_unmarshal_timeval should have failed");
2428 	}
2429 
2430 end:
2431 	evbuffer_free(tmp);
2432 }
2433 
2434 static void
2435 evtag_tag_encoding(void *ptr)
2436 {
2437 	struct evbuffer *tmp = evbuffer_new();
2438 	ev_uint32_t integers[TEST_MAX_INT] = {
2439 		0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000
2440 	};
2441 	ev_uint32_t integer;
2442 	int i;
2443 
2444 	evtag_init();
2445 
2446 	for (i = 0; i < TEST_MAX_INT; i++) {
2447 		int oldlen, newlen;
2448 		oldlen = (int)EVBUFFER_LENGTH(tmp);
2449 		evtag_encode_tag(tmp, integers[i]);
2450 		newlen = (int)EVBUFFER_LENGTH(tmp);
2451 		TT_BLATHER(("encoded 0x%08x with %d bytes",
2452 			(unsigned)integers[i], newlen - oldlen));
2453 	}
2454 
2455 	for (i = 0; i < TEST_MAX_INT; i++) {
2456 		tt_int_op(evtag_decode_tag(&integer, tmp), !=, -1);
2457 		tt_uint_op(integer, ==, integers[i]);
2458 	}
2459 
2460 	tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0);
2461 
2462 end:
2463 	evbuffer_free(tmp);
2464 }
2465 
2466 static void
2467 evtag_test_peek(void *ptr)
2468 {
2469 	struct evbuffer *tmp = evbuffer_new();
2470 	ev_uint32_t u32;
2471 
2472 	evtag_marshal_int(tmp, 30, 0);
2473 	evtag_marshal_string(tmp, 40, "Hello world");
2474 
2475 	tt_int_op(evtag_peek(tmp, &u32), ==, 1);
2476 	tt_int_op(u32, ==, 30);
2477 	tt_int_op(evtag_peek_length(tmp, &u32), ==, 0);
2478 	tt_int_op(u32, ==, 1+1+1);
2479 	tt_int_op(evtag_consume(tmp), ==, 0);
2480 
2481 	tt_int_op(evtag_peek(tmp, &u32), ==, 1);
2482 	tt_int_op(u32, ==, 40);
2483 	tt_int_op(evtag_peek_length(tmp, &u32), ==, 0);
2484 	tt_int_op(u32, ==, 1+1+11);
2485 	tt_int_op(evtag_payload_length(tmp, &u32), ==, 0);
2486 	tt_int_op(u32, ==, 11);
2487 
2488 end:
2489 	evbuffer_free(tmp);
2490 }
2491 
2492 
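/* test_methods: enumerate event_get_supported_methods(), then build a base
 * configured to avoid the first listed backend.  If more than one backend
 * exists the base must come up using a different one; with only a single
 * backend available the construction must fail. */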
2493 static void
2494 test_methods(void *ptr)
2495 {
2496 	const char **methods = event_get_supported_methods();
2497 	struct event_config *cfg = NULL;
2498 	struct event_base *base = NULL;
2499 	const char *backend;
2500 	int n_methods = 0;
2501 
2502 	tt_assert(methods);
2503 
2504 	backend = methods[0];
2505 	while (*methods != NULL) {
2506 		TT_BLATHER(("Support method: %s", *methods));
2507 		++methods;
2508 		++n_methods;
2509 	}
2510 
2511 	cfg = event_config_new();
2512 	assert(cfg != NULL);
2513 
2514 	tt_int_op(event_config_avoid_method(cfg, backend), ==, 0);
2515 	event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV);
2516 
2517 	base = event_base_new_with_config(cfg);
2518 	if (n_methods > 1) {
2519 		tt_assert(base);
2520 		tt_str_op(backend, !=, event_base_get_method(base));
2521 	} else {
2522 		tt_assert(base == NULL);
2523 	}
2524 
2525 end:
2526 	if (base)
2527 		event_base_free(base);
2528 	if (cfg)
2529 		event_config_free(cfg);
2530 }
2531 
2532 static void
2533 test_version(void *arg)
2534 {
2535 	const char *vstr;
2536 	ev_uint32_t vint;
2537 	int major, minor, patch, n;
2538 
2539 	vstr = event_get_version();
2540 	vint = event_get_version_number();
2541 
2542 	tt_assert(vstr);
2543 	tt_assert(vint);
2544 
2545 	tt_str_op(vstr, ==, LIBEVENT_VERSION);
2546 	tt_int_op(vint, ==, LIBEVENT_VERSION_NUMBER);
2547 
2548 	n = sscanf(vstr, "%d.%d.%d", &major, &minor, &patch);
2549 	tt_assert(3 == n);
2550 	tt_int_op((vint&0xffffff00), ==, ((major<<24)|(minor<<16)|(patch<<8)));
2551 end:
2552 	;
2553 }
2554 
2555 static void
2556 test_base_features(void *arg)
2557 {
2558 	struct event_base *base = NULL;
2559 	struct event_config *cfg = NULL;
2560 
2561 	cfg = event_config_new();
2562 
2563 	tt_assert(0 == event_config_require_features(cfg, EV_FEATURE_ET));
2564 
2565 	base = event_base_new_with_config(cfg);
2566 	if (base) {
2567 		tt_int_op(EV_FEATURE_ET, ==,
2568 		    event_base_get_features(base) & EV_FEATURE_ET);
2569 	} else {
2570 		base = event_base_new();
2571 		tt_int_op(0, ==, event_base_get_features(base) & EV_FEATURE_ET);
2572 	}
2573 
2574 end:
2575 	if (base)
2576 		event_base_free(base);
2577 	if (cfg)
2578 		event_config_free(cfg);
2579 }
2580 
2581 #ifdef EVENT__HAVE_SETENV
2582 #define SETENV_OK
2583 #elif !defined(EVENT__HAVE_SETENV) && defined(EVENT__HAVE_PUTENV)
2584 static void setenv(const char *k, const char *v, int o_)
2585 {
2586 	char b[256];
2587 	evutil_snprintf(b, sizeof(b), "%s=%s",k,v);
2588 	putenv(b);
2589 }
2590 #define SETENV_OK
2591 #endif
2592 
2593 #ifdef EVENT__HAVE_UNSETENV
2594 #define UNSETENV_OK
2595 #elif !defined(EVENT__HAVE_UNSETENV) && defined(EVENT__HAVE_PUTENV)
2596 static void unsetenv(const char *k)
2597 {
2598 	char b[256];
2599 	evutil_snprintf(b, sizeof(b), "%s=",k);
2600 	putenv(b);
2601 }
2602 #define UNSETENV_OK
2603 #endif
2604 
2605 #if defined(SETENV_OK) && defined(UNSETENV_OK)
2606 static void
2607 methodname_to_envvar(const char *mname, char *buf, size_t buflen)
2608 {
2609 	char *cp;
2610 	evutil_snprintf(buf, buflen, "EVENT_NO%s", mname);
2611 	for (cp = buf; *cp; ++cp) {
2612 		*cp = EVUTIL_TOUPPER_(*cp);
2613 	}
2614 }
2615 #endif
2616 
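/* test_base_environ: setting EVENT_NO<METHOD> in the environment should keep
 * event_base_new() from picking that backend, while the
 * EVENT_BASE_FLAG_IGNORE_ENV flag should make the base ignore the variable
 * again.  On platforms where setenv()/unsetenv() cannot even be faked with
 * putenv(), the test is skipped. */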
2617 static void
2618 test_base_environ(void *arg)
2619 {
2620 	struct event_base *base = NULL;
2621 	struct event_config *cfg = NULL;
2622 
2623 #if defined(SETENV_OK) && defined(UNSETENV_OK)
2624 	const char **basenames;
2625 	int i, n_methods=0;
2626 	char varbuf[128];
2627 	const char *defaultname, *ignoreenvname;
2628 
2629 	/* See if unsetenv works before we rely on it. */
2630 	setenv("EVENT_NOWAFFLES", "1", 1);
2631 	unsetenv("EVENT_NOWAFFLES");
2632 	if (getenv("EVENT_NOWAFFLES") != NULL) {
2633 #ifndef EVENT__HAVE_UNSETENV
2634 		TT_DECLARE("NOTE", ("Can't fake unsetenv; skipping test"));
2635 #else
2636 		TT_DECLARE("NOTE", ("unsetenv doesn't work; skipping test"));
2637 #endif
2638 		tt_skip();
2639 	}
2640 
2641 	basenames = event_get_supported_methods();
2642 	for (i = 0; basenames[i]; ++i) {
2643 		methodname_to_envvar(basenames[i], varbuf, sizeof(varbuf));
2644 		unsetenv(varbuf);
2645 		++n_methods;
2646 	}
2647 
2648 	base = event_base_new();
2649 	tt_assert(base);
2650 
2651 	defaultname = event_base_get_method(base);
2652 	TT_BLATHER(("default is <%s>", defaultname));
2653 	event_base_free(base);
2654 	base = NULL;
2655 
2656 	/* Can we disable the method with EVENT_NOfoo ? */
2657 	if (!strcmp(defaultname, "epoll (with changelist)")) {
2658  		setenv("EVENT_NOEPOLL", "1", 1);
2659 		ignoreenvname = "epoll";
2660 	} else {
2661 		methodname_to_envvar(defaultname, varbuf, sizeof(varbuf));
2662 		setenv(varbuf, "1", 1);
2663 		ignoreenvname = defaultname;
2664 	}
2665 
2666 	/* Use an empty cfg rather than NULL so a failure doesn't exit() */
2667 	cfg = event_config_new();
2668 	base = event_base_new_with_config(cfg);
2669 	event_config_free(cfg);
2670 	cfg = NULL;
2671 	if (n_methods == 1) {
2672 		tt_assert(!base);
2673 	} else {
2674 		tt_assert(base);
2675 		tt_str_op(defaultname, !=, event_base_get_method(base));
2676 		event_base_free(base);
2677 		base = NULL;
2678 	}
2679 
2680 	/* Can we disable looking at the environment with IGNORE_ENV ? */
2681 	cfg = event_config_new();
2682 	event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV);
2683 	base = event_base_new_with_config(cfg);
2684 	tt_assert(base);
2685 	tt_str_op(ignoreenvname, ==, event_base_get_method(base));
2686 #else
2687 	tt_skip();
2688 #endif
2689 
2690 end:
2691 	if (base)
2692 		event_base_free(base);
2693 	if (cfg)
2694 		event_config_free(cfg);
2695 }
2696 
2697 static void
2698 read_called_once_cb(evutil_socket_t fd, short event, void *arg)
2699 {
2700 	tt_int_op(event, ==, EV_READ);
2701 	called += 1;
2702 end:
2703 	;
2704 }
2705 
2706 static void
2707 timeout_called_once_cb(evutil_socket_t fd, short event, void *arg)
2708 {
2709 	tt_int_op(event, ==, EV_TIMEOUT);
2710 	called += 100;
2711 end:
2712 	;
2713 }
2714 
2715 static void
2716 immediate_called_twice_cb(evutil_socket_t fd, short event, void *arg)
2717 {
2718 	tt_int_op(event, ==, EV_TIMEOUT);
2719 	called += 1000;
2720 end:
2721 	;
2722 }
2723 
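/* test_event_once: each one-shot callback bumps "called" by a distinct
 * amount (read: +1, the 50 msec timeout: +100, each of the two immediate
 * timeouts: +1000), so the expected total is 1 + 100 + 2*1000 = 2101.  The
 * malformed request with no events and no callback must be rejected. */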
2724 static void
2725 test_event_once(void *ptr)
2726 {
2727 	struct basic_test_data *data = ptr;
2728 	struct timeval tv;
2729 	int r;
2730 
2731 	tv.tv_sec = 0;
2732 	tv.tv_usec = 50*1000;
2733 	called = 0;
2734 	r = event_base_once(data->base, data->pair[0], EV_READ,
2735 	    read_called_once_cb, NULL, NULL);
2736 	tt_int_op(r, ==, 0);
2737 	r = event_base_once(data->base, -1, EV_TIMEOUT,
2738 	    timeout_called_once_cb, NULL, &tv);
2739 	tt_int_op(r, ==, 0);
2740 	r = event_base_once(data->base, -1, 0, NULL, NULL, NULL);
2741 	tt_int_op(r, <, 0);
2742 	r = event_base_once(data->base, -1, EV_TIMEOUT,
2743 	    immediate_called_twice_cb, NULL, NULL);
2744 	tt_int_op(r, ==, 0);
2745 	tv.tv_sec = 0;
2746 	tv.tv_usec = 0;
2747 	r = event_base_once(data->base, -1, EV_TIMEOUT,
2748 	    immediate_called_twice_cb, NULL, &tv);
2749 	tt_int_op(r, ==, 0);
2750 
2751 	if (write(data->pair[1], TEST1, strlen(TEST1)+1) < 0) {
2752 		tt_fail_perror("write");
2753 	}
2754 
2755 	shutdown(data->pair[1], EVUTIL_SHUT_WR);
2756 
2757 	event_base_dispatch(data->base);
2758 
2759 	tt_int_op(called, ==, 2101);
2760 end:
2761 	;
2762 }
2763 
2764 static void
2765 test_event_once_never(void *ptr)
2766 {
2767 	struct basic_test_data *data = ptr;
2768 	struct timeval tv;
2769 
2770 	/* Have one trigger in 10 seconds (don't worry: it should never fire, because the loop exits first) */
2771 	tv.tv_sec = 10;
2772 	tv.tv_usec = 0;
2773 	called = 0;
2774 	event_base_once(data->base, -1, EV_TIMEOUT,
2775 	    timeout_called_once_cb, NULL, &tv);
2776 
2777 	/* But shut down the base in 75 msec. */
2778 	tv.tv_sec = 0;
2779 	tv.tv_usec = 75*1000;
2780 	event_base_loopexit(data->base, &tv);
2781 
2782 	event_base_dispatch(data->base);
2783 
2784 	tt_int_op(called, ==, 0);
2785 end:
2786 	;
2787 }
2788 
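/* test_event_pending: event_pending() should report exactly the events an
 * added event is waiting for, and the timeout it returns for the timer
 * should be an absolute time roughly 500 msec past "now". */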
2789 static void
2790 test_event_pending(void *ptr)
2791 {
2792 	struct basic_test_data *data = ptr;
2793 	struct event *r=NULL, *w=NULL, *t=NULL;
2794 	struct timeval tv, now, tv2;
2795 
2796 	tv.tv_sec = 0;
2797 	tv.tv_usec = 500 * 1000;
2798 	r = event_new(data->base, data->pair[0], EV_READ, simple_read_cb,
2799 	    NULL);
2800 	w = event_new(data->base, data->pair[1], EV_WRITE, simple_write_cb,
2801 	    NULL);
2802 	t = evtimer_new(data->base, timeout_cb, NULL);
2803 
2804 	tt_assert(r);
2805 	tt_assert(w);
2806 	tt_assert(t);
2807 
2808 	evutil_gettimeofday(&now, NULL);
2809 	event_add(r, NULL);
2810 	event_add(t, &tv);
2811 
2812 	tt_assert( event_pending(r, EV_READ, NULL));
2813 	tt_assert(!event_pending(w, EV_WRITE, NULL));
2814 	tt_assert(!event_pending(r, EV_WRITE, NULL));
2815 	tt_assert( event_pending(r, EV_READ|EV_WRITE, NULL));
2816 	tt_assert(!event_pending(r, EV_TIMEOUT, NULL));
2817 	tt_assert( event_pending(t, EV_TIMEOUT, NULL));
2818 	tt_assert( event_pending(t, EV_TIMEOUT, &tv2));
2819 
2820 	tt_assert(evutil_timercmp(&tv2, &now, >));
2821 
2822 	test_timeval_diff_eq(&now, &tv2, 500);
2823 
2824 end:
2825 	if (r) {
2826 		event_del(r);
2827 		event_free(r);
2828 	}
2829 	if (w) {
2830 		event_del(w);
2831 		event_free(w);
2832 	}
2833 	if (t) {
2834 		event_del(t);
2835 		event_free(t);
2836 	}
2837 }
2838 
2839 static void
2840 dfd_cb(evutil_socket_t fd, short e, void *data)
2841 {
2842 	*(int*)data = (int)e;
2843 }
2844 
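/* test_event_closed_fd_poll: only meaningful for the "poll" backend (other
 * backends skip).  A read event with a 500 msec timeout is added and its fd
 * is closed before the loop runs; the callback is still expected to fire
 * with EV_READ rather than waiting for the timeout. */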
2845 static void
2846 test_event_closed_fd_poll(void *arg)
2847 {
2848 	struct timeval tv;
2849 	struct event *e;
2850 	struct basic_test_data *data = (struct basic_test_data *)arg;
2851 	int i = 0;
2852 
2853 	if (strcmp(event_base_get_method(data->base), "poll")) {
2854 		tinytest_set_test_skipped_();
2855 		return;
2856 	}
2857 
2858 	e = event_new(data->base, data->pair[0], EV_READ, dfd_cb, &i);
2859 	tt_assert(e);
2860 
2861 	tv.tv_sec = 0;
2862 	tv.tv_usec = 500 * 1000;
2863 	event_add(e, &tv);
2864 	tt_assert(event_pending(e, EV_READ, NULL));
2865 	close(data->pair[0]);
2866 	data->pair[0] = -1; /** avoids double-close */
2867 	event_base_loop(data->base, EVLOOP_ONCE);
2868 	tt_int_op(i, ==, EV_READ);
2869 
2870 end:
2871 	if (e) {
2872 		event_del(e);
2873 		event_free(e);
2874 	}
2875 }
2876 
2877 #ifndef _WIN32
2878 /* You can't do this test on windows, since dup2 doesn't work on sockets */
2879 
2880 /* Regression test for our workaround for a fun epoll/linux related bug
2881  * where fd2 = dup(fd1); add(fd2); close(fd2); dup2(fd1,fd2); add(fd2)
2882  * will get you an EEXIST */
2883 static void
2884 test_dup_fd(void *arg)
2885 {
2886 	struct basic_test_data *data = arg;
2887 	struct event_base *base = data->base;
2888 	struct event *ev1=NULL, *ev2=NULL;
2889 	int fd, dfd=-1;
2890 	int ev1_got, ev2_got;
2891 
2892 	tt_int_op(write(data->pair[0], "Hello world",
2893 		strlen("Hello world")), >, 0);
2894 	fd = data->pair[1];
2895 
2896 	dfd = dup(fd);
2897 	tt_int_op(dfd, >=, 0);
2898 
2899 	ev1 = event_new(base, fd, EV_READ|EV_PERSIST, dfd_cb, &ev1_got);
2900 	ev2 = event_new(base, dfd, EV_READ|EV_PERSIST, dfd_cb, &ev2_got);
2901 	ev1_got = ev2_got = 0;
2902 	event_add(ev1, NULL);
2903 	event_add(ev2, NULL);
2904 	event_base_loop(base, EVLOOP_ONCE);
2905 	tt_int_op(ev1_got, ==, EV_READ);
2906 	tt_int_op(ev2_got, ==, EV_READ);
2907 
2908 	/* Now close and delete dfd then dispatch.  We need to do the
2909 	 * dispatch here so that when we add it later, we think there
2910 	 * was an intermediate delete. */
2911 	close(dfd);
2912 	event_del(ev2);
2913 	ev1_got = ev2_got = 0;
2914 	event_base_loop(base, EVLOOP_ONCE);
2915 	tt_want_int_op(ev1_got, ==, EV_READ);
2916 	tt_int_op(ev2_got, ==, 0);
2917 
2918 	/* Re-duplicate the fd.  We need to get the same duplicated
2919 	 * value that we closed to provoke the epoll quirk.  Also, we
2920 	 * need to change the events to write, or else the old lingering
2921 	 * read event will make the test pass whether the change was
2922 	 * successful or not. */
2923 	tt_int_op(dup2(fd, dfd), ==, dfd);
2924 	event_free(ev2);
2925 	ev2 = event_new(base, dfd, EV_WRITE|EV_PERSIST, dfd_cb, &ev2_got);
2926 	event_add(ev2, NULL);
2927 	ev1_got = ev2_got = 0;
2928 	event_base_loop(base, EVLOOP_ONCE);
2929 	tt_want_int_op(ev1_got, ==, EV_READ);
2930 	tt_int_op(ev2_got, ==, EV_WRITE);
2931 
2932 end:
2933 	if (ev1)
2934 		event_free(ev1);
2935 	if (ev2)
2936 		event_free(ev2);
2937 	if (dfd >= 0)
2938 		close(dfd);
2939 }
2940 #endif
2941 
2942 #ifdef EVENT__DISABLE_MM_REPLACEMENT
2943 static void
2944 test_mm_functions(void *arg)
2945 {
2946 	tinytest_set_test_skipped_();
2947 }
2948 #else
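/* The replacement allocators below prepend a 16-byte guard string to every
 * allocation and hand back the pointer just past it; check_dummy_mem_ok()
 * walks back and verifies the guard.  test_mm_functions() installs them via
 * event_set_mem_functions() and then checks that a freshly built event_base
 * was indeed allocated through them. */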
2949 static int
2950 check_dummy_mem_ok(void *mem_)
2951 {
2952 	char *mem = mem_;
2953 	mem -= 16;
2954 	return !memcmp(mem, "{[<guardedram>]}", 16);
2955 }
2956 
2957 static void *
2958 dummy_malloc(size_t len)
2959 {
2960 	char *mem = malloc(len+16);
2961 	memcpy(mem, "{[<guardedram>]}", 16);
2962 	return mem+16;
2963 }
2964 
2965 static void *
2966 dummy_realloc(void *mem_, size_t len)
2967 {
2968 	char *mem = mem_;
2969 	if (!mem)
2970 		return dummy_malloc(len);
2971 	tt_want(check_dummy_mem_ok(mem_));
2972 	mem -= 16;
2973 	mem = realloc(mem, len+16);
2974 	return mem+16;
2975 }
2976 
2977 static void
2978 dummy_free(void *mem_)
2979 {
2980 	char *mem = mem_;
2981 	tt_want(check_dummy_mem_ok(mem_));
2982 	mem -= 16;
2983 	free(mem);
2984 }
2985 
2986 static void
2987 test_mm_functions(void *arg)
2988 {
2989 	struct event_base *b = NULL;
2990 	struct event_config *cfg = NULL;
2991 	event_set_mem_functions(dummy_malloc, dummy_realloc, dummy_free);
2992 	cfg = event_config_new();
2993 	event_config_avoid_method(cfg, "Nonesuch");
2994 	b = event_base_new_with_config(cfg);
2995 	tt_assert(b);
2996 	tt_assert(check_dummy_mem_ok(b));
2997 end:
2998 	if (cfg)
2999 		event_config_free(cfg);
3000 	if (b)
3001 		event_base_free(b);
3002 }
3003 #endif
3004 
3005 static void
3006 many_event_cb(evutil_socket_t fd, short event, void *arg)
3007 {
3008 	int *calledp = arg;
3009 	*calledp += 1;
3010 }
3011 
3012 static void
3013 test_many_events(void *arg)
3014 {
3015 	/* Try 70 events that should all be ready at once.  This will
3016 	 * exercise the "resize" code on most of the backends, and will make
3017 	 * sure that we can get past the 64-handle limit of some windows
3018 	 * functions. */
3019 #define MANY 70
3020 
3021 	struct basic_test_data *data = arg;
3022 	struct event_base *base = data->base;
3023 	int one_at_a_time = data->setup_data != NULL;
3024 	evutil_socket_t sock[MANY];
3025 	struct event *ev[MANY];
3026 	int called[MANY];
3027 	int i;
3028 	int loopflags = EVLOOP_NONBLOCK, evflags=0;
3029 	if (one_at_a_time) {
3030 		loopflags |= EVLOOP_ONCE;
3031 		evflags = EV_PERSIST;
3032 	}
3033 
3034 	memset(sock, 0xff, sizeof(sock));
3035 	memset(ev, 0, sizeof(ev));
3036 	memset(called, 0, sizeof(called));
3037 
3038 	for (i = 0; i < MANY; ++i) {
3039 		/* We need an event that will hit the backend, and that will
3040 		 * be ready immediately.  "Send a datagram" is an easy
3041 		 * instance of that. */
3042 		sock[i] = socket(AF_INET, SOCK_DGRAM, 0);
3043 		tt_assert(sock[i] >= 0);
3044 		called[i] = 0;
3045 		ev[i] = event_new(base, sock[i], EV_WRITE|evflags,
3046 		    many_event_cb, &called[i]);
3047 		event_add(ev[i], NULL);
3048 		if (one_at_a_time)
3049 			event_base_loop(base, EVLOOP_NONBLOCK|EVLOOP_ONCE);
3050 	}
3051 
3052 	event_base_loop(base, loopflags);
3053 
3054 	for (i = 0; i < MANY; ++i) {
3055 		if (one_at_a_time)
3056 			tt_int_op(called[i], ==, MANY - i + 1);
3057 		else
3058 			tt_int_op(called[i], ==, 1);
3059 	}
3060 
3061 end:
3062 	for (i = 0; i < MANY; ++i) {
3063 		if (ev[i])
3064 			event_free(ev[i]);
3065 		if (sock[i] >= 0)
3066 			evutil_closesocket(sock[i]);
3067 	}
3068 #undef MANY
3069 }
3070 
3071 static void
3072 test_struct_event_size(void *arg)
3073 {
3074 	tt_int_op(event_get_struct_event_size(), <=, sizeof(struct event));
3075 end:
3076 	;
3077 }
3078 
3079 static void
3080 test_get_assignment(void *arg)
3081 {
3082 	struct basic_test_data *data = arg;
3083 	struct event_base *base = data->base;
3084 	struct event *ev1 = NULL;
3085 	const char *str = "foo";
3086 
3087 	struct event_base *b;
3088 	evutil_socket_t s;
3089 	short what;
3090 	event_callback_fn cb;
3091 	void *cb_arg;
3092 
3093 	ev1 = event_new(base, data->pair[1], EV_READ, dummy_read_cb, (void*)str);
3094 	event_get_assignment(ev1, &b, &s, &what, &cb, &cb_arg);
3095 
3096 	tt_ptr_op(b, ==, base);
3097 	tt_int_op(s, ==, data->pair[1]);
3098 	tt_int_op(what, ==, EV_READ);
3099 	tt_ptr_op(cb, ==, dummy_read_cb);
3100 	tt_ptr_op(cb_arg, ==, str);
3101 
3102 	/* Now make sure this doesn't crash. */
3103 	event_get_assignment(ev1, NULL, NULL, NULL, NULL, NULL);
3104 
3105 end:
3106 	if (ev1)
3107 		event_free(ev1);
3108 }
3109 
3110 struct foreach_helper {
3111 	int count;
3112 	const struct event *ev;
3113 };
3114 
3115 static int
3116 foreach_count_cb(const struct event_base *base, const struct event *ev, void *arg)
3117 {
3118 	struct foreach_helper *h = event_get_callback_arg(ev);
3119 	struct timeval *tv = arg;
3120 	if (event_get_callback(ev) != timeout_cb)
3121 		return 0;
3122 	tt_ptr_op(event_get_base(ev), ==, base);
3123 	tt_int_op(tv->tv_sec, ==, 10);
3124 	h->ev = ev;
3125 	h->count++;
3126 	return 0;
3127 end:
3128 	return -1;
3129 }
3130 
3131 static int
3132 foreach_find_cb(const struct event_base *base, const struct event *ev, void *arg)
3133 {
3134 	const struct event **ev_out = arg;
3135 	struct foreach_helper *h = event_get_callback_arg(ev);
3136 	if (event_get_callback(ev) != timeout_cb)
3137 		return 0;
3138 	if (h->count == 99) {
3139 		*ev_out = ev;
3140 		return 101;
3141 	}
3142 	return 0;
3143 }
3144 
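/* test_event_foreach: event_base_foreach_event() must reject NULL arguments,
 * visit every added or active event exactly once, and propagate a nonzero
 * return value from the callback.  foreach_count_cb tallies visits per
 * event; foreach_find_cb returns 101 for the helper whose count was set to
 * 99, which should stop the iteration and become the function's result. */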
3145 static void
3146 test_event_foreach(void *arg)
3147 {
3148 	struct basic_test_data *data = arg;
3149 	struct event_base *base = data->base;
3150 	struct event *ev[5];
3151 	struct foreach_helper visited[5];
3152 	int i;
3153 	struct timeval ten_sec = {10,0};
3154 	const struct event *ev_found = NULL;
3155 
3156 	for (i = 0; i < 5; ++i) {
3157 		visited[i].count = 0;
3158 		visited[i].ev = NULL;
3159 		ev[i] = event_new(base, -1, 0, timeout_cb, &visited[i]);
3160 	}
3161 
3162 	tt_int_op(-1, ==, event_base_foreach_event(NULL, foreach_count_cb, NULL));
3163 	tt_int_op(-1, ==, event_base_foreach_event(base, NULL, NULL));
3164 
3165 	event_add(ev[0], &ten_sec);
3166 	event_add(ev[1], &ten_sec);
3167 	event_active(ev[1], EV_TIMEOUT, 1);
3168 	event_active(ev[2], EV_TIMEOUT, 1);
3169 	event_add(ev[3], &ten_sec);
3170 	/* Don't touch ev[4]. */
3171 
3172 	tt_int_op(0, ==, event_base_foreach_event(base, foreach_count_cb,
3173 		&ten_sec));
3174 	tt_int_op(1, ==, visited[0].count);
3175 	tt_int_op(1, ==, visited[1].count);
3176 	tt_int_op(1, ==, visited[2].count);
3177 	tt_int_op(1, ==, visited[3].count);
3178 	tt_ptr_op(ev[0], ==, visited[0].ev);
3179 	tt_ptr_op(ev[1], ==, visited[1].ev);
3180 	tt_ptr_op(ev[2], ==, visited[2].ev);
3181 	tt_ptr_op(ev[3], ==, visited[3].ev);
3182 
3183 	visited[2].count = 99;
3184 	tt_int_op(101, ==, event_base_foreach_event(base, foreach_find_cb,
3185 		&ev_found));
3186 	tt_ptr_op(ev_found, ==, ev[2]);
3187 
3188 end:
3189 	for (i=0; i<5; ++i) {
3190 		event_free(ev[i]);
3191 	}
3192 }
3193 
3194 static struct event_base *cached_time_base = NULL;
3195 static int cached_time_reset = 0;
3196 static int cached_time_sleep = 0;
3197 static void
3198 cache_time_cb(evutil_socket_t fd, short what, void *arg)
3199 {
3200 	struct timeval *tv = arg;
3201 	tt_int_op(0, ==, event_base_gettimeofday_cached(cached_time_base, tv));
3202 	if (cached_time_sleep) {
3203 		struct timeval delay = { 0, 30*1000 };
3204 		evutil_usleep_(&delay);
3205 	}
3206 	if (cached_time_reset) {
3207 		event_base_update_cache_time(cached_time_base);
3208 	}
3209 end:
3210 	;
3211 }
3212 
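/* test_gettimeofday_cached: three immediately-activated callbacks each
 * record event_base_gettimeofday_cached().  The setup string selects whether
 * the callbacks sleep for 30 msec, force event_base_update_cache_time(), or
 * build the base with EVENT_BASE_FLAG_NO_CACHE_TIME.  With plain caching all
 * three timestamps should match; with sleeping callbacks plus either a
 * forced refresh or a disabled cache, they should differ by more than
 * 10 msec. */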
3213 static void
3214 test_gettimeofday_cached(void *arg)
3215 {
3216 	struct basic_test_data *data = arg;
3217 	struct event_config *cfg = NULL;
3218 	struct event_base *base = NULL;
3219 	struct timeval tv1, tv2, tv3, now;
3220 	struct event *ev1=NULL, *ev2=NULL, *ev3=NULL;
3221 	int cached_time_disable = strstr(data->setup_data, "disable") != NULL;
3222 
3223 	cfg = event_config_new();
3224 	if (cached_time_disable) {
3225 		event_config_set_flag(cfg, EVENT_BASE_FLAG_NO_CACHE_TIME);
3226 	}
3227 	cached_time_base = base = event_base_new_with_config(cfg);
3228 	tt_assert(base);
3229 
3230 	/* Try gettimeofday_cached outside of an event loop. */
3231 	evutil_gettimeofday(&now, NULL);
3232 	tt_int_op(0, ==, event_base_gettimeofday_cached(NULL, &tv1));
3233 	tt_int_op(0, ==, event_base_gettimeofday_cached(base, &tv2));
3234 	tt_int_op(timeval_msec_diff(&tv1, &tv2), <, 10);
3235 	tt_int_op(timeval_msec_diff(&tv1, &now), <, 10);
3236 
3237 	cached_time_reset = strstr(data->setup_data, "reset") != NULL;
3238 	cached_time_sleep = strstr(data->setup_data, "sleep") != NULL;
3239 
3240 	ev1 = event_new(base, -1, 0, cache_time_cb, &tv1);
3241 	ev2 = event_new(base, -1, 0, cache_time_cb, &tv2);
3242 	ev3 = event_new(base, -1, 0, cache_time_cb, &tv3);
3243 
3244 	event_active(ev1, EV_TIMEOUT, 1);
3245 	event_active(ev2, EV_TIMEOUT, 1);
3246 	event_active(ev3, EV_TIMEOUT, 1);
3247 
3248 	event_base_dispatch(base);
3249 
3250 	if (cached_time_reset && cached_time_sleep) {
3251 		tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10);
3252 		tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10);
3253 	} else if (cached_time_disable && cached_time_sleep) {
3254 		tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10);
3255 		tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10);
3256 	} else if (! cached_time_disable) {
3257 		tt_assert(evutil_timercmp(&tv1, &tv2, ==));
3258 		tt_assert(evutil_timercmp(&tv2, &tv3, ==));
3259 	}
3260 
3261 end:
3262 	if (ev1)
3263 		event_free(ev1);
3264 	if (ev2)
3265 		event_free(ev2);
3266 	if (ev3)
3267 		event_free(ev3);
3268 	if (base)
3269 		event_base_free(base);
3270 	if (cfg)
3271 		event_config_free(cfg);
3272 }
3273 
3274 static void
3275 tabf_cb(evutil_socket_t fd, short what, void *arg)
3276 {
3277 	int *ptr = arg;
3278 	*ptr = what;
3279 	*ptr += 0x10000;
3280 }
3281 
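/* test_active_by_fd: event_base_active_by_fd() and
 * event_base_active_by_signal() are first called with bogus arguments (they
 * must simply do nothing), then used to activate the write event on pair[0],
 * the read events on pair[1], and the SIGHUP event.  tabf_cb records the
 * "what" flags plus a 0x10000 marker, so the checks can tell which events
 * were activated and which (ev1) were left untouched. */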
3282 static void
3283 test_active_by_fd(void *arg)
3284 {
3285 	struct basic_test_data *data = arg;
3286 	struct event_base *base = data->base;
3287 	struct event *ev1 = NULL, *ev2 = NULL, *ev3 = NULL, *ev4 = NULL;
3288 	int e1,e2,e3,e4;
3289 #ifndef _WIN32
3290 	struct event *evsig = NULL;
3291 	int es;
3292 #endif
3293 	struct timeval tenmin = { 600, 0 };
3294 
3295 	/* Ensure no crash on nonexistent FD. */
3296 	event_base_active_by_fd(base, 1000, EV_READ);
3297 
3298 	/* Ensure no crash on bogus FD. */
3299 	event_base_active_by_fd(base, -1, EV_READ);
3300 
3301 	/* Ensure no crash on nonexistent/bogus signal. */
3302 	event_base_active_by_signal(base, 1000);
3303 	event_base_active_by_signal(base, -1);
3304 
3305 	event_base_assert_ok_(base);
3306 
3307 	e1 = e2 = e3 = e4 = 0;
3308 	ev1 = event_new(base, data->pair[0], EV_READ, tabf_cb, &e1);
3309 	ev2 = event_new(base, data->pair[0], EV_WRITE, tabf_cb, &e2);
3310 	ev3 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e3);
3311 	ev4 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e4);
3312 	tt_assert(ev1);
3313 	tt_assert(ev2);
3314 	tt_assert(ev3);
3315 	tt_assert(ev4);
3316 #ifndef _WIN32
3317 	evsig = event_new(base, SIGHUP, EV_SIGNAL, tabf_cb, &es);
3318 	tt_assert(evsig);
3319 	event_add(evsig, &tenmin);
3320 #endif
3321 
3322 	event_add(ev1, &tenmin);
3323 	event_add(ev2, NULL);
3324 	event_add(ev3, NULL);
3325 	event_add(ev4, &tenmin);
3326 
3327 
3328 	event_base_assert_ok_(base);
3329 
3330 	/* Trigger 2, 3, 4 */
3331 	event_base_active_by_fd(base, data->pair[0], EV_WRITE);
3332 	event_base_active_by_fd(base, data->pair[1], EV_READ);
3333 #ifndef _WIN32
3334 	event_base_active_by_signal(base, SIGHUP);
3335 #endif
3336 
3337 	event_base_assert_ok_(base);
3338 
3339 	event_base_loop(base, EVLOOP_ONCE);
3340 
3341 	tt_int_op(e1, ==, 0);
3342 	tt_int_op(e2, ==, EV_WRITE | 0x10000);
3343 	tt_int_op(e3, ==, EV_READ | 0x10000);
3344 	/* Mask out EV_WRITE here, since it could be genuinely writeable. */
3345 	tt_int_op((e4 & ~EV_WRITE), ==, EV_READ | 0x10000);
3346 #ifndef _WIN32
3347 	tt_int_op(es, ==, EV_SIGNAL | 0x10000);
3348 #endif
3349 
3350 end:
3351 	if (ev1)
3352 		event_free(ev1);
3353 	if (ev2)
3354 		event_free(ev2);
3355 	if (ev3)
3356 		event_free(ev3);
3357 	if (ev4)
3358 		event_free(ev4);
3359 #ifndef _WIN32
3360 	if (evsig)
3361 		event_free(evsig);
3362 #endif
3363 }
3364 
3365 struct testcase_t main_testcases[] = {
3366 	/* Some converted-over tests */
3367 	{ "methods", test_methods, TT_FORK, NULL, NULL },
3368 	{ "version", test_version, 0, NULL, NULL },
3369 	BASIC(base_features, TT_FORK|TT_NO_LOGS),
3370 	{ "base_environ", test_base_environ, TT_FORK, NULL, NULL },
3371 
3372 	BASIC(event_base_new, TT_FORK|TT_NEED_SOCKETPAIR),
3373 	BASIC(free_active_base, TT_FORK|TT_NEED_SOCKETPAIR),
3374 
3375 	BASIC(manipulate_active_events, TT_FORK|TT_NEED_BASE),
3376 	BASIC(event_new_selfarg, TT_FORK|TT_NEED_BASE),
3377 	BASIC(event_assign_selfarg, TT_FORK|TT_NEED_BASE),
3378 	BASIC(event_base_get_num_events, TT_FORK|TT_NEED_BASE),
3379 	BASIC(event_base_get_max_events, TT_FORK|TT_NEED_BASE),
3380 
3381 	BASIC(bad_assign, TT_FORK|TT_NEED_BASE|TT_NO_LOGS),
3382 	BASIC(bad_reentrant, TT_FORK|TT_NEED_BASE|TT_NO_LOGS),
3383 	BASIC(active_later, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
3384 	BASIC(event_remove_timeout, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
3385 
3386 	/* These are still using the old API */
3387 	LEGACY(persistent_timeout, TT_FORK|TT_NEED_BASE),
3388 	{ "persistent_timeout_jump", test_persistent_timeout_jump, TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
3389 	{ "persistent_active_timeout", test_persistent_active_timeout,
3390 	  TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
3391 	LEGACY(priorities, TT_FORK|TT_NEED_BASE),
3392 	BASIC(priority_active_inversion, TT_FORK|TT_NEED_BASE),
3393 	{ "common_timeout", test_common_timeout, TT_FORK|TT_NEED_BASE,
3394 	  &basic_setup, NULL },
3395 
3396 	/* These legacy tests may not all need all of these flags. */
3397 	LEGACY(simpleread, TT_ISOLATED),
3398 	LEGACY(simpleread_multiple, TT_ISOLATED),
3399 	LEGACY(simplewrite, TT_ISOLATED),
3400 	{ "simpleclose", test_simpleclose, TT_FORK, &basic_setup,
3401 	  NULL },
3402 	LEGACY(multiple, TT_ISOLATED),
3403 	LEGACY(persistent, TT_ISOLATED),
3404 	LEGACY(combined, TT_ISOLATED),
3405 	LEGACY(simpletimeout, TT_ISOLATED),
3406 	LEGACY(loopbreak, TT_ISOLATED),
3407 	LEGACY(loopexit, TT_ISOLATED),
3408 	LEGACY(loopexit_multiple, TT_ISOLATED),
3409 	LEGACY(nonpersist_readd, TT_ISOLATED),
3410 	LEGACY(multiple_events_for_same_fd, TT_ISOLATED),
3411 	LEGACY(want_only_once, TT_ISOLATED),
3412 	{ "event_once", test_event_once, TT_ISOLATED, &basic_setup, NULL },
3413 	{ "event_once_never", test_event_once_never, TT_ISOLATED, &basic_setup, NULL },
3414 	{ "event_pending", test_event_pending, TT_ISOLATED, &basic_setup,
3415 	  NULL },
3416 	{ "event_closed_fd_poll", test_event_closed_fd_poll, TT_ISOLATED, &basic_setup,
3417 	  NULL },
3418 
3419 #ifndef _WIN32
3420 	{ "dup_fd", test_dup_fd, TT_ISOLATED, &basic_setup, NULL },
3421 #endif
3422 	{ "mm_functions", test_mm_functions, TT_FORK, NULL, NULL },
3423 	{ "many_events", test_many_events, TT_ISOLATED, &basic_setup, NULL },
3424 	{ "many_events_slow_add", test_many_events, TT_ISOLATED, &basic_setup, (void*)1 },
3425 
3426 	{ "struct_event_size", test_struct_event_size, 0, NULL, NULL },
3427 	BASIC(get_assignment, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
3428 
3429 	BASIC(event_foreach, TT_FORK|TT_NEED_BASE),
3430 	{ "gettimeofday_cached", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"" },
3431 	{ "gettimeofday_cached_sleep", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep" },
3432 	{ "gettimeofday_cached_reset", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep reset" },
3433 	{ "gettimeofday_cached_disabled", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep disable" },
3434 	{ "gettimeofday_cached_disabled_nosleep", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"disable" },
3435 
3436 	BASIC(active_by_fd, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
3437 
3438 #ifndef _WIN32
3439 	LEGACY(fork, TT_ISOLATED),
3440 #endif
3441 #ifdef EVENT__HAVE_PTHREADS
3442 	/** TODO: support win32 */
3443 	LEGACY(del_wait, TT_ISOLATED|TT_NEED_THREADS),
3444 #endif
3445 
3446 	END_OF_TESTCASES
3447 };
3448 
3449 struct testcase_t evtag_testcases[] = {
3450 	{ "int", evtag_int_test, TT_FORK, NULL, NULL },
3451 	{ "fuzz", evtag_fuzz, TT_FORK, NULL, NULL },
3452 	{ "encoding", evtag_tag_encoding, TT_FORK, NULL, NULL },
3453 	{ "peek", evtag_test_peek, 0, NULL, NULL },
3454 
3455 	END_OF_TESTCASES
3456 };
3457 
3458 struct testcase_t signal_testcases[] = {
3459 #ifndef _WIN32
3460 	LEGACY(simplestsignal, TT_ISOLATED),
3461 	LEGACY(simplesignal, TT_ISOLATED),
3462 	LEGACY(multiplesignal, TT_ISOLATED),
3463 	LEGACY(immediatesignal, TT_ISOLATED),
3464 	LEGACY(signal_dealloc, TT_ISOLATED),
3465 	LEGACY(signal_pipeloss, TT_ISOLATED),
3466 	LEGACY(signal_switchbase, TT_ISOLATED|TT_NO_LOGS),
3467 	LEGACY(signal_restore, TT_ISOLATED),
3468 	LEGACY(signal_assert, TT_ISOLATED),
3469 	LEGACY(signal_while_processing, TT_ISOLATED),
3470 #endif
3471 	END_OF_TESTCASES
3472 };
3473 
3474