/*	$NetBSD: regress_thread.c,v 1.6 2020/05/25 20:47:34 christos Exp $	*/

/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "util-internal.h"

/* The old tests here need assertions to work. */
#undef NDEBUG

#include "event2/event-config.h"

#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef EVENT__HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif

#ifdef EVENT__HAVE_PTHREADS
#include <pthread.h>
#elif defined(_WIN32)
#include <process.h>
#endif
#include <assert.h>
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <time.h>

#include "sys/queue.h"

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "evthread-internal.h"
#include "event-internal.h"
#include "defer-internal.h"
#include "regress.h"
#include "tinytest_macros.h"
#include "time-internal.h"
#include "regress_thread.h"

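/* A lock/condition-variable pair shared between the timer callbacks below
 * and the threads that block waiting to be woken. */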
struct cond_wait {
	void *lock;
	void *cond;
};

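/* Timer callback: grab the lock and broadcast on the condition variable,
 * waking every thread currently waiting on it. */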
static void
wake_all_timeout(evutil_socket_t fd, short what, void *arg)
{
	struct cond_wait *cw = arg;
	EVLOCK_LOCK(cw->lock, 0);
	EVTHREAD_COND_BROADCAST(cw->cond);
	EVLOCK_UNLOCK(cw->lock, 0);
}

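/* Timer callback: grab the lock and signal the condition variable, waking
 * a single waiting thread. */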
static void
wake_one_timeout(evutil_socket_t fd, short what, void *arg)
{
	struct cond_wait *cw = arg;
	EVLOCK_LOCK(cw->lock, 0);
	EVTHREAD_COND_SIGNAL(cw->cond);
	EVLOCK_UNLOCK(cw->lock, 0);
}

#define NUM_THREADS	100
#define NUM_ITERATIONS  100
void *count_lock;
static int count;
static THREAD_FN
basic_thread(void *arg)
{
	struct cond_wait cw;
	struct event_base *base = arg;
	struct event ev;
	int i = 0;

	EVTHREAD_ALLOC_LOCK(cw.lock, 0);
	EVTHREAD_ALLOC_COND(cw.cond);
	assert(cw.lock);
	assert(cw.cond);

	evtimer_assign(&ev, base, wake_all_timeout, &cw);
	for (i = 0; i < NUM_ITERATIONS; i++) {
		struct timeval tv;
		evutil_timerclear(&tv);
		tv.tv_sec = 0;
		tv.tv_usec = 3000;

		EVLOCK_LOCK(cw.lock, 0);
		/* We need to make sure that the event does not fire before
		 * we get to wait on the condition variable. */
		assert(evtimer_add(&ev, &tv) == 0);

		assert(EVTHREAD_COND_WAIT(cw.cond, cw.lock) == 0);
		EVLOCK_UNLOCK(cw.lock, 0);

		EVLOCK_LOCK(count_lock, 0);
		++count;
		EVLOCK_UNLOCK(count_lock, 0);
	}

	/* exit the loop only if all threads fired all timeouts */
	EVLOCK_LOCK(count_lock, 0);
	if (count >= NUM_THREADS * NUM_ITERATIONS)
		event_base_loopexit(base, NULL);
	EVLOCK_UNLOCK(count_lock, 0);

	EVTHREAD_FREE_LOCK(cw.lock, 0);
	EVTHREAD_FREE_COND(cw.cond);

	THREAD_RETURN();
}

static int notification_fd_used = 0;
#ifndef _WIN32
static int got_sigchld = 0;
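/* Signal callback for the "forking" variant: note that SIGCHLD arrived and
 * schedule a loopexit shortly afterwards. */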
static void
sigchld_cb(evutil_socket_t fd, short event, void *arg)
{
	struct timeval tv;
	struct event_base *base = arg;

	got_sigchld++;
	tv.tv_usec = 100000;
	tv.tv_sec = 0;
	event_base_loopexit(base, &tv);
}


static void
notify_fd_cb(evutil_socket_t fd, short event, void *arg)
{
	++notification_fd_used;
}
#endif

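/* "basic"/"forking" test body: spin up NUM_THREADS worker threads that all
 * drive timeouts on the shared base, then check that every timeout fired.
 * In the "forking" variant, the forked child reinitializes the base and runs
 * the workers, while the parent waits for SIGCHLD and checks that its
 * th_notify_fd never became readable. */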
static void
thread_basic(void *arg)
{
	THREAD_T threads[NUM_THREADS];
	struct event ev;
	struct timeval tv;
	int i;
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;

	struct event *notification_event = NULL;
	struct event *sigchld_event = NULL;

	EVTHREAD_ALLOC_LOCK(count_lock, 0);
	tt_assert(count_lock);

	tt_assert(base);
	if (evthread_make_base_notifiable(base)<0) {
		tt_abort_msg("Couldn't make base notifiable!");
	}

#ifndef _WIN32
	if (data->setup_data && !strcmp(data->setup_data, "forking")) {
		pid_t pid;
		int status;
		sigchld_event = evsignal_new(base, SIGCHLD, sigchld_cb, base);
		/* This piggybacks on the th_notify_fd weirdly, and looks
		 * inside libevent internals.  Not a good idea in non-testing
		 * code! */
		notification_event = event_new(base,
		    base->th_notify_fd[0], EV_READ|EV_PERSIST, notify_fd_cb,
		    NULL);
		event_add(sigchld_event, NULL);
		event_add(notification_event, NULL);

		if ((pid = fork()) == 0) {
			event_del(notification_event);
			if (event_reinit(base) < 0) {
				TT_FAIL(("reinit"));
				exit(1);
			}
			event_assign(notification_event, base,
			    base->th_notify_fd[0], EV_READ|EV_PERSIST,
			    notify_fd_cb, NULL);
			event_add(notification_event, NULL);
			goto child;
		}

		event_base_dispatch(base);

		if (waitpid(pid, &status, 0) == -1)
			tt_abort_perror("waitpid");
		TT_BLATHER(("Waitpid okay\n"));

		tt_assert(got_sigchld);
		tt_int_op(notification_fd_used, ==, 0);

		goto end;
	}

child:
#endif
	for (i = 0; i < NUM_THREADS; ++i)
		THREAD_START(threads[i], basic_thread, base);

	evtimer_assign(&ev, base, NULL, NULL);
	evutil_timerclear(&tv);
	tv.tv_sec = 1000;
	event_add(&ev, &tv);

	event_base_dispatch(base);

	for (i = 0; i < NUM_THREADS; ++i)
		THREAD_JOIN(threads[i]);

	event_del(&ev);

	tt_int_op(count, ==, NUM_THREADS * NUM_ITERATIONS);

	EVTHREAD_FREE_LOCK(count_lock, 0);

	TT_BLATHER(("notifications==%d", notification_fd_used));

end:

	if (notification_event)
		event_free(notification_event);
	if (sigchld_event)
		event_free(sigchld_event);
}

#undef NUM_THREADS
#define NUM_THREADS 10

struct alerted_record {
	struct cond_wait *cond;
	struct timeval delay;
	struct timeval alerted_at;
	int timed_out;
};

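/* Worker thread for the condition-variable test: wait on the shared
 * condition, with a timeout if the record specifies a delay, and record
 * when (and how) the thread was woken. */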
static THREAD_FN
wait_for_condition(void *arg)
{
	struct alerted_record *rec = arg;
	int r;

	EVLOCK_LOCK(rec->cond->lock, 0);
	if (rec->delay.tv_sec || rec->delay.tv_usec) {
		r = EVTHREAD_COND_WAIT_TIMED(rec->cond->cond, rec->cond->lock,
		    &rec->delay);
	} else {
		r = EVTHREAD_COND_WAIT(rec->cond->cond, rec->cond->lock);
	}
	EVLOCK_UNLOCK(rec->cond->lock, 0);

	evutil_gettimeofday(&rec->alerted_at, NULL);
	if (r == 1)
		rec->timed_out = 1;

	THREAD_RETURN();
}

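/* "conditions_simple" test body: launch NUM_THREADS waiters, give two of
 * them a 150 msec wait timeout, then signal once after 30 msec and
 * broadcast after 500 msec, and verify that each thread was woken (or timed
 * out) at roughly the expected time. */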
static void
thread_conditions_simple(void *arg)
{
	struct timeval tv_signal, tv_timeout, tv_broadcast;
	struct alerted_record alerted[NUM_THREADS];
	THREAD_T threads[NUM_THREADS];
	struct cond_wait cond;
	int i;
	struct timeval launched_at;
	struct event wake_one;
	struct event wake_all;
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;
	int n_timed_out=0, n_signal=0, n_broadcast=0;

	tv_signal.tv_sec = tv_timeout.tv_sec = tv_broadcast.tv_sec = 0;
	tv_signal.tv_usec = 30*1000;
	tv_timeout.tv_usec = 150*1000;
	tv_broadcast.tv_usec = 500*1000;

	EVTHREAD_ALLOC_LOCK(cond.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	EVTHREAD_ALLOC_COND(cond.cond);
	tt_assert(cond.lock);
	tt_assert(cond.cond);
	for (i = 0; i < NUM_THREADS; ++i) {
		memset(&alerted[i], 0, sizeof(struct alerted_record));
		alerted[i].cond = &cond;
	}

	/* Threads 5 and 6 will be allowed to time out */
	memcpy(&alerted[5].delay, &tv_timeout, sizeof(tv_timeout));
	memcpy(&alerted[6].delay, &tv_timeout, sizeof(tv_timeout));

	evtimer_assign(&wake_one, base, wake_one_timeout, &cond);
	evtimer_assign(&wake_all, base, wake_all_timeout, &cond);

	evutil_gettimeofday(&launched_at, NULL);

	/* Launch the threads... */
	for (i = 0; i < NUM_THREADS; ++i) {
		THREAD_START(threads[i], wait_for_condition, &alerted[i]);
	}

	/* Start the timers... */
	tt_int_op(event_add(&wake_one, &tv_signal), ==, 0);
	tt_int_op(event_add(&wake_all, &tv_broadcast), ==, 0);

	/* And run for a bit... */
	event_base_dispatch(base);

	/* And wait till the threads are done. */
	for (i = 0; i < NUM_THREADS; ++i)
		THREAD_JOIN(threads[i]);

	/* Now, let's see what happened. At least one of 5 or 6 should
	 * have timed out. */
	n_timed_out = alerted[5].timed_out + alerted[6].timed_out;
	tt_int_op(n_timed_out, >=, 1);
	tt_int_op(n_timed_out, <=, 2);

	for (i = 0; i < NUM_THREADS; ++i) {
		const struct timeval *target_delay;
		struct timeval target_time, actual_delay;
		if (alerted[i].timed_out) {
			TT_BLATHER(("%d looks like a timeout\n", i));
			target_delay = &tv_timeout;
			tt_assert(i == 5 || i == 6);
		} else if (evutil_timerisset(&alerted[i].alerted_at)) {
			long diff1,diff2;
			evutil_timersub(&alerted[i].alerted_at,
			    &launched_at, &actual_delay);
			diff1 = timeval_msec_diff(&actual_delay,
			    &tv_signal);
			diff2 = timeval_msec_diff(&actual_delay,
			    &tv_broadcast);
			if (labs(diff1) < labs(diff2)) {
				TT_BLATHER(("%d looks like a signal\n", i));
				target_delay = &tv_signal;
				++n_signal;
			} else {
				TT_BLATHER(("%d looks like a broadcast\n", i));
				target_delay = &tv_broadcast;
				++n_broadcast;
			}
		} else {
			TT_FAIL(("Thread %d never got woken", i));
			continue;
		}
		evutil_timeradd(target_delay, &launched_at, &target_time);
		test_timeval_diff_leq(&target_time, &alerted[i].alerted_at,
		    0, 50);
	}
	tt_int_op(n_broadcast + n_signal + n_timed_out, ==, NUM_THREADS);
	tt_int_op(n_signal, ==, 1);

end:
	EVTHREAD_FREE_LOCK(cond.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	EVTHREAD_FREE_COND(cond.cond);
}

#define CB_COUNT 128
#define QUEUE_THREAD_COUNT 8

static void
SLEEP_MS(int ms)
{
	struct timeval tv;
	tv.tv_sec = ms/1000;
	tv.tv_usec = (ms%1000)*1000;
	evutil_usleep_(&tv);
}

struct deferred_test_data {
	struct event_callback cbs[CB_COUNT];
	struct event_base *queue;
};

static struct timeval timer_start = {0,0};
static struct timeval timer_end = {0,0};
static unsigned callback_count = 0;
static THREAD_T load_threads[QUEUE_THREAD_COUNT];
static struct deferred_test_data deferred_data[QUEUE_THREAD_COUNT];

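/* Deferred callback body: sleep for a millisecond and count how many
 * callbacks have run. */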
static void
deferred_callback(struct event_callback *cb, void *arg)
{
	SLEEP_MS(1);
	callback_count += 1;
}

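/* Load-generating thread: keep scheduling deferred callbacks on the shared
 * base, one per millisecond, to put pressure on its deferred-callback
 * queue. */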
static THREAD_FN
load_deferred_queue(void *arg)
{
	struct deferred_test_data *data = arg;
	size_t i;

	for (i = 0; i < CB_COUNT; ++i) {
		event_deferred_cb_init_(&data->cbs[i], 0, deferred_callback,
		    NULL);
		event_deferred_cb_schedule_(data->queue, &data->cbs[i]);
		SLEEP_MS(1);
	}

	THREAD_RETURN();
}

static void
timer_callback(evutil_socket_t fd, short what, void *arg)
{
	evutil_gettimeofday(&timer_end, NULL);
}

static void
start_threads_callback(evutil_socket_t fd, short what, void *arg)
{
	int i;

	for (i = 0; i < QUEUE_THREAD_COUNT; ++i) {
		THREAD_START(load_threads[i], load_deferred_queue,
				&deferred_data[i]);
	}
}

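/* "deferred_cb_skew" test body: while several threads flood the base with
 * deferred callbacks, check that a 1-second timer still fires within a
 * reasonable window, i.e. that the deferred work does not skew the event
 * loop's timing too badly. */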
static void
thread_deferred_cb_skew(void *arg)
{
	struct timeval tv_timer = {1, 0};
	struct event_base *base = NULL;
	struct event_config *cfg = NULL;
	struct timeval elapsed;
	int elapsed_usec;
	int i;

	cfg = event_config_new();
	tt_assert(cfg);
	event_config_set_max_dispatch_interval(cfg, NULL, 16, 0);

	base = event_base_new_with_config(cfg);
	tt_assert(base);

	for (i = 0; i < QUEUE_THREAD_COUNT; ++i)
		deferred_data[i].queue = base;

	evutil_gettimeofday(&timer_start, NULL);
	event_base_once(base, -1, EV_TIMEOUT, timer_callback, NULL,
			&tv_timer);
	event_base_once(base, -1, EV_TIMEOUT, start_threads_callback,
			NULL, NULL);
	event_base_dispatch(base);

	evutil_timersub(&timer_end, &timer_start, &elapsed);
	TT_BLATHER(("callback count, %u", callback_count));
	elapsed_usec =
	    (unsigned)(elapsed.tv_sec*1000000 + elapsed.tv_usec);
	TT_BLATHER(("elapsed time, %u usec", elapsed_usec));

	/* XXX be more intelligent here.  just make sure skew is
	 * within .4 seconds for now. */
	tt_assert(elapsed_usec >= 600000 && elapsed_usec <= 1400000);

end:
	for (i = 0; i < QUEUE_THREAD_COUNT; ++i)
		THREAD_JOIN(load_threads[i]);
	if (base)
		event_base_free(base);
	if (cfg)
		event_config_free(cfg);
}

static struct event time_events[5];
static struct timeval times[5];
static struct event_base *exit_base = NULL;
static void
note_time_cb(evutil_socket_t fd, short what, void *arg)
{
	evutil_gettimeofday(arg, NULL);
	if (arg == &times[4]) {
		event_base_loopbreak(exit_base);
	}
}
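/* Helper thread for the "no_events" test: activate and add events on the
 * main thread's base from another thread, on a known schedule, while the
 * main thread sits in event_base_loop with no events of its own. */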
static THREAD_FN
register_events_subthread(void *arg)
{
	struct timeval tv = {0,0};
	SLEEP_MS(100);
	event_active(&time_events[0], EV_TIMEOUT, 1);
	SLEEP_MS(100);
	event_active(&time_events[1], EV_TIMEOUT, 1);
	SLEEP_MS(100);
	tv.tv_usec = 100*1000;
	event_add(&time_events[2], &tv);
	tv.tv_usec = 150*1000;
	event_add(&time_events[3], &tv);
	SLEEP_MS(200);
	event_active(&time_events[4], EV_TIMEOUT, 1);

	THREAD_RETURN();
}

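/* "no_events" test body: run an event loop that starts out empty
 * (EVLOOP_NO_EXIT_ON_EMPTY) while a helper thread activates and adds events
 * from outside, and check that each callback runs at the expected time. */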
static void
thread_no_events(void *arg)
{
	THREAD_T thread;
	struct basic_test_data *data = arg;
	struct timeval starttime, endtime;
	int i;
	exit_base = data->base;

	memset(times,0,sizeof(times));
	for (i=0;i<5;++i) {
		event_assign(&time_events[i], data->base,
		    -1, 0, note_time_cb, &times[i]);
	}

	evutil_gettimeofday(&starttime, NULL);
	THREAD_START(thread, register_events_subthread, data->base);
	event_base_loop(data->base, EVLOOP_NO_EXIT_ON_EMPTY);
	evutil_gettimeofday(&endtime, NULL);
	tt_assert(event_base_got_break(data->base));
	THREAD_JOIN(thread);
	for (i=0; i<5; ++i) {
		struct timeval diff;
		double sec;
		evutil_timersub(&times[i], &starttime, &diff);
		sec = diff.tv_sec + diff.tv_usec/1.0e6;
		TT_BLATHER(("event %d at %.4f seconds", i, sec));
	}
	test_timeval_diff_eq(&starttime, &times[0], 100);
	test_timeval_diff_eq(&starttime, &times[1], 200);
	test_timeval_diff_eq(&starttime, &times[2], 400);
	test_timeval_diff_eq(&starttime, &times[3], 450);
	test_timeval_diff_eq(&starttime, &times[4], 500);
	test_timeval_diff_eq(&starttime, &endtime,  500);

end:
	;
}

#define TEST(name)							\
	{ #name, thread_##name, TT_FORK|TT_NEED_THREADS|TT_NEED_BASE,	\
	  &basic_setup, NULL }

struct testcase_t thread_testcases[] = {
	{ "basic", thread_basic, TT_FORK|TT_NEED_THREADS|TT_NEED_BASE,
	  &basic_setup, NULL },
#ifndef _WIN32
	{ "forking", thread_basic, TT_FORK|TT_NEED_THREADS|TT_NEED_BASE,
	  &basic_setup, (char*)"forking" },
#endif
	TEST(conditions_simple),
	{ "deferred_cb_skew", thread_deferred_cb_skew,
	  TT_FORK|TT_NEED_THREADS|TT_OFF_BY_DEFAULT,
	  &basic_setup, NULL },
	TEST(no_events),
	END_OF_TESTCASES
};