1 /* $Id$ */
2 /*
3 * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
4 * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "test.h"
21
22 /**
23 * \page page_pjlib_timer_test Test: Timer
24 *
25 * This file provides implementation of \b timer_test(). It tests the
26 * functionality of the timer heap.
27 *
28 *
29 * This file is <b>pjlib-test/timer.c</b>
30 *
31 * \include pjlib-test/timer.c
32 */
33
34
35 #if INCLUDE_TIMER_TEST
36
37 #include <pjlib.h>
38
/* Basic test tuning: the test runs LOOP iterations, doubling the number
 * of scheduled entries from MIN_COUNT up to MAX_COUNT each iteration.
 * DELAY (in seconds) scales with the entry count but never drops below
 * MIN_DELAY.
 */
#define LOOP 16
#define MIN_COUNT 250
#define MAX_COUNT (LOOP * MIN_COUNT)
#define MIN_DELAY 2
#define D (MAX_COUNT / 32000)
#define DELAY (D < MIN_DELAY ? MIN_DELAY : D)
#define THIS_FILE "timer_test"
46
47
/* No-op timer callback for the basic test; entries are only scheduled,
 * polled and cancelled, their expiry does no work.
 */
static void timer_callback(pj_timer_heap_t *ht, pj_timer_entry *e)
{
    PJ_UNUSED_ARG(ht);
    PJ_UNUSED_ARG(e);
}
53
test_timer_heap(void)54 static int test_timer_heap(void)
55 {
56 int i, j;
57 pj_timer_entry *entry;
58 pj_pool_t *pool;
59 pj_timer_heap_t *timer;
60 pj_time_val delay;
61 pj_status_t status;
62 int err=0;
63 pj_size_t size;
64 unsigned count;
65
66 PJ_LOG(3,("test", "...Basic test"));
67
68 size = pj_timer_heap_mem_size(MAX_COUNT)+MAX_COUNT*sizeof(pj_timer_entry);
69 pool = pj_pool_create( mem, NULL, size, 4000, NULL);
70 if (!pool) {
71 PJ_LOG(3,("test", "...error: unable to create pool of %u bytes",
72 size));
73 return -10;
74 }
75
76 entry = (pj_timer_entry*)pj_pool_calloc(pool, MAX_COUNT, sizeof(*entry));
77 if (!entry)
78 return -20;
79
80 for (i=0; i<MAX_COUNT; ++i) {
81 entry[i].cb = &timer_callback;
82 }
83 status = pj_timer_heap_create(pool, MAX_COUNT, &timer);
84 if (status != PJ_SUCCESS) {
85 app_perror("...error: unable to create timer heap", status);
86 return -30;
87 }
88
89 count = MIN_COUNT;
90 for (i=0; i<LOOP; ++i) {
91 int early = 0;
92 int done=0;
93 int cancelled=0;
94 int rc;
95 pj_timestamp t1, t2, t_sched, t_cancel, t_poll;
96 pj_time_val now, expire;
97
98 pj_gettimeofday(&now);
99 pj_srand(now.sec);
100 t_sched.u32.lo = t_cancel.u32.lo = t_poll.u32.lo = 0;
101
102 // Register timers
103 for (j=0; j<(int)count; ++j) {
104 delay.sec = pj_rand() % DELAY;
105 delay.msec = pj_rand() % 1000;
106
107 // Schedule timer
108 pj_get_timestamp(&t1);
109 rc = pj_timer_heap_schedule(timer, &entry[j], &delay);
110 if (rc != 0)
111 return -40;
112 pj_get_timestamp(&t2);
113
114 t_sched.u32.lo += (t2.u32.lo - t1.u32.lo);
115
116 // Poll timers.
117 pj_get_timestamp(&t1);
118 rc = pj_timer_heap_poll(timer, NULL);
119 pj_get_timestamp(&t2);
120 if (rc > 0) {
121 t_poll.u32.lo += (t2.u32.lo - t1.u32.lo);
122 early += rc;
123 }
124 }
125
126 // Set the time where all timers should finish
127 pj_gettimeofday(&expire);
128 delay.sec = DELAY;
129 delay.msec = 0;
130 PJ_TIME_VAL_ADD(expire, delay);
131
132 // Wait unfil all timers finish, cancel some of them.
133 do {
134 int index = pj_rand() % count;
135 pj_get_timestamp(&t1);
136 rc = pj_timer_heap_cancel(timer, &entry[index]);
137 pj_get_timestamp(&t2);
138 if (rc > 0) {
139 cancelled += rc;
140 t_cancel.u32.lo += (t2.u32.lo - t1.u32.lo);
141 }
142
143 pj_gettimeofday(&now);
144
145 pj_get_timestamp(&t1);
146 #if defined(PJ_SYMBIAN) && PJ_SYMBIAN!=0
147 /* On Symbian, we must use OS poll (Active Scheduler poll) since
148 * timer is implemented using Active Object.
149 */
150 rc = 0;
151 while (pj_symbianos_poll(-1, 0))
152 ++rc;
153 #else
154 rc = pj_timer_heap_poll(timer, NULL);
155 #endif
156 pj_get_timestamp(&t2);
157 if (rc > 0) {
158 done += rc;
159 t_poll.u32.lo += (t2.u32.lo - t1.u32.lo);
160 }
161
162 } while (PJ_TIME_VAL_LTE(now, expire)&&pj_timer_heap_count(timer) > 0);
163
164 if (pj_timer_heap_count(timer)) {
165 PJ_LOG(3, (THIS_FILE, "ERROR: %d timers left",
166 pj_timer_heap_count(timer)));
167 ++err;
168 }
169 t_sched.u32.lo /= count;
170 t_cancel.u32.lo /= count;
171 t_poll.u32.lo /= count;
172 PJ_LOG(4, (THIS_FILE,
173 "...ok (count:%d, early:%d, cancelled:%d, "
174 "sched:%d, cancel:%d poll:%d)",
175 count, early, cancelled, t_sched.u32.lo, t_cancel.u32.lo,
176 t_poll.u32.lo));
177
178 count = count * 2;
179 if (count > MAX_COUNT)
180 break;
181 }
182
183 pj_pool_release(pool);
184 return err;
185 }
186
187
188 /***************
189 * Stress test *
190 ***************
191 * Test scenario (if RANDOMIZED_TEST is 0):
192 * 1. Create and schedule a number of timer entries.
193 * 2. Start threads for polling (simulating normal worker thread).
194 * Each expired entry will try to cancel and re-schedule itself
195 * from within the callback.
196 * 3. Start threads for cancelling random entries. Each successfully
197 * cancelled entry will be re-scheduled after some random delay.
198 *
199 * Test scenario (if RANDOMIZED_TEST is 1):
200 * 1. Create and schedule a number of timer entries.
201 * 2. Start threads which will, based on a configurable probability
202 * setting, randomly perform timer scheduling, cancelling, or
203 * polling (simulating normal worker thread).
204 * This test is considered a failure if:
205 * - It triggers assertion/crash.
206 * - There's an error message in the log, which indicates a potential
207 * bug in the implementation (note that race message is ok).
208 */
/* Select stress-test mode: 1 = randomized worker threads that schedule,
 * cancel and poll at random; 0 = dedicated poll threads plus dedicated
 * cancel threads.
 */
#define RANDOMIZED_TEST 1
/* When non-zero, release a scheduled entry's pool without cancelling it;
 * presumably only safe when PJ_TIMER_USE_COPY makes the heap keep its
 * own copy of the entry — TODO confirm against pj_timer_heap docs.
 */
#define SIMULATE_CRASH PJ_TIMER_USE_COPY

#if RANDOMIZED_TEST
#define ST_STRESS_THREAD_COUNT 20
#define ST_POLL_THREAD_COUNT 0
#define ST_CANCEL_THREAD_COUNT 0
#else
#define ST_STRESS_THREAD_COUNT 0
#define ST_POLL_THREAD_COUNT 10
#define ST_CANCEL_THREAD_COUNT 10
#endif

/* Number of timer entries, total test duration (msec), and the maximum
 * random timeout given to any single entry. */
#define ST_ENTRY_COUNT 10000
#define ST_DURATION 30000
#define ST_ENTRY_MAX_TIMEOUT_MS ST_DURATION/10

/* Number of group locks, may be zero, shared by timer entries. Group
 * locks can be useful to evaluate the poll vs cancel race condition
 * scenario, i.e.: each group lock must have ref count==1 at the end of
 * the test, otherwise an assertion will be raised.
 */
#define ST_ENTRY_GROUP_LOCK_COUNT 1

/* Benchmark test parameters: entry count, progress-report thresholds,
 * and repetition counts for the random and incremental scenarios. */
#define BT_ENTRY_COUNT 100000
#define BT_ENTRY_SHOW_START 100
#define BT_ENTRY_SHOW_MULT 10
#define BT_REPEAT_RANDOM_TEST 4
#define BT_REPEAT_INC_TEST 4
238
/* Shared state handed to every worker thread in the stress test. */
struct thread_param
{
    pj_timer_heap_t *timer;    /* Timer heap under test.                   */
    pj_bool_t stopping;        /* Set by main thread to stop the workers.  */
    pj_timer_entry *entries;   /* Array of ST_ENTRY_COUNT entries.         */
    pj_atomic_t **status;      /* Per-entry flag: 1=scheduled, 0=idle
                                * (used by the randomized scenario).       */
    pj_atomic_t *n_sched, *n_cancel, *n_poll; /* Global op counters.       */
    pj_grp_lock_t **grp_locks; /* Optional group locks shared by entries.  */
    int err;                   /* Error code recorded by a worker thread.  */

    pj_atomic_t *idx;          /* Allocator for per-thread stat[] slots.   */
    struct {
        pj_bool_t is_poll;     /* PJ_TRUE for poll threads, else cancel.   */
        unsigned cnt;          /* Entries this thread expired/cancelled.   */
    } stat[ST_POLL_THREAD_COUNT + ST_CANCEL_THREAD_COUNT + 1];
    /* Plus one here to avoid compile warning of zero-sized array */
};
256
st_schedule_entry(pj_timer_heap_t * ht,pj_timer_entry * e)257 static pj_status_t st_schedule_entry(pj_timer_heap_t *ht, pj_timer_entry *e)
258 {
259 pj_time_val delay = {0};
260 pj_grp_lock_t *grp_lock = NULL;
261 pj_status_t status;
262 struct thread_param *tparam = (struct thread_param *)e->user_data;
263
264 if (ST_ENTRY_GROUP_LOCK_COUNT && pj_rand() % 10) {
265 /* About 90% of entries should have group lock */
266 grp_lock = tparam->grp_locks[pj_rand() % ST_ENTRY_GROUP_LOCK_COUNT];
267 }
268
269 delay.msec = pj_rand() % ST_ENTRY_MAX_TIMEOUT_MS;
270 pj_time_val_normalize(&delay);
271 status = pj_timer_heap_schedule_w_grp_lock(ht, e, &delay, 1, grp_lock);
272 return status;
273 }
274
/* Callback that only logs its invocation; used by the benchmark test
 * and by the SIMULATE_CRASH entry.
 */
static void dummy_callback(pj_timer_heap_t *ht, pj_timer_entry *e)
{
    PJ_UNUSED_ARG(ht);
    PJ_LOG(4,("test", "dummy callback called %p %p", e, e->user_data));
}
280
/* Stress-test entry callback.
 *
 * In the randomized scenario it first waits for the scheduler thread to
 * mark the entry's status flag, then clears it; it then attempts to
 * cancel itself (exercising the cancel-from-callback path), simulates
 * some work, and — in the non-randomized scenario only — re-schedules
 * itself so the entry keeps cycling.
 */
static void st_entry_callback(pj_timer_heap_t *ht, pj_timer_entry *e)
{
    struct thread_param *tparam = (struct thread_param *)e->user_data;

#if RANDOMIZED_TEST
    /* Make sure the flag has been set. */
    while (pj_atomic_get(tparam->status[e - tparam->entries]) != 1)
        pj_thread_sleep(10);
    pj_atomic_set(tparam->status[e - tparam->entries], 0);
#endif

    /* try to cancel this */
    pj_timer_heap_cancel_if_active(ht, e, 10);

    /* busy doing something */
    pj_thread_sleep(pj_rand() % 50);

    /* reschedule entry */
    if (!ST_STRESS_THREAD_COUNT)
        st_schedule_entry(ht, e);
}
302
303 /* Randomized stress worker thread function. */
stress_worker(void * arg)304 static int stress_worker(void *arg)
305 {
306 /* Enumeration of possible task. */
307 enum {
308 SCHEDULING = 0,
309 CANCELLING = 1,
310 POLLING = 2,
311 NOTHING = 3
312 };
313 /* Probability of a certain task being chosen.
314 * The first number indicates the probability of the first task,
315 * the second number for the second task, and so on.
316 */
317 int prob[3] = {75, 15, 5};
318 struct thread_param *tparam = (struct thread_param*)arg;
319 int t_idx, i;
320
321 t_idx = pj_atomic_inc_and_get(tparam->idx);
322 PJ_LOG(4,("test", "...thread #%d (random) started", t_idx));
323 while (!tparam->stopping) {
324 int job, task;
325 int idx, count;
326 pj_status_t prev_status, status;
327
328 /* Randomly choose which task to do */
329 job = pj_rand() % 100;
330 if (job < prob[0]) task = SCHEDULING;
331 else if (job < (prob[0] + prob[1])) task = CANCELLING;
332 else if (job < (prob[0] + prob[1] + prob[2])) task = POLLING;
333 else task = NOTHING;
334
335 idx = pj_rand() % ST_ENTRY_COUNT;
336 prev_status = pj_atomic_get(tparam->status[idx]);
337 if (task == SCHEDULING) {
338 if (prev_status != 0) continue;
339 status = st_schedule_entry(tparam->timer, &tparam->entries[idx]);
340 if (prev_status == 0 && status != PJ_SUCCESS) {
341 /* To make sure the flag has been set. */
342 pj_thread_sleep(20);
343 if (pj_atomic_get(tparam->status[idx]) == 1) {
344 /* Race condition with another scheduling. */
345 PJ_LOG(3,("test", "race schedule-schedule %d: %p",
346 idx, &tparam->entries[idx]));
347 } else {
348 if (tparam->err != 0) tparam->err = -210;
349 PJ_LOG(3,("test", "error: failed to schedule entry %d: %p",
350 idx, &tparam->entries[idx]));
351 }
352 } else if (prev_status == 1 && status == PJ_SUCCESS) {
353 /* Race condition with another cancellation or
354 * timer poll.
355 */
356 pj_thread_sleep(20);
357 PJ_LOG(3,("test", "race schedule-cancel/poll %d: %p",
358 idx, &tparam->entries[idx]));
359 }
360 if (status == PJ_SUCCESS) {
361 pj_atomic_set(tparam->status[idx], 1);
362 pj_atomic_inc(tparam->n_sched);
363 }
364 } else if (task == CANCELLING) {
365 count = pj_timer_heap_cancel_if_active(tparam->timer,
366 &tparam->entries[idx], 10);
367 if (prev_status == 0 && count > 0) {
368 /* To make sure the flag has been set. */
369 pj_thread_sleep(20);
370 if (pj_atomic_get(tparam->status[idx]) == 1) {
371 /* Race condition with scheduling. */
372 PJ_LOG(3,("test", "race cancel-schedule %d: %p",
373 idx, &tparam->entries[idx]));
374 } else {
375 if (tparam->err != 0) tparam->err = -220;
376 PJ_LOG(3,("test", "error: cancelling invalid entry %d: %p",
377 idx, &tparam->entries[idx]));
378 }
379 } else if (prev_status == 1 && count == 0) {
380 /* To make sure the flag has been cleared. */
381 pj_thread_sleep(20);
382 if (pj_atomic_get(tparam->status[idx]) == 0) {
383 /* Race condition with polling. */
384 PJ_LOG(3,("test", "race cancel-poll %d: %p",
385 idx, &tparam->entries[idx]));
386 } else {
387 if (tparam->err != 0) tparam->err = -230;
388 PJ_LOG(3,("test", "error: failed to cancel entry %d: %p",
389 idx, &tparam->entries[idx]));
390 }
391 }
392 if (count > 0) {
393 /* Make sure the flag has been set. */
394 while (pj_atomic_get(tparam->status[idx]) != 1)
395 pj_thread_sleep(10);
396 pj_atomic_set(tparam->status[idx], 0);
397 pj_atomic_inc(tparam->n_cancel);
398 }
399 } else if (task == POLLING) {
400 count = pj_timer_heap_poll(tparam->timer, NULL);
401 for (i = 0; i < count; i++) {
402 pj_atomic_inc_and_get(tparam->n_poll);
403 }
404 } else {
405 pj_thread_sleep(10);
406 }
407 }
408 PJ_LOG(4,("test", "...thread #%d (poll) stopped", t_idx));
409
410 return 0;
411 }
412
413 /* Poll worker thread function. */
poll_worker(void * arg)414 static int poll_worker(void *arg)
415 {
416 struct thread_param *tparam = (struct thread_param*)arg;
417 int idx;
418
419 idx = pj_atomic_inc_and_get(tparam->idx);
420 tparam->stat[idx].is_poll = PJ_TRUE;
421
422 PJ_LOG(4,("test", "...thread #%d (poll) started", idx));
423 while (!tparam->stopping) {
424 unsigned count;
425 count = pj_timer_heap_poll(tparam->timer, NULL);
426 if (count > 0) {
427 /* Count expired entries */
428 PJ_LOG(5,("test", "...thread #%d called %d entries",
429 idx, count));
430 tparam->stat[idx].cnt += count;
431 } else {
432 pj_thread_sleep(10);
433 }
434 }
435 PJ_LOG(4,("test", "...thread #%d (poll) stopped", idx));
436
437 return 0;
438 }
439
440 /* Cancel worker thread function. */
cancel_worker(void * arg)441 static int cancel_worker(void *arg)
442 {
443 struct thread_param *tparam = (struct thread_param*)arg;
444 int idx;
445
446 idx = pj_atomic_inc_and_get(tparam->idx);
447 tparam->stat[idx].is_poll = PJ_FALSE;
448
449 PJ_LOG(4,("test", "...thread #%d (cancel) started", idx));
450 while (!tparam->stopping) {
451 int count;
452 pj_timer_entry *e = &tparam->entries[pj_rand() % ST_ENTRY_COUNT];
453
454 count = pj_timer_heap_cancel_if_active(tparam->timer, e, 2);
455 if (count > 0) {
456 /* Count cancelled entries */
457 PJ_LOG(5,("test", "...thread #%d cancelled %d entries",
458 idx, count));
459 tparam->stat[idx].cnt += count;
460
461 /* Reschedule entry after some delay */
462 pj_thread_sleep(pj_rand() % 100);
463 st_schedule_entry(tparam->timer, e);
464 }
465 }
466 PJ_LOG(4,("test", "...thread #%d (cancel) stopped", idx));
467
468 return 0;
469 }
470
timer_stress_test(void)471 static int timer_stress_test(void)
472 {
473 unsigned count = 0, n_sched = 0, n_cancel = 0, n_poll = 0;
474 int i;
475 pj_timer_entry *entries = NULL;
476 pj_atomic_t **entries_status = NULL;
477 pj_grp_lock_t **grp_locks = NULL;
478 pj_pool_t *pool;
479 pj_timer_heap_t *timer = NULL;
480 pj_lock_t *timer_lock;
481 pj_status_t status;
482 int err=0;
483 pj_thread_t **stress_threads = NULL;
484 pj_thread_t **poll_threads = NULL;
485 pj_thread_t **cancel_threads = NULL;
486 struct thread_param tparam = {0};
487 pj_time_val now;
488 #if SIMULATE_CRASH
489 pj_timer_entry *entry;
490 pj_pool_t *tmp_pool;
491 pj_time_val delay = {0};
492 #endif
493
494 PJ_LOG(3,("test", "...Stress test"));
495
496 pj_gettimeofday(&now);
497 pj_srand(now.sec);
498
499 pool = pj_pool_create( mem, NULL, 128, 128, NULL);
500 if (!pool) {
501 PJ_LOG(3,("test", "...error: unable to create pool"));
502 err = -10;
503 goto on_return;
504 }
505
506 /* Create timer heap.
507 * Initially we only create a fraction of what's required,
508 * to test the timer heap growth algorithm.
509 */
510 status = pj_timer_heap_create(pool, ST_ENTRY_COUNT/64, &timer);
511 if (status != PJ_SUCCESS) {
512 app_perror("...error: unable to create timer heap", status);
513 err = -20;
514 goto on_return;
515 }
516
517 /* Set recursive lock for the timer heap. */
518 status = pj_lock_create_recursive_mutex( pool, "lock", &timer_lock);
519 if (status != PJ_SUCCESS) {
520 app_perror("...error: unable to create lock", status);
521 err = -30;
522 goto on_return;
523 }
524 pj_timer_heap_set_lock(timer, timer_lock, PJ_TRUE);
525
526 /* Create group locks for the timer entry. */
527 if (ST_ENTRY_GROUP_LOCK_COUNT) {
528 grp_locks = (pj_grp_lock_t**)
529 pj_pool_calloc(pool, ST_ENTRY_GROUP_LOCK_COUNT,
530 sizeof(pj_grp_lock_t*));
531 tparam.grp_locks = grp_locks;
532 }
533 for (i=0; i<ST_ENTRY_GROUP_LOCK_COUNT; ++i) {
534 status = pj_grp_lock_create(pool, NULL, &grp_locks[i]);
535 if (status != PJ_SUCCESS) {
536 app_perror("...error: unable to create group lock", status);
537 err = -40;
538 goto on_return;
539 }
540 pj_grp_lock_add_ref(grp_locks[i]);
541 }
542
543 /* Create and schedule timer entries */
544 entries = (pj_timer_entry*)pj_pool_calloc(pool, ST_ENTRY_COUNT,
545 sizeof(*entries));
546 if (!entries) {
547 err = -50;
548 goto on_return;
549 }
550 entries_status = (pj_atomic_t**)pj_pool_calloc(pool, ST_ENTRY_COUNT,
551 sizeof(*entries_status));
552 if (!entries_status) {
553 err = -55;
554 goto on_return;
555 }
556
557 for (i=0; i<ST_ENTRY_COUNT; ++i) {
558 pj_timer_entry_init(&entries[i], 0, &tparam, &st_entry_callback);
559
560 status = pj_atomic_create(pool, -1, &entries_status[i]);
561 if (status != PJ_SUCCESS) {
562 err = -60;
563 goto on_return;
564 }
565 pj_atomic_set(entries_status[i], 0);
566
567 /* For randomized test, we schedule the entry inside the thread */
568 if (!ST_STRESS_THREAD_COUNT) {
569 status = st_schedule_entry(timer, &entries[i]);
570 if (status != PJ_SUCCESS) {
571 app_perror("...error: unable to schedule entry", status);
572 err = -60;
573 goto on_return;
574 }
575 }
576 }
577
578 tparam.stopping = PJ_FALSE;
579 tparam.timer = timer;
580 tparam.entries = entries;
581 tparam.status = entries_status;
582 status = pj_atomic_create(pool, -1, &tparam.idx);
583 if (status != PJ_SUCCESS) {
584 app_perror("...error: unable to create atomic", status);
585 err = -70;
586 goto on_return;
587 }
588 status = pj_atomic_create(pool, -1, &tparam.n_sched);
589 pj_assert (status == PJ_SUCCESS);
590 pj_atomic_set(tparam.n_sched, 0);
591 status = pj_atomic_create(pool, -1, &tparam.n_cancel);
592 pj_assert (status == PJ_SUCCESS);
593 pj_atomic_set(tparam.n_cancel, 0);
594 status = pj_atomic_create(pool, -1, &tparam.n_poll);
595 pj_assert (status == PJ_SUCCESS);
596 pj_atomic_set(tparam.n_poll, 0);
597
598 /* Start stress worker threads */
599 if (ST_STRESS_THREAD_COUNT) {
600 stress_threads = (pj_thread_t**)
601 pj_pool_calloc(pool, ST_STRESS_THREAD_COUNT,
602 sizeof(pj_thread_t*));
603 }
604 for (i=0; i<ST_STRESS_THREAD_COUNT; ++i) {
605 status = pj_thread_create( pool, "poll", &stress_worker, &tparam,
606 0, 0, &stress_threads[i]);
607 if (status != PJ_SUCCESS) {
608 app_perror("...error: unable to create stress thread", status);
609 err = -75;
610 goto on_return;
611 }
612 }
613
614 /* Start poll worker threads */
615 if (ST_POLL_THREAD_COUNT) {
616 poll_threads = (pj_thread_t**)
617 pj_pool_calloc(pool, ST_POLL_THREAD_COUNT,
618 sizeof(pj_thread_t*));
619 }
620 for (i=0; i<ST_POLL_THREAD_COUNT; ++i) {
621 status = pj_thread_create( pool, "poll", &poll_worker, &tparam,
622 0, 0, &poll_threads[i]);
623 if (status != PJ_SUCCESS) {
624 app_perror("...error: unable to create poll thread", status);
625 err = -80;
626 goto on_return;
627 }
628 }
629
630 /* Start cancel worker threads */
631 if (ST_CANCEL_THREAD_COUNT) {
632 cancel_threads = (pj_thread_t**)
633 pj_pool_calloc(pool, ST_CANCEL_THREAD_COUNT,
634 sizeof(pj_thread_t*));
635 }
636 for (i=0; i<ST_CANCEL_THREAD_COUNT; ++i) {
637 status = pj_thread_create( pool, "cancel", &cancel_worker, &tparam,
638 0, 0, &cancel_threads[i]);
639 if (status != PJ_SUCCESS) {
640 app_perror("...error: unable to create cancel thread", status);
641 err = -90;
642 goto on_return;
643 }
644 }
645
646 #if SIMULATE_CRASH
647 tmp_pool = pj_pool_create( mem, NULL, 4096, 128, NULL);
648 pj_assert(tmp_pool);
649 entry = (pj_timer_entry*)pj_pool_calloc(tmp_pool, 1, sizeof(*entry));
650 pj_assert(entry);
651 pj_timer_entry_init(entry, 0, &tparam, &dummy_callback);
652 delay.sec = 6;
653 status = pj_timer_heap_schedule(timer, entry, &delay);
654 pj_assert(status == PJ_SUCCESS);
655 pj_thread_sleep(1000);
656 PJ_LOG(3,("test", "...Releasing timer entry %p without cancelling it",
657 entry));
658 pj_pool_secure_release(&tmp_pool);
659 //pj_pool_release(tmp_pool);
660 //pj_memset(tmp_pool, 128, 4096);
661 #endif
662
663 /* Wait */
664 pj_thread_sleep(ST_DURATION);
665
666 on_return:
667
668 PJ_LOG(3,("test", "...Cleaning up resources"));
669 tparam.stopping = PJ_TRUE;
670
671 for (i=0; i<ST_STRESS_THREAD_COUNT; ++i) {
672 if (!stress_threads[i])
673 continue;
674 pj_thread_join(stress_threads[i]);
675 pj_thread_destroy(stress_threads[i]);
676 }
677
678 for (i=0; i<ST_POLL_THREAD_COUNT; ++i) {
679 if (!poll_threads[i])
680 continue;
681 pj_thread_join(poll_threads[i]);
682 pj_thread_destroy(poll_threads[i]);
683 }
684
685 for (i=0; i<ST_CANCEL_THREAD_COUNT; ++i) {
686 if (!cancel_threads[i])
687 continue;
688 pj_thread_join(cancel_threads[i]);
689 pj_thread_destroy(cancel_threads[i]);
690 }
691
692 for (i=0; i<ST_POLL_THREAD_COUNT+ST_CANCEL_THREAD_COUNT; ++i) {
693 PJ_LOG(3,("test", "...Thread #%d (%s) executed %d entries",
694 i, (tparam.stat[i].is_poll? "poll":"cancel"),
695 tparam.stat[i].cnt));
696 }
697
698 for (i=0; i<ST_ENTRY_COUNT; ++i) {
699 count += pj_timer_heap_cancel_if_active(timer, &entries[i], 10);
700 if (entries_status)
701 pj_atomic_destroy(entries_status[i]);
702 }
703
704 for (i=0; i<ST_ENTRY_GROUP_LOCK_COUNT; ++i) {
705 /* Ref count must be equal to 1 */
706 if (pj_grp_lock_get_ref(grp_locks[i]) != 1) {
707 pj_assert(!"Group lock ref count must be equal to 1");
708 if (!err) err = -100;
709 }
710 pj_grp_lock_dec_ref(grp_locks[i]);
711 }
712
713 if (timer)
714 pj_timer_heap_destroy(timer);
715
716 PJ_LOG(3,("test", "Total memory of timer heap: %d",
717 pj_timer_heap_mem_size(ST_ENTRY_COUNT)));
718
719 if (tparam.idx)
720 pj_atomic_destroy(tparam.idx);
721 if (tparam.n_sched) {
722 n_sched = pj_atomic_get(tparam.n_sched);
723 PJ_LOG(3,("test", "Total number of scheduled entries: %d", n_sched));
724 pj_atomic_destroy(tparam.n_sched);
725 }
726 if (tparam.n_cancel) {
727 n_cancel = pj_atomic_get(tparam.n_cancel);
728 PJ_LOG(3,("test", "Total number of cancelled entries: %d", n_cancel));
729 pj_atomic_destroy(tparam.n_cancel);
730 }
731 if (tparam.n_poll) {
732 n_poll = pj_atomic_get(tparam.n_poll);
733 PJ_LOG(3,("test", "Total number of polled entries: %d", n_poll));
734 pj_atomic_destroy(tparam.n_poll);
735 }
736 PJ_LOG(3,("test", "Number of remaining active entries: %d", count));
737 if (n_sched) {
738 pj_bool_t match = PJ_TRUE;
739
740 #if SIMULATE_CRASH
741 n_sched++;
742 #endif
743 if (n_sched != (n_cancel + n_poll + count)) {
744 if (tparam.err != 0) tparam.err = -250;
745 match = PJ_FALSE;
746 }
747 PJ_LOG(3,("test", "Scheduled = cancelled + polled + remaining?: %s",
748 (match? "yes": "no")));
749 }
750
751 pj_pool_safe_release(&pool);
752
753 return (err? err: tparam.err);
754 }
755
/* Return a random benchmark delay in [0, BT_ENTRY_COUNT) milliseconds. */
static int get_random_delay()
{
    return pj_rand() % BT_ENTRY_COUNT;
}
760
/* Return the previous benchmark delay incremented by one millisecond. */
static int get_next_delay(int delay)
{
    return ++delay;
}
765
/* Benchmark scenario selector: schedule vs cancel, with either random
 * or monotonically incrementing delays. */
typedef enum BENCH_TEST_TYPE {
    RANDOM_SCH = 0,     /* Schedule with random delays.       */
    RANDOM_CAN = 1,     /* Cancel entries (after RANDOM_SCH). */
    INCREMENT_SCH = 2,  /* Schedule with increasing delays.   */
    INCREMENT_CAN = 3   /* Cancel entries (after INCREMENT_SCH). */
} BENCH_TEST_TYPE;
772
/* Map a benchmark scenario to the operation name used in log output. */
static char *get_test_name(BENCH_TEST_TYPE test_type) {
    if (test_type == RANDOM_SCH || test_type == INCREMENT_SCH)
        return "schedule";
    if (test_type == RANDOM_CAN || test_type == INCREMENT_CAN)
        return "cancel";
    return "undefined";
}
784
/* Write `n` into `out` as a decimal string with comma thousands
 * separators (e.g. 1234567 -> "1,234,567").  The caller must provide a
 * buffer large enough for the grouped digits.  Returns a pointer to the
 * terminating NUL.
 */
static void *get_format_num(unsigned n, char *out)
{
    char digits[64];
    const char *src;
    int phase;

    pj_ansi_snprintf(digits, 64, "%d", n);
    /* `phase` hits 1 exactly on positions where a group of three ends. */
    phase = 2 - pj_ansi_strlen(digits) % 3;
    for (src = digits; *src; ++src) {
        *out++ = *src;
        if (phase == 1)
            *out++ = ',';
        phase = (phase + 1) % 3;
    }
    /* The final digit always emits a trailing separator; terminate one
     * char back to drop it. */
    *--out = 0;
    return out;
}
803
/* Log the throughput (entries/sec) achieved between entry indices
 * start_idx and end_idx, measured from time_start to now at timestamp
 * frequency time_freq.  For cancel scenarios the indices are mirrored
 * so the report shows how many entries remain in the heap.
 */
static void print_bench(BENCH_TEST_TYPE test_type, pj_timestamp time_freq,
                        pj_timestamp time_start, int start_idx, int end_idx)
{
    char s_start[64];
    char s_end[64];
    char s_rate[64];
    unsigned rate;
    pj_timestamp elapsed;

    pj_get_timestamp(&elapsed);
    pj_sub_timestamp(&elapsed, &time_start);

    rate = (unsigned)(time_freq.u64 * (end_idx-start_idx) / elapsed.u64);

    if (test_type == RANDOM_CAN || test_type == INCREMENT_CAN) {
        start_idx = BT_ENTRY_COUNT - start_idx;
        end_idx = BT_ENTRY_COUNT - end_idx;
    }

    get_format_num(start_idx, s_start);
    get_format_num(end_idx, s_end);
    get_format_num(rate, s_rate);

    PJ_LOG(3, (THIS_FILE, " Entries %s-%s: %s %s ent/sec",
               s_start, s_end, get_test_name(test_type),
               s_rate));
}
829
/* Run one benchmark pass: schedule or cancel BT_ENTRY_COUNT entries
 * according to test_type, logging throughput at geometrically growing
 * checkpoints (BT_ENTRY_SHOW_START, then x BT_ENTRY_SHOW_MULT).
 *
 * Fix: `delay` was declared (and zero-initialized) inside the loop, so
 * get_next_delay(delay.msec) always returned 1 and the INCREMENT_SCH
 * scenario never actually incremented.  The delay now persists across
 * iterations; RANDOM_SCH is unaffected since it overwrites delay.msec
 * every iteration.
 *
 * Returns 0 on success or a negative error code.
 */
static int bench_test(pj_timer_heap_t *timer,
                      pj_timer_entry *entries,
                      pj_timestamp freq,
                      BENCH_TEST_TYPE test_type)
{
    pj_timestamp t1;
    unsigned mult = BT_ENTRY_SHOW_START;
    pj_time_val delay = { 0 };
    int i, j;

    pj_get_timestamp(&t1);
    /* Schedule or cancel each entry. */
    for (i=0, j=0; j < BT_ENTRY_COUNT; ++j) {
        pj_status_t status;

        if (test_type == RANDOM_SCH || test_type == INCREMENT_SCH) {
            if (test_type == RANDOM_SCH)
                delay.msec = get_random_delay();
            else
                delay.msec = get_next_delay(delay.msec);

            pj_timer_entry_init(&entries[j], 0, NULL, &dummy_callback);

            status = pj_timer_heap_schedule(timer, &entries[j], &delay);
            if (status != PJ_SUCCESS) {
                app_perror("...error: unable to schedule timer entry", status);
                return -50;
            }
        } else if (test_type == RANDOM_CAN || test_type == INCREMENT_CAN) {
            unsigned num_ent = pj_timer_heap_cancel(timer, &entries[j]);
            if (num_ent == 0) {
                PJ_LOG(3, ("test", "...error: unable to cancel timer entry"));
                return -60;
            }
        } else {
            return -70;
        }

        /* Progress report at each checkpoint, then widen the window. */
        if (j && (j % mult) == 0) {
            print_bench(test_type, freq, t1, i, j);

            i = j+1;
            pj_get_timestamp(&t1);
            mult *= BT_ENTRY_SHOW_MULT;
        }
    }
    /* Report the final partial window, if any. */
    if (j > 0 && ((j-1) % mult != 0)) {
        print_bench(test_type, freq, t1, i, j);
    }
    return 0;
}
881
timer_bench_test(void)882 static int timer_bench_test(void)
883 {
884 pj_pool_t *pool = NULL;
885 pj_timer_heap_t *timer = NULL;
886 pj_status_t status;
887 int err=0;
888 pj_timer_entry *entries = NULL;
889 pj_timestamp freq;
890 int i;
891
892 PJ_LOG(3,("test", "...Benchmark test"));
893
894 status = pj_get_timestamp_freq(&freq);
895 if (status != PJ_SUCCESS) {
896 PJ_LOG(3,("test", "...error: unable to get timestamp freq"));
897 err = -10;
898 goto on_return;
899 }
900
901 pool = pj_pool_create( mem, NULL, 128, 128, NULL);
902 if (!pool) {
903 PJ_LOG(3,("test", "...error: unable to create pool"));
904 err = -20;
905 goto on_return;
906 }
907
908 /* Create timer heap.*/
909 status = pj_timer_heap_create(pool, BT_ENTRY_COUNT/64, &timer);
910 if (status != PJ_SUCCESS) {
911 app_perror("...error: unable to create timer heap", status);
912 err = -30;
913 goto on_return;
914 }
915
916 /* Create and schedule timer entries */
917 entries = (pj_timer_entry*)pj_pool_calloc(pool, BT_ENTRY_COUNT,
918 sizeof(*entries));
919 if (!entries) {
920 err = -40;
921 goto on_return;
922 }
923
924 PJ_LOG(3,("test", "....random scheduling/cancelling test.."));
925 for (i = 0; i < BT_REPEAT_RANDOM_TEST; ++i) {
926 PJ_LOG(3,("test", " test %d of %d..", i+1, BT_REPEAT_RANDOM_TEST));
927 err = bench_test(timer, entries, freq, RANDOM_SCH);
928 if (err < 0)
929 goto on_return;
930
931 err = bench_test(timer, entries, freq, RANDOM_CAN);
932 if (err < 0)
933 goto on_return;
934 }
935
936 PJ_LOG(3,("test", "....increment scheduling/cancelling test.."));
937 for (i = 0; i < BT_REPEAT_INC_TEST; ++i) {
938 PJ_LOG(3,("test", " test %d of %d..", i+1, BT_REPEAT_INC_TEST));
939 err = bench_test(timer, entries, freq, INCREMENT_SCH);
940 if (err < 0)
941 goto on_return;
942
943 err = bench_test(timer, entries, freq, INCREMENT_CAN);
944 if (err < 0)
945 goto on_return;
946 }
947 on_return:
948 PJ_LOG(3,("test", "...Cleaning up resources"));
949 if (pool)
950 pj_pool_safe_release(&pool);
951 return err;
952 }
953
/* Timer test entry point: runs the basic, stress, and (when enabled)
 * benchmark suites, returning the first non-zero failure code.
 */
int timer_test()
{
    int status;

    status = test_timer_heap();
    if (status)
        return status;

    status = timer_stress_test();
    if (status)
        return status;

#if WITH_BENCHMARK
    status = timer_bench_test();
    if (status)
        return status;
#endif

    return 0;
}
974
975 #else
976 /* To prevent warning about "translation unit is empty"
977 * when this test is disabled.
978 */
979 int dummy_timer_test;
980 #endif /* INCLUDE_TIMER_TEST */
981
982
983