/*
 * AioContext tests
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "block/aio.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/sockets.h"
#include "qemu/error-report.h"
#include "qemu/coroutine-core.h"
#include "qemu/main-loop.h"

static AioContext *ctx;

typedef struct {
    EventNotifier e;
    int n;
    int active;
    bool auto_set;
} EventNotifierTestData;

/* Wait until event notifier becomes inactive */
static void wait_until_inactive(EventNotifierTestData *data)
{
    while (data->active > 0) {
        aio_poll(ctx, true);
    }
}

/* Simple callbacks for testing. */

typedef struct {
    QEMUBH *bh;
    int n;
    int max;
} BHTestData;

typedef struct {
    QEMUTimer timer;
    QEMUClockType clock_type;
    int n;
    int max;
    int64_t ns;
    AioContext *ctx;
} TimerTestData;

static void bh_test_cb(void *opaque)
{
    BHTestData *data = opaque;
    if (++data->n < data->max) {
        qemu_bh_schedule(data->bh);
    }
}

static void timer_test_cb(void *opaque)
{
    TimerTestData *data = opaque;
    if (++data->n < data->max) {
        timer_mod(&data->timer,
                  qemu_clock_get_ns(data->clock_type) + data->ns);
    }
}

static void dummy_io_handler_read(EventNotifier *e)
{
}

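/* Reschedule the BH until max is reached, then delete it from inside its
 * own callback. */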
static void bh_delete_cb(void *opaque)
{
    BHTestData *data = opaque;
    if (++data->n < data->max) {
        qemu_bh_schedule(data->bh);
    } else {
        qemu_bh_delete(data->bh);
        data->bh = NULL;
    }
}

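/* Count invocations, decrement the active count, and re-arm the notifier
 * while it is still active if auto_set is true. */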
static void event_ready_cb(EventNotifier *e)
{
    EventNotifierTestData *data = container_of(e, EventNotifierTestData, e);
    g_assert(event_notifier_test_and_clear(e));
    data->n++;
    if (data->active > 0) {
        data->active--;
    }
    if (data->auto_set && data->active) {
        event_notifier_set(e);
    }
}

/* Tests using aio_*. */

static void set_event_notifier(AioContext *nctx, EventNotifier *notifier,
                               EventNotifierHandler *handler)
{
    aio_set_event_notifier(nctx, notifier, handler, NULL, NULL);
}

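/* A scheduled bottom half runs exactly once; further polling makes no
 * progress. */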
static void test_bh_schedule(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

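/* A bottom half that reschedules itself runs once per aio_poll() call until
 * it reaches its maximum count. */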
static void test_bh_schedule10(void)
{
    BHTestData data = { .n = 0, .max = 10 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 2);

    while (data.n < 10) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data.n, ==, 10);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 10);
    qemu_bh_delete(data.bh);
}

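/* A cancelled bottom half never runs. */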
static void test_bh_cancel(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_cancel(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    qemu_bh_delete(data.bh);
}

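/* Deleting a scheduled bottom half prevents it from running. */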
static void test_bh_delete(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_delete(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
}

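/* A bottom half may delete itself from within its own callback. */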
static void test_bh_delete_from_cb(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);

    qemu_bh_schedule(data1.bh);
    g_assert_cmpint(data1.n, ==, 0);

    while (data1.n < data1.max) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert(data1.bh == NULL);

    g_assert(!aio_poll(ctx, false));
}

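/* Several self-deleting bottom halves with different iteration counts can
 * coexist; each one disappears after its own maximum is reached. */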
static void test_bh_delete_from_cb_many(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };
    BHTestData data2 = { .n = 0, .max = 3 };
    BHTestData data3 = { .n = 0, .max = 2 };
    BHTestData data4 = { .n = 0, .max = 4 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
    data2.bh = aio_bh_new(ctx, bh_delete_cb, &data2);
    data3.bh = aio_bh_new(ctx, bh_delete_cb, &data3);
    data4.bh = aio_bh_new(ctx, bh_delete_cb, &data4);

    qemu_bh_schedule(data1.bh);
    qemu_bh_schedule(data2.bh);
    qemu_bh_schedule(data3.bh);
    qemu_bh_schedule(data4.bh);
    g_assert_cmpint(data1.n, ==, 0);
    g_assert_cmpint(data2.n, ==, 0);
    g_assert_cmpint(data3.n, ==, 0);
    g_assert_cmpint(data4.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data1.n, ==, 1);
    g_assert_cmpint(data2.n, ==, 1);
    g_assert_cmpint(data3.n, ==, 1);
    g_assert_cmpint(data4.n, ==, 1);
    g_assert(data1.bh == NULL);

    while (data1.n < data1.max ||
           data2.n < data2.max ||
           data3.n < data3.max ||
           data4.n < data4.max) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert_cmpint(data2.n, ==, data2.max);
    g_assert_cmpint(data3.n, ==, data3.max);
    g_assert_cmpint(data4.n, ==, data4.max);
    g_assert(data1.bh == NULL);
    g_assert(data2.bh == NULL);
    g_assert(data3.bh == NULL);
    g_assert(data4.bh == NULL);
}

static void test_bh_flush(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

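/* Adding and removing an event notifier that is never signalled must not
 * invoke its callback. */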
static void test_set_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 0 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    event_notifier_cleanup(&data.e);
}

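/* Setting the event notifier makes the next aio_poll() run its callback
 * once; subsequent polls make no progress. */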
static void test_wait_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 1 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 1);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_cleanup(&data.e);
}

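/* With auto_set, the callback re-arms the notifier on every invocation, so
 * repeated polling drains the full active count. */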
static void test_flush_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 10);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 9);
    g_assert(aio_poll(ctx, false));

    wait_until_inactive(&data);
    g_assert_cmpint(data.n, ==, 10);
    g_assert_cmpint(data.active, ==, 0);
    g_assert(!aio_poll(ctx, false));

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    event_notifier_cleanup(&data.e);
}

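/* "data" is never marked active; an auxiliary "dummy" notifier supplies the
 * active descriptor that makes aio_poll dispatch EventNotifier callbacks. */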
static void test_wait_event_notifier_noflush(void)
{
    EventNotifierTestData data = { .n = 0 };
    EventNotifierTestData dummy = { .n = 0, .active = 1 };

    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    /* Until there is an active descriptor, aio_poll may or may not call
     * event_ready_cb. Still, it must not block. */
    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, true));
    data.n = 0;

    /* An active event notifier forces aio_poll to look at EventNotifiers. */
    event_notifier_init(&dummy.e, false);
    set_event_notifier(ctx, &dummy.e, event_ready_cb);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_set(&dummy.e);
    wait_until_inactive(&dummy);
    g_assert_cmpint(data.n, ==, 2);
    g_assert_cmpint(dummy.n, ==, 1);
    g_assert_cmpint(dummy.active, ==, 0);

    set_event_notifier(ctx, &dummy.e, NULL);
    event_notifier_cleanup(&dummy.e);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_cleanup(&data.e);
}

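/* Timers only fire from within aio_poll(): a 750 ms timer that has already
 * expired still needs a poll to run, and the callback re-arms it once so it
 * fires exactly twice. */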
static void test_timer_schedule(void)
{
    TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
                           .max = 2,
                           .clock_type = QEMU_CLOCK_REALTIME };
    EventNotifier e;

    /* aio_poll will not block to wait for timers to complete unless it has
     * an fd to wait on. Fixing this breaks other tests. So create a dummy one.
     */
    event_notifier_init(&e, false);
    set_event_notifier(ctx, &e, dummy_io_handler_read);
    aio_poll(ctx, false);

    aio_timer_init(ctx, &data.timer, data.clock_type,
                   SCALE_NS, timer_test_cb, &data);
    timer_mod(&data.timer,
              qemu_clock_get_ns(data.clock_type) +
              data.ns);

    g_assert_cmpint(data.n, ==, 0);

    /* timer_mod may well cause an event notifier to have gone off,
     * so clear that
     */
    do {} while (aio_poll(ctx, false));

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    g_usleep(1 * G_USEC_PER_SEC);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    /* timer_mod called by our callback */
    do {} while (aio_poll(ctx, false));

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 2);

    /* As max is now 2, an event notifier should not have gone off */

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    set_event_notifier(ctx, &e, NULL);
    event_notifier_cleanup(&e);

    timer_del(&data.timer);
}

/* Now the same tests, using the context as a GSource. They are
 * very similar to the ones above, with g_main_context_iteration
 * replacing aio_poll. However:
 * - sometimes both the AioContext and the glib main loop wake
 *   themselves up. Hence, some "g_assert(!aio_poll(ctx, false));"
 *   are replaced by "while (g_main_context_iteration(NULL, false));".
 * - there is no exact replacement for a blocking wait.
 *   "while (g_main_context_iteration(NULL, true));" seems to work,
 *   but it is not documented _why_ it works. For these tests a
 *   non-blocking loop like "while (g_main_context_iteration(NULL, false));"
 *   works well, and that's what I am using.
 */

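/* aio_notify() wakes up the GSource; draining the main loop afterwards
 * leaves it quiescent again. */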
static void test_source_flush(void)
{
    g_assert(!g_main_context_iteration(NULL, false));
    aio_notify(ctx);
    while (g_main_context_iteration(NULL, false));
    g_assert(!g_main_context_iteration(NULL, false));
}

static void test_source_bh_schedule(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

static void test_source_bh_schedule10(void)
{
    BHTestData data = { .n = 0, .max = 10 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 2);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);
    qemu_bh_delete(data.bh);
}

static void test_source_bh_cancel(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_cancel(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    qemu_bh_delete(data.bh);
}

static void test_source_bh_delete(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_delete(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
}

static void test_source_bh_delete_from_cb(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);

    qemu_bh_schedule(data1.bh);
    g_assert_cmpint(data1.n, ==, 0);

    g_main_context_iteration(NULL, true);
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert(data1.bh == NULL);

    assert(g_main_context_iteration(NULL, false));
    assert(!g_main_context_iteration(NULL, false));
}

static void test_source_bh_delete_from_cb_many(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };
    BHTestData data2 = { .n = 0, .max = 3 };
    BHTestData data3 = { .n = 0, .max = 2 };
    BHTestData data4 = { .n = 0, .max = 4 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
    data2.bh = aio_bh_new(ctx, bh_delete_cb, &data2);
    data3.bh = aio_bh_new(ctx, bh_delete_cb, &data3);
    data4.bh = aio_bh_new(ctx, bh_delete_cb, &data4);

    qemu_bh_schedule(data1.bh);
    qemu_bh_schedule(data2.bh);
    qemu_bh_schedule(data3.bh);
    qemu_bh_schedule(data4.bh);
    g_assert_cmpint(data1.n, ==, 0);
    g_assert_cmpint(data2.n, ==, 0);
    g_assert_cmpint(data3.n, ==, 0);
    g_assert_cmpint(data4.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data1.n, ==, 1);
    g_assert_cmpint(data2.n, ==, 1);
    g_assert_cmpint(data3.n, ==, 1);
    g_assert_cmpint(data4.n, ==, 1);
    g_assert(data1.bh == NULL);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert_cmpint(data2.n, ==, data2.max);
    g_assert_cmpint(data3.n, ==, data3.max);
    g_assert_cmpint(data4.n, ==, data4.max);
    g_assert(data1.bh == NULL);
    g_assert(data2.bh == NULL);
    g_assert(data3.bh == NULL);
    g_assert(data4.bh == NULL);
}

static void test_source_bh_flush(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

static void test_source_set_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 0 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    event_notifier_cleanup(&data.e);
}

static void test_source_wait_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 1 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 1);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_cleanup(&data.e);
}

static void test_source_flush_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 10);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 9);
    g_assert(g_main_context_iteration(NULL, false));

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);
    g_assert_cmpint(data.active, ==, 0);
    g_assert(!g_main_context_iteration(NULL, false));

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    event_notifier_cleanup(&data.e);
}

static void test_source_wait_event_notifier_noflush(void)
{
    EventNotifierTestData data = { .n = 0 };
    EventNotifierTestData dummy = { .n = 0, .active = 1 };

    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);

    /* Until there is an active descriptor, glib may or may not call
     * event_ready_cb. Still, it must not block. */
    event_notifier_set(&data.e);
    g_main_context_iteration(NULL, true);
    data.n = 0;

    /* An active event notifier forces aio_poll to look at EventNotifiers. */
    event_notifier_init(&dummy.e, false);
    set_event_notifier(ctx, &dummy.e, event_ready_cb);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_set(&dummy.e);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert_cmpint(dummy.n, ==, 1);
    g_assert_cmpint(dummy.active, ==, 0);

    set_event_notifier(ctx, &dummy.e, NULL);
    event_notifier_cleanup(&dummy.e);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_cleanup(&data.e);
}

static void test_source_timer_schedule(void)
{
    TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
                           .max = 2,
                           .clock_type = QEMU_CLOCK_REALTIME };
    EventNotifier e;
    int64_t expiry;

    /* aio_poll will not block to wait for timers to complete unless it has
     * an fd to wait on. Fixing this breaks other tests. So create a dummy one.
     */
    event_notifier_init(&e, false);
    set_event_notifier(ctx, &e, dummy_io_handler_read);
    do {} while (g_main_context_iteration(NULL, false));

    aio_timer_init(ctx, &data.timer, data.clock_type,
                   SCALE_NS, timer_test_cb, &data);
    expiry = qemu_clock_get_ns(data.clock_type) +
             data.ns;
    timer_mod(&data.timer, expiry);

    g_assert_cmpint(data.n, ==, 0);

    g_usleep(1 * G_USEC_PER_SEC);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);
    expiry += data.ns;

    while (data.n < 2) {
        g_main_context_iteration(NULL, true);
    }

    g_assert_cmpint(data.n, ==, 2);
    g_assert(qemu_clock_get_ns(data.clock_type) > expiry);

    set_event_notifier(ctx, &e, NULL);
    event_notifier_cleanup(&e);

    timer_del(&data.timer);
}

/*
 * Check that aio_co_enter() can chain many times
 *
 * Two coroutines should be able to invoke each other via aio_co_enter() many
 * times without hitting a limit like stack exhaustion. In other words, the
 * calls should be chained instead of nested.
 */

typedef struct {
    Coroutine *other;
    unsigned i;
    unsigned max;
} ChainData;

static void coroutine_fn chain(void *opaque)
{
    ChainData *data = opaque;

    for (data->i = 0; data->i < data->max; data->i++) {
        /* Queue up the other coroutine... */
        aio_co_enter(ctx, data->other);

        /* ...and give control to it */
        qemu_coroutine_yield();
    }
}

static void test_queue_chaining(void)
{
    /* This number of iterations hit stack exhaustion in the past: */
    ChainData data_a = { .max = 25000 };
    ChainData data_b = { .max = 25000 };

    data_b.other = qemu_coroutine_create(chain, &data_a);
    data_a.other = qemu_coroutine_create(chain, &data_b);

    qemu_coroutine_enter(data_b.other);

    g_assert_cmpint(data_a.i, ==, data_a.max);
    g_assert_cmpint(data_b.i, ==, data_b.max - 1);

    /* Allow the second coroutine to terminate */
    qemu_coroutine_enter(data_a.other);

    g_assert_cmpint(data_b.i, ==, data_b.max);
}

static void co_check_current_thread(void *opaque)
{
    QemuThread *main_thread = opaque;
    assert(qemu_thread_is_self(main_thread));
}

static void *test_aio_co_enter(void *co)
{
    /*
     * qemu_get_current_aio_context() should not be the main thread
     * AioContext, because this is a worker thread that has not taken
     * the BQL. So aio_co_enter will schedule the coroutine in the
     * main thread AioContext.
     */
    aio_co_enter(qemu_get_aio_context(), co);
    return NULL;
}

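/* Entering a coroutine from a worker thread must defer it to the main
 * thread's AioContext rather than running it in place. */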
static void test_worker_thread_co_enter(void)
{
    QemuThread this_thread, worker_thread;
    Coroutine *co;

    qemu_thread_get_self(&this_thread);
    co = qemu_coroutine_create(co_check_current_thread, &this_thread);

    qemu_thread_create(&worker_thread, "test_aio_co_enter",
                       test_aio_co_enter,
                       co, QEMU_THREAD_JOINABLE);

    /* Test aio_co_enter from a worker thread. */
    qemu_thread_join(&worker_thread);
    g_assert(aio_poll(ctx, true));
    g_assert(!aio_poll(ctx, false));
}

/* End of tests. */

int main(int argc, char **argv)
{
    qemu_init_main_loop(&error_fatal);
    ctx = qemu_get_aio_context();

    while (g_main_context_iteration(NULL, false));

    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/aio/bh/schedule", test_bh_schedule);
    g_test_add_func("/aio/bh/schedule10", test_bh_schedule10);
    g_test_add_func("/aio/bh/cancel", test_bh_cancel);
    g_test_add_func("/aio/bh/delete", test_bh_delete);
    g_test_add_func("/aio/bh/callback-delete/one", test_bh_delete_from_cb);
    g_test_add_func("/aio/bh/callback-delete/many", test_bh_delete_from_cb_many);
    g_test_add_func("/aio/bh/flush", test_bh_flush);
    g_test_add_func("/aio/event/add-remove", test_set_event_notifier);
    g_test_add_func("/aio/event/wait", test_wait_event_notifier);
    g_test_add_func("/aio/event/wait/no-flush-cb", test_wait_event_notifier_noflush);
    g_test_add_func("/aio/event/flush", test_flush_event_notifier);
    g_test_add_func("/aio/timer/schedule", test_timer_schedule);

    g_test_add_func("/aio/coroutine/queue-chaining", test_queue_chaining);
    g_test_add_func("/aio/coroutine/worker-thread-co-enter", test_worker_thread_co_enter);

    g_test_add_func("/aio-gsource/flush", test_source_flush);
    g_test_add_func("/aio-gsource/bh/schedule", test_source_bh_schedule);
    g_test_add_func("/aio-gsource/bh/schedule10", test_source_bh_schedule10);
    g_test_add_func("/aio-gsource/bh/cancel", test_source_bh_cancel);
    g_test_add_func("/aio-gsource/bh/delete", test_source_bh_delete);
    g_test_add_func("/aio-gsource/bh/callback-delete/one", test_source_bh_delete_from_cb);
    g_test_add_func("/aio-gsource/bh/callback-delete/many", test_source_bh_delete_from_cb_many);
    g_test_add_func("/aio-gsource/bh/flush", test_source_bh_flush);
    g_test_add_func("/aio-gsource/event/add-remove", test_source_set_event_notifier);
    g_test_add_func("/aio-gsource/event/wait", test_source_wait_event_notifier);
    g_test_add_func("/aio-gsource/event/wait/no-flush-cb", test_source_wait_event_notifier_noflush);
    g_test_add_func("/aio-gsource/event/flush", test_source_flush_event_notifier);
    g_test_add_func("/aio-gsource/timer/schedule", test_source_timer_schedule);
    return g_test_run();
}