/*
 * AioContext tests
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "block/aio.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/sockets.h"
#include "qemu/error-report.h"
#include "qemu/coroutine.h"
#include "qemu/main-loop.h"

static AioContext *ctx;

typedef struct {
    EventNotifier e;
    int n;
    int active;
    bool auto_set;
} EventNotifierTestData;

/* Wait until event notifier becomes inactive */
static void wait_until_inactive(EventNotifierTestData *data)
{
    while (data->active > 0) {
        aio_poll(ctx, true);
    }
}

/* Simple callbacks for testing. */

typedef struct {
    QEMUBH *bh;
    int n;
    int max;
} BHTestData;

typedef struct {
    QEMUTimer timer;
    QEMUClockType clock_type;
    int n;
    int max;
    int64_t ns;
    AioContext *ctx;
} TimerTestData;

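/* Bottom half callback: increments the counter and, while still below
 * data->max, re-schedules itself (so it runs just once when max is 0). */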
static void bh_test_cb(void *opaque)
{
    BHTestData *data = opaque;
    if (++data->n < data->max) {
        qemu_bh_schedule(data->bh);
    }
}

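/* Timer callback: re-arms the timer data->ns nanoseconds into the future
 * until it has fired data->max times. */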
static void timer_test_cb(void *opaque)
{
    TimerTestData *data = opaque;
    if (++data->n < data->max) {
        timer_mod(&data->timer,
                  qemu_clock_get_ns(data->clock_type) + data->ns);
    }
}

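/* Dummy read handler: exists only so the timer tests can register a file
 * descriptor for aio_poll()/g_main_context_iteration() to wait on. */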
static void dummy_io_handler_read(EventNotifier *e)
{
}

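/* Bottom half callback that re-schedules itself until it has run data->max
 * times, then deletes its own bottom half and clears data->bh. */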
static void bh_delete_cb(void *opaque)
{
    BHTestData *data = opaque;
    if (++data->n < data->max) {
        qemu_bh_schedule(data->bh);
    } else {
        qemu_bh_delete(data->bh);
        data->bh = NULL;
    }
}

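/* EventNotifier handler: counts invocations, decrements data->active, and
 * re-arms the notifier when data->auto_set is set and work remains. */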
static void event_ready_cb(EventNotifier *e)
{
    EventNotifierTestData *data = container_of(e, EventNotifierTestData, e);
    g_assert(event_notifier_test_and_clear(e));
    data->n++;
    if (data->active > 0) {
        data->active--;
    }
    if (data->auto_set && data->active) {
        event_notifier_set(e);
    }
}

/* Tests using aio_*. */

typedef struct {
    QemuMutex start_lock;
    EventNotifier notifier;
    bool thread_acquired;
} AcquireTestData;

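/* Worker thread for test_acquire(): once released via start_lock, it kicks
 * the main thread out of aio_poll() with the notifier, then takes and drops
 * the AioContext lock and records success in thread_acquired. */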
static void *test_acquire_thread(void *opaque)
{
    AcquireTestData *data = opaque;

    /* Wait for other thread to let us start */
    qemu_mutex_lock(&data->start_lock);
    qemu_mutex_unlock(&data->start_lock);

    /* event_notifier_set might be called either before or after
     * the main thread's call to poll().  The test case's outcome
     * should be the same in either case.
     */
    event_notifier_set(&data->notifier);
    aio_context_acquire(ctx);
    aio_context_release(ctx);

    data->thread_acquired = true; /* success, we got here */

    return NULL;
}

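/* Convenience wrapper: registers @handler for @notifier as a non-external
 * client (third argument false), unlike test_aio_external_client() below,
 * which registers an external one. */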
static void set_event_notifier(AioContext *ctx, EventNotifier *notifier,
                               EventNotifierHandler *handler)
{
    aio_set_event_notifier(ctx, notifier, false, handler, NULL);
}

static void dummy_notifier_read(EventNotifier *n)
{
    event_notifier_test_and_clear(n);
}

static void test_acquire(void)
{
    QemuThread thread;
    AcquireTestData data;

    /* Dummy event notifier ensures aio_poll() will block */
    event_notifier_init(&data.notifier, false);
    set_event_notifier(ctx, &data.notifier, dummy_notifier_read);
    g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */

    qemu_mutex_init(&data.start_lock);
    qemu_mutex_lock(&data.start_lock);
    data.thread_acquired = false;

    qemu_thread_create(&thread, "test_acquire_thread",
                       test_acquire_thread,
                       &data, QEMU_THREAD_JOINABLE);

    /* Block in aio_poll(), let other thread kick us and acquire context */
    aio_context_acquire(ctx);
    qemu_mutex_unlock(&data.start_lock); /* let the thread run */
    g_assert(aio_poll(ctx, true));
    g_assert(!data.thread_acquired);
    aio_context_release(ctx);

    qemu_thread_join(&thread);
    set_event_notifier(ctx, &data.notifier, NULL);
    event_notifier_cleanup(&data.notifier);

    g_assert(data.thread_acquired);
}

static void test_bh_schedule(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

static void test_bh_schedule10(void)
{
    BHTestData data = { .n = 0, .max = 10 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 2);

    while (data.n < 10) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data.n, ==, 10);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 10);
    qemu_bh_delete(data.bh);
}

static void test_bh_cancel(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_cancel(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    qemu_bh_delete(data.bh);
}

static void test_bh_delete(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_delete(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
}

static void test_bh_delete_from_cb(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);

    qemu_bh_schedule(data1.bh);
    g_assert_cmpint(data1.n, ==, 0);

    while (data1.n < data1.max) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert(data1.bh == NULL);

    g_assert(!aio_poll(ctx, false));
}

static void test_bh_delete_from_cb_many(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };
    BHTestData data2 = { .n = 0, .max = 3 };
    BHTestData data3 = { .n = 0, .max = 2 };
    BHTestData data4 = { .n = 0, .max = 4 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
    data2.bh = aio_bh_new(ctx, bh_delete_cb, &data2);
    data3.bh = aio_bh_new(ctx, bh_delete_cb, &data3);
    data4.bh = aio_bh_new(ctx, bh_delete_cb, &data4);

    qemu_bh_schedule(data1.bh);
    qemu_bh_schedule(data2.bh);
    qemu_bh_schedule(data3.bh);
    qemu_bh_schedule(data4.bh);
    g_assert_cmpint(data1.n, ==, 0);
    g_assert_cmpint(data2.n, ==, 0);
    g_assert_cmpint(data3.n, ==, 0);
    g_assert_cmpint(data4.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data1.n, ==, 1);
    g_assert_cmpint(data2.n, ==, 1);
    g_assert_cmpint(data3.n, ==, 1);
    g_assert_cmpint(data4.n, ==, 1);
    g_assert(data1.bh == NULL);

    while (data1.n < data1.max ||
           data2.n < data2.max ||
           data3.n < data3.max ||
           data4.n < data4.max) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert_cmpint(data2.n, ==, data2.max);
    g_assert_cmpint(data3.n, ==, data3.max);
    g_assert_cmpint(data4.n, ==, data4.max);
    g_assert(data1.bh == NULL);
    g_assert(data2.bh == NULL);
    g_assert(data3.bh == NULL);
    g_assert(data4.bh == NULL);
}

static void test_bh_flush(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

static void test_set_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 0 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    event_notifier_cleanup(&data.e);
}

static void test_wait_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 1 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 1);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_cleanup(&data.e);
}

static void test_flush_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 10);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 9);
    g_assert(aio_poll(ctx, false));

    wait_until_inactive(&data);
    g_assert_cmpint(data.n, ==, 10);
    g_assert_cmpint(data.active, ==, 0);
    g_assert(!aio_poll(ctx, false));

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    event_notifier_cleanup(&data.e);
}

static void test_aio_external_client(void)
{
    int i, j;

    for (i = 1; i < 3; i++) {
        EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
        event_notifier_init(&data.e, false);
        aio_set_event_notifier(ctx, &data.e, true, event_ready_cb, NULL);
        event_notifier_set(&data.e);
        for (j = 0; j < i; j++) {
            aio_disable_external(ctx);
        }
        for (j = 0; j < i; j++) {
            assert(!aio_poll(ctx, false));
            assert(event_notifier_test_and_clear(&data.e));
            event_notifier_set(&data.e);
            aio_enable_external(ctx);
        }
        assert(aio_poll(ctx, false));
        set_event_notifier(ctx, &data.e, NULL);
        event_notifier_cleanup(&data.e);
    }
}

static void test_wait_event_notifier_noflush(void)
{
    EventNotifierTestData data = { .n = 0 };
    EventNotifierTestData dummy = { .n = 0, .active = 1 };

    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    /* Until there is an active descriptor, aio_poll may or may not call
     * event_ready_cb.  Still, it must not block. */
    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, true));
    data.n = 0;

    /* An active event notifier forces aio_poll to look at EventNotifiers. */
    event_notifier_init(&dummy.e, false);
    set_event_notifier(ctx, &dummy.e, event_ready_cb);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_set(&dummy.e);
    wait_until_inactive(&dummy);
    g_assert_cmpint(data.n, ==, 2);
    g_assert_cmpint(dummy.n, ==, 1);
    g_assert_cmpint(dummy.active, ==, 0);

    set_event_notifier(ctx, &dummy.e, NULL);
    event_notifier_cleanup(&dummy.e);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_cleanup(&data.e);
}

static void test_timer_schedule(void)
{
    TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
                           .max = 2,
                           .clock_type = QEMU_CLOCK_REALTIME };
    EventNotifier e;

    /* aio_poll will not block to wait for timers to complete unless it has
     * an fd to wait on. Fixing this breaks other tests. So create a dummy one.
     */
    event_notifier_init(&e, false);
    set_event_notifier(ctx, &e, dummy_io_handler_read);
    aio_poll(ctx, false);

    aio_timer_init(ctx, &data.timer, data.clock_type,
                   SCALE_NS, timer_test_cb, &data);
    timer_mod(&data.timer,
              qemu_clock_get_ns(data.clock_type) +
              data.ns);

    g_assert_cmpint(data.n, ==, 0);

    /* timer_mod may well cause an event notifier to have gone off,
     * so clear that
     */
    do {} while (aio_poll(ctx, false));

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    g_usleep(1 * G_USEC_PER_SEC);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    /* timer_mod called by our callback */
    do {} while (aio_poll(ctx, false));

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 2);

    /* As max is now 2, an event notifier should not have gone off */

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    set_event_notifier(ctx, &e, NULL);
    event_notifier_cleanup(&e);

    timer_del(&data.timer);
}

/* Now the same tests, using the context as a GSource.  They are
 * very similar to the ones above, with g_main_context_iteration
 * replacing aio_poll.  However:
 * - sometimes both the AioContext and the glib main loop wake
 *   themselves up.  Hence, some "g_assert(!aio_poll(ctx, false));"
 *   are replaced by "while (g_main_context_iteration(NULL, false));".
 * - there is no exact replacement for a blocking wait.
 *   "while (g_main_context_iteration(NULL, true))" seems to work,
 *   but it is not documented _why_ it works.  For these tests a
 *   non-blocking loop like "while (g_main_context_iteration(NULL, false))"
 *   works well, and that's what I am using.
 */

static void test_source_flush(void)
{
    g_assert(!g_main_context_iteration(NULL, false));
    aio_notify(ctx);
    while (g_main_context_iteration(NULL, false));
    g_assert(!g_main_context_iteration(NULL, false));
}

static void test_source_bh_schedule(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

static void test_source_bh_schedule10(void)
{
    BHTestData data = { .n = 0, .max = 10 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 2);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);
    qemu_bh_delete(data.bh);
}

static void test_source_bh_cancel(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_cancel(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    qemu_bh_delete(data.bh);
}

static void test_source_bh_delete(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_delete(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
}

static void test_source_bh_delete_from_cb(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);

    qemu_bh_schedule(data1.bh);
    g_assert_cmpint(data1.n, ==, 0);

    g_main_context_iteration(NULL, true);
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert(data1.bh == NULL);

    g_assert(!g_main_context_iteration(NULL, false));
}

static void test_source_bh_delete_from_cb_many(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };
    BHTestData data2 = { .n = 0, .max = 3 };
    BHTestData data3 = { .n = 0, .max = 2 };
    BHTestData data4 = { .n = 0, .max = 4 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
    data2.bh = aio_bh_new(ctx, bh_delete_cb, &data2);
    data3.bh = aio_bh_new(ctx, bh_delete_cb, &data3);
    data4.bh = aio_bh_new(ctx, bh_delete_cb, &data4);

    qemu_bh_schedule(data1.bh);
    qemu_bh_schedule(data2.bh);
    qemu_bh_schedule(data3.bh);
    qemu_bh_schedule(data4.bh);
    g_assert_cmpint(data1.n, ==, 0);
    g_assert_cmpint(data2.n, ==, 0);
    g_assert_cmpint(data3.n, ==, 0);
    g_assert_cmpint(data4.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data1.n, ==, 1);
    g_assert_cmpint(data2.n, ==, 1);
    g_assert_cmpint(data3.n, ==, 1);
    g_assert_cmpint(data4.n, ==, 1);
    g_assert(data1.bh == NULL);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert_cmpint(data2.n, ==, data2.max);
    g_assert_cmpint(data3.n, ==, data3.max);
    g_assert_cmpint(data4.n, ==, data4.max);
    g_assert(data1.bh == NULL);
    g_assert(data2.bh == NULL);
    g_assert(data3.bh == NULL);
    g_assert(data4.bh == NULL);
}

static void test_source_bh_flush(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

static void test_source_set_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 0 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    event_notifier_cleanup(&data.e);
}

static void test_source_wait_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 1 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 1);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_cleanup(&data.e);
}

static void test_source_flush_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 10);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 9);
    g_assert(g_main_context_iteration(NULL, false));

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);
    g_assert_cmpint(data.active, ==, 0);
    g_assert(!g_main_context_iteration(NULL, false));

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    event_notifier_cleanup(&data.e);
}

static void test_source_wait_event_notifier_noflush(void)
{
    EventNotifierTestData data = { .n = 0 };
    EventNotifierTestData dummy = { .n = 0, .active = 1 };

    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);

    /* Until there is an active descriptor, glib may or may not call
     * event_ready_cb.  Still, it must not block. */
    event_notifier_set(&data.e);
    g_main_context_iteration(NULL, true);
    data.n = 0;

    /* An active event notifier forces aio_poll to look at EventNotifiers. */
    event_notifier_init(&dummy.e, false);
    set_event_notifier(ctx, &dummy.e, event_ready_cb);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_set(&dummy.e);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert_cmpint(dummy.n, ==, 1);
    g_assert_cmpint(dummy.active, ==, 0);

    set_event_notifier(ctx, &dummy.e, NULL);
    event_notifier_cleanup(&dummy.e);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_cleanup(&data.e);
}

static void test_source_timer_schedule(void)
{
    TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
                           .max = 2,
                           .clock_type = QEMU_CLOCK_REALTIME };
    EventNotifier e;
    int64_t expiry;

    /* aio_poll will not block to wait for timers to complete unless it has
     * an fd to wait on. Fixing this breaks other tests. So create a dummy one.
     */
    event_notifier_init(&e, false);
    set_event_notifier(ctx, &e, dummy_io_handler_read);
    do {} while (g_main_context_iteration(NULL, false));

    aio_timer_init(ctx, &data.timer, data.clock_type,
                   SCALE_NS, timer_test_cb, &data);
    expiry = qemu_clock_get_ns(data.clock_type) +
             data.ns;
    timer_mod(&data.timer, expiry);

    g_assert_cmpint(data.n, ==, 0);

    g_usleep(1 * G_USEC_PER_SEC);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);
    expiry += data.ns;

    while (data.n < 2) {
        g_main_context_iteration(NULL, true);
    }

    g_assert_cmpint(data.n, ==, 2);
    g_assert(qemu_clock_get_ns(data.clock_type) > expiry);

    set_event_notifier(ctx, &e, NULL);
    event_notifier_cleanup(&e);

    timer_del(&data.timer);
}

/*
 * Check that aio_co_enter() can chain many times
 *
 * Two coroutines should be able to invoke each other via aio_co_enter() many
 * times without hitting a limit like stack exhaustion.  In other words, the
 * calls should be chained instead of nested.
 */

typedef struct {
    Coroutine *other;
    unsigned i;
    unsigned max;
} ChainData;

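/* Coroutine body for test_queue_chaining(): schedules the partner coroutine
 * with aio_co_enter() and yields back to it, data->max times. */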
static void coroutine_fn chain(void *opaque)
{
    ChainData *data = opaque;

    for (data->i = 0; data->i < data->max; data->i++) {
        /* Queue up the other coroutine... */
        aio_co_enter(ctx, data->other);

        /* ...and give control to it */
        qemu_coroutine_yield();
    }
}

static void test_queue_chaining(void)
{
    /* This number of iterations hit stack exhaustion in the past: */
    ChainData data_a = { .max = 25000 };
    ChainData data_b = { .max = 25000 };

    data_b.other = qemu_coroutine_create(chain, &data_a);
    data_a.other = qemu_coroutine_create(chain, &data_b);

    qemu_coroutine_enter(data_b.other);

    g_assert_cmpint(data_a.i, ==, data_a.max);
    g_assert_cmpint(data_b.i, ==, data_b.max - 1);

    /* Allow the second coroutine to terminate */
    qemu_coroutine_enter(data_a.other);

    g_assert_cmpint(data_b.i, ==, data_b.max);
}

/* End of tests. */

int main(int argc, char **argv)
{
    qemu_init_main_loop(&error_fatal);
    ctx = qemu_get_aio_context();

    while (g_main_context_iteration(NULL, false));

    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/aio/acquire", test_acquire);
    g_test_add_func("/aio/bh/schedule", test_bh_schedule);
    g_test_add_func("/aio/bh/schedule10", test_bh_schedule10);
    g_test_add_func("/aio/bh/cancel", test_bh_cancel);
    g_test_add_func("/aio/bh/delete", test_bh_delete);
    g_test_add_func("/aio/bh/callback-delete/one", test_bh_delete_from_cb);
    g_test_add_func("/aio/bh/callback-delete/many", test_bh_delete_from_cb_many);
    g_test_add_func("/aio/bh/flush", test_bh_flush);
    g_test_add_func("/aio/event/add-remove", test_set_event_notifier);
    g_test_add_func("/aio/event/wait", test_wait_event_notifier);
    g_test_add_func("/aio/event/wait/no-flush-cb", test_wait_event_notifier_noflush);
    g_test_add_func("/aio/event/flush", test_flush_event_notifier);
    g_test_add_func("/aio/external-client", test_aio_external_client);
    g_test_add_func("/aio/timer/schedule", test_timer_schedule);

    g_test_add_func("/aio/coroutine/queue-chaining", test_queue_chaining);

    g_test_add_func("/aio-gsource/flush", test_source_flush);
    g_test_add_func("/aio-gsource/bh/schedule", test_source_bh_schedule);
    g_test_add_func("/aio-gsource/bh/schedule10", test_source_bh_schedule10);
    g_test_add_func("/aio-gsource/bh/cancel", test_source_bh_cancel);
    g_test_add_func("/aio-gsource/bh/delete", test_source_bh_delete);
    g_test_add_func("/aio-gsource/bh/callback-delete/one", test_source_bh_delete_from_cb);
    g_test_add_func("/aio-gsource/bh/callback-delete/many", test_source_bh_delete_from_cb_many);
    g_test_add_func("/aio-gsource/bh/flush", test_source_bh_flush);
    g_test_add_func("/aio-gsource/event/add-remove", test_source_set_event_notifier);
    g_test_add_func("/aio-gsource/event/wait", test_source_wait_event_notifier);
    g_test_add_func("/aio-gsource/event/wait/no-flush-cb", test_source_wait_event_notifier_noflush);
    g_test_add_func("/aio-gsource/event/flush", test_source_flush_event_notifier);
    g_test_add_func("/aio-gsource/timer/schedule", test_source_timer_schedule);
    return g_test_run();
}