/*
 * AioContext tests
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "block/aio.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/sockets.h"
#include "qemu/error-report.h"
#include "qemu/coroutine-core.h"
#include "qemu/main-loop.h"

static AioContext *ctx;

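/*
 * Per-notifier bookkeeping used by the event notifier tests: @n counts
 * callback invocations, @active is the number of events still expected,
 * and @auto_set makes the callback re-arm the notifier while events remain.
 */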
typedef struct {
    EventNotifier e;
    int n;
    int active;
    bool auto_set;
} EventNotifierTestData;

/* Wait until event notifier becomes inactive */
static void wait_until_inactive(EventNotifierTestData *data)
{
    while (data->active > 0) {
        aio_poll(ctx, true);
    }
}

/* Simple callbacks for testing. */

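/*
 * A bottom half that re-schedules itself from its own callback until it has
 * run @max times (bh_test_cb), or that deletes itself once @max is reached
 * (bh_delete_cb).
 */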
typedef struct {
    QEMUBH *bh;
    int n;
    int max;
} BHTestData;

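/*
 * A timer that re-arms itself @ns nanoseconds into the future from its own
 * callback until it has fired @max times.
 */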
typedef struct {
    QEMUTimer timer;
    QEMUClockType clock_type;
    int n;
    int max;
    int64_t ns;
    AioContext *ctx;
} TimerTestData;

static void bh_test_cb(void *opaque)
{
    BHTestData *data = opaque;
    if (++data->n < data->max) {
        qemu_bh_schedule(data->bh);
    }
}

static void timer_test_cb(void *opaque)
{
    TimerTestData *data = opaque;
    if (++data->n < data->max) {
        timer_mod(&data->timer,
                  qemu_clock_get_ns(data->clock_type) + data->ns);
    }
}

static void dummy_io_handler_read(EventNotifier *e)
{
}

static void bh_delete_cb(void *opaque)
{
    BHTestData *data = opaque;
    if (++data->n < data->max) {
        qemu_bh_schedule(data->bh);
    } else {
        qemu_bh_delete(data->bh);
        data->bh = NULL;
    }
}

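/*
 * Generic event notifier callback: clear the notifier, count the invocation,
 * consume one expected event, and re-arm the notifier if @auto_set is set and
 * events are still expected.
 */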
static void event_ready_cb(EventNotifier *e)
{
    EventNotifierTestData *data = container_of(e, EventNotifierTestData, e);
    g_assert(event_notifier_test_and_clear(e));
    data->n++;
    if (data->active > 0) {
        data->active--;
    }
    if (data->auto_set && data->active) {
        event_notifier_set(e);
    }
}

/* Tests using aio_*. */

typedef struct {
    QemuMutex start_lock;
    EventNotifier notifier;
    bool thread_acquired;
} AcquireTestData;

static void *test_acquire_thread(void *opaque)
{
    AcquireTestData *data = opaque;

    /* Wait for other thread to let us start */
    qemu_mutex_lock(&data->start_lock);
    qemu_mutex_unlock(&data->start_lock);

    /* event_notifier_set might be called either before or after
     * the main thread's call to poll(). The test case's outcome
     * should be the same in either case.
     */
    event_notifier_set(&data->notifier);
    aio_context_acquire(ctx);
    aio_context_release(ctx);

    data->thread_acquired = true; /* success, we got here */

    return NULL;
}

static void set_event_notifier(AioContext *ctx, EventNotifier *notifier,
                               EventNotifierHandler *handler)
{
    aio_set_event_notifier(ctx, notifier, handler, NULL, NULL);
}

static void dummy_notifier_read(EventNotifier *n)
{
    event_notifier_test_and_clear(n);
}

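/*
 * The main thread acquires the AioContext and blocks in aio_poll(); the
 * worker thread kicks the poll with event_notifier_set() and then calls
 * aio_context_acquire() itself.  The worker must not complete the acquire
 * until the main thread releases the context (checked via thread_acquired).
 */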
static void test_acquire(void)
{
    QemuThread thread;
    AcquireTestData data;

    /* Dummy event notifier ensures aio_poll() will block */
    event_notifier_init(&data.notifier, false);
    set_event_notifier(ctx, &data.notifier, dummy_notifier_read);
    g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */

    qemu_mutex_init(&data.start_lock);
    qemu_mutex_lock(&data.start_lock);
    data.thread_acquired = false;

    qemu_thread_create(&thread, "test_acquire_thread",
                       test_acquire_thread,
                       &data, QEMU_THREAD_JOINABLE);

    /* Block in aio_poll(), let other thread kick us and acquire context */
    aio_context_acquire(ctx);
    qemu_mutex_unlock(&data.start_lock); /* let the thread run */
    g_assert(aio_poll(ctx, true));
    g_assert(!data.thread_acquired);
    aio_context_release(ctx);

    qemu_thread_join(&thread);
    set_event_notifier(ctx, &data.notifier, NULL);
    event_notifier_cleanup(&data.notifier);

    g_assert(data.thread_acquired);
}

static void test_bh_schedule(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

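/*
 * A BH rescheduled from its own callback runs again on the next aio_poll()
 * iteration, so reaching max = 10 takes repeated polls rather than a single
 * one.
 */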
static void test_bh_schedule10(void)
{
    BHTestData data = { .n = 0, .max = 10 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 2);

    while (data.n < 10) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data.n, ==, 10);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 10);
    qemu_bh_delete(data.bh);
}

static void test_bh_cancel(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_cancel(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    qemu_bh_delete(data.bh);
}

static void test_bh_delete(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_delete(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
}

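/*
 * bh_delete_cb deletes its own BH from inside the callback once it has run
 * max times; the test checks that this is safe and that no further work is
 * reported afterwards.
 */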
static void test_bh_delete_from_cb(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);

    qemu_bh_schedule(data1.bh);
    g_assert_cmpint(data1.n, ==, 0);

    while (data1.n < data1.max) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert(data1.bh == NULL);

    g_assert(!aio_poll(ctx, false));
}

static void test_bh_delete_from_cb_many(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };
    BHTestData data2 = { .n = 0, .max = 3 };
    BHTestData data3 = { .n = 0, .max = 2 };
    BHTestData data4 = { .n = 0, .max = 4 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
    data2.bh = aio_bh_new(ctx, bh_delete_cb, &data2);
    data3.bh = aio_bh_new(ctx, bh_delete_cb, &data3);
    data4.bh = aio_bh_new(ctx, bh_delete_cb, &data4);

    qemu_bh_schedule(data1.bh);
    qemu_bh_schedule(data2.bh);
    qemu_bh_schedule(data3.bh);
    qemu_bh_schedule(data4.bh);
    g_assert_cmpint(data1.n, ==, 0);
    g_assert_cmpint(data2.n, ==, 0);
    g_assert_cmpint(data3.n, ==, 0);
    g_assert_cmpint(data4.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data1.n, ==, 1);
    g_assert_cmpint(data2.n, ==, 1);
    g_assert_cmpint(data3.n, ==, 1);
    g_assert_cmpint(data4.n, ==, 1);
    g_assert(data1.bh == NULL);

    while (data1.n < data1.max ||
           data2.n < data2.max ||
           data3.n < data3.max ||
           data4.n < data4.max) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert_cmpint(data2.n, ==, data2.max);
    g_assert_cmpint(data3.n, ==, data3.max);
    g_assert_cmpint(data4.n, ==, data4.max);
    g_assert(data1.bh == NULL);
    g_assert(data2.bh == NULL);
    g_assert(data3.bh == NULL);
    g_assert(data4.bh == NULL);
}

static void test_bh_flush(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

static void test_set_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 0 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    event_notifier_cleanup(&data.e);
}

static void test_wait_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 1 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 1);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_cleanup(&data.e);
}

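/*
 * With auto_set, event_ready_cb re-arms the notifier after every invocation,
 * so aio_poll() keeps finding work until all ten expected events have been
 * consumed.
 */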
static void test_flush_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 10);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 9);
    g_assert(aio_poll(ctx, false));

    wait_until_inactive(&data);
    g_assert_cmpint(data.n, ==, 10);
    g_assert_cmpint(data.active, ==, 0);
    g_assert(!aio_poll(ctx, false));

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    event_notifier_cleanup(&data.e);
}

static void test_wait_event_notifier_noflush(void)
{
    EventNotifierTestData data = { .n = 0 };
    EventNotifierTestData dummy = { .n = 0, .active = 1 };

    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    /* Until there is an active descriptor, aio_poll may or may not call
     * event_ready_cb. Still, it must not block. */
    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, true));
    data.n = 0;

    /* An active event notifier forces aio_poll to look at EventNotifiers. */
    event_notifier_init(&dummy.e, false);
    set_event_notifier(ctx, &dummy.e, event_ready_cb);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_set(&data.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_set(&dummy.e);
    wait_until_inactive(&dummy);
    g_assert_cmpint(data.n, ==, 2);
    g_assert_cmpint(dummy.n, ==, 1);
    g_assert_cmpint(dummy.active, ==, 0);

    set_event_notifier(ctx, &dummy.e, NULL);
    event_notifier_cleanup(&dummy.e);

    set_event_notifier(ctx, &data.e, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_cleanup(&data.e);
}

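/*
 * Arm a 750 ms timer that re-arms itself once from its callback and check
 * that aio_poll() dispatches exactly data.max (= 2) expirations.
 */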
static void test_timer_schedule(void)
{
    TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
                           .max = 2,
                           .clock_type = QEMU_CLOCK_REALTIME };
    EventNotifier e;

    /* aio_poll will not block to wait for timers to complete unless it has
     * an fd to wait on. Fixing this breaks other tests. So create a dummy one.
     */
    event_notifier_init(&e, false);
    set_event_notifier(ctx, &e, dummy_io_handler_read);
    aio_poll(ctx, false);

    aio_timer_init(ctx, &data.timer, data.clock_type,
                   SCALE_NS, timer_test_cb, &data);
    timer_mod(&data.timer,
              qemu_clock_get_ns(data.clock_type) +
              data.ns);

    g_assert_cmpint(data.n, ==, 0);

    /* timer_mod may well cause an event notifier to have gone off,
     * so clear that
     */
    do {} while (aio_poll(ctx, false));

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 0);

    g_usleep(1 * G_USEC_PER_SEC);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    /* timer_mod called by our callback */
    do {} while (aio_poll(ctx, false));

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(aio_poll(ctx, true));
    g_assert_cmpint(data.n, ==, 2);

    /* As max is now 2, an event notifier should not have gone off */

    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(data.n, ==, 2);

    set_event_notifier(ctx, &e, NULL);
    event_notifier_cleanup(&e);

    timer_del(&data.timer);
}

/* Now the same tests, using the context as a GSource. They are
 * very similar to the ones above, with g_main_context_iteration
 * replacing aio_poll. However:
 * - sometimes both the AioContext and the glib main loop wake
 *   themselves up. Hence, some "g_assert(!aio_poll(ctx, false));"
 *   are replaced by "while (g_main_context_iteration(NULL, false));".
 * - there is no exact replacement for a blocking wait.
 *   "while (g_main_context_iteration(NULL, true))" seems to work,
 *   but it is not documented _why_ it works. For these tests a
 *   non-blocking loop like "while (g_main_context_iteration(NULL, false));"
 *   works well, and that's what I am using.
 */

static void test_source_flush(void)
{
    g_assert(!g_main_context_iteration(NULL, false));
    aio_notify(ctx);
    while (g_main_context_iteration(NULL, false));
    g_assert(!g_main_context_iteration(NULL, false));
}

static void test_source_bh_schedule(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

static void test_source_bh_schedule10(void)
{
    BHTestData data = { .n = 0, .max = 10 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 2);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);
    qemu_bh_delete(data.bh);
}

static void test_source_bh_cancel(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_cancel(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    qemu_bh_delete(data.bh);
}

static void test_source_bh_delete(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    qemu_bh_delete(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
}

static void test_source_bh_delete_from_cb(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);

    qemu_bh_schedule(data1.bh);
    g_assert_cmpint(data1.n, ==, 0);

    g_main_context_iteration(NULL, true);
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert(data1.bh == NULL);

    assert(g_main_context_iteration(NULL, false));
    assert(!g_main_context_iteration(NULL, false));
}

static void test_source_bh_delete_from_cb_many(void)
{
    BHTestData data1 = { .n = 0, .max = 1 };
    BHTestData data2 = { .n = 0, .max = 3 };
    BHTestData data3 = { .n = 0, .max = 2 };
    BHTestData data4 = { .n = 0, .max = 4 };

    data1.bh = aio_bh_new(ctx, bh_delete_cb, &data1);
    data2.bh = aio_bh_new(ctx, bh_delete_cb, &data2);
    data3.bh = aio_bh_new(ctx, bh_delete_cb, &data3);
    data4.bh = aio_bh_new(ctx, bh_delete_cb, &data4);

    qemu_bh_schedule(data1.bh);
    qemu_bh_schedule(data2.bh);
    qemu_bh_schedule(data3.bh);
    qemu_bh_schedule(data4.bh);
    g_assert_cmpint(data1.n, ==, 0);
    g_assert_cmpint(data2.n, ==, 0);
    g_assert_cmpint(data3.n, ==, 0);
    g_assert_cmpint(data4.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data1.n, ==, 1);
    g_assert_cmpint(data2.n, ==, 1);
    g_assert_cmpint(data3.n, ==, 1);
    g_assert_cmpint(data4.n, ==, 1);
    g_assert(data1.bh == NULL);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data1.n, ==, data1.max);
    g_assert_cmpint(data2.n, ==, data2.max);
    g_assert_cmpint(data3.n, ==, data3.max);
    g_assert_cmpint(data4.n, ==, data4.max);
    g_assert(data1.bh == NULL);
    g_assert(data2.bh == NULL);
    g_assert(data3.bh == NULL);
    g_assert(data4.bh == NULL);
}

static void test_source_bh_flush(void)
{
    BHTestData data = { .n = 0 };
    data.bh = aio_bh_new(ctx, bh_test_cb, &data);

    qemu_bh_schedule(data.bh);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);

    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    qemu_bh_delete(data.bh);
}

static void test_source_set_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 0 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    event_notifier_cleanup(&data.e);
}

static void test_source_wait_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 1 };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 1);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 0);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_cleanup(&data.e);
}

static void test_source_flush_event_notifier(void)
{
    EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);
    g_assert_cmpint(data.active, ==, 10);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.active, ==, 9);
    g_assert(g_main_context_iteration(NULL, false));

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 10);
    g_assert_cmpint(data.active, ==, 0);
    g_assert(!g_main_context_iteration(NULL, false));

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    event_notifier_cleanup(&data.e);
}

static void test_source_wait_event_notifier_noflush(void)
{
    EventNotifierTestData data = { .n = 0 };
    EventNotifierTestData dummy = { .n = 0, .active = 1 };

    event_notifier_init(&data.e, false);
    set_event_notifier(ctx, &data.e, event_ready_cb);

    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 0);

    /* Until there is an active descriptor, glib may or may not call
     * event_ready_cb. Still, it must not block. */
    event_notifier_set(&data.e);
    g_main_context_iteration(NULL, true);
    data.n = 0;

    /* An active event notifier forces aio_poll to look at EventNotifiers. */
    event_notifier_init(&dummy.e, false);
    set_event_notifier(ctx, &dummy.e, event_ready_cb);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);
    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 1);

    event_notifier_set(&data.e);
    g_assert(g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert(!g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_set(&dummy.e);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);
    g_assert_cmpint(dummy.n, ==, 1);
    g_assert_cmpint(dummy.active, ==, 0);

    set_event_notifier(ctx, &dummy.e, NULL);
    event_notifier_cleanup(&dummy.e);

    set_event_notifier(ctx, &data.e, NULL);
    while (g_main_context_iteration(NULL, false));
    g_assert_cmpint(data.n, ==, 2);

    event_notifier_cleanup(&data.e);
}

static void test_source_timer_schedule(void)
{
    TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
                           .max = 2,
                           .clock_type = QEMU_CLOCK_REALTIME };
    EventNotifier e;
    int64_t expiry;

    /* aio_poll will not block to wait for timers to complete unless it has
     * an fd to wait on. Fixing this breaks other tests. So create a dummy one.
     */
    event_notifier_init(&e, false);
    set_event_notifier(ctx, &e, dummy_io_handler_read);
    do {} while (g_main_context_iteration(NULL, false));

    aio_timer_init(ctx, &data.timer, data.clock_type,
                   SCALE_NS, timer_test_cb, &data);
    expiry = qemu_clock_get_ns(data.clock_type) +
             data.ns;
    timer_mod(&data.timer, expiry);

    g_assert_cmpint(data.n, ==, 0);

    g_usleep(1 * G_USEC_PER_SEC);
    g_assert_cmpint(data.n, ==, 0);

    g_assert(g_main_context_iteration(NULL, true));
    g_assert_cmpint(data.n, ==, 1);
    expiry += data.ns;

    while (data.n < 2) {
        g_main_context_iteration(NULL, true);
    }

    g_assert_cmpint(data.n, ==, 2);
    g_assert(qemu_clock_get_ns(data.clock_type) > expiry);

    set_event_notifier(ctx, &e, NULL);
    event_notifier_cleanup(&e);

    timer_del(&data.timer);
}

/*
 * Check that aio_co_enter() can chain many times
 *
 * Two coroutines should be able to invoke each other via aio_co_enter() many
 * times without hitting a limit like stack exhaustion. In other words, the
 * calls should be chained instead of nested.
 */

typedef struct {
    Coroutine *other;
    unsigned i;
    unsigned max;
} ChainData;

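/*
 * Each iteration schedules the partner coroutine with aio_co_enter() and
 * then yields to it, so the two coroutines ping-pong max times without the
 * calls nesting on the stack.
 */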
static void coroutine_fn chain(void *opaque)
{
    ChainData *data = opaque;

    for (data->i = 0; data->i < data->max; data->i++) {
        /* Queue up the other coroutine... */
        aio_co_enter(ctx, data->other);

        /* ...and give control to it */
        qemu_coroutine_yield();
    }
}

static void test_queue_chaining(void)
{
    /* This number of iterations hit stack exhaustion in the past: */
    ChainData data_a = { .max = 25000 };
    ChainData data_b = { .max = 25000 };

    data_b.other = qemu_coroutine_create(chain, &data_a);
    data_a.other = qemu_coroutine_create(chain, &data_b);

    qemu_coroutine_enter(data_b.other);

    g_assert_cmpint(data_a.i, ==, data_a.max);
    g_assert_cmpint(data_b.i, ==, data_b.max - 1);

    /* Allow the second coroutine to terminate */
    qemu_coroutine_enter(data_a.other);

    g_assert_cmpint(data_b.i, ==, data_b.max);
}

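/*
 * Runs as a coroutine in the main thread's AioContext; the assertion checks
 * that it really was dispatched in the main thread rather than in the worker
 * thread that scheduled it.
 */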
static void co_check_current_thread(void *opaque)
{
    QemuThread *main_thread = opaque;
    assert(qemu_thread_is_self(main_thread));
}

static void *test_aio_co_enter(void *co)
{
    /*
     * qemu_get_current_aio_context() should not be the main thread
     * AioContext, because this is a worker thread that has not taken
     * the BQL. So aio_co_enter will schedule the coroutine in the
     * main thread AioContext.
     */
    aio_co_enter(qemu_get_aio_context(), co);
    return NULL;
}

static void test_worker_thread_co_enter(void)
{
    QemuThread this_thread, worker_thread;
    Coroutine *co;

    qemu_thread_get_self(&this_thread);
    co = qemu_coroutine_create(co_check_current_thread, &this_thread);

    qemu_thread_create(&worker_thread, "test_acquire_thread",
                       test_aio_co_enter,
                       co, QEMU_THREAD_JOINABLE);

    /* Test aio_co_enter from a worker thread. */
    qemu_thread_join(&worker_thread);
    g_assert(aio_poll(ctx, true));
    g_assert(!aio_poll(ctx, false));
}

/* End of tests. */

int main(int argc, char **argv)
{
    qemu_init_main_loop(&error_fatal);
    ctx = qemu_get_aio_context();

    while (g_main_context_iteration(NULL, false));

    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/aio/acquire", test_acquire);
    g_test_add_func("/aio/bh/schedule", test_bh_schedule);
    g_test_add_func("/aio/bh/schedule10", test_bh_schedule10);
    g_test_add_func("/aio/bh/cancel", test_bh_cancel);
    g_test_add_func("/aio/bh/delete", test_bh_delete);
    g_test_add_func("/aio/bh/callback-delete/one", test_bh_delete_from_cb);
    g_test_add_func("/aio/bh/callback-delete/many", test_bh_delete_from_cb_many);
    g_test_add_func("/aio/bh/flush", test_bh_flush);
    g_test_add_func("/aio/event/add-remove", test_set_event_notifier);
    g_test_add_func("/aio/event/wait", test_wait_event_notifier);
    g_test_add_func("/aio/event/wait/no-flush-cb", test_wait_event_notifier_noflush);
    g_test_add_func("/aio/event/flush", test_flush_event_notifier);
    g_test_add_func("/aio/timer/schedule", test_timer_schedule);

    g_test_add_func("/aio/coroutine/queue-chaining", test_queue_chaining);
    g_test_add_func("/aio/coroutine/worker-thread-co-enter", test_worker_thread_co_enter);

    g_test_add_func("/aio-gsource/flush", test_source_flush);
    g_test_add_func("/aio-gsource/bh/schedule", test_source_bh_schedule);
    g_test_add_func("/aio-gsource/bh/schedule10", test_source_bh_schedule10);
    g_test_add_func("/aio-gsource/bh/cancel", test_source_bh_cancel);
    g_test_add_func("/aio-gsource/bh/delete", test_source_bh_delete);
    g_test_add_func("/aio-gsource/bh/callback-delete/one", test_source_bh_delete_from_cb);
    g_test_add_func("/aio-gsource/bh/callback-delete/many", test_source_bh_delete_from_cb_many);
    g_test_add_func("/aio-gsource/bh/flush", test_source_bh_flush);
    g_test_add_func("/aio-gsource/event/add-remove", test_source_set_event_notifier);
    g_test_add_func("/aio-gsource/event/wait", test_source_wait_event_notifier);
    g_test_add_func("/aio-gsource/event/wait/no-flush-cb", test_source_wait_event_notifier_noflush);
    g_test_add_func("/aio-gsource/event/flush", test_source_flush_event_notifier);
    g_test_add_func("/aio-gsource/timer/schedule", test_source_timer_schedule);
    return g_test_run();
}