/*
 * Coroutine tests
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi    <stefanha@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "qemu/coroutine_int.h"
19 * Check that qemu_in_coroutine() works
22 static void coroutine_fn
verify_in_coroutine(void *opaque
)
24 g_assert(qemu_in_coroutine());
27 static void test_in_coroutine(void)
31 g_assert(!qemu_in_coroutine());
33 coroutine
= qemu_coroutine_create(verify_in_coroutine
, NULL
);
34 qemu_coroutine_enter(coroutine
);
38 * Check that qemu_coroutine_self() works
41 static void coroutine_fn
verify_self(void *opaque
)
43 Coroutine
**p_co
= opaque
;
44 g_assert(qemu_coroutine_self() == *p_co
);
47 static void test_self(void)
51 coroutine
= qemu_coroutine_create(verify_self
, &coroutine
);
52 qemu_coroutine_enter(coroutine
);
56 * Check that qemu_coroutine_entered() works
59 static void coroutine_fn
verify_entered_step_2(void *opaque
)
61 Coroutine
*caller
= (Coroutine
*)opaque
;
63 g_assert(qemu_coroutine_entered(caller
));
64 g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
65 qemu_coroutine_yield();
67 /* Once more to check it still works after yielding */
68 g_assert(qemu_coroutine_entered(caller
));
69 g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
72 static void coroutine_fn
verify_entered_step_1(void *opaque
)
74 Coroutine
*self
= qemu_coroutine_self();
77 g_assert(qemu_coroutine_entered(self
));
79 coroutine
= qemu_coroutine_create(verify_entered_step_2
, self
);
80 g_assert(!qemu_coroutine_entered(coroutine
));
81 qemu_coroutine_enter(coroutine
);
82 g_assert(!qemu_coroutine_entered(coroutine
));
83 qemu_coroutine_enter(coroutine
);
86 static void test_entered(void)
90 coroutine
= qemu_coroutine_create(verify_entered_step_1
, NULL
);
91 g_assert(!qemu_coroutine_entered(coroutine
));
92 qemu_coroutine_enter(coroutine
);
/*
 * Check that coroutines may nest multiple levels
 */

/* Shared bookkeeping for the nesting test/benchmark (see nest()). */
typedef struct {
    unsigned int n_enter;   /* num coroutines entered */
    unsigned int n_return;  /* num coroutines returned */
    unsigned int max;       /* maximum level of nesting */
} NestData;
105 static void coroutine_fn
nest(void *opaque
)
107 NestData
*nd
= opaque
;
111 if (nd
->n_enter
< nd
->max
) {
114 child
= qemu_coroutine_create(nest
, nd
);
115 qemu_coroutine_enter(child
);
121 static void test_nesting(void)
130 root
= qemu_coroutine_create(nest
, &nd
);
131 qemu_coroutine_enter(root
);
133 /* Must enter and return from max nesting level */
134 g_assert_cmpint(nd
.n_enter
, ==, nd
.max
);
135 g_assert_cmpint(nd
.n_return
, ==, nd
.max
);
139 * Check that yield/enter transfer control correctly
142 static void coroutine_fn
yield_5_times(void *opaque
)
147 for (i
= 0; i
< 5; i
++) {
148 qemu_coroutine_yield();
153 static void test_yield(void)
155 Coroutine
*coroutine
;
157 int i
= -1; /* one extra time to return from coroutine */
159 coroutine
= qemu_coroutine_create(yield_5_times
, &done
);
161 qemu_coroutine_enter(coroutine
);
164 g_assert_cmpint(i
, ==, 5); /* coroutine must yield 5 times */
167 static void coroutine_fn
c2_fn(void *opaque
)
169 qemu_coroutine_yield();
172 static void coroutine_fn
c1_fn(void *opaque
)
174 Coroutine
*c2
= opaque
;
175 qemu_coroutine_enter(c2
);
178 static void test_no_dangling_access(void)
184 c2
= qemu_coroutine_create(c2_fn
, NULL
);
185 c1
= qemu_coroutine_create(c1_fn
, c2
);
187 qemu_coroutine_enter(c1
);
189 /* c1 shouldn't be used any more now; make sure we segfault if it is */
191 memset(c1
, 0xff, sizeof(Coroutine
));
192 qemu_coroutine_enter(c2
);
194 /* Must restore the coroutine now to avoid corrupted pool */
201 static void coroutine_fn
mutex_fn(void *opaque
)
204 qemu_co_mutex_lock(m
);
207 qemu_coroutine_yield();
209 qemu_co_mutex_unlock(m
);
213 static void do_test_co_mutex(CoroutineEntry
*entry
, void *opaque
)
215 Coroutine
*c1
= qemu_coroutine_create(entry
, opaque
);
216 Coroutine
*c2
= qemu_coroutine_create(entry
, opaque
);
219 qemu_coroutine_enter(c1
);
221 qemu_coroutine_enter(c2
);
223 /* Unlock queues c2. It is then started automatically when c1 yields or
226 qemu_coroutine_enter(c1
);
227 g_assert_cmpint(done
, ==, 1);
230 qemu_coroutine_enter(c2
);
231 g_assert_cmpint(done
, ==, 2);
235 static void test_co_mutex(void)
239 qemu_co_mutex_init(&m
);
240 do_test_co_mutex(mutex_fn
, &m
);
244 * Check that creation, enter, and return work
247 static void coroutine_fn
set_and_exit(void *opaque
)
254 static void test_lifecycle(void)
256 Coroutine
*coroutine
;
259 /* Create, enter, and return from coroutine */
260 coroutine
= qemu_coroutine_create(set_and_exit
, &done
);
261 qemu_coroutine_enter(coroutine
);
262 g_assert(done
); /* expect done to be true (first time) */
264 /* Repeat to check that no state affects this test */
266 coroutine
= qemu_coroutine_create(set_and_exit
, &done
);
267 qemu_coroutine_enter(coroutine
);
268 g_assert(done
); /* expect done to be true (second time) */
#define RECORD_SIZE 10 /* Leave some room for expansion */
/*
 * One entry of the enter/yield ordering trace checked by test_order().
 * Presumed meaning (matches expected_pos there): func identifies the
 * recording function, state its per-function sequence number.
 */
struct coroutine_position {
    int func;
    int state;
};
static struct coroutine_position records[RECORD_SIZE];
static unsigned record_pos;
280 static void record_push(int func
, int state
)
282 struct coroutine_position
*cp
= &records
[record_pos
++];
283 g_assert_cmpint(record_pos
, <, RECORD_SIZE
);
288 static void coroutine_fn
co_order_test(void *opaque
)
291 g_assert(qemu_in_coroutine());
292 qemu_coroutine_yield();
294 g_assert(qemu_in_coroutine());
297 static void do_order_test(void)
301 co
= qemu_coroutine_create(co_order_test
, NULL
);
303 qemu_coroutine_enter(co
);
305 g_assert(!qemu_in_coroutine());
306 qemu_coroutine_enter(co
);
308 g_assert(!qemu_in_coroutine());
311 static void test_order(void)
314 const struct coroutine_position expected_pos
[] = {
315 {1, 1,}, {2, 1}, {1, 2}, {2, 2}, {1, 3}
318 g_assert_cmpint(record_pos
, ==, 5);
319 for (i
= 0; i
< record_pos
; i
++) {
320 g_assert_cmpint(records
[i
].func
, ==, expected_pos
[i
].func
);
321 g_assert_cmpint(records
[i
].state
, ==, expected_pos
[i
].state
);
325 * Lifecycle benchmark
328 static void coroutine_fn
empty_coroutine(void *opaque
)
333 static void perf_lifecycle(void)
335 Coroutine
*coroutine
;
341 g_test_timer_start();
342 for (i
= 0; i
< max
; i
++) {
343 coroutine
= qemu_coroutine_create(empty_coroutine
, NULL
);
344 qemu_coroutine_enter(coroutine
);
346 duration
= g_test_timer_elapsed();
348 g_test_message("Lifecycle %u iterations: %f s\n", max
, duration
);
351 static void perf_nesting(void)
353 unsigned int i
, maxcycles
, maxnesting
;
360 g_test_timer_start();
361 for (i
= 0; i
< maxcycles
; i
++) {
367 root
= qemu_coroutine_create(nest
, &nd
);
368 qemu_coroutine_enter(root
);
370 duration
= g_test_timer_elapsed();
372 g_test_message("Nesting %u iterations of %u depth each: %f s\n",
373 maxcycles
, maxnesting
, duration
);
380 static void coroutine_fn
yield_loop(void *opaque
)
382 unsigned int *counter
= opaque
;
384 while ((*counter
) > 0) {
386 qemu_coroutine_yield();
390 static void perf_yield(void)
392 unsigned int i
, maxcycles
;
395 maxcycles
= 100000000;
397 Coroutine
*coroutine
= qemu_coroutine_create(yield_loop
, &i
);
399 g_test_timer_start();
401 qemu_coroutine_enter(coroutine
);
403 duration
= g_test_timer_elapsed();
405 g_test_message("Yield %u iterations: %f s\n",
406 maxcycles
, duration
);
/*
 * Decrement *i.  noinline so perf_baseline pays a real call per iteration
 * instead of having the loop folded away by the optimizer.
 */
static __attribute__((noinline)) void dummy(unsigned *i)
{
    (*i)--;
}
/* Baseline: cost of a plain function call, for comparison with perf_yield. */
static void perf_baseline(void)
{
    unsigned int i, maxcycles;
    double duration;

    maxcycles = 100000000;
    i = maxcycles;

    g_test_timer_start();
    while (i > 0) {
        dummy(&i); /* dummy() decrements i, terminating the loop */
    }
    duration = g_test_timer_elapsed();

    g_test_message("Function call %u iterations: %f s\n",
                   maxcycles, duration);
}
/* Yield once; noinline so the measured cost includes a real call. */
static __attribute__((noinline)) void perf_cost_func(void *opaque)
{
    qemu_coroutine_yield();
}
437 static void perf_cost(void)
439 const unsigned long maxcycles
= 40000000;
445 g_test_timer_start();
446 while (i
++ < maxcycles
) {
447 co
= qemu_coroutine_create(perf_cost_func
, &i
);
448 qemu_coroutine_enter(co
);
449 qemu_coroutine_enter(co
);
451 duration
= g_test_timer_elapsed();
452 ops
= (long)(maxcycles
/ (duration
* 1000));
454 g_test_message("Run operation %lu iterations %f s, %luK operations/s, "
455 "%luns per coroutine",
458 (unsigned long)(1000000000.0 * duration
/ maxcycles
));
461 int main(int argc
, char **argv
)
463 g_test_init(&argc
, &argv
, NULL
);
465 /* This test assumes there is a freelist and marks freed coroutine memory
466 * with a sentinel value. If there is no freelist this would legitimately
469 if (CONFIG_COROUTINE_POOL
) {
470 g_test_add_func("/basic/no-dangling-access", test_no_dangling_access
);
473 g_test_add_func("/basic/lifecycle", test_lifecycle
);
474 g_test_add_func("/basic/yield", test_yield
);
475 g_test_add_func("/basic/nesting", test_nesting
);
476 g_test_add_func("/basic/self", test_self
);
477 g_test_add_func("/basic/entered", test_entered
);
478 g_test_add_func("/basic/in_coroutine", test_in_coroutine
);
479 g_test_add_func("/basic/order", test_order
);
480 g_test_add_func("/locking/co-mutex", test_co_mutex
);
482 g_test_add_func("/perf/lifecycle", perf_lifecycle
);
483 g_test_add_func("/perf/nesting", perf_nesting
);
484 g_test_add_func("/perf/yield", perf_yield
);
485 g_test_add_func("/perf/function-call", perf_baseline
);
486 g_test_add_func("/perf/cost", perf_cost
);