/* tests/test-coroutine.c (QEMU mirror; commit: "test-coroutine: add simple CoMutex test") */
1 /*
2 * Coroutine tests
3 *
4 * Copyright IBM, Corp. 2011
5 *
6 * Authors:
7 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
10 * See the COPYING.LIB file in the top-level directory.
11 *
12 */
13
14 #include "qemu/osdep.h"
15 #include "qemu/coroutine.h"
16 #include "qemu/coroutine_int.h"
17
18 /*
19 * Check that qemu_in_coroutine() works
20 */
21
22 static void coroutine_fn verify_in_coroutine(void *opaque)
23 {
24 g_assert(qemu_in_coroutine());
25 }
26
27 static void test_in_coroutine(void)
28 {
29 Coroutine *coroutine;
30
31 g_assert(!qemu_in_coroutine());
32
33 coroutine = qemu_coroutine_create(verify_in_coroutine, NULL);
34 qemu_coroutine_enter(coroutine);
35 }
36
37 /*
38 * Check that qemu_coroutine_self() works
39 */
40
41 static void coroutine_fn verify_self(void *opaque)
42 {
43 Coroutine **p_co = opaque;
44 g_assert(qemu_coroutine_self() == *p_co);
45 }
46
47 static void test_self(void)
48 {
49 Coroutine *coroutine;
50
51 coroutine = qemu_coroutine_create(verify_self, &coroutine);
52 qemu_coroutine_enter(coroutine);
53 }
54
55 /*
56 * Check that qemu_coroutine_entered() works
57 */
58
59 static void coroutine_fn verify_entered_step_2(void *opaque)
60 {
61 Coroutine *caller = (Coroutine *)opaque;
62
63 g_assert(qemu_coroutine_entered(caller));
64 g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
65 qemu_coroutine_yield();
66
67 /* Once more to check it still works after yielding */
68 g_assert(qemu_coroutine_entered(caller));
69 g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
70 }
71
72 static void coroutine_fn verify_entered_step_1(void *opaque)
73 {
74 Coroutine *self = qemu_coroutine_self();
75 Coroutine *coroutine;
76
77 g_assert(qemu_coroutine_entered(self));
78
79 coroutine = qemu_coroutine_create(verify_entered_step_2, self);
80 g_assert(!qemu_coroutine_entered(coroutine));
81 qemu_coroutine_enter(coroutine);
82 g_assert(!qemu_coroutine_entered(coroutine));
83 qemu_coroutine_enter(coroutine);
84 }
85
86 static void test_entered(void)
87 {
88 Coroutine *coroutine;
89
90 coroutine = qemu_coroutine_create(verify_entered_step_1, NULL);
91 g_assert(!qemu_coroutine_entered(coroutine));
92 qemu_coroutine_enter(coroutine);
93 }
94
95 /*
96 * Check that coroutines may nest multiple levels
97 */
98
/* Counters shared by every coroutine in the nesting chain. */
typedef struct {
    unsigned int n_enter;   /* num coroutines entered */
    unsigned int n_return;  /* num coroutines returned */
    unsigned int max;       /* maximum level of nesting */
} NestData;
104
105 static void coroutine_fn nest(void *opaque)
106 {
107 NestData *nd = opaque;
108
109 nd->n_enter++;
110
111 if (nd->n_enter < nd->max) {
112 Coroutine *child;
113
114 child = qemu_coroutine_create(nest, nd);
115 qemu_coroutine_enter(child);
116 }
117
118 nd->n_return++;
119 }
120
121 static void test_nesting(void)
122 {
123 Coroutine *root;
124 NestData nd = {
125 .n_enter = 0,
126 .n_return = 0,
127 .max = 128,
128 };
129
130 root = qemu_coroutine_create(nest, &nd);
131 qemu_coroutine_enter(root);
132
133 /* Must enter and return from max nesting level */
134 g_assert_cmpint(nd.n_enter, ==, nd.max);
135 g_assert_cmpint(nd.n_return, ==, nd.max);
136 }
137
138 /*
139 * Check that yield/enter transfer control correctly
140 */
141
142 static void coroutine_fn yield_5_times(void *opaque)
143 {
144 bool *done = opaque;
145 int i;
146
147 for (i = 0; i < 5; i++) {
148 qemu_coroutine_yield();
149 }
150 *done = true;
151 }
152
153 static void test_yield(void)
154 {
155 Coroutine *coroutine;
156 bool done = false;
157 int i = -1; /* one extra time to return from coroutine */
158
159 coroutine = qemu_coroutine_create(yield_5_times, &done);
160 while (!done) {
161 qemu_coroutine_enter(coroutine);
162 i++;
163 }
164 g_assert_cmpint(i, ==, 5); /* coroutine must yield 5 times */
165 }
166
/* Parks itself immediately; resumed later directly by the test body. */
static void coroutine_fn c2_fn(void *opaque)
{
    qemu_coroutine_yield();
}
171
/* Enters c2 and then terminates, so c1's own memory may be recycled
 * (returned to the coroutine pool) while c2 is still suspended. */
static void coroutine_fn c1_fn(void *opaque)
{
    Coroutine *c2 = opaque;
    qemu_coroutine_enter(c2);
}
177
/* Check that resuming c2 does not touch the terminated c1's memory.
 * Relies on the coroutine freelist keeping c1's storage addressable
 * (see the CONFIG_COROUTINE_POOL guard in main()). */
static void test_no_dangling_access(void)
{
    Coroutine *c1;
    Coroutine *c2;
    Coroutine tmp;

    c2 = qemu_coroutine_create(c2_fn, NULL);
    c1 = qemu_coroutine_create(c1_fn, c2);

    /* c1 enters c2 and terminates while c2 is still yielded. */
    qemu_coroutine_enter(c1);

    /* c1 shouldn't be used any more now; make sure we segfault if it is */
    tmp = *c1;
    memset(c1, 0xff, sizeof(Coroutine));
    qemu_coroutine_enter(c2);

    /* Must restore the coroutine now to avoid corrupted pool */
    *c1 = tmp;
}
197
198 static bool locked;
199 static int done;
200
/* Take the mutex, yield while holding it, then release.  The global
 * 'locked' flag asserts mutual exclusion across the two instances;
 * 'done' counts completed coroutines for the test body. */
static void coroutine_fn mutex_fn(void *opaque)
{
    CoMutex *m = opaque;
    qemu_co_mutex_lock(m);
    assert(!locked);    /* nobody else may be inside the critical section */
    locked = true;
    qemu_coroutine_yield();
    locked = false;
    qemu_co_mutex_unlock(m);
    done++;
}
212
/* Drive two coroutines contending on the same mutex and check the
 * hand-off order.  The exact sequence of enters matters:
 *   1st enter(c1): c1 locks and yields while holding the mutex.
 *   1st enter(c2): c2 blocks on the lock and is queued.
 *   2nd enter(c1): c1 unlocks; c2 is woken, locks, and yields.
 *   2nd enter(c2): c2 unlocks and finishes. */
static void do_test_co_mutex(CoroutineEntry *entry, void *opaque)
{
    Coroutine *c1 = qemu_coroutine_create(entry, opaque);
    Coroutine *c2 = qemu_coroutine_create(entry, opaque);

    done = 0;
    qemu_coroutine_enter(c1);
    g_assert(locked);
    qemu_coroutine_enter(c2);

    /* Unlock queues c2. It is then started automatically when c1 yields or
     * terminates.
     */
    qemu_coroutine_enter(c1);
    g_assert_cmpint(done, ==, 1);
    g_assert(locked);   /* now c2 holds the mutex */

    qemu_coroutine_enter(c2);
    g_assert_cmpint(done, ==, 2);
    g_assert(!locked);
}
234
/* Simple CoMutex test: two coroutines contending on one mutex. */
static void test_co_mutex(void)
{
    CoMutex m;

    qemu_co_mutex_init(&m);
    do_test_co_mutex(mutex_fn, &m);
}
242
243 /*
244 * Check that creation, enter, and return work
245 */
246
247 static void coroutine_fn set_and_exit(void *opaque)
248 {
249 bool *done = opaque;
250
251 *done = true;
252 }
253
254 static void test_lifecycle(void)
255 {
256 Coroutine *coroutine;
257 bool done = false;
258
259 /* Create, enter, and return from coroutine */
260 coroutine = qemu_coroutine_create(set_and_exit, &done);
261 qemu_coroutine_enter(coroutine);
262 g_assert(done); /* expect done to be true (first time) */
263
264 /* Repeat to check that no state affects this test */
265 done = false;
266 coroutine = qemu_coroutine_create(set_and_exit, &done);
267 qemu_coroutine_enter(coroutine);
268 g_assert(done); /* expect done to be true (second time) */
269 }
270
271
#define RECORD_SIZE 10 /* Leave some room for expansion */
/* One checkpoint in the expected caller/coroutine interleaving:
 * func 1 = the test body, func 2 = the coroutine; state is the step. */
struct coroutine_position {
    int func;
    int state;
};
static struct coroutine_position records[RECORD_SIZE]; /* execution trace */
static unsigned record_pos;                            /* next free slot */
279
280 static void record_push(int func, int state)
281 {
282 struct coroutine_position *cp = &records[record_pos++];
283 g_assert_cmpint(record_pos, <, RECORD_SIZE);
284 cp->func = func;
285 cp->state = state;
286 }
287
/* Coroutine half of the ordering test: records step 1, yields back to
 * the test body, then records step 2 when re-entered. */
static void coroutine_fn co_order_test(void *opaque)
{
    record_push(2, 1);
    g_assert(qemu_in_coroutine());
    qemu_coroutine_yield();
    record_push(2, 2);
    g_assert(qemu_in_coroutine());
}
296
/* Caller half of the ordering test: interleaves its own checkpoints
 * with two enters of co_order_test, producing the trace checked by
 * test_order(). */
static void do_order_test(void)
{
    Coroutine *co;

    co = qemu_coroutine_create(co_order_test, NULL);
    record_push(1, 1);
    qemu_coroutine_enter(co);   /* runs until the coroutine's yield */
    record_push(1, 2);
    g_assert(!qemu_in_coroutine());
    qemu_coroutine_enter(co);   /* resumes and lets it terminate */
    record_push(1, 3);
    g_assert(!qemu_in_coroutine());
}
310
311 static void test_order(void)
312 {
313 int i;
314 const struct coroutine_position expected_pos[] = {
315 {1, 1,}, {2, 1}, {1, 2}, {2, 2}, {1, 3}
316 };
317 do_order_test();
318 g_assert_cmpint(record_pos, ==, 5);
319 for (i = 0; i < record_pos; i++) {
320 g_assert_cmpint(records[i].func , ==, expected_pos[i].func );
321 g_assert_cmpint(records[i].state, ==, expected_pos[i].state);
322 }
323 }
324 /*
325 * Lifecycle benchmark
326 */
327
/* No-op entry point used to benchmark bare create/enter/terminate cost. */
static void coroutine_fn empty_coroutine(void *opaque)
{
    /* Do nothing */
}
332
333 static void perf_lifecycle(void)
334 {
335 Coroutine *coroutine;
336 unsigned int i, max;
337 double duration;
338
339 max = 1000000;
340
341 g_test_timer_start();
342 for (i = 0; i < max; i++) {
343 coroutine = qemu_coroutine_create(empty_coroutine, NULL);
344 qemu_coroutine_enter(coroutine);
345 }
346 duration = g_test_timer_elapsed();
347
348 g_test_message("Lifecycle %u iterations: %f s\n", max, duration);
349 }
350
351 static void perf_nesting(void)
352 {
353 unsigned int i, maxcycles, maxnesting;
354 double duration;
355
356 maxcycles = 10000;
357 maxnesting = 1000;
358 Coroutine *root;
359
360 g_test_timer_start();
361 for (i = 0; i < maxcycles; i++) {
362 NestData nd = {
363 .n_enter = 0,
364 .n_return = 0,
365 .max = maxnesting,
366 };
367 root = qemu_coroutine_create(nest, &nd);
368 qemu_coroutine_enter(root);
369 }
370 duration = g_test_timer_elapsed();
371
372 g_test_message("Nesting %u iterations of %u depth each: %f s\n",
373 maxcycles, maxnesting, duration);
374 }
375
376 /*
377 * Yield benchmark
378 */
379
380 static void coroutine_fn yield_loop(void *opaque)
381 {
382 unsigned int *counter = opaque;
383
384 while ((*counter) > 0) {
385 (*counter)--;
386 qemu_coroutine_yield();
387 }
388 }
389
390 static void perf_yield(void)
391 {
392 unsigned int i, maxcycles;
393 double duration;
394
395 maxcycles = 100000000;
396 i = maxcycles;
397 Coroutine *coroutine = qemu_coroutine_create(yield_loop, &i);
398
399 g_test_timer_start();
400 while (i > 0) {
401 qemu_coroutine_enter(coroutine);
402 }
403 duration = g_test_timer_elapsed();
404
405 g_test_message("Yield %u iterations: %f s\n",
406 maxcycles, duration);
407 }
408
/* noinline so perf_baseline measures a real call, not an optimized-away
 * decrement. */
static __attribute__((noinline)) void dummy(unsigned *i)
{
    (*i)--;
}
413
/* Benchmark: plain function-call baseline for comparison with
 * perf_yield's coroutine switch cost. */
static void perf_baseline(void)
{
    const unsigned int maxcycles = 100000000;
    unsigned int i = maxcycles;
    double duration;

    g_test_timer_start();
    while (i > 0) {
        dummy(&i);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Function call %u iterations: %f s\n",
                   maxcycles, duration);
}
431
/* Yields once so each perf_cost iteration needs a second enter to
 * finish; noinline keeps the call overhead measurable. */
static __attribute__((noinline)) void perf_cost_func(void *opaque)
{
    qemu_coroutine_yield();
}
436
/* Benchmark: full create + enter + yield + re-enter cost per coroutine,
 * reported both as operations/s and nanoseconds per coroutine. */
static void perf_cost(void)
{
    const unsigned long maxcycles = 40000000;
    unsigned long i = 0;
    double duration;
    unsigned long ops;
    Coroutine *co;

    g_test_timer_start();
    while (i++ < maxcycles) {
        co = qemu_coroutine_create(perf_cost_func, &i);
        qemu_coroutine_enter(co);   /* run until the yield */
        qemu_coroutine_enter(co);   /* resume so it terminates */
    }
    duration = g_test_timer_elapsed();
    ops = (long)(maxcycles / (duration * 1000)); /* K operations/s */

    g_test_message("Run operation %lu iterations %f s, %luK operations/s, "
                   "%luns per coroutine",
                   maxcycles,
                   duration, ops,
                   (unsigned long)(1000000000.0 * duration / maxcycles));
}
460
/* Register the functional tests unconditionally and the benchmarks only
 * under "-m perf", then hand control to the GLib test runner. */
int main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);

    /* This test assumes there is a freelist and marks freed coroutine memory
     * with a sentinel value. If there is no freelist this would legitimately
     * crash, so skip it.
     */
    if (CONFIG_COROUTINE_POOL) {
        g_test_add_func("/basic/no-dangling-access", test_no_dangling_access);
    }

    g_test_add_func("/basic/lifecycle", test_lifecycle);
    g_test_add_func("/basic/yield", test_yield);
    g_test_add_func("/basic/nesting", test_nesting);
    g_test_add_func("/basic/self", test_self);
    g_test_add_func("/basic/entered", test_entered);
    g_test_add_func("/basic/in_coroutine", test_in_coroutine);
    g_test_add_func("/basic/order", test_order);
    g_test_add_func("/locking/co-mutex", test_co_mutex);
    if (g_test_perf()) {
        g_test_add_func("/perf/lifecycle", perf_lifecycle);
        g_test_add_func("/perf/nesting", perf_nesting);
        g_test_add_func("/perf/yield", perf_yield);
        g_test_add_func("/perf/function-call", perf_baseline);
        g_test_add_func("/perf/cost", perf_cost);
    }
    return g_test_run();
}