/*
 * AioContext multithreading tests
 *
 * Copyright Red Hat, Inc. 2016
 *
 * Authors:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <glib.h>
#include "block/aio.h"
#include "qapi/error.h"
#include "qemu/coroutine.h"
#include "qemu/thread.h"
#include "qemu/error-report.h"
#include "iothread.h"

/* AioContext management */

#define NUM_CONTEXTS 5

static IOThread *threads[NUM_CONTEXTS];
static AioContext *ctx[NUM_CONTEXTS];
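/* Index of this thread's AioContext within ctx[], set by set_id_cb(). */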
static __thread int id = -1;

static QemuEvent done_event;

/* Run a function synchronously on a remote iothread. */
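/* The callback is wrapped in a one-shot bottom half and the caller blocks
 * on done_event until it has run, so stack-allocated arguments stay valid
 * for the whole call.
 */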

typedef struct CtxRunData {
    QEMUBHFunc *cb;
    void *arg;
} CtxRunData;

static void ctx_run_bh_cb(void *opaque)
{
    CtxRunData *data = opaque;

    data->cb(data->arg);
    qemu_event_set(&done_event);
}

static void ctx_run(int i, QEMUBHFunc *cb, void *opaque)
{
    CtxRunData data = {
        .cb = cb,
        .arg = opaque
    };

    qemu_event_reset(&done_event);
    aio_bh_schedule_oneshot(ctx[i], ctx_run_bh_cb, &data);
    qemu_event_wait(&done_event);
}

/* Starting the iothreads. */

static void set_id_cb(void *opaque)
{
    int *i = opaque;

    id = *i;
}

static void create_aio_contexts(void)
{
    int i;

    for (i = 0; i < NUM_CONTEXTS; i++) {
        threads[i] = iothread_new();
        ctx[i] = iothread_get_aio_context(threads[i]);
    }

    qemu_event_init(&done_event, false);
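    /* ctx_run() blocks until set_id_cb has run, so &i stays valid and each
     * thread records its id before the loop moves on.
     */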
    for (i = 0; i < NUM_CONTEXTS; i++) {
        ctx_run(i, set_id_cb, &i);
    }
}

/* Stopping the iothreads. */

static void join_aio_contexts(void)
{
    int i;

    for (i = 0; i < NUM_CONTEXTS; i++) {
        aio_context_ref(ctx[i]);
    }
    for (i = 0; i < NUM_CONTEXTS; i++) {
        iothread_join(threads[i]);
    }
    for (i = 0; i < NUM_CONTEXTS; i++) {
        aio_context_unref(ctx[i]);
    }
    qemu_event_destroy(&done_event);
}

/* Basic test for the stuff above. */

static void test_lifecycle(void)
{
    create_aio_contexts();
    join_aio_contexts();
}

/* aio_co_schedule test. */
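/* Each coroutine parks itself in to_schedule[id] and yields.  A concurrent
 * schedule_next() call claims the parked coroutine with atomic_xchg(), so
 * exactly one caller wins, and reawakens it in its home context; picking
 * the slot at random exercises aio_co_schedule() both from the owning
 * thread and across threads.
 */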

static Coroutine *to_schedule[NUM_CONTEXTS];

static bool now_stopping;

static int count_retry;
static int count_here;
static int count_other;

static bool schedule_next(int n)
{
    Coroutine *co;

    co = atomic_xchg(&to_schedule[n], NULL);
    if (!co) {
        atomic_inc(&count_retry);
        return false;
    }

    if (n == id) {
        atomic_inc(&count_here);
    } else {
        atomic_inc(&count_other);
    }

    aio_co_schedule(ctx[n], co);
    return true;
}

static void finish_cb(void *opaque)
{
    schedule_next(id);
}

static coroutine_fn void test_multi_co_schedule_entry(void *opaque)
{
    g_assert(to_schedule[id] == NULL);
    atomic_mb_set(&to_schedule[id], qemu_coroutine_self());

    while (!atomic_mb_read(&now_stopping)) {
        int n;

        n = g_test_rand_int_range(0, NUM_CONTEXTS);
        schedule_next(n);
        qemu_coroutine_yield();

        g_assert(to_schedule[id] == NULL);
        atomic_mb_set(&to_schedule[id], qemu_coroutine_self());
    }
}

static void test_multi_co_schedule(int seconds)
{
    int i;

    count_here = count_other = count_retry = 0;
    now_stopping = false;

    create_aio_contexts();
    for (i = 0; i < NUM_CONTEXTS; i++) {
        Coroutine *co1 = qemu_coroutine_create(test_multi_co_schedule_entry, NULL);
        aio_co_schedule(ctx[i], co1);
    }

    g_usleep(seconds * 1000000);

    atomic_mb_set(&now_stopping, true);
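    /* Wake any coroutine still parked in to_schedule[] by running finish_cb
     * inside its context, so that it can observe now_stopping and return.
     */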
    for (i = 0; i < NUM_CONTEXTS; i++) {
        ctx_run(i, finish_cb, NULL);
        to_schedule[i] = NULL;
    }

    join_aio_contexts();
    g_test_message("scheduled %d, queued %d, retry %d, total %d\n",
                   count_other, count_here, count_retry,
                   count_here + count_other + count_retry);
}

static void test_multi_co_schedule_1(void)
{
    test_multi_co_schedule(1);
}

static void test_multi_co_schedule_10(void)
{
    test_multi_co_schedule(10);
}

/* CoMutex thread-safety. */
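/* Coroutines running in different iothreads take the same CoMutex around
 * a plain (non-atomic) increment of "counter".  If the mutex is
 * thread-safe, counter ends up equal to atomic_counter, which counts one
 * increment per completed critical section.
 */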

static uint32_t atomic_counter;
static uint32_t running;
static uint32_t counter;
static CoMutex comutex;

static void coroutine_fn test_multi_co_mutex_entry(void *opaque)
{
    while (!atomic_mb_read(&now_stopping)) {
        qemu_co_mutex_lock(&comutex);
        counter++;
        qemu_co_mutex_unlock(&comutex);

        /* Increase atomic_counter *after* releasing the mutex.  Otherwise
         * there is a chance (it happens about 1 in 3 runs) that the iothread
         * exits before the coroutine is woken up, causing a spurious
         * assertion failure.
         */
        atomic_inc(&atomic_counter);
    }
    atomic_dec(&running);
}

static void test_multi_co_mutex(int threads, int seconds)
{
    int i;

    qemu_co_mutex_init(&comutex);
    counter = 0;
    atomic_counter = 0;
    now_stopping = false;

    create_aio_contexts();
    assert(threads <= NUM_CONTEXTS);
    running = threads;
    for (i = 0; i < threads; i++) {
        Coroutine *co1 = qemu_coroutine_create(test_multi_co_mutex_entry, NULL);
        aio_co_schedule(ctx[i], co1);
    }

    g_usleep(seconds * 1000000);

    atomic_mb_set(&now_stopping, true);
    while (running > 0) {
        g_usleep(100000);
    }

    join_aio_contexts();
    g_test_message("%d iterations/second\n", counter / seconds);
    g_assert_cmpint(counter, ==, atomic_counter);
}

/* Testing with NUM_CONTEXTS threads focuses on the queue.  The mutex however
 * is too contended (and the threads spend too much time in aio_poll)
 * to actually stress the handoff protocol.
 */
static void test_multi_co_mutex_1(void)
{
    test_multi_co_mutex(NUM_CONTEXTS, 1);
}

static void test_multi_co_mutex_10(void)
{
    test_multi_co_mutex(NUM_CONTEXTS, 10);
}

/* Testing with fewer threads stresses the handoff protocol too.  Still, the
 * case where the locker _can_ pick up a handoff is very rare, happening
 * about 10 times in 1 million, so increase the runtime a bit compared to
 * other "quick" testcases that only run for 1 second.
 */
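/* (A "handoff" is the unlock side passing ownership directly to a locker
 * that is racing with it, rather than waking a coroutine that has already
 * queued itself as a waiter.)
 */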
static void test_multi_co_mutex_2_3(void)
{
    test_multi_co_mutex(2, 3);
}

static void test_multi_co_mutex_2_30(void)
{
    test_multi_co_mutex(2, 30);
}

/* Same test with fair (FIFO) mutexes, for performance comparison. */

#ifdef CONFIG_LINUX
#include "qemu/futex.h"

/* The nodes for the mutex reside in this array; each element is padded
 * and cache-line aligned to avoid false sharing.  Despite its name, the
 * "mutex_head" variable holds the most recently arrived waiter, i.e. the
 * tail of the MCS queue.
 */
static struct {
    int next, locked;
    int padding[14];
} nodes[NUM_CONTEXTS] __attribute__((__aligned__(64)));

static int mutex_head = -1;
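
/* Standard MCS protocol: a locker initializes its node and atomically
 * swaps itself into mutex_head.  If there was a predecessor, the locker
 * links itself behind it and futex-waits on its own "locked" flag; the
 * unlocker either resets mutex_head when it has no successor, or clears
 * the successor's flag and futex-wakes it.
 */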

static void mcs_mutex_lock(void)
{
    int prev;

    nodes[id].next = -1;
    nodes[id].locked = 1;
    prev = atomic_xchg(&mutex_head, id);
    if (prev != -1) {
        atomic_set(&nodes[prev].next, id);
        qemu_futex_wait(&nodes[id].locked, 1);
    }
}

static void mcs_mutex_unlock(void)
{
    int next;
    if (atomic_read(&nodes[id].next) == -1) {
        if (atomic_read(&mutex_head) == id &&
            atomic_cmpxchg(&mutex_head, id, -1) == id) {
            /* Last item in the list, exit. */
            return;
        }
        while (atomic_read(&nodes[id].next) == -1) {
            /* mcs_mutex_lock did the xchg, but has not updated
             * nodes[prev].next yet.
             */
        }
    }

    /* Wake up the next in line. */
    next = atomic_read(&nodes[id].next);
    nodes[next].locked = 0;
    qemu_futex_wake(&nodes[next].locked, 1);
}

static void test_multi_fair_mutex_entry(void *opaque)
{
    while (!atomic_mb_read(&now_stopping)) {
        mcs_mutex_lock();
        counter++;
        mcs_mutex_unlock();
        atomic_inc(&atomic_counter);
    }
    atomic_dec(&running);
}

static void test_multi_fair_mutex(int threads, int seconds)
{
    int i;

    assert(mutex_head == -1);
    counter = 0;
    atomic_counter = 0;
    now_stopping = false;

    create_aio_contexts();
    assert(threads <= NUM_CONTEXTS);
    running = threads;
    for (i = 0; i < threads; i++) {
        Coroutine *co1 = qemu_coroutine_create(test_multi_fair_mutex_entry, NULL);
        aio_co_schedule(ctx[i], co1);
    }

    g_usleep(seconds * 1000000);

    atomic_mb_set(&now_stopping, true);
    while (running > 0) {
        g_usleep(100000);
    }

    join_aio_contexts();
    g_test_message("%d iterations/second\n", counter / seconds);
    g_assert_cmpint(counter, ==, atomic_counter);
}

static void test_multi_fair_mutex_1(void)
{
    test_multi_fair_mutex(NUM_CONTEXTS, 1);
}

static void test_multi_fair_mutex_10(void)
{
    test_multi_fair_mutex(NUM_CONTEXTS, 10);
}
#endif

/* Same test with pthread mutexes, for performance comparison and
 * portability. */
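/* Unlike CoMutex, a contended QemuMutex blocks the whole iothread instead
 * of yielding to other coroutines, so this mainly measures raw lock
 * throughput.
 */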

static QemuMutex mutex;

static void test_multi_mutex_entry(void *opaque)
{
    while (!atomic_mb_read(&now_stopping)) {
        qemu_mutex_lock(&mutex);
        counter++;
        qemu_mutex_unlock(&mutex);
        atomic_inc(&atomic_counter);
    }
    atomic_dec(&running);
}

static void test_multi_mutex(int threads, int seconds)
{
    int i;

    qemu_mutex_init(&mutex);
    counter = 0;
    atomic_counter = 0;
    now_stopping = false;

    create_aio_contexts();
    assert(threads <= NUM_CONTEXTS);
    running = threads;
    for (i = 0; i < threads; i++) {
        Coroutine *co1 = qemu_coroutine_create(test_multi_mutex_entry, NULL);
        aio_co_schedule(ctx[i], co1);
    }

    g_usleep(seconds * 1000000);

    atomic_mb_set(&now_stopping, true);
    while (running > 0) {
        g_usleep(100000);
    }

    join_aio_contexts();
    g_test_message("%d iterations/second\n", counter / seconds);
    g_assert_cmpint(counter, ==, atomic_counter);
}

static void test_multi_mutex_1(void)
{
    test_multi_mutex(NUM_CONTEXTS, 1);
}

static void test_multi_mutex_10(void)
{
    test_multi_mutex(NUM_CONTEXTS, 10);
}

/* End of tests. */

int main(int argc, char **argv)
{
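    /* AioContexts attach timer lists to QEMU's clocks, so the clocks must
     * be initialized before the first context is created.
     */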
    init_clocks(NULL);

    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/aio/multi/lifecycle", test_lifecycle);
    if (g_test_quick()) {
        g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_1);
        g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_1);
        g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_3);
#ifdef CONFIG_LINUX
        g_test_add_func("/aio/multi/mutex/mcs", test_multi_fair_mutex_1);
#endif
        g_test_add_func("/aio/multi/mutex/pthread", test_multi_mutex_1);
    } else {
        g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_10);
        g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_10);
        g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_30);
#ifdef CONFIG_LINUX
        g_test_add_func("/aio/multi/mutex/mcs", test_multi_fair_mutex_10);
#endif
        g_test_add_func("/aio/multi/mutex/pthread", test_multi_mutex_10);
    }
    return g_test_run();
}