/*
 * Unit tests for the QEMU thread pool (tests/test-thread-pool.c).
 *
 * Exercises submission (plain, AIO, coroutine, bulk) and cancellation,
 * adjusted to the aio_poll() semantics of the event loop.
 */
1 #include <glib.h>
2 #include "qemu-common.h"
3 #include "block/aio.h"
4 #include "block/thread-pool.h"
5 #include "block/block.h"
6
7 static AioContext *ctx;
8 static ThreadPool *pool;
9 static int active;
10
11 typedef struct {
12 BlockDriverAIOCB *aiocb;
13 int n;
14 int ret;
15 } WorkerTestData;
16
17 static int worker_cb(void *opaque)
18 {
19 WorkerTestData *data = opaque;
20 return atomic_fetch_inc(&data->n);
21 }
22
23 static int long_cb(void *opaque)
24 {
25 WorkerTestData *data = opaque;
26 atomic_inc(&data->n);
27 g_usleep(2000000);
28 atomic_inc(&data->n);
29 return 0;
30 }
31
32 static void done_cb(void *opaque, int ret)
33 {
34 WorkerTestData *data = opaque;
35 g_assert_cmpint(data->ret, ==, -EINPROGRESS);
36 data->ret = ret;
37 data->aiocb = NULL;
38
39 /* Callbacks are serialized, so no need to use atomic ops. */
40 active--;
41 }
42
43 static void test_submit(void)
44 {
45 WorkerTestData data = { .n = 0 };
46 thread_pool_submit(pool, worker_cb, &data);
47 while (data.n == 0) {
48 aio_poll(ctx, true);
49 }
50 g_assert_cmpint(data.n, ==, 1);
51 }
52
53 static void test_submit_aio(void)
54 {
55 WorkerTestData data = { .n = 0, .ret = -EINPROGRESS };
56 data.aiocb = thread_pool_submit_aio(pool, worker_cb, &data,
57 done_cb, &data);
58
59 /* The callbacks are not called until after the first wait. */
60 active = 1;
61 g_assert_cmpint(data.ret, ==, -EINPROGRESS);
62 while (data.ret == -EINPROGRESS) {
63 aio_poll(ctx, true);
64 }
65 g_assert_cmpint(active, ==, 0);
66 g_assert_cmpint(data.n, ==, 1);
67 g_assert_cmpint(data.ret, ==, 0);
68 }
69
70 static void co_test_cb(void *opaque)
71 {
72 WorkerTestData *data = opaque;
73
74 active = 1;
75 data->n = 0;
76 data->ret = -EINPROGRESS;
77 thread_pool_submit_co(pool, worker_cb, data);
78
79 /* The test continues in test_submit_co, after qemu_coroutine_enter... */
80
81 g_assert_cmpint(data->n, ==, 1);
82 data->ret = 0;
83 active--;
84
85 /* The test continues in test_submit_co, after qemu_aio_wait_all... */
86 }
87
88 static void test_submit_co(void)
89 {
90 WorkerTestData data;
91 Coroutine *co = qemu_coroutine_create(co_test_cb);
92
93 qemu_coroutine_enter(co, &data);
94
95 /* Back here once the worker has started. */
96
97 g_assert_cmpint(active, ==, 1);
98 g_assert_cmpint(data.ret, ==, -EINPROGRESS);
99
100 /* qemu_aio_wait_all will execute the rest of the coroutine. */
101
102 while (data.ret == -EINPROGRESS) {
103 aio_poll(ctx, true);
104 }
105
106 /* Back here after the coroutine has finished. */
107
108 g_assert_cmpint(active, ==, 0);
109 g_assert_cmpint(data.ret, ==, 0);
110 }
111
112 static void test_submit_many(void)
113 {
114 WorkerTestData data[100];
115 int i;
116
117 /* Start more work items than there will be threads. */
118 for (i = 0; i < 100; i++) {
119 data[i].n = 0;
120 data[i].ret = -EINPROGRESS;
121 thread_pool_submit_aio(pool, worker_cb, &data[i], done_cb, &data[i]);
122 }
123
124 active = 100;
125 while (active > 0) {
126 aio_poll(ctx, true);
127 }
128 for (i = 0; i < 100; i++) {
129 g_assert_cmpint(data[i].n, ==, 1);
130 g_assert_cmpint(data[i].ret, ==, 0);
131 }
132 }
133
134 static void test_cancel(void)
135 {
136 WorkerTestData data[100];
137 int num_canceled;
138 int i;
139
140 /* Start more work items than there will be threads, to ensure
141 * the pool is full.
142 */
143 test_submit_many();
144
145 /* Start long running jobs, to ensure we can cancel some. */
146 for (i = 0; i < 100; i++) {
147 data[i].n = 0;
148 data[i].ret = -EINPROGRESS;
149 data[i].aiocb = thread_pool_submit_aio(pool, long_cb, &data[i],
150 done_cb, &data[i]);
151 }
152
153 /* Starting the threads may be left to a bottom half. Let it
154 * run, but do not waste too much time...
155 */
156 active = 100;
157 aio_notify(ctx);
158 aio_poll(ctx, false);
159
160 /* Wait some time for the threads to start, with some sanity
161 * testing on the behavior of the scheduler...
162 */
163 g_assert_cmpint(active, ==, 100);
164 g_usleep(1000000);
165 g_assert_cmpint(active, >, 50);
166
167 /* Cancel the jobs that haven't been started yet. */
168 num_canceled = 0;
169 for (i = 0; i < 100; i++) {
170 if (atomic_cmpxchg(&data[i].n, 0, 3) == 0) {
171 data[i].ret = -ECANCELED;
172 bdrv_aio_cancel(data[i].aiocb);
173 active--;
174 num_canceled++;
175 }
176 }
177 g_assert_cmpint(active, >, 0);
178 g_assert_cmpint(num_canceled, <, 100);
179
180 /* Canceling the others will be a blocking operation. */
181 for (i = 0; i < 100; i++) {
182 if (data[i].n != 3) {
183 bdrv_aio_cancel(data[i].aiocb);
184 }
185 }
186
187 /* Finish execution and execute any remaining callbacks. */
188 while (active > 0) {
189 aio_poll(ctx, true);
190 }
191 g_assert_cmpint(active, ==, 0);
192 for (i = 0; i < 100; i++) {
193 if (data[i].n == 3) {
194 g_assert_cmpint(data[i].ret, ==, -ECANCELED);
195 g_assert(data[i].aiocb != NULL);
196 } else {
197 g_assert_cmpint(data[i].n, ==, 2);
198 g_assert_cmpint(data[i].ret, ==, 0);
199 g_assert(data[i].aiocb == NULL);
200 }
201 }
202 }
203
204 int main(int argc, char **argv)
205 {
206 int ret;
207
208 ctx = aio_context_new();
209 pool = aio_get_thread_pool(ctx);
210
211 g_test_init(&argc, &argv, NULL);
212 g_test_add_func("/thread-pool/submit", test_submit);
213 g_test_add_func("/thread-pool/submit-aio", test_submit_aio);
214 g_test_add_func("/thread-pool/submit-co", test_submit_co);
215 g_test_add_func("/thread-pool/submit-many", test_submit_many);
216 g_test_add_func("/thread-pool/cancel", test_cancel);
217
218 ret = g_test_run();
219
220 aio_context_unref(ctx);
221 return ret;
222 }