/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/graph-lock.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
#include "sysemu/cpu-timers.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING   = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED   = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT   = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE      = (1 << 4),
};

struct QEMUBH {
    AioContext *ctx;
    const char *name;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
};
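
/*
 * A minimal sketch of the bottom half lifecycle, for some owner of an
 * AioContext (my_cb and my_state are hypothetical):
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, my_state);
 *     qemu_bh_schedule(bh);    // safe from any thread
 *     ...
 *     qemu_bh_delete(bh);      // the actual free happens in aio_bh_poll()
 *
 * aio_bh_new() is the usual entry point; it wraps aio_bh_new_full() below,
 * supplying a name used by the leak check in aio_ctx_finalize().
 */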

/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * Synchronizes with qatomic_fetch_and() in aio_bh_dequeue(), ensuring that
     * insertion starts after BH_PENDING is set.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);

    if (!(old_flags & BH_PENDING)) {
        /*
         * At this point the bottom half becomes visible to aio_bh_poll().
         * This insertion thus synchronizes with QSLIST_MOVE_ATOMIC in
         * aio_bh_poll(), ensuring that:
         * 1. any writes needed by the callback are visible from the callback
         *    after aio_bh_dequeue() returns bh.
         * 2. ctx is loaded before the callback has a chance to execute and bh
         *    could be freed.
         */
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
    /*
     * Workaround for record/replay: suspend vCPU execution when a new BH is
     * scheduled.  This is needed to avoid guest timeouts caused by
     * excessively long execution cycles.
     */
    icount_notify_exit();
}

/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * Synchronizes with qatomic_fetch_or() in aio_bh_enqueue(), ensuring that
     * the removal finishes before BH_PENDING is reset.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                               ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}

void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}
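
/*
 * Illustrative fire-and-forget use, e.g. deferring a completion into a
 * target context's home thread (callback and state are hypothetical):
 *
 *     aio_bh_schedule_oneshot(target_ctx, my_complete_cb, my_state);
 *
 * Because BH_ONESHOT is set, aio_bh_poll() frees the BH right after
 * invoking it; the caller never handles the QEMUBH pointer.
 */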

QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    return bh;
}

void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* aio_bh_poll() must not be called concurrently with itself */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    /* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue(). */
    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}
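
/*
 * aio_bh_poll() is invoked from aio_dispatch() and aio_poll(), and may be
 * entered again by a nested event loop inside a callback.  The slice list
 * is what preserves FIFO ordering across such nesting.  A rough sketch of
 * the outermost driver (the loop itself is hypothetical):
 *
 *     while (running) {
 *         aio_poll(ctx, true);    // waits, then runs handlers and BHs
 *     }
 */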

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/*
 * This function is async: it only clears BH_SCHEDULED, so a bottom half
 * that aio_bh_poll() has already dequeued will still run.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
}

/*
 * This function is async: the bottom half is only freed later, by
 * aio_bh_poll() (or by aio_ctx_finalize() when the context goes away).
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}

static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
    QEMUBH *bh;

    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    return timeout;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    BHListSlice *s;
    int64_t deadline;
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
    if (timeout == 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
        if (timeout == 0) {
            return 0;
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}
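
/*
 * The result is a nanosecond timeout for the next blocking wait: -1 means
 * block indefinitely and 0 means do not block at all.  A sketch of how a
 * caller consumes it (this is what aio_ctx_prepare() below does for glib):
 *
 *     gint ms = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
 */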

static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag.  */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    unsigned flags;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /*
         * qemu_bh_delete() must have been called on BHs in this AioContext. In
         * many cases memory leaks, hangs, or inconsistent state occur when a
         * BH is leaked because something still expects it to run.
         *
         * If you hit this, fix the lifecycle of the BH so that
         * qemu_bh_delete() and any associated cleanup is called before the
         * AioContext is finalized.
         */
        if (unlikely(!(flags & BH_DELETED))) {
            fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
                    __func__, bh->name);
            abort();
        }

        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    unregister_aiocontext(ctx);
    aio_context_destroy(ctx);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    aio_context_use_g_source(ctx);
    g_source_ref(&ctx->source);
    return &ctx->source;
}
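
/*
 * A minimal sketch of hooking an AioContext into a glib main loop through
 * this GSource (the surrounding setup is hypothetical):
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);
 *     // the loop now drives aio_ctx_prepare()/check()/dispatch()
 */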

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}
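
/*
 * Illustrative use of the per-context pool for a blocking operation; the
 * exact submit helper varies between QEMU versions, so treat this as a
 * sketch (my_worker_fn, my_arg, my_cb and my_state are hypothetical):
 *
 *     ThreadPool *pool = aio_get_thread_pool(ctx);
 *     thread_pool_submit_aio(pool, my_worker_fn, my_arg, my_cb, my_state);
 */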

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif

#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
{
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
}

LuringState *aio_get_linux_io_uring(AioContext *ctx)
{
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
}
#endif

void aio_notify(AioContext *ctx)
{
    /*
     * Write e.g. ctx->bh_list before writing ctx->notified.  Pairs with
     * smp_mb() in aio_notify_accept().
     */
    smp_wmb();
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified (and also ctx->bh_list) before reading
     * ctx->notify_me.  Pairs with smp_mb() in aio_ctx_prepare or aio_poll.
     */
    smp_mb();
    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    qatomic_set(&ctx->notified, false);

    /*
     * Order reads of ctx->notified (in aio_context_notifier_poll()) and the
     * above clearing of ctx->notified before reads of e.g. bh->flags.  Pairs
     * with smp_wmb() in aio_notify.
     */
    smp_mb();
}
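
/*
 * aio_notify() and aio_notify_accept() form a lightweight "kick" protocol:
 * producers publish work (e.g. a BH) and then call aio_notify(); the event
 * loop thread advertises that it is about to block by setting notify_me
 * (see aio_ctx_prepare() above) and consumes kicks with aio_notify_accept()
 * before re-checking for work.  The paired barriers guarantee that at least
 * one side observes the other's write, so the loop cannot block while work
 * is pending.  Typical producer side (my_bh is hypothetical):
 *
 *     qemu_bh_schedule(my_bh);    // aio_bh_enqueue() calls aio_notify()
 */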

static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void aio_context_notifier_cb(EventNotifier *e)
{
    AioContext *ctx = container_of(e, AioContext, notifier);

    event_notifier_test_and_clear(&ctx->notifier);
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool aio_context_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    /*
     * No need for load-acquire because we just want to kick the
     * event loop.  aio_notify_accept() takes care of synchronizing
     * the event loop with the producers.
     */
    return qatomic_read(&ctx->notified);
}

static void aio_context_notifier_poll_ready(EventNotifier *e)
{
    /* Do nothing, we just wanted to kick the event loop */
}

static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);
        aio_context_acquire(ctx);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll,
                           aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    ctx->aio_max_batch = 0;

    ctx->thread_pool_min = 0;
    ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;

    register_aiocontext(ctx);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}

void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                                            __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}
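
/*
 * Illustrative handoff of a coroutine to another context's thread (the
 * entry point and state are hypothetical):
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_fn, my_state);
 *     aio_co_schedule(iothread_ctx, co);    // co runs in that ctx's thread
 *
 * A coroutine must not be scheduled again before it has run; the
 * co->scheduled check above aborts on double scheduling.
 */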

typedef struct AioCoRescheduleSelf {
    Coroutine *co;
    AioContext *new_ctx;
} AioCoRescheduleSelf;

static void aio_co_reschedule_self_bh(void *opaque)
{
    AioCoRescheduleSelf *data = opaque;
    aio_co_schedule(data->new_ctx, data->co);
}

void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
{
    AioContext *old_ctx = qemu_get_current_aio_context();

    if (old_ctx != new_ctx) {
        AioCoRescheduleSelf data = {
            .co = qemu_coroutine_self(),
            .new_ctx = new_ctx,
        };
        /*
         * We can't directly schedule the coroutine in the target context
         * because this would be racy: The other thread could try to enter the
         * coroutine before it has yielded in this one.
         */
        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
        qemu_coroutine_yield();
    }
}
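
/*
 * Sketch of a coroutine temporarily migrating to another context for work
 * that must run in that context's thread (both contexts are hypothetical):
 *
 *     aio_co_reschedule_self(other_ctx);
 *     ... runs in other_ctx's thread from here on ...
 *     aio_co_reschedule_self(home_ctx);
 */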

void aio_co_wake(Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}

void aio_co_enter(AioContext *ctx, Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        aio_context_acquire(ctx);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}

QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext)

AioContext *qemu_get_current_aio_context(void)
{
    AioContext *ctx = get_my_aiocontext();
    if (ctx) {
        return ctx;
    }
    if (qemu_mutex_iothread_locked()) {
        /* Possibly in a vCPU thread.  */
        return qemu_get_aio_context();
    }
    return NULL;
}

void qemu_set_current_aio_context(AioContext *ctx)
{
    assert(!get_my_aiocontext());
    set_my_aiocontext(ctx);
}
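
/*
 * Each native event loop thread calls qemu_set_current_aio_context() once
 * at startup.  Schematically, an IOThread's run function does:
 *
 *     qemu_set_current_aio_context(iothread->ctx);
 *     while (!stopping) {
 *         aio_poll(iothread->ctx, true);
 *     }
 */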

void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp)
{
    if (min > max || !max || min > INT_MAX || max > INT_MAX) {
        error_setg(errp, "bad thread-pool-min/thread-pool-max values");
        return;
    }

    ctx->thread_pool_min = min;
    ctx->thread_pool_max = max;

    if (ctx->thread_pool) {
        thread_pool_update_params(ctx->thread_pool, ctx);
    }
}
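
/*
 * Example: pin the pool to exactly four worker threads (error handling
 * elided by passing &error_abort):
 *
 *     aio_context_set_thread_pool_params(ctx, 4, 4, &error_abort);
 */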