/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/graph-lock.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
#include "sysemu/cpu-timers.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING   = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED   = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT   = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE      = (1 << 4),
};

struct QEMUBH {
    AioContext *ctx;
    const char *name;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
    MemReentrancyGuard *reentrancy_guard;
};

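/*
 * Usage sketch (illustrative only, not part of this file): a long-lived
 * bottom half is created once, scheduled any number of times, and finally
 * deleted.  The callback name and the opaque type are hypothetical.
 *
 *     static void my_bh_cb(void *opaque)
 *     {
 *         MyState *s = opaque;          // runs in ctx's home thread
 *         ...
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, s);
 *     qemu_bh_schedule(bh);             // sets BH_PENDING | BH_SCHEDULED
 *     ...
 *     qemu_bh_delete(bh);               // freed later by aio_bh_poll()
 */
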
/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * Synchronizes with atomic_fetch_and() in aio_bh_dequeue(), ensuring that
     * insertion starts after BH_PENDING is set.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);

    if (!(old_flags & BH_PENDING)) {
        /*
         * At this point the bottom half becomes visible to aio_bh_poll().
         * This insertion thus synchronizes with QSLIST_MOVE_ATOMIC in
         * aio_bh_poll(), ensuring that:
         * 1. any writes needed by the callback are visible from the callback
         *    after aio_bh_dequeue() returns bh.
         * 2. ctx is loaded before the callback has a chance to execute and bh
         *    could be freed.
         */
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
    /*
     * Workaround for record/replay: vCPU execution should be suspended when
     * a new BH is scheduled.  This avoids guest timeouts caused by overly
     * long execution cycles.
     */
    icount_notify_exit();
}

/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * Synchronizes with qatomic_fetch_or() in aio_bh_enqueue(), ensuring that
     * the removal finishes before BH_PENDING is reset.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                               ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}

void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}

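/*
 * Usage sketch (illustrative): defer one function call to ctx's event loop
 * without managing a QEMUBH by hand; BH_ONESHOT makes aio_bh_poll() free the
 * BH right after the callback runs.  my_cb and s are hypothetical.
 *
 *     aio_bh_schedule_oneshot(ctx, my_cb, s);
 */
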
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name, MemReentrancyGuard *reentrancy_guard)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
        .reentrancy_guard = reentrancy_guard,
    };
    return bh;
}

void aio_bh_call(QEMUBH *bh)
{
    bool last_engaged_in_io = false;

    /* Make a copy of the guard pointer, as cb may free the bh */
    MemReentrancyGuard *reentrancy_guard = bh->reentrancy_guard;
    if (reentrancy_guard) {
        last_engaged_in_io = reentrancy_guard->engaged_in_io;
        if (reentrancy_guard->engaged_in_io) {
            trace_reentrant_aio(bh->ctx, bh->name);
        }
        reentrancy_guard->engaged_in_io = true;
    }

    bh->cb(bh->opaque);

    if (reentrancy_guard) {
        reentrancy_guard->engaged_in_io = last_engaged_in_io;
    }
}

/* Concurrent calls to aio_bh_poll() on the same context are not allowed. */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    /* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue(). */
    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);

    /*
     * GCC13 [-Werror=dangling-pointer=] complains that the local variable
     * 'slice' is being stored in the global 'ctx->bh_slice_list' but the
     * list is emptied before this function returns.
     */
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Wdangling-pointer="
#endif
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/* This function is asynchronous: it only clears BH_SCHEDULED and does not
 * interrupt a callback that is already running.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
}

/* This function is asynchronous: the bottom half is actually deleted later,
 * from within the event loop (aio_bh_poll() or aio_ctx_finalize()).
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}

static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
    QEMUBH *bh;

    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    return timeout;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    BHListSlice *s;
    int64_t deadline;
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
    if (timeout == 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
        if (timeout == 0) {
            return 0;
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

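/*
 * Worked example (illustrative): with only an idle BH scheduled the timeout
 * is 10 ms; if a timer additionally expires in 5 ms, qemu_soonest_timeout()
 * picks 5 ms; any scheduled non-idle BH makes this function return 0 so the
 * event loop does not block at all.
 */
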
static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag.  */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    unsigned flags;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /*
         * qemu_bh_delete() must have been called on BHs in this AioContext. In
         * many cases memory leaks, hangs, or inconsistent state occur when a
         * BH is leaked because something still expects it to run.
         *
         * If you hit this, fix the lifecycle of the BH so that
         * qemu_bh_delete() and any associated cleanup is called before the
         * AioContext is finalized.
         */
        if (unlikely(!(flags & BH_DELETED))) {
            fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
                    __func__, bh->name);
            abort();
        }

        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    unregister_aiocontext(ctx);
    aio_context_destroy(ctx);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    aio_context_use_g_source(ctx);
    g_source_ref(&ctx->source);
    return &ctx->source;
}

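/*
 * Integration sketch (illustrative): an AioContext can be driven by a glib
 * main loop; once its GSource is attached, g_main_loop_run() invokes
 * aio_ctx_prepare(), aio_ctx_check() and aio_ctx_dispatch() above.
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);   // the main context now holds its own reference
 */
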
ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif

#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
{
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
}

LuringState *aio_get_linux_io_uring(AioContext *ctx)
{
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
}
#endif

void aio_notify(AioContext *ctx)
{
    /*
     * Write e.g. ctx->bh_list before writing ctx->notified.  Pairs with
     * smp_mb() in aio_notify_accept().
     */
    smp_wmb();
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified (and also ctx->bh_list) before reading
     * ctx->notify_me.  Pairs with smp_mb() in aio_ctx_prepare or aio_poll.
     */
    smp_mb();
    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    qatomic_set(&ctx->notified, false);

    /*
     * Order reads of ctx->notified (in aio_context_notifier_poll()) and the
     * above clearing of ctx->notified before reads of e.g. bh->flags.  Pairs
     * with smp_wmb() in aio_notify.
     */
    smp_mb();
}

static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void aio_context_notifier_cb(EventNotifier *e)
{
    AioContext *ctx = container_of(e, AioContext, notifier);

    event_notifier_test_and_clear(&ctx->notifier);
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool aio_context_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    /*
     * No need for load-acquire because we just want to kick the
     * event loop.  aio_notify_accept() takes care of synchronizing
     * the event loop with the producers.
     */
    return qatomic_read(&ctx->notified);
}

static void aio_context_notifier_poll_ready(EventNotifier *e)
{
    /* Do nothing, we just wanted to kick the event loop */
}

static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    /* Reverse the LIFO list so coroutines run in scheduling order */
    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);
        aio_context_acquire(ctx);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll,
                           aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    ctx->aio_max_batch = 0;

    ctx->thread_pool_min = 0;
    ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;

    register_aiocontext(ctx);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}

void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                                            __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}

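/*
 * Usage sketch (illustrative): start a coroutine in another AioContext's
 * thread.  my_co_entry, target_ctx and s are hypothetical.
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_entry, s);
 *     aio_co_schedule(target_ctx, co);   // entered from co_schedule_bh_cb()
 */
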
typedef struct AioCoRescheduleSelf {
    Coroutine *co;
    AioContext *new_ctx;
} AioCoRescheduleSelf;

static void aio_co_reschedule_self_bh(void *opaque)
{
    AioCoRescheduleSelf *data = opaque;
    aio_co_schedule(data->new_ctx, data->co);
}

void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
{
    AioContext *old_ctx = qemu_get_current_aio_context();

    if (old_ctx != new_ctx) {
        AioCoRescheduleSelf data = {
            .co = qemu_coroutine_self(),
            .new_ctx = new_ctx,
        };
        /*
         * We can't directly schedule the coroutine in the target context
         * because this would be racy: The other thread could try to enter the
         * coroutine before it has yielded in this one.
         */
        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
        qemu_coroutine_yield();
    }
}

void aio_co_wake(Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}

void aio_co_enter(AioContext *ctx, Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        aio_context_acquire(ctx);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

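/*
 * Usage sketch (illustrative): the common yield/wake pattern.  A coroutine
 * parks itself waiting for an event and the completion path, possibly in
 * another thread, wakes it.  s->co is a hypothetical field.
 *
 *     // in the coroutine:
 *     s->co = qemu_coroutine_self();
 *     qemu_coroutine_yield();
 *
 *     // in the completion path (any thread):
 *     aio_co_wake(s->co);   // re-enters directly or via aio_co_schedule()
 */
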
void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}

QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext)

AioContext *qemu_get_current_aio_context(void)
{
    AioContext *ctx = get_my_aiocontext();
    if (ctx) {
        return ctx;
    }
    if (qemu_mutex_iothread_locked()) {
        /* Possibly in a vCPU thread.  */
        return qemu_get_aio_context();
    }
    return NULL;
}

void qemu_set_current_aio_context(AioContext *ctx)
{
    assert(!get_my_aiocontext());
    set_my_aiocontext(ctx);
}

void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp)
{
    if (min > max || !max || min > INT_MAX || max > INT_MAX) {
        error_setg(errp, "bad thread-pool-min/thread-pool-max values");
        return;
    }

    ctx->thread_pool_min = min;
    ctx->thread_pool_max = max;

    if (ctx->thread_pool) {
        thread_pool_update_params(ctx->thread_pool, ctx);
    }
}
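
/*
 * Usage sketch (illustrative): bound the context's worker thread pool.
 *
 *     Error *local_err = NULL;
 *     aio_context_set_thread_pool_params(ctx, 2, 8, &local_err);
 */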