/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/graph-lock.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
#include "sysemu/cpu-timers.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING   = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED   = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT   = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE      = (1 << 4),
};

struct QEMUBH {
    AioContext *ctx;
    const char *name;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
};

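/*
 * How the flags combine (a summary sketch derived from the entry points
 * below; aio_bh_enqueue()/aio_bh_poll() are the authoritative logic):
 *
 *   qemu_bh_schedule()             -> BH_PENDING | BH_SCHEDULED
 *   qemu_bh_schedule_idle()        -> BH_PENDING | BH_SCHEDULED | BH_IDLE
 *   qemu_bh_delete()               -> BH_PENDING | BH_DELETED
 *   aio_bh_schedule_oneshot_full() -> BH_PENDING | BH_SCHEDULED | BH_ONESHOT
 *
 * aio_bh_poll() clears BH_PENDING/BH_SCHEDULED/BH_IDLE when it dequeues a
 * BH, invokes the callback only if BH_SCHEDULED was set and BH_DELETED was
 * not, and frees the BH if BH_DELETED or BH_ONESHOT was set.
 */
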
/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * The memory barrier implicit in qatomic_fetch_or makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before the callback has a chance to execute and bh
     *    could be freed.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
    if (!(old_flags & BH_PENDING)) {
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
    /*
     * Workaround for record/replay.
     * vCPU execution should be suspended when new BH is set.
     * This is needed to avoid guest timeouts caused
     * by the long cycles of the execution.
     */
    icount_notify_exit();
}

/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * The qatomic_and is paired with aio_bh_enqueue().  The implicit memory
     * barrier ensures that the callback sees all writes done by the scheduling
     * thread.  It also ensures that the scheduling thread sees the cleared
     * flag before bh->cb has run, and thus will call aio_notify again if
     * necessary.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                               ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}

void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}

QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    return bh;
}

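/*
 * Example (an illustrative sketch, not part of this file): the typical
 * lifecycle of a recurring bottom half, with hypothetical callback and
 * state names.  aio_bh_new() is the convenience wrapper that supplies the
 * name argument of aio_bh_new_full():
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         ... do deferred work in ctx's home thread ...
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, s);
 *     qemu_bh_schedule(bh);    my_cb() runs from the next aio_bh_poll()
 *     ...
 *     qemu_bh_delete(bh);      never run the callback again; free the BH
 *
 * For one-shot work, aio_bh_schedule_oneshot() avoids the explicit delete.
 */
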
void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* Concurrent aio_bh_poll() calls on the same AioContext are not allowed. */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}

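/*
 * A note on the slice list (a design sketch inferred from the code above):
 * each aio_bh_poll() call atomically carves the current ctx->bh_list off
 * into an on-stack slice, so bottom halves scheduled while callbacks run
 * land on a fresh ctx->bh_list and wait for the next poll.  Because a
 * callback may re-enter the event loop, every active aio_bh_poll() chains
 * its slice onto ctx->bh_slice_list and drains from the oldest slice, so
 * nested calls still process all previously scheduled bottom halves.
 */
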
void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/* This function is asynchronous. */
void qemu_bh_cancel(QEMUBH *bh)
{
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
}

/* This function is asynchronous.  The bottom half is actually freed the
 * next time aio_bh_poll() dequeues it (or when the AioContext is
 * finalized).
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}

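/*
 * Example (illustrative, not part of this file): qemu_bh_cancel() only
 * clears BH_SCHEDULED, so the BH stays valid and may be re-scheduled;
 * qemu_bh_delete() marks it for freeing instead.  Assuming aio_bh_poll()
 * has not run in between:
 *
 *     qemu_bh_schedule(bh);
 *     qemu_bh_cancel(bh);      bh->cb will not run; bh is still usable
 *     qemu_bh_schedule(bh);    schedule it again
 *     qemu_bh_delete(bh);      bh->cb will not run; bh will be freed
 */
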
static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
    QEMUBH *bh;

    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    return timeout;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    BHListSlice *s;
    int64_t deadline;
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
    if (timeout == 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
        if (timeout == 0) {
            return 0;
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

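/*
 * Worked example (editorial sketch): with one scheduled idle BH and a
 * timer due in 5 ms, aio_compute_bh_timeout() returns the 10 ms idle cap
 * (10000000 ns) and qemu_soonest_timeout() then picks the sooner 5 ms
 * timer deadline.  Any scheduled non-idle BH short-circuits the whole
 * computation to 0, i.e. "do not block at all".
 */
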
static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag.  */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    unsigned flags;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /*
         * qemu_bh_delete() must have been called on BHs in this AioContext. In
         * many cases memory leaks, hangs, or inconsistent state occur when a
         * BH is leaked because something still expects it to run.
         *
         * If you hit this, fix the lifecycle of the BH so that
         * qemu_bh_delete() and any associated cleanup is called before the
         * AioContext is finalized.
         */
        if (unlikely(!(flags & BH_DELETED))) {
            fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
                    __func__, bh->name);
            abort();
        }

        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    unregister_aiocontext(ctx);
    aio_context_destroy(ctx);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    aio_context_use_g_source(ctx);
    g_source_ref(&ctx->source);
    return &ctx->source;
}

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

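/*
 * Illustrative use (a hypothetical caller; the real entry points live in
 * block/thread-pool.h and their signatures should be checked against the
 * header).  The pool is created lazily, so callers just fetch and submit:
 *
 *     ThreadPool *pool = aio_get_thread_pool(ctx);
 *     thread_pool_submit_aio(pool, blocking_fn, arg, done_cb, opaque);
 *
 * where blocking_fn runs in a worker thread and done_cb runs back in ctx.
 */
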
#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif

#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
{
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
}

LuringState *aio_get_linux_io_uring(AioContext *ctx)
{
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
}
#endif

void aio_notify(AioContext *ctx)
{
    /*
     * Write e.g. bh->flags before writing ctx->notified.  Pairs with smp_mb in
     * aio_notify_accept.
     */
    smp_wmb();
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified before reading ctx->notify_me.  Pairs
     * with smp_mb in aio_ctx_prepare or aio_poll.
     */
    smp_mb();
    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    qatomic_set(&ctx->notified, false);

    /*
     * Write ctx->notified before reading e.g. bh->flags.  Pairs with smp_wmb
     * in aio_notify.
     */
    smp_mb();
}

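/*
 * Summary of the notification protocol (a sketch; the memory-barrier
 * comments above and in aio_ctx_prepare/aio_ctx_check are authoritative):
 *
 *   scheduling thread               event-loop thread
 *   -----------------               -----------------
 *   write bh->flags                 set notify_me
 *   smp_wmb()                       smp_mb()
 *   set ctx->notified               compute timeout (reads bh->flags)
 *   smp_mb()                        poll()
 *   read notify_me; if set,
 *   kick the event notifier
 *
 * Either the event loop observes the new BH flags while computing its
 * timeout, or the scheduling thread observes notify_me and kicks the
 * notifier; a scheduled BH can therefore never be missed.
 */
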
static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void aio_context_notifier_cb(EventNotifier *e)
{
    AioContext *ctx = container_of(e, AioContext, notifier);

    event_notifier_test_and_clear(&ctx->notifier);
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool aio_context_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    return qatomic_read(&ctx->notified);
}

static void aio_context_notifier_poll_ready(EventNotifier *e)
{
    /* Do nothing, we just wanted to kick the event loop */
}

static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);
        aio_context_acquire(ctx);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

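/*
 * Why the double reversal above (a note inferred from the code):
 * QSLIST_INSERT_HEAD_ATOMIC builds scheduled_coroutines in LIFO order, so
 * the atomically detached list is reversed once onto "straight" before
 * entering the coroutines, restoring the order in which they were
 * scheduled.
 */
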
AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll,
                           aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    ctx->aio_max_batch = 0;

    ctx->thread_pool_min = 0;
    ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;

    register_aiocontext(ctx);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}

void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                                            __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}

typedef struct AioCoRescheduleSelf {
    Coroutine *co;
    AioContext *new_ctx;
} AioCoRescheduleSelf;

static void aio_co_reschedule_self_bh(void *opaque)
{
    AioCoRescheduleSelf *data = opaque;
    aio_co_schedule(data->new_ctx, data->co);
}

void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
{
    AioContext *old_ctx = qemu_get_current_aio_context();

    if (old_ctx != new_ctx) {
        AioCoRescheduleSelf data = {
            .co = qemu_coroutine_self(),
            .new_ctx = new_ctx,
        };
        /*
         * We can't directly schedule the coroutine in the target context
         * because this would be racy: The other thread could try to enter the
         * coroutine before it has yielded in this one.
         */
        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
        qemu_coroutine_yield();
    }
}

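/*
 * Example (hypothetical coroutine code): temporarily hopping to the main
 * loop's context and back might look like this, assuming the coroutine
 * started in some IOThread's context:
 *
 *     void coroutine_fn my_co_fn(void *opaque)
 *     {
 *         AioContext *saved = qemu_get_current_aio_context();
 *
 *         aio_co_reschedule_self(qemu_get_aio_context());
 *         ...                    now running in the main loop thread
 *         aio_co_reschedule_self(saved);
 *     }
 */
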
void aio_co_wake(struct Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}

void aio_co_enter(AioContext *ctx, struct Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        aio_context_acquire(ctx);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

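/*
 * Dispatch summary for aio_co_enter() (an editorial recap of the branches
 * above): a cross-context wakeup is deferred through aio_co_schedule(); a
 * same-context wakeup from inside another coroutine is queued on
 * co_queue_wakeup and entered once the current coroutine yields or
 * terminates; only a same-context wakeup from outside coroutine context
 * enters co directly.
 */
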
void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}

QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext)

AioContext *qemu_get_current_aio_context(void)
{
    AioContext *ctx = get_my_aiocontext();
    if (ctx) {
        return ctx;
    }
    if (qemu_mutex_iothread_locked()) {
        /* Possibly in a vCPU thread.  */
        return qemu_get_aio_context();
    }
    return NULL;
}

void qemu_set_current_aio_context(AioContext *ctx)
{
    assert(!get_my_aiocontext());
    set_my_aiocontext(ctx);
}

void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp)
{
    if (min > max || !max || min > INT_MAX || max > INT_MAX) {
        error_setg(errp, "bad thread-pool-min/thread-pool-max values");
        return;
    }

    ctx->thread_pool_min = min;
    ctx->thread_pool_max = max;

    if (ctx->thread_pool) {
        thread_pool_update_params(ctx->thread_pool, ctx);
    }
}
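
/*
 * Example (hypothetical values): callers typically apply user-supplied
 * limits right after creating the context, e.g.
 *
 *     Error *local_err = NULL;
 *     aio_context_set_thread_pool_params(ctx, 2, 8, &local_err);
 *     if (local_err) {
 *         error_report_err(local_err);
 *     }
 *
 * A min of 2 keeps two workers alive even when idle; a max of 8 replaces
 * the THREAD_POOL_MAX_THREADS_DEFAULT cap.
 */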