From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 27 May 2020 11:33:20 +0200
Subject: [PATCH] util/async: Add aio_co_reschedule_self()

Add a function that can be used to move the currently running coroutine
to a different AioContext (and therefore potentially a different
thread).

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/block/aio.h | 10 ++++++++++
 util/async.c        | 30 ++++++++++++++++++++++++++++++
 2 files changed, 40 insertions(+)

diff --git a/include/block/aio.h b/include/block/aio.h
index 62ed954344..d5399c67d6 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -17,6 +17,7 @@
 #ifdef CONFIG_LINUX_IO_URING
 #include <liburing.h>
 #endif
+#include "qemu/coroutine.h"
 #include "qemu/queue.h"
 #include "qemu/event_notifier.h"
 #include "qemu/thread.h"
@@ -654,6 +655,15 @@ static inline bool aio_node_check(AioContext *ctx, bool is_external)
  */
 void aio_co_schedule(AioContext *ctx, struct Coroutine *co);
 
+/**
+ * aio_co_reschedule_self:
+ * @new_ctx: the new context
+ *
+ * Move the currently running coroutine to new_ctx. If the coroutine is already
+ * running in new_ctx, do nothing.
+ */
+void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);
+
 /**
  * aio_co_wake:
  * @co: the coroutine
diff --git a/util/async.c b/util/async.c
index 3165a28f2f..4eba1e6f1b 100644
--- a/util/async.c
+++ b/util/async.c
@@ -558,6 +558,36 @@ void aio_co_schedule(AioContext *ctx, Coroutine *co)
     aio_context_unref(ctx);
 }
 
+typedef struct AioCoRescheduleSelf {
+    Coroutine *co;
+    AioContext *new_ctx;
+} AioCoRescheduleSelf;
+
+static void aio_co_reschedule_self_bh(void *opaque)
+{
+    AioCoRescheduleSelf *data = opaque;
+    aio_co_schedule(data->new_ctx, data->co);
+}
+
+void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
+{
+    AioContext *old_ctx = qemu_get_current_aio_context();
+
+    if (old_ctx != new_ctx) {
+        AioCoRescheduleSelf data = {
+            .co = qemu_coroutine_self(),
+            .new_ctx = new_ctx,
+        };
+        /*
+         * We can't directly schedule the coroutine in the target context
+         * because this would be racy: The other thread could try to enter the
+         * coroutine before it has yielded in this one.
+         */
+        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
+        qemu_coroutine_yield();
+    }
+}
+
 void aio_co_wake(struct Coroutine *co)
 {
     AioContext *ctx;