X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=async.c;h=b2de360c2303f7ccab8278a55ce63b4da512f723;hb=b8093d38e8dce0413fe8999fe2dee48a96ab1104;hp=2b51e87679a3154078d17af3b2881ea8d119d729;hpb=a6f2cb037a82fb8679e70e175cfbc879dd829e06;p=mirror_qemu.git

diff --git a/async.c b/async.c
index 2b51e87679..b2de360c23 100644
--- a/async.c
+++ b/async.c
@@ -22,11 +22,14 @@
  * THE SOFTWARE.
  */
 
+#include "qemu/osdep.h"
+#include "qapi/error.h"
 #include "qemu-common.h"
 #include "block/aio.h"
 #include "block/thread-pool.h"
 #include "qemu/main-loop.h"
 #include "qemu/atomic.h"
+#include "block/raw-aio.h"
 
 /***********************************************************/
 /* bottom halves (can be seen as timers which expire ASAP) */
@@ -41,6 +44,26 @@ struct QEMUBH {
     bool deleted;
 };
 
+void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
+{
+    QEMUBH *bh;
+    bh = g_new(QEMUBH, 1);
+    *bh = (QEMUBH){
+        .ctx = ctx,
+        .cb = cb,
+        .opaque = opaque,
+    };
+    qemu_mutex_lock(&ctx->bh_lock);
+    bh->next = ctx->first_bh;
+    bh->scheduled = 1;
+    bh->deleted = 1;
+    /* Make sure that the members are ready before putting bh into list */
+    smp_wmb();
+    ctx->first_bh = bh;
+    qemu_mutex_unlock(&ctx->bh_lock);
+    aio_notify(ctx);
+}
+
 QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
 {
     QEMUBH *bh;
@@ -59,6 +82,11 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
     return bh;
 }
 
+void aio_bh_call(QEMUBH *bh)
+{
+    bh->cb(bh->opaque);
+}
+
 /* Multiple occurrences of aio_bh_poll cannot be called concurrently */
 int aio_bh_poll(AioContext *ctx)
 {
@@ -78,11 +106,13 @@ int aio_bh_poll(AioContext *ctx)
          * thread sees the zero before bh->cb has run, and thus will call
          * aio_notify again if necessary.
          */
-        if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
-            if (!bh->idle)
+        if (atomic_xchg(&bh->scheduled, 0)) {
+            /* Idle BHs don't count as progress */
+            if (!bh->idle) {
                 ret = 1;
+            }
             bh->idle = 0;
-            bh->cb(bh->opaque);
+            aio_bh_call(bh);
         }
     }
 
@@ -94,7 +124,7 @@ int aio_bh_poll(AioContext *ctx)
         bhp = &ctx->first_bh;
         while (*bhp) {
             bh = *bhp;
-            if (bh->deleted) {
+            if (bh->deleted && !bh->scheduled) {
                 *bhp = bh->next;
                 g_free(bh);
             } else {
@@ -158,7 +188,7 @@ aio_compute_timeout(AioContext *ctx)
     QEMUBH *bh;
 
     for (bh = ctx->first_bh; bh; bh = bh->next) {
-        if (!bh->deleted && bh->scheduled) {
+        if (bh->scheduled) {
             if (bh->idle) {
                 /* idle bottom halves will be polled at least
                  * every 10ms */
@@ -184,6 +214,8 @@ aio_ctx_prepare(GSource *source, gint *timeout)
 {
     AioContext *ctx = (AioContext *) source;
 
+    atomic_or(&ctx->notify_me, 1);
+
     /* We assume there is no timeout already supplied */
     *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
 
@@ -200,10 +232,13 @@ aio_ctx_check(GSource *source)
     AioContext *ctx = (AioContext *) source;
     QEMUBH *bh;
 
+    atomic_and(&ctx->notify_me, ~1);
+    aio_notify_accept(ctx);
+
     for (bh = ctx->first_bh; bh; bh = bh->next) {
-        if (!bh->deleted && bh->scheduled) {
+        if (bh->scheduled) {
             return true;
-        }
+        }
     }
     return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
 }
@@ -226,11 +261,31 @@ aio_ctx_finalize(GSource *source)
     AioContext *ctx = (AioContext *) source;
 
     thread_pool_free(ctx->thread_pool);
-    aio_set_event_notifier(ctx, &ctx->notifier, NULL);
+
+#ifdef CONFIG_LINUX_AIO
+    if (ctx->linux_aio) {
+        laio_detach_aio_context(ctx->linux_aio, ctx);
+        laio_cleanup(ctx->linux_aio);
+        ctx->linux_aio = NULL;
+    }
+#endif
+
+    qemu_mutex_lock(&ctx->bh_lock);
+    while (ctx->first_bh) {
+        QEMUBH *next = ctx->first_bh->next;
+
+        /* qemu_bh_delete() must have been called on BHs in this AioContext */
+        assert(ctx->first_bh->deleted);
+
+        g_free(ctx->first_bh);
+        ctx->first_bh = next;
+    }
+    qemu_mutex_unlock(&ctx->bh_lock);
+
+    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
     event_notifier_cleanup(&ctx->notifier);
-    rfifolock_destroy(&ctx->lock);
+    qemu_rec_mutex_destroy(&ctx->lock);
     qemu_mutex_destroy(&ctx->bh_lock);
-    g_array_free(ctx->pollfds, TRUE);
     timerlistgroup_deinit(&ctx->tlg);
 }
 
@@ -255,24 +310,33 @@ ThreadPool *aio_get_thread_pool(AioContext *ctx)
     return ctx->thread_pool;
 }
 
-void aio_set_dispatching(AioContext *ctx, bool dispatching)
+#ifdef CONFIG_LINUX_AIO
+LinuxAioState *aio_get_linux_aio(AioContext *ctx)
 {
-    ctx->dispatching = dispatching;
-    if (!dispatching) {
-        /* Write ctx->dispatching before reading e.g. bh->scheduled.
-         * Optimization: this is only needed when we're entering the "unsafe"
-         * phase where other threads must call event_notifier_set.
-         */
-        smp_mb();
+    if (!ctx->linux_aio) {
+        ctx->linux_aio = laio_init();
+        laio_attach_aio_context(ctx->linux_aio, ctx);
     }
+    return ctx->linux_aio;
 }
+#endif
 
 void aio_notify(AioContext *ctx)
 {
-    /* Write e.g. bh->scheduled before reading ctx->dispatching. */
+    /* Write e.g. bh->scheduled before reading ctx->notify_me. Pairs
+     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
+     */
     smp_mb();
-    if (!ctx->dispatching) {
+    if (ctx->notify_me) {
         event_notifier_set(&ctx->notifier);
+        atomic_mb_set(&ctx->notified, true);
+    }
+}
+
+void aio_notify_accept(AioContext *ctx)
+{
+    if (atomic_xchg(&ctx->notified, false)) {
+        event_notifier_test_and_clear(&ctx->notifier);
     }
 }
 
@@ -281,34 +345,40 @@ static void aio_timerlist_notify(void *opaque)
     aio_notify(opaque);
 }
 
-static void aio_rfifolock_cb(void *opaque)
+static void event_notifier_dummy_cb(EventNotifier *e)
 {
-    /* Kick owner thread in case they are blocked in aio_poll() */
-    aio_notify(opaque);
 }
 
 AioContext *aio_context_new(Error **errp)
 {
     int ret;
     AioContext *ctx;
+
     ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
+    aio_context_setup(ctx);
+
     ret = event_notifier_init(&ctx->notifier, false);
     if (ret < 0) {
-        g_source_destroy(&ctx->source);
         error_setg_errno(errp, -ret, "Failed to initialize event notifier");
-        return NULL;
+        goto fail;
     }
     g_source_set_can_recurse(&ctx->source, true);
     aio_set_event_notifier(ctx, &ctx->notifier,
+                           false,
                            (EventNotifierHandler *)
-                           event_notifier_test_and_clear);
-    ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
+                           event_notifier_dummy_cb);
+#ifdef CONFIG_LINUX_AIO
+    ctx->linux_aio = NULL;
+#endif
     ctx->thread_pool = NULL;
     qemu_mutex_init(&ctx->bh_lock);
-    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
+    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
 
     return ctx;
+fail:
+    g_source_destroy(&ctx->source);
+    return NULL;
 }
 
 void aio_context_ref(AioContext *ctx)
@@ -323,10 +393,10 @@ void aio_context_unref(AioContext *ctx)
 
 void aio_context_acquire(AioContext *ctx)
 {
-    rfifolock_lock(&ctx->lock);
+    qemu_rec_mutex_lock(&ctx->lock);
 }
 
 void aio_context_release(AioContext *ctx)
 {
-    rfifolock_unlock(&ctx->lock);
+    qemu_rec_mutex_unlock(&ctx->lock);
 }
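
Usage sketch (illustrative, not part of the patch): the new aio_bh_schedule_oneshot()
allocates a QEMUBH that starts out both scheduled and deleted, so aio_bh_poll() runs
the callback exactly once and its sweep (which now frees only BHs with
bh->deleted && !bh->scheduled) reclaims the BH afterwards. The caller never holds a
QEMUBH pointer and must not call qemu_bh_delete(). The request type and callback
names below are hypothetical:

    #include "qemu/osdep.h"
    #include "block/aio.h"

    typedef struct MyRequest {
        bool done;                  /* hypothetical completion flag */
    } MyRequest;

    static void my_complete_bh(void *opaque)
    {
        MyRequest *req = opaque;

        /* Runs once in ctx's home thread; the BH frees itself after
         * this callback returns, so no qemu_bh_delete() here.
         */
        req->done = true;
    }

    /* From any thread; the aio_notify() inside wakes the target loop: */
    aio_bh_schedule_oneshot(ctx, my_complete_bh, req);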
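The notify_me/notified pair replaces the old ctx->dispatching flag. Condensed from
the hunks above (the interleaving comments are illustrative), the handshake between
a sleeping event loop and aio_notify() is:

    /* waiter, in aio_ctx_prepare(): announce it is about to sleep */
    atomic_or(&ctx->notify_me, 1);

    /* notifier, in aio_notify(): bh->scheduled was already written */
    smp_mb();                           /* pairs with the atomic_or */
    if (ctx->notify_me) {
        event_notifier_set(&ctx->notifier);
        atomic_mb_set(&ctx->notified, true);
    }

    /* waiter, in aio_ctx_check(): done sleeping */
    atomic_and(&ctx->notify_me, ~1);
    aio_notify_accept(ctx);             /* drain the pending wakeup */

Either the notifier reads notify_me != 0 and sets the event notifier, or the
waiter's later scan of first_bh observes bh->scheduled; the smp_mb() closes the
window in which both sides could miss each other.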
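The RFifoLock to QemuRecMutex conversion leaves the public locking API unchanged:
callers still bracket cross-thread access to an AioContext with
aio_context_acquire()/aio_context_release(), and the lock remains recursive. What
goes away is FIFO fairness and the contention callback (the deleted
aio_rfifolock_cb no longer kicks an owner blocked in aio_poll()). A typical caller,
with a hypothetical timer, for illustration:

    aio_context_acquire(ctx);
    /* my_timer and deadline are hypothetical; timer_mod() ends up in
     * aio_timerlist_notify(), which wakes ctx via aio_notify().
     */
    timer_mod(my_timer, deadline);
    aio_context_release(ctx);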