X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=async.c;h=5fb3fa61dff7e7cd6c103fc5a2b5545b2d989ff0;hb=30c367ed446b6ea53245589a5cf373578ac075d7;hp=ecdaf15887c6afcbfd4883b09f583505d802f3a6;hpb=a75bfc5fdda8b87ff969d68e020ffdf1008751b1;p=qemu.git

diff --git a/async.c b/async.c
index ecdaf1588..5fb3fa61d 100644
--- a/async.c
+++ b/async.c
@@ -23,48 +23,58 @@
  */
 #include "qemu-common.h"
-#include "qemu-aio.h"
-#include "main-loop.h"
-
-/* Anchor of the list of Bottom Halves belonging to the context */
-static struct QEMUBH *first_bh;
+#include "block/aio.h"
+#include "block/thread-pool.h"
+#include "qemu/main-loop.h"
 
 /***********************************************************/
 /* bottom halves (can be seen as timers which expire ASAP) */
 
 struct QEMUBH {
+    AioContext *ctx;
     QEMUBHFunc *cb;
     void *opaque;
-    int scheduled;
-    int idle;
-    int deleted;
     QEMUBH *next;
+    bool scheduled;
+    bool idle;
+    bool deleted;
 };
 
-QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
+QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
 {
     QEMUBH *bh;
     bh = g_malloc0(sizeof(QEMUBH));
+    bh->ctx = ctx;
     bh->cb = cb;
     bh->opaque = opaque;
-    bh->next = first_bh;
-    first_bh = bh;
+    qemu_mutex_lock(&ctx->bh_lock);
+    bh->next = ctx->first_bh;
+    /* Make sure that the members are ready before putting bh into the list */
+    smp_wmb();
+    ctx->first_bh = bh;
+    qemu_mutex_unlock(&ctx->bh_lock);
     return bh;
 }
 
-int qemu_bh_poll(void)
+/* Multiple invocations of aio_bh_poll must not run concurrently */
+int aio_bh_poll(AioContext *ctx)
 {
     QEMUBH *bh, **bhp, *next;
     int ret;
-    static int nesting = 0;
 
-    nesting++;
+    ctx->walking_bh++;
 
     ret = 0;
-    for (bh = first_bh; bh; bh = next) {
+    for (bh = ctx->first_bh; bh; bh = next) {
+        /* Make sure that fetching bh happens before accessing its members */
+        smp_read_barrier_depends();
         next = bh->next;
         if (!bh->deleted && bh->scheduled) {
             bh->scheduled = 0;
+            /* Paired with the smp_wmb() in the schedule functions so that
+             * bh->idle and the callback's data are read after bh->scheduled.
+             */
+            smp_rmb();
             if (!bh->idle)
                 ret = 1;
             bh->idle = 0;
@@ -72,11 +82,12 @@ int qemu_bh_poll(void)
         }
     }
 
-    nesting--;
+    ctx->walking_bh--;
 
     /* remove deleted bhs */
-    if (!nesting) {
-        bhp = &first_bh;
+    if (!ctx->walking_bh) {
+        qemu_mutex_lock(&ctx->bh_lock);
+        bhp = &ctx->first_bh;
         while (*bhp) {
             bh = *bhp;
             if (bh->deleted) {
@@ -86,6 +97,7 @@ int qemu_bh_poll(void)
                 bhp = &bh->next;
             }
         }
+        qemu_mutex_unlock(&ctx->bh_lock);
     }
 
     return ret;
@@ -95,48 +107,171 @@ void qemu_bh_schedule_idle(QEMUBH *bh)
 {
     if (bh->scheduled)
         return;
-    bh->scheduled = 1;
     bh->idle = 1;
+    /* Make sure that bh->idle and any writes needed by the callback are
+     * done before bh->scheduled is read in aio_bh_poll().
+     */
+    smp_wmb();
+    bh->scheduled = 1;
 }
 
 void qemu_bh_schedule(QEMUBH *bh)
 {
     if (bh->scheduled)
         return;
-    bh->scheduled = 1;
     bh->idle = 0;
-    /* stop the currently executing CPU to execute the BH ASAP */
-    qemu_notify_event();
+    /* Make sure that bh->idle and any writes needed by the callback are
+     * done before bh->scheduled is read in aio_bh_poll().
+     */
+    smp_wmb();
+    bh->scheduled = 1;
+    aio_notify(bh->ctx);
 }
 
+
+/* This function is asynchronous.
+ */
 void qemu_bh_cancel(QEMUBH *bh)
 {
     bh->scheduled = 0;
 }
 
+/* This function is asynchronous. The bottom half will actually be deleted
+ * at the end of a later aio_bh_poll() run.
+ */ void qemu_bh_delete(QEMUBH *bh) { bh->scheduled = 0; bh->deleted = 1; } -void qemu_bh_update_timeout(uint32_t *timeout) +static gboolean +aio_ctx_prepare(GSource *source, gint *timeout) { + AioContext *ctx = (AioContext *) source; QEMUBH *bh; + int deadline; - for (bh = first_bh; bh; bh = bh->next) { + /* We assume there is no timeout already supplied */ + *timeout = -1; + for (bh = ctx->first_bh; bh; bh = bh->next) { if (!bh->deleted && bh->scheduled) { if (bh->idle) { /* idle bottom halves will be polled at least * every 10ms */ - *timeout = MIN(10, *timeout); + *timeout = 10; } else { /* non-idle bottom halves will be executed * immediately */ *timeout = 0; - break; + return true; } } } + + deadline = qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg)); + if (deadline == 0) { + *timeout = 0; + return true; + } else { + *timeout = qemu_soonest_timeout(*timeout, deadline); + } + + return false; +} + +static gboolean +aio_ctx_check(GSource *source) +{ + AioContext *ctx = (AioContext *) source; + QEMUBH *bh; + + for (bh = ctx->first_bh; bh; bh = bh->next) { + if (!bh->deleted && bh->scheduled) { + return true; + } + } + return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0); +} + +static gboolean +aio_ctx_dispatch(GSource *source, + GSourceFunc callback, + gpointer user_data) +{ + AioContext *ctx = (AioContext *) source; + + assert(callback == NULL); + aio_poll(ctx, false); + return true; +} + +static void +aio_ctx_finalize(GSource *source) +{ + AioContext *ctx = (AioContext *) source; + + thread_pool_free(ctx->thread_pool); + aio_set_event_notifier(ctx, &ctx->notifier, NULL); + event_notifier_cleanup(&ctx->notifier); + qemu_mutex_destroy(&ctx->bh_lock); + g_array_free(ctx->pollfds, TRUE); + timerlistgroup_deinit(&ctx->tlg); +} + +static GSourceFuncs aio_source_funcs = { + aio_ctx_prepare, + aio_ctx_check, + aio_ctx_dispatch, + aio_ctx_finalize +}; + +GSource *aio_get_g_source(AioContext *ctx) +{ + g_source_ref(&ctx->source); + return &ctx->source; +} + +ThreadPool *aio_get_thread_pool(AioContext *ctx) +{ + if (!ctx->thread_pool) { + ctx->thread_pool = thread_pool_new(ctx); + } + return ctx->thread_pool; +} + +void aio_notify(AioContext *ctx) +{ + event_notifier_set(&ctx->notifier); } +static void aio_timerlist_notify(void *opaque) +{ + aio_notify(opaque); +} + +AioContext *aio_context_new(void) +{ + AioContext *ctx; + ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext)); + ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD)); + ctx->thread_pool = NULL; + qemu_mutex_init(&ctx->bh_lock); + event_notifier_init(&ctx->notifier, false); + aio_set_event_notifier(ctx, &ctx->notifier, + (EventNotifierHandler *) + event_notifier_test_and_clear); + timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx); + + return ctx; +} + +void aio_context_ref(AioContext *ctx) +{ + g_source_ref(&ctx->source); +} + +void aio_context_unref(AioContext *ctx) +{ + g_source_unref(&ctx->source); +}
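
The patch moves the bottom-half machinery from global state (first_bh,
qemu_bh_poll) onto per-AioContext state, adds a GSource so a context can be
driven by a GLib main loop, and orders the scheduled/idle/callback writes
with explicit memory barriers. A minimal usage sketch follows; it uses only
functions introduced or kept by this patch, but the standalone harness
(main, my_bh_cb, the done flag) is illustrative and not part of the patch:

    /* Sketch: create an AioContext, schedule a bottom half in it, and
     * drive it by hand with aio_bh_poll.  Assumes the declarations from
     * "block/aio.h".
     */
    #include "qemu-common.h"
    #include "block/aio.h"

    static void my_bh_cb(void *opaque)
    {
        int *done = opaque;     /* record that the bottom half ran */
        *done = 1;
    }

    int main(void)
    {
        int done = 0;
        AioContext *ctx = aio_context_new();           /* refcount 1, held via the GSource */
        QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, &done); /* bh->ctx now points at ctx */

        qemu_bh_schedule(bh);   /* smp_wmb(), set bh->scheduled, then aio_notify(ctx) */
        aio_bh_poll(ctx);       /* runs my_bh_cb; returns 1 since the BH was not idle */

        qemu_bh_delete(bh);     /* deferred: freed at the end of a later aio_bh_poll() */
        aio_context_unref(ctx); /* drops the GSource reference */
        return done ? 0 : 1;
    }

Instead of calling aio_bh_poll() directly, the context can be attached to a
GLib main loop with g_source_attach(aio_get_g_source(ctx), NULL), in which
case the aio_ctx_prepare/aio_ctx_check/aio_ctx_dispatch callbacks above
drive it.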