X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=async.c;h=90fe906539f70815a5cfbaa95b42fab9f11ce228;hb=8593e050871c632e245190725b11f1e10c629ff2;hp=4ffdd986f19e6c7bb3d0b18c486e52838cce7114;hpb=e3713e001fb7d4d82f6de82800c1463e758e4289;p=qemu.git

diff --git a/async.c b/async.c
index 4ffdd986f..90fe90653 100644
--- a/async.c
+++ b/async.c
@@ -23,13 +23,15 @@
  */
 
 #include "qemu-common.h"
-#include "qemu-aio.h"
-#include "main-loop.h"
+#include "block/aio.h"
+#include "block/thread-pool.h"
+#include "qemu/main-loop.h"
 
 /***********************************************************/
 /* bottom halves (can be seen as timers which expire ASAP) */
 
 struct QEMUBH {
+    AioContext *ctx;
     QEMUBHFunc *cb;
     void *opaque;
     QEMUBH *next;
@@ -42,6 +44,7 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
 {
     QEMUBH *bh;
     bh = g_malloc0(sizeof(QEMUBH));
+    bh->ctx = ctx;
     bh->cb = cb;
     bh->opaque = opaque;
     bh->next = ctx->first_bh;
@@ -101,8 +104,7 @@ void qemu_bh_schedule(QEMUBH *bh)
         return;
     bh->scheduled = 1;
     bh->idle = 0;
-    /* stop the currently executing CPU to execute the BH ASAP */
-    qemu_notify_event();
+    aio_notify(bh->ctx);
 }
 
 void qemu_bh_cancel(QEMUBH *bh)
@@ -116,8 +118,10 @@ void qemu_bh_delete(QEMUBH *bh)
     bh->deleted = 1;
 }
 
-void aio_bh_update_timeout(AioContext *ctx, uint32_t *timeout)
+static gboolean
+aio_ctx_prepare(GSource *source, gint *timeout)
 {
+    AioContext *ctx = (AioContext *) source;
     QEMUBH *bh;
 
     for (bh = ctx->first_bh; bh; bh = bh->next) {
@@ -125,28 +129,15 @@ void aio_bh_update_timeout(AioContext *ctx, uint32_t *timeout)
             if (bh->idle) {
                 /* idle bottom halves will be polled at least
                  * every 10ms */
-                *timeout = MIN(10, *timeout);
+                *timeout = 10;
             } else {
                 /* non-idle bottom halves will be executed
                  * immediately */
                 *timeout = 0;
-                break;
+                return true;
             }
         }
     }
-}
-
-static gboolean
-aio_ctx_prepare(GSource *source, gint *timeout)
-{
-    AioContext *ctx = (AioContext *) source;
-    uint32_t wait = -1;
-    aio_bh_update_timeout(ctx, &wait);
-
-    if (wait != -1) {
-        *timeout = MIN(*timeout, wait);
-        return wait == 0;
-    }
 
     return false;
 }
@@ -177,11 +168,22 @@ aio_ctx_dispatch(GSource *source,
     return true;
 }
 
+static void
+aio_ctx_finalize(GSource *source)
+{
+    AioContext *ctx = (AioContext *) source;
+
+    thread_pool_free(ctx->thread_pool);
+    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
+    event_notifier_cleanup(&ctx->notifier);
+    g_array_free(ctx->pollfds, TRUE);
+}
+
 static GSourceFuncs aio_source_funcs = {
     aio_ctx_prepare,
     aio_ctx_check,
     aio_ctx_dispatch,
-    NULL
+    aio_ctx_finalize
 };
 
 GSource *aio_get_g_source(AioContext *ctx)
@@ -190,9 +192,31 @@ GSource *aio_get_g_source(AioContext *ctx)
     return &ctx->source;
 }
 
+ThreadPool *aio_get_thread_pool(AioContext *ctx)
+{
+    if (!ctx->thread_pool) {
+        ctx->thread_pool = thread_pool_new(ctx);
+    }
+    return ctx->thread_pool;
+}
+
+void aio_notify(AioContext *ctx)
+{
+    event_notifier_set(&ctx->notifier);
+}
+
 AioContext *aio_context_new(void)
 {
-    return (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
+    AioContext *ctx;
+    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
+    ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
+    ctx->thread_pool = NULL;
+    event_notifier_init(&ctx->notifier, false);
+    aio_set_event_notifier(ctx, &ctx->notifier,
+                           (EventNotifierHandler *)
+                           event_notifier_test_and_clear, NULL);
+
+    return ctx;
 }
 
 void aio_context_ref(AioContext *ctx)
@@ -204,8 +228,3 @@ void aio_context_unref(AioContext *ctx)
 {
     g_source_unref(&ctx->source);
 }
-
-void aio_flush(AioContext *ctx)
-{
-    while (aio_poll(ctx, true));
-}
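
The patch above makes AioContext a self-contained event source: each QEMUBH now records its owning context, qemu_bh_schedule() wakes that context through aio_notify() instead of the global qemu_notify_event(), the GSource gains a finalize callback that tears down the thread pool and event notifier, and the blocking aio_flush() helper is removed. Below is a minimal sketch of how a caller might exercise the post-patch API; it is illustrative only, assumes the new "block/aio.h" header plus glib, and the bh_cb/done names are invented for the example, not part of the patch.

#include <glib.h>
#include "qemu-common.h"
#include "block/aio.h"

/* Example bottom-half callback (hypothetical name, for this sketch only):
 * flips a flag so the loop below can exit. */
static void bh_cb(void *opaque)
{
    bool *done = opaque;
    *done = true;
}

int main(void)
{
    bool done = false;

    /* aio_context_new() now also sets up ctx->pollfds and the event
     * notifier that aio_notify() kicks. */
    AioContext *ctx = aio_context_new();

    /* The context is a GSource, so it can be attached to a glib loop. */
    GSource *src = aio_get_g_source(ctx);
    g_source_attach(src, NULL);
    g_source_unref(src);

    /* Scheduling the BH calls aio_notify(bh->ctx), which sets the
     * notifier and makes aio_ctx_prepare() request a zero timeout. */
    QEMUBH *bh = aio_bh_new(ctx, bh_cb, &done);
    qemu_bh_schedule(bh);

    while (!done) {
        g_main_context_iteration(NULL, true);
    }

    qemu_bh_delete(bh);
    aio_context_unref(ctx);   /* drops the GSource ref; aio_ctx_finalize() runs */
    return 0;
}

Routing the wakeup through the per-context notifier is what lets each AioContext be polled independently of the main loop, which is also why the thread pool can become a lazily created per-context member (aio_get_thread_pool) rather than a global.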
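Note that the removed aio_flush() gets no replacement in this file; call sites outside this patch (an assumption here, not shown in the diff) are presumably expected to inline its one-line body:

    /* Drain ctx: keep dispatching as long as aio_poll() reports progress.
     * This is verbatim what the deleted aio_flush() did. */
    while (aio_poll(ctx, true)) {
        /* nothing */
    }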