X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=aio-posix.c;h=b68eccd40ccc6d4c2af25349f61694f3cddb4d16;hb=c49fdf137f0ff74804e421f157297ab0ded5d012;hp=88d09e1cfb5b631a114c74f899fb8d767efaa37b;hpb=27dd7730582be85c7d4f680f5f71146629809c86;p=qemu.git

diff --git a/aio-posix.c b/aio-posix.c
index 88d09e1cf..b68eccd40 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -25,6 +25,7 @@ struct AioHandler
     IOHandler *io_write;
     AioFlushHandler *io_flush;
     int deleted;
+    int pollfds_idx;
     void *opaque;
     QLIST_ENTRY(AioHandler) node;
 };
@@ -85,9 +86,10 @@ void aio_set_fd_handler(AioContext *ctx,
         node->io_write = io_write;
         node->io_flush = io_flush;
         node->opaque = opaque;
+        node->pollfds_idx = -1;
 
-        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP : 0);
-        node->pfd.events |= (io_write ? G_IO_OUT : 0);
+        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
+        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
     }
 
     aio_notify(ctx);
@@ -110,13 +112,6 @@ bool aio_pending(AioContext *ctx)
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
         int revents;
 
-        /*
-         * FIXME: right now we cannot get G_IO_HUP and G_IO_ERR because
-         * main-loop.c is still select based (due to the slirp legacy).
-         * If main-loop.c ever switches to poll, G_IO_ERR should be
-         * tested too. Dispatching G_IO_ERR to both handlers should be
-         * okay, since handlers need to be ready for spurious wakeups.
-         */
         revents = node->pfd.revents & node->pfd.events;
         if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
             return true;
@@ -129,30 +124,12 @@ bool aio_pending(AioContext *ctx)
     return false;
 }
 
-bool aio_poll(AioContext *ctx, bool blocking)
+static bool aio_dispatch(AioContext *ctx)
 {
-    static struct timeval tv0;
     AioHandler *node;
-    fd_set rdfds, wrfds;
-    int max_fd = -1;
-    int ret;
-    bool busy, progress;
-
-    progress = false;
+    bool progress = false;
 
     /*
-     * If there are callbacks left that have been queued, we need to call then.
-     * Do not call select in this case, because it is possible that the caller
-     * does not need a complete flush (as is the case for qemu_aio_wait loops).
-     */
-    if (aio_bh_poll(ctx)) {
-        blocking = false;
-        progress = true;
-    }
-
-    /*
-     * Then dispatch any pending callbacks from the GSource.
-     *
      * We have to walk very carefully in case qemu_aio_set_fd_handler is
      * called while we're walking.
      */
@@ -166,12 +143,15 @@ bool aio_poll(AioContext *ctx, bool blocking)
         revents = node->pfd.revents & node->pfd.events;
         node->pfd.revents = 0;
 
-        /* See comment in aio_pending. */
-        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
+        if (!node->deleted &&
+            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
+            node->io_read) {
             node->io_read(node->opaque);
             progress = true;
         }
-        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
+        if (!node->deleted &&
+            (revents & (G_IO_OUT | G_IO_ERR)) &&
+            node->io_write) {
             node->io_write(node->opaque);
             progress = true;
         }
@@ -186,6 +166,30 @@ bool aio_poll(AioContext *ctx, bool blocking)
             g_free(tmp);
         }
     }
+    return progress;
+}
+
+bool aio_poll(AioContext *ctx, bool blocking)
+{
+    AioHandler *node;
+    int ret;
+    bool busy, progress;
+
+    progress = false;
+
+    /*
+     * If there are callbacks left that have been queued, we need to call them.
+     * Do not call select in this case, because it is possible that the caller
+     * does not need a complete flush (as is the case for qemu_aio_wait loops).
+     */
+    if (aio_bh_poll(ctx)) {
+        blocking = false;
+        progress = true;
+    }
+
+    if (aio_dispatch(ctx)) {
+        progress = true;
+    }
 
     if (progress && !blocking) {
         return true;
@@ -193,12 +197,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
     ctx->walking_handlers++;
 
-    FD_ZERO(&rdfds);
-    FD_ZERO(&wrfds);
+    g_array_set_size(ctx->pollfds, 0);
 
-    /* fill fd sets */
+    /* fill pollfds */
     busy = false;
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+        node->pollfds_idx = -1;
+
         /* If there aren't pending AIO operations, don't invoke callbacks.
          * Otherwise, if there are no AIO requests, qemu_aio_wait() would
          * wait indefinitely.
@@ -209,13 +214,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
             }
             busy = true;
         }
-        if (!node->deleted && node->io_read) {
-            FD_SET(node->pfd.fd, &rdfds);
-            max_fd = MAX(max_fd, node->pfd.fd + 1);
-        }
-        if (!node->deleted && node->io_write) {
-            FD_SET(node->pfd.fd, &wrfds);
-            max_fd = MAX(max_fd, node->pfd.fd + 1);
+        if (!node->deleted && node->pfd.events) {
+            GPollFD pfd = {
+                .fd = node->pfd.fd,
+                .events = node->pfd.events,
+            };
+            node->pollfds_idx = ctx->pollfds->len;
+            g_array_append_val(ctx->pollfds, pfd);
         }
     }
 
@@ -227,42 +232,24 @@ bool aio_poll(AioContext *ctx, bool blocking)
     }
 
     /* wait until next event */
-    ret = select(max_fd, &rdfds, &wrfds, NULL, blocking ? NULL : &tv0);
+    ret = g_poll((GPollFD *)ctx->pollfds->data,
+                 ctx->pollfds->len,
+                 blocking ? -1 : 0);
 
     /* if we have any readable fds, dispatch event */
     if (ret > 0) {
-        /* we have to walk very carefully in case
-         * qemu_aio_set_fd_handler is called while we're walking */
-        node = QLIST_FIRST(&ctx->aio_handlers);
-        while (node) {
-            AioHandler *tmp;
-
-            ctx->walking_handlers++;
-
-            if (!node->deleted &&
-                FD_ISSET(node->pfd.fd, &rdfds) &&
-                node->io_read) {
-                node->io_read(node->opaque);
-                progress = true;
-            }
-            if (!node->deleted &&
-                FD_ISSET(node->pfd.fd, &wrfds) &&
-                node->io_write) {
-                node->io_write(node->opaque);
-                progress = true;
-            }
-
-            tmp = node;
-            node = QLIST_NEXT(node, node);
-
-            ctx->walking_handlers--;
-
-            if (!ctx->walking_handlers && tmp->deleted) {
-                QLIST_REMOVE(tmp, node);
-                g_free(tmp);
+        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+            if (node->pollfds_idx != -1) {
+                GPollFD *pfd = &g_array_index(ctx->pollfds, GPollFD,
+                                              node->pollfds_idx);
+                node->pfd.revents = pfd->revents;
             }
         }
+        if (aio_dispatch(ctx)) {
+            progress = true;
+        }
     }
 
-    return progress;
+    assert(progress || busy);
+    return true;
 }
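
Editor's note on the pattern this patch adopts: aio_poll() stops building select(2) fd_sets and instead appends one GPollFD per handler to the growable ctx->pollfds array, records each handler's slot in node->pollfds_idx, hands the whole array to g_poll(3), and copies revents back out by index before dispatching. The sketch below is a minimal, standalone illustration of that GArray-of-GPollFD round trip, assuming only a plain GLib 2.x environment; it is not QEMU code, and the pipe and single watched fd are hypothetical stand-ins for ctx->aio_handlers.

/* Build: cc demo.c $(pkg-config --cflags --libs glib-2.0) */
#include <glib.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fds[2];
    if (pipe(fds) != 0) {
        return 1;
    }

    /* Analogous to ctx->pollfds: cleared and refilled before each poll. */
    GArray *pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
    g_array_set_size(pollfds, 0);

    GPollFD pfd = {
        .fd = fds[0],
        /* Same read-side mask the patch installs in aio_set_fd_handler(). */
        .events = G_IO_IN | G_IO_HUP | G_IO_ERR,
    };
    guint idx = pollfds->len;          /* plays the role of node->pollfds_idx */
    g_array_append_val(pollfds, pfd);

    /* Make the read end ready so a blocking g_poll() returns at once. */
    if (write(fds[1], "x", 1) != 1) {
        return 1;
    }

    /* Timeout -1 blocks, 0 polls once; mirrors `blocking ? -1 : 0` above. */
    int ret = g_poll((GPollFD *)pollfds->data, pollfds->len, -1);
    if (ret > 0) {
        /* Read revents back out of the array by saved index, then dispatch. */
        GPollFD *p = &g_array_index(pollfds, GPollFD, idx);
        if (p->revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) {
            char buf[1];
            if (read(fds[0], buf, sizeof(buf)) == 1) {
                printf("fd %d is readable\n", p->fd);
            }
        }
    }

    g_array_free(pollfds, TRUE);
    close(fds[0]);
    close(fds[1]);
    return 0;
}

The saved integer index matters because the array is rebuilt on every iteration, so a handler cannot hold a pointer into it across g_poll(); the patch stores the slot number in node->pollfds_idx and resolves it afterwards with g_array_index(), which is also why each handler resets the field to -1 before the fill phase.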