aio: add I/O handlers to the AioContext interface
[qemu.git] / aio.c
/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block.h"
#include "qemu-queue.h"
#include "qemu_socket.h"

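/* One registered file descriptor together with its callbacks.  Handlers are
 * kept on the AioContext's aio_handlers list; "deleted" marks a node that was
 * unregistered while the list was being walked and must be freed later. */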
struct AioHandler
{
    int fd;
    IOHandler *io_read;
    IOHandler *io_write;
    AioFlushHandler *io_flush;
    int deleted;
    void *opaque;
    QLIST_ENTRY(AioHandler) node;
};

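/* Return the handler registered for @fd in @ctx, or NULL if there is none
 * (nodes already marked deleted are skipped). */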
static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->fd == fd)
            if (!node->deleted)
                return node;
    }

    return NULL;
}

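/* Register, update, or remove the handlers for @fd in @ctx.  Passing NULL for
 * both io_read and io_write unregisters the fd; if the handler list is
 * currently being walked, the node is only marked deleted and freed later. */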
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioFlushHandler *io_flush,
                        void *opaque)
{
    AioHandler *node;

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers)
                node->deleted = 1;
            else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_malloc0(sizeof(AioHandler));
            node->fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_flush = io_flush;
        node->opaque = opaque;
    }
}
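
/*
 * Illustrative sketch (not part of the original file): one way a caller might
 * register and later remove an fd handler pair with the interface above.  The
 * "MyConn" type, the callback names, and the attach/detach helpers are
 * hypothetical.
 */
#if 0
typedef struct MyConn {
    int fd;
} MyConn;

static void my_conn_read(void *opaque)
{
    MyConn *conn = opaque;
    /* read from conn->fd and queue the resulting work */
    (void)conn;
}

static int my_conn_flush(void *opaque)
{
    /* return non-zero while requests are outstanding so aio_wait()
     * keeps polling this fd */
    (void)opaque;
    return 0;
}

static void my_conn_attach(AioContext *ctx, MyConn *conn)
{
    aio_set_fd_handler(ctx, conn->fd, my_conn_read, NULL,
                       my_conn_flush, conn);
}

static void my_conn_detach(AioContext *ctx, MyConn *conn)
{
    /* NULL read and write handlers unregister the fd */
    aio_set_fd_handler(ctx, conn->fd, NULL, NULL, NULL, NULL);
}
#endif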

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read,
                            AioFlushEventNotifierHandler *io_flush)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
                       (IOHandler *)io_read, NULL,
                       (AioFlushHandler *)io_flush, notifier);
}

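/* Dispatch one round of AIO work on @ctx: run any pending bottom halves
 * first; otherwise select() on the registered fds and invoke their read and
 * write callbacks.  Returns false when no io_flush callback reports
 * outstanding requests (there is nothing to wait for), true otherwise. */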
bool aio_wait(AioContext *ctx)
{
    AioHandler *node;
    fd_set rdfds, wrfds;
    int max_fd = -1;
    int ret;
    bool busy;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for qemu_aio_wait loops).
     */
    if (aio_bh_poll(ctx)) {
        return true;
    }

    ctx->walking_handlers++;

    FD_ZERO(&rdfds);
    FD_ZERO(&wrfds);

    /* fill fd sets */
    busy = false;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        /* If there aren't pending AIO operations, don't invoke callbacks.
         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
         * wait indefinitely.
         */
        if (node->io_flush) {
            if (node->io_flush(node->opaque) == 0) {
                continue;
            }
            busy = true;
        }
        if (!node->deleted && node->io_read) {
            FD_SET(node->fd, &rdfds);
            max_fd = MAX(max_fd, node->fd + 1);
        }
        if (!node->deleted && node->io_write) {
            FD_SET(node->fd, &wrfds);
            max_fd = MAX(max_fd, node->fd + 1);
        }
    }

    ctx->walking_handlers--;

    /* No AIO operations?  Get us out of here */
    if (!busy) {
        return false;
    }

    /* wait until next event */
    ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        /* we have to walk very carefully in case
         * qemu_aio_set_fd_handler is called while we're walking */
        node = QLIST_FIRST(&ctx->aio_handlers);
        while (node) {
            AioHandler *tmp;

            ctx->walking_handlers++;

            if (!node->deleted &&
                FD_ISSET(node->fd, &rdfds) &&
                node->io_read) {
                node->io_read(node->opaque);
            }
            if (!node->deleted &&
                FD_ISSET(node->fd, &wrfds) &&
                node->io_write) {
                node->io_write(node->opaque);
            }

            tmp = node;
            node = QLIST_NEXT(node, node);

            ctx->walking_handlers--;

            if (!ctx->walking_handlers && tmp->deleted) {
                QLIST_REMOVE(tmp, node);
                g_free(tmp);
            }
        }
    }

    return true;
}
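
/*
 * Illustrative sketch (not part of the original file): the "flush" pattern
 * referred to in the comments above.  A caller that needs every outstanding
 * request to complete keeps calling aio_wait() until it reports that no
 * handler has pending work.  The function name is hypothetical.
 */
#if 0
static void example_drain_all(AioContext *ctx)
{
    /* aio_wait() returns false once no io_flush callback reports
     * outstanding requests */
    while (aio_wait(ctx)) {
        /* keep dispatching completions */
    }
}
#endif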