]> git.proxmox.com Git - qemu.git/blame - aio.c
aio: add non-blocking variant of aio_wait
[qemu.git] / aio.c
CommitLineData
a76bab49
AL
1/*
2 * QEMU aio implementation
3 *
4 * Copyright IBM, Corp. 2008
5 *
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
6b620ca3
PB
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
a76bab49
AL
14 */
15
16#include "qemu-common.h"
17#include "block.h"
72cf2d4f 18#include "qemu-queue.h"
a76bab49
AL
19#include "qemu_socket.h"
20
/*
 * One registered file descriptor and its callbacks.
 *
 * Instances live on AioContext::aio_handlers.  A handler is never removed
 * from the list while somebody is iterating it (walking_handlers != 0);
 * instead it is flagged with `deleted` and reaped later by the walker.
 */
struct AioHandler
{
    int fd;                      /* the descriptor being watched */
    IOHandler *io_read;          /* called when fd is readable (may be NULL) */
    IOHandler *io_write;         /* called when fd is writable (may be NULL) */
    AioFlushHandler *io_flush;   /* returns non-zero if requests are pending;
                                  * NULL/0 means "do not wait on this fd" */
    int deleted;                 /* deferred-removal flag, see above */
    void *opaque;                /* user data passed to the callbacks */
    QLIST_ENTRY(AioHandler) node; /* linkage in ctx->aio_handlers */
};
a915f4bc 32static AioHandler *find_aio_handler(AioContext *ctx, int fd)
a76bab49
AL
33{
34 AioHandler *node;
35
a915f4bc 36 QLIST_FOREACH(node, &ctx->aio_handlers, node) {
a76bab49 37 if (node->fd == fd)
79d5ca56
AG
38 if (!node->deleted)
39 return node;
a76bab49
AL
40 }
41
42 return NULL;
43}
44
a915f4bc
PB
45void aio_set_fd_handler(AioContext *ctx,
46 int fd,
47 IOHandler *io_read,
48 IOHandler *io_write,
49 AioFlushHandler *io_flush,
50 void *opaque)
a76bab49
AL
51{
52 AioHandler *node;
53
a915f4bc 54 node = find_aio_handler(ctx, fd);
a76bab49
AL
55
56 /* Are we deleting the fd handler? */
57 if (!io_read && !io_write) {
58 if (node) {
59 /* If the lock is held, just mark the node as deleted */
a915f4bc 60 if (ctx->walking_handlers)
a76bab49
AL
61 node->deleted = 1;
62 else {
63 /* Otherwise, delete it for real. We can't just mark it as
64 * deleted because deleted nodes are only cleaned up after
65 * releasing the walking_handlers lock.
66 */
72cf2d4f 67 QLIST_REMOVE(node, node);
7267c094 68 g_free(node);
a76bab49
AL
69 }
70 }
71 } else {
72 if (node == NULL) {
73 /* Alloc and insert if it's not already there */
7267c094 74 node = g_malloc0(sizeof(AioHandler));
a76bab49 75 node->fd = fd;
a915f4bc 76 QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
a76bab49
AL
77 }
78 /* Update handler with latest information */
79 node->io_read = io_read;
80 node->io_write = io_write;
81 node->io_flush = io_flush;
82 node->opaque = opaque;
83 }
9958c351
PB
84}
85
a915f4bc
PB
86void aio_set_event_notifier(AioContext *ctx,
87 EventNotifier *notifier,
88 EventNotifierHandler *io_read,
89 AioFlushEventNotifierHandler *io_flush)
a76bab49 90{
a915f4bc
PB
91 aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
92 (IOHandler *)io_read, NULL,
93 (AioFlushHandler *)io_flush, notifier);
a76bab49
AL
94}
95
/*
 * Run one iteration of the AIO event loop on @ctx.
 *
 * First dispatches any pending bottom halves, then select()s on every
 * registered fd whose io_flush callback reports outstanding requests, and
 * invokes the read/write callbacks of the ready descriptors.
 *
 * @blocking: if true, select() waits indefinitely for the next event;
 *            if false, select() only polls (zero timeout via tv0).
 *
 * Returns true if any progress was made (a bottom half or at least one
 * fd callback ran), false otherwise.
 */
bool aio_poll(AioContext *ctx, bool blocking)
{
    static struct timeval tv0;  /* zero-initialized: the "just poll" timeout */
    AioHandler *node;
    fd_set rdfds, wrfds;
    int max_fd = -1;
    int ret;
    bool busy, progress;

    progress = false;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for qemu_aio_wait loops).
     */
    if (aio_bh_poll(ctx)) {
        /* A bottom half ran: do not block below, and report progress. */
        blocking = false;
        progress = true;
    }

    /* Non-blocking callers are satisfied as soon as anything happened. */
    if (progress && !blocking) {
        return true;
    }

    /* Take the walking_handlers "lock" so list nodes are not freed
     * while we iterate; deletions are deferred instead. */
    ctx->walking_handlers++;

    FD_ZERO(&rdfds);
    FD_ZERO(&wrfds);

    /* fill fd sets */
    busy = false;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        /* If there aren't pending AIO operations, don't invoke callbacks.
         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
         * wait indefinitely.
         */
        if (!node->deleted && node->io_flush) {
            if (node->io_flush(node->opaque) == 0) {
                /* No requests in flight behind this fd: skip it. */
                continue;
            }
            busy = true;
        }
        if (!node->deleted && node->io_read) {
            FD_SET(node->fd, &rdfds);
            /* NOTE(review): select() expects nfds = highest fd + 1, which is
             * what is accumulated here despite the variable's name. */
            max_fd = MAX(max_fd, node->fd + 1);
        }
        if (!node->deleted && node->io_write) {
            FD_SET(node->fd, &wrfds);
            max_fd = MAX(max_fd, node->fd + 1);
        }
    }

    ctx->walking_handlers--;

    /* No AIO operations?  Get us out of here */
    if (!busy) {
        return progress;
    }

    /* wait until next event; a NULL timeout blocks, &tv0 (all zero) polls */
    ret = select(max_fd, &rdfds, &wrfds, NULL, blocking ? NULL : &tv0);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        /* we have to walk very carefully in case
         * qemu_aio_set_fd_handler is called while we're walking */
        node = QLIST_FIRST(&ctx->aio_handlers);
        while (node) {
            AioHandler *tmp;

            /* Re-take the walk "lock" per node: callbacks below may
             * re-enter aio_set_fd_handler() and mark nodes deleted. */
            ctx->walking_handlers++;

            if (!node->deleted &&
                FD_ISSET(node->fd, &rdfds) &&
                node->io_read) {
                progress = true;
                node->io_read(node->opaque);
            }
            if (!node->deleted &&
                FD_ISSET(node->fd, &wrfds) &&
                node->io_write) {
                progress = true;
                node->io_write(node->opaque);
            }

            /* Advance before any potential free of the current node. */
            tmp = node;
            node = QLIST_NEXT(node, node);

            ctx->walking_handlers--;

            /* Reap nodes flagged for deletion once nobody else is walking. */
            if (!ctx->walking_handlers && tmp->deleted) {
                QLIST_REMOVE(tmp, node);
                g_free(tmp);
            }
        }
    }

    return progress;
}