/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block.h"
#include "qemu-queue.h"
#include "qemu_socket.h"

typedef struct AioHandler AioHandler;

/* The list of registered AIO handlers */
static QLIST_HEAD(, AioHandler) aio_handlers;

/* This is a simple lock used to protect the aio_handlers list.  Specifically,
 * it's used to ensure that no callbacks are removed while we're walking and
 * dispatching callbacks.
 */
static int walking_handlers;

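/* Each registered fd is described by one AioHandler.  When io_flush is set,
 * qemu_aio_wait() polls it to find out whether the handler still has requests
 * in flight; a return value of 0 means "idle" and the fd is then left out of
 * the select() sets.
 */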
struct AioHandler
{
    int fd;
    IOHandler *io_read;
    IOHandler *io_write;
    AioFlushHandler *io_flush;
    int deleted;
    void *opaque;
    QLIST_ENTRY(AioHandler) node;
};

static AioHandler *find_aio_handler(int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &aio_handlers, node) {
        if (node->fd == fd)
            if (!node->deleted)
                return node;
    }

    return NULL;
}

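/* Register, update or remove the AIO handler for a file descriptor.  Passing
 * NULL for both io_read and io_write removes the handler; if that happens
 * while the handler list is being walked, the node is only marked as deleted
 * and freed later.  A sketch of typical use (the names are hypothetical):
 *
 *     qemu_aio_set_fd_handler(s->fd, my_read_cb, NULL, my_flush_cb, s);
 *     ...
 *     qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
 */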
int qemu_aio_set_fd_handler(int fd,
                            IOHandler *io_read,
                            IOHandler *io_write,
                            AioFlushHandler *io_flush,
                            void *opaque)
{
    AioHandler *node;

    node = find_aio_handler(fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (walking_handlers)
                node->deleted = 1;
            else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_malloc0(sizeof(AioHandler));
            node->fd = fd;
            QLIST_INSERT_HEAD(&aio_handlers, node, node);
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_flush = io_flush;
        node->opaque = opaque;
    }

    qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);

    return 0;
}

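/* Wait for all outstanding AIO requests to complete: any queued bottom halves
 * are run, and qemu_aio_wait() is called until it reports that no request is
 * in flight.
 */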
void qemu_aio_flush(void)
{
    while (qemu_aio_wait());
}

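/* Run one iteration of the AIO event loop: process queued bottom halves, or
 * wait for and dispatch the callbacks of handlers with requests in flight.
 * Returns true while AIO is still in progress (progress was made or requests
 * remain outstanding), false once no request is outstanding.
 */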
bool qemu_aio_wait(void)
{
    int ret;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for qemu_aio_wait loops).
     */
    if (qemu_bh_poll()) {
        return true;
    }

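    /*
     * Poll each handler's io_flush callback to build the fd sets.  If no
     * handler reports requests in flight there is nothing to wait for;
     * otherwise select() on the fds and dispatch the ready callbacks.
     */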
    do {
        AioHandler *node;
        fd_set rdfds, wrfds;
        bool busy;
        int max_fd = -1;

        walking_handlers = 1;

        FD_ZERO(&rdfds);
        FD_ZERO(&wrfds);

        /* fill fd sets */
        busy = false;
        QLIST_FOREACH(node, &aio_handlers, node) {
            /* If this handler has no pending AIO operations, skip it;
             * otherwise qemu_aio_wait() could end up waiting indefinitely
             * on fds that have no requests in flight.
             */
            if (node->io_flush) {
                if (node->io_flush(node->opaque) == 0) {
                    continue;
                }
                busy = true;
            }
            if (!node->deleted && node->io_read) {
                FD_SET(node->fd, &rdfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
            if (!node->deleted && node->io_write) {
                FD_SET(node->fd, &wrfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
        }

        walking_handlers = 0;

        /* No AIO operations?  Get us out of here */
        if (!busy) {
            return false;
        }

        /* wait until next event */
        ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);
        if (ret == -1 && errno == EINTR)
            continue;

        /* if any fds are ready, dispatch their callbacks */
        if (ret > 0) {
            walking_handlers = 1;

            /* we have to walk very carefully in case
             * qemu_aio_set_fd_handler is called while we're walking */
            node = QLIST_FIRST(&aio_handlers);
            while (node) {
                AioHandler *tmp;

                if (!node->deleted &&
                    FD_ISSET(node->fd, &rdfds) &&
                    node->io_read) {
                    node->io_read(node->opaque);
                }
                if (!node->deleted &&
                    FD_ISSET(node->fd, &wrfds) &&
                    node->io_write) {
                    node->io_write(node->opaque);
                }

                tmp = node;
                node = QLIST_NEXT(node, node);

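                /* Reap nodes that were removed via qemu_aio_set_fd_handler()
                 * while we were walking the list. */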
                if (tmp->deleted) {
                    QLIST_REMOVE(tmp, node);
                    g_free(tmp);
                }
            }

            walking_handlers = 0;
        }
    } while (ret == 0);

    return true;
}