/* Provenance: qemu.git aio.c, git-blame view (via git.proxmox.com),
 * at commit "aio: introduce AioContext, move bottom halves there". */
a76bab49
AL
1/*
2 * QEMU aio implementation
3 *
4 * Copyright IBM, Corp. 2008
5 *
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
6b620ca3
PB
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
a76bab49
AL
14 */
15
16#include "qemu-common.h"
17#include "block.h"
72cf2d4f 18#include "qemu-queue.h"
a76bab49
AL
19#include "qemu_socket.h"
20
/* The list of registered AIO handlers */
static QLIST_HEAD(, AioHandler) aio_handlers;

/* This is a simple lock used to protect the aio_handlers list. Specifically,
 * it's used to ensure that no callbacks are removed while we're walking and
 * dispatching callbacks.
 *
 * Note: despite the word "lock" this is a plain recursion counter, not a
 * mutex — it is incremented/decremented around each walk of aio_handlers,
 * and deletion is deferred (node->deleted = 1) while it is non-zero.
 */
static int walking_handlers;

/* One registered fd handler: callbacks plus bookkeeping for deferred
 * removal while the handler list is being walked.
 */
struct AioHandler
{
    int fd;                          /* file descriptor being watched */
    IOHandler *io_read;              /* called when fd is readable (may be NULL) */
    IOHandler *io_write;             /* called when fd is writable (may be NULL) */
    AioFlushHandler *io_flush;       /* returns non-zero if requests are pending on fd */
    int deleted;                     /* marked for removal; freed once no walk is active */
    void *opaque;                    /* caller's cookie, passed to every callback */
    QLIST_ENTRY(AioHandler) node;    /* linkage in aio_handlers */
};
40
41static AioHandler *find_aio_handler(int fd)
42{
43 AioHandler *node;
44
72cf2d4f 45 QLIST_FOREACH(node, &aio_handlers, node) {
a76bab49 46 if (node->fd == fd)
79d5ca56
AG
47 if (!node->deleted)
48 return node;
a76bab49
AL
49 }
50
51 return NULL;
52}
53
b078dc3c
PB
54void qemu_aio_set_fd_handler(int fd,
55 IOHandler *io_read,
56 IOHandler *io_write,
57 AioFlushHandler *io_flush,
58 void *opaque)
a76bab49
AL
59{
60 AioHandler *node;
61
62 node = find_aio_handler(fd);
63
64 /* Are we deleting the fd handler? */
65 if (!io_read && !io_write) {
66 if (node) {
67 /* If the lock is held, just mark the node as deleted */
68 if (walking_handlers)
69 node->deleted = 1;
70 else {
71 /* Otherwise, delete it for real. We can't just mark it as
72 * deleted because deleted nodes are only cleaned up after
73 * releasing the walking_handlers lock.
74 */
72cf2d4f 75 QLIST_REMOVE(node, node);
7267c094 76 g_free(node);
a76bab49
AL
77 }
78 }
79 } else {
80 if (node == NULL) {
81 /* Alloc and insert if it's not already there */
7267c094 82 node = g_malloc0(sizeof(AioHandler));
a76bab49 83 node->fd = fd;
72cf2d4f 84 QLIST_INSERT_HEAD(&aio_handlers, node, node);
a76bab49
AL
85 }
86 /* Update handler with latest information */
87 node->io_read = io_read;
88 node->io_write = io_write;
89 node->io_flush = io_flush;
90 node->opaque = opaque;
91 }
92
93 qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);
a76bab49
AL
94}
95
9958c351
PB
96void qemu_aio_set_event_notifier(EventNotifier *notifier,
97 EventNotifierHandler *io_read,
98 AioFlushEventNotifierHandler *io_flush)
99{
100 qemu_aio_set_fd_handler(event_notifier_get_fd(notifier),
101 (IOHandler *)io_read, NULL,
102 (AioFlushHandler *)io_flush, notifier);
103}
104
/**
 * qemu_aio_flush: wait until no AIO requests are in flight.
 *
 * Repeatedly dispatches events via qemu_aio_wait() until it reports
 * that there was nothing left to wait for.
 */
void qemu_aio_flush(void)
{
    bool progress;

    do {
        progress = qemu_aio_wait();
    } while (progress);
}
109
/**
 * qemu_aio_wait: dispatch pending bottom halves, or block on select()
 * until an AIO event fires and dispatch its callbacks.
 *
 * Returns true if any progress was made (a bottom half ran, or at least
 * one handler reported in-flight requests via io_flush), false if there
 * was nothing to wait for.  qemu_aio_flush() loops on this return value.
 */
bool qemu_aio_wait(void)
{
    AioHandler *node;
    fd_set rdfds, wrfds;
    int max_fd = -1;
    int ret;
    bool busy;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for qemu_aio_wait loops).
     */
    if (qemu_bh_poll()) {
        return true;
    }

    /* Protect the list while we build the fd sets: concurrent removals
     * will only mark nodes deleted while walking_handlers is non-zero. */
    walking_handlers++;

    FD_ZERO(&rdfds);
    FD_ZERO(&wrfds);

    /* fill fd sets */
    busy = false;
    QLIST_FOREACH(node, &aio_handlers, node) {
        /* If there aren't pending AIO operations, don't invoke callbacks.
         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
         * wait indefinitely.
         */
        if (node->io_flush) {
            if (node->io_flush(node->opaque) == 0) {
                continue;
            }
            busy = true;
        }
        if (!node->deleted && node->io_read) {
            FD_SET(node->fd, &rdfds);
            /* select() wants highest fd + 1 */
            max_fd = MAX(max_fd, node->fd + 1);
        }
        if (!node->deleted && node->io_write) {
            FD_SET(node->fd, &wrfds);
            max_fd = MAX(max_fd, node->fd + 1);
        }
    }

    walking_handlers--;

    /* No AIO operations? Get us out of here */
    if (!busy) {
        return false;
    }

    /* wait until next event */
    ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        /* we have to walk very carefully in case
         * qemu_aio_set_fd_handler is called while we're walking */
        node = QLIST_FIRST(&aio_handlers);
        while (node) {
            AioHandler *tmp;

            /* Hold the "lock" only around this node's dispatch, so a
             * removal from inside a callback defers to node->deleted. */
            walking_handlers++;

            if (!node->deleted &&
                FD_ISSET(node->fd, &rdfds) &&
                node->io_read) {
                node->io_read(node->opaque);
            }
            if (!node->deleted &&
                FD_ISSET(node->fd, &wrfds) &&
                node->io_write) {
                node->io_write(node->opaque);
            }

            /* Advance before any free: tmp may be unlinked below. */
            tmp = node;
            node = QLIST_NEXT(node, node);

            walking_handlers--;

            /* Reap a node marked deleted during dispatch, but only if no
             * outer walk (recursive qemu_aio_wait) still references it. */
            if (!walking_handlers && tmp->deleted) {
                QLIST_REMOVE(tmp, node);
                g_free(tmp);
            }
        }
    }

    return true;
}