[qemu.git] / aio.c (as of commit "aio: remove process_queue callback and qemu_aio_process_queue")
/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block.h"
#include "qemu-queue.h"
#include "qemu_socket.h"

typedef struct AioHandler AioHandler;

/* The list of registered AIO handlers */
static QLIST_HEAD(, AioHandler) aio_handlers;

/* This is a simple lock used to protect the aio_handlers list.  Specifically,
 * it's used to ensure that no callbacks are removed while we're walking and
 * dispatching callbacks.
 */
static int walking_handlers;

struct AioHandler
{
    int fd;
    IOHandler *io_read;
    IOHandler *io_write;
    AioFlushHandler *io_flush;
    int deleted;
    void *opaque;
    QLIST_ENTRY(AioHandler) node;
};

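/* Note on the fields above: io_flush is polled to ask whether the handler
 * still has AIO requests in flight (see qemu_aio_wait below), and deleted
 * marks a node that was unregistered while walking_handlers was held, so it
 * is only freed once the walk over aio_handlers finishes.
 */
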
static AioHandler *find_aio_handler(int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &aio_handlers, node) {
        if (node->fd == fd)
            if (!node->deleted)
                return node;
    }

    return NULL;
}

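/* Because find_aio_handler() skips nodes marked deleted, an fd that was
 * unregistered during a walk can be re-registered right away: the caller gets
 * a fresh node while the stale, deleted one waits to be freed by the walker.
 */
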
int qemu_aio_set_fd_handler(int fd,
                            IOHandler *io_read,
                            IOHandler *io_write,
                            AioFlushHandler *io_flush,
                            void *opaque)
{
    AioHandler *node;

    node = find_aio_handler(fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (walking_handlers)
                node->deleted = 1;
            else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_malloc0(sizeof(AioHandler));
            node->fd = fd;
            QLIST_INSERT_HEAD(&aio_handlers, node, node);
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_flush = io_flush;
        node->opaque = opaque;
    }

    qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);

    return 0;
}

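/* Usage sketch (illustrative only; the "my_driver_*" callbacks and the s->fd
 * field are hypothetical, not part of this file).  A driver registers its
 * event fd once and unregisters it by passing NULL callbacks:
 *
 *     qemu_aio_set_fd_handler(s->fd, my_driver_read_cb, NULL,
 *                             my_driver_flush_cb, s);
 *     ...
 *     qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
 */
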
void qemu_aio_flush(void)
{
    AioHandler *node;
    int ret;

    do {
        ret = 0;

        /*
         * If there are pending emulated aio requests, start them now so
         * flush will be able to return 1.
         */
        qemu_aio_wait();

        QLIST_FOREACH(node, &aio_handlers, node) {
            if (node->io_flush) {
                ret |= node->io_flush(node->opaque);
            }
        }
    } while (qemu_bh_poll() || ret > 0);
}

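/* Note: the loop above treats a nonzero io_flush() return as "requests still
 * in flight", so qemu_aio_flush() only returns once every registered handler
 * reports idle and no bottom halves remain scheduled.
 */
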
void qemu_aio_wait(void)
{
    int ret;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Return afterwards to avoid waiting needlessly in select().
     */
    if (qemu_bh_poll()) {
        return;
    }

    do {
        AioHandler *node;
        fd_set rdfds, wrfds;
        int max_fd = -1;

        walking_handlers = 1;

        FD_ZERO(&rdfds);
        FD_ZERO(&wrfds);

        /* fill fd sets */
        QLIST_FOREACH(node, &aio_handlers, node) {
            /* If there aren't pending AIO operations, don't invoke callbacks.
             * Otherwise, if there are no AIO requests, qemu_aio_wait() would
             * wait indefinitely.
             */
            if (node->io_flush && node->io_flush(node->opaque) == 0)
                continue;

            if (!node->deleted && node->io_read) {
                FD_SET(node->fd, &rdfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
            if (!node->deleted && node->io_write) {
                FD_SET(node->fd, &wrfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
        }

        walking_handlers = 0;

        /* No AIO operations? Get us out of here */
        if (max_fd == -1)
            break;

        /* wait until next event */
        ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);
        if (ret == -1 && errno == EINTR)
            continue;

        /* if we have any readable fds, dispatch event */
        if (ret > 0) {
            walking_handlers = 1;

            /* we have to walk very carefully in case
             * qemu_aio_set_fd_handler is called while we're walking */
            node = QLIST_FIRST(&aio_handlers);
            while (node) {
                AioHandler *tmp;

                if (!node->deleted &&
                    FD_ISSET(node->fd, &rdfds) &&
                    node->io_read) {
                    node->io_read(node->opaque);
                }
                if (!node->deleted &&
                    FD_ISSET(node->fd, &wrfds) &&
                    node->io_write) {
                    node->io_write(node->opaque);
                }

                tmp = node;
                node = QLIST_NEXT(node, node);

                if (tmp->deleted) {
                    QLIST_REMOVE(tmp, node);
                    g_free(tmp);
                }
            }

            walking_handlers = 0;
        }
    } while (ret == 0);
}
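
/* Usage sketch (illustrative only; my_cb and the done flag are hypothetical,
 * and bs, sector_num, qiov, nb_sectors are assumed to be set up by the
 * caller).  Code that must block until an AIO request completes typically
 * spins on qemu_aio_wait():
 *
 *     static void my_cb(void *opaque, int ret)
 *     {
 *         *(int *)opaque = 1;
 *     }
 *
 *     int done = 0;
 *     bdrv_aio_readv(bs, sector_num, &qiov, nb_sectors, my_cb, &done);
 *     while (!done) {
 *         qemu_aio_wait();
 *     }
 */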