/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"

struct AioHandler
{
    GPollFD pfd;
    IOHandler *io_read;
    IOHandler *io_write;
    int deleted;                    /* pending removal, see aio_dispatch() */
    int pollfds_idx;                /* index into ctx->pollfds, or -1 */
    void *opaque;                   /* passed to io_read/io_write */
    QLIST_ENTRY(AioHandler) node;
};

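/*
 * Deletion protocol: handlers are never freed while the handler list is
 * being walked.  ctx->walking_handlers acts as a reference count;
 * aio_set_fd_handler() only marks a node as deleted while it is non-zero,
 * and aio_dispatch() frees marked nodes once the count drops back to zero.
 */
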
static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            return node;
        }
    }

    return NULL;
}

void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque)
{
    AioHandler *node;

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->opaque = opaque;
        node->pollfds_idx = -1;

        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
    }

    aio_notify(ctx);
}

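/*
 * Usage sketch (illustrative only, not part of the original file):
 * register a read handler for a socket, then unregister it by passing
 * NULL for both callbacks.  "ctx", "sockfd" and "on_readable" are
 * hypothetical names.
 *
 *     static void on_readable(void *opaque)
 *     {
 *         int fd = *(int *)opaque;
 *         ... read from fd and schedule follow-up work ...
 *     }
 *
 *     aio_set_fd_handler(ctx, sockfd, on_readable, NULL, &sockfd);
 *     ...
 *     aio_set_fd_handler(ctx, sockfd, NULL, NULL, NULL);
 */
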
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
                       (IOHandler *)io_read, NULL, notifier);
}

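/*
 * Usage sketch (illustrative only): an EventNotifier wraps an eventfd or
 * pipe, so it is registered like any other descriptor.  "ctx", "notifier"
 * and "on_notify" are hypothetical names.
 *
 *     static void on_notify(EventNotifier *e)
 *     {
 *         event_notifier_test_and_clear(e);
 *         ... handle the wakeup ...
 *     }
 *
 *     aio_set_event_notifier(ctx, &notifier, on_notify);
 */
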
bool aio_prepare(AioContext *ctx)
{
    /* No pre-poll work is needed on POSIX */
    return false;
}

bool aio_pending(AioContext *ctx)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
            return true;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
            return true;
        }
    }

    return false;
}

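/*
 * Usage sketch (illustrative only): aio_pending() reports whether the last
 * poll left events that aio_dispatch() would consume, so a glib-style
 * check/dispatch split can decide whether dispatching is needed at all.
 * "ctx" is a hypothetical variable.
 *
 *     if (aio_pending(ctx)) {
 *         aio_dispatch(ctx);
 *     }
 */
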
bool aio_dispatch(AioContext *ctx)
{
    AioHandler *node;
    bool progress = false;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for aio_poll loops).
     */
    if (aio_bh_poll(ctx)) {
        progress = true;
    }

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;
        int revents;

        ctx->walking_handlers++;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;

        if (!node->deleted &&
            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
            node->io_read) {
            node->io_read(node->opaque);

            /* aio_notify() does not count as progress */
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }
        if (!node->deleted &&
            (revents & (G_IO_OUT | G_IO_ERR)) &&
            node->io_write) {
            node->io_write(node->opaque);
            progress = true;
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    /* Run our timers */
    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}

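/*
 * Note (summary, not from the original comments): besides the call from
 * aio_poll() below, aio_dispatch() is also expected to run on behalf of
 * the AioContext's GSource when the context is driven by a glib main loop
 * (see g_source_add_poll() above), so it must tolerate being entered with
 * no events pending.
 */
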
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    bool was_dispatching;
    int ret;
    bool progress;

    was_dispatching = ctx->dispatching;
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns.
     *
     * If we're in a nested event loop, ctx->dispatching might be true.
     * In that case we can restore it just before returning, but we
     * have to clear it now.
     */
    aio_set_dispatching(ctx, !blocking);

    ctx->walking_handlers++;

    g_array_set_size(ctx->pollfds, 0);

    /* fill pollfds */
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        node->pollfds_idx = -1;
        if (!node->deleted && node->pfd.events) {
            GPollFD pfd = {
                .fd = node->pfd.fd,
                .events = node->pfd.events,
            };
            node->pollfds_idx = ctx->pollfds->len;
            g_array_append_val(ctx->pollfds, pfd);
        }
    }

    ctx->walking_handlers--;

    /* wait until next event */
    ret = qemu_poll_ns((GPollFD *)ctx->pollfds->data,
                       ctx->pollfds->len,
                       blocking ? aio_compute_timeout(ctx) : 0);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
            if (node->pollfds_idx != -1) {
                GPollFD *pfd = &g_array_index(ctx->pollfds, GPollFD,
                                              node->pollfds_idx);
                node->pfd.revents = pfd->revents;
            }
        }
    }

    /* Run dispatch even if there were no readable fds to run timers */
    aio_set_dispatching(ctx, true);
    if (aio_dispatch(ctx)) {
        progress = true;
    }

    aio_set_dispatching(ctx, was_dispatching);
    return progress;
}
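
/*
 * Usage sketch (illustrative only): a dedicated thread can drive the
 * context with a blocking loop, while a caller that only wants to flush
 * pending work can spin on the non-blocking form until no progress is
 * reported.  "ctx" and "running" are hypothetical names.
 *
 *     while (running) {
 *         aio_poll(ctx, true);        // sleep until some event fires
 *     }
 *
 *     while (aio_poll(ctx, false)) {
 *         // keep dispatching while progress is made
 *     }
 */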