/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu-common.h"
#include "qemu-aio.h"
#include "qemu-queue.h"
#include "qemu_socket.h"
25 EventNotifierHandler
*io_notify
;
26 AioFlushEventNotifierHandler
*io_flush
;
29 QLIST_ENTRY(AioHandler
) node
;
32 void aio_set_event_notifier(AioContext
*ctx
,
34 EventNotifierHandler
*io_notify
,
35 AioFlushEventNotifierHandler
*io_flush
)
39 QLIST_FOREACH(node
, &ctx
->aio_handlers
, node
) {
40 if (node
->e
== e
&& !node
->deleted
) {
45 /* Are we deleting the fd handler? */
48 g_source_remove_poll(&ctx
->source
, &node
->pfd
);
50 /* If the lock is held, just mark the node as deleted */
51 if (ctx
->walking_handlers
) {
53 node
->pfd
.revents
= 0;
55 /* Otherwise, delete it for real. We can't just mark it as
56 * deleted because deleted nodes are only cleaned up after
57 * releasing the walking_handlers lock.
59 QLIST_REMOVE(node
, node
);
65 /* Alloc and insert if it's not already there */
66 node
= g_malloc0(sizeof(AioHandler
));
68 node
->pfd
.fd
= (uintptr_t)event_notifier_get_handle(e
);
69 node
->pfd
.events
= G_IO_IN
;
70 QLIST_INSERT_HEAD(&ctx
->aio_handlers
, node
, node
);
72 g_source_add_poll(&ctx
->source
, &node
->pfd
);
74 /* Update handler with latest information */
75 node
->io_notify
= io_notify
;
76 node
->io_flush
= io_flush
;
80 bool aio_pending(AioContext
*ctx
)
84 QLIST_FOREACH(node
, &ctx
->aio_handlers
, node
) {
85 if (node
->pfd
.revents
&& node
->io_notify
) {
93 bool aio_poll(AioContext
*ctx
, bool blocking
)
96 HANDLE events
[MAXIMUM_WAIT_OBJECTS
+ 1];
103 * If there are callbacks left that have been queued, we need to call then.
104 * Do not call select in this case, because it is possible that the caller
105 * does not need a complete flush (as is the case for qemu_aio_wait loops).
107 if (aio_bh_poll(ctx
)) {
113 * Then dispatch any pending callbacks from the GSource.
115 * We have to walk very carefully in case qemu_aio_set_fd_handler is
116 * called while we're walking.
118 node
= QLIST_FIRST(&ctx
->aio_handlers
);
122 ctx
->walking_handlers
++;
124 if (node
->pfd
.revents
&& node
->io_notify
) {
125 node
->pfd
.revents
= 0;
126 node
->io_notify(node
->e
);
131 node
= QLIST_NEXT(node
, node
);
133 ctx
->walking_handlers
--;
135 if (!ctx
->walking_handlers
&& tmp
->deleted
) {
136 QLIST_REMOVE(tmp
, node
);
141 if (progress
&& !blocking
) {
145 ctx
->walking_handlers
++;
150 QLIST_FOREACH(node
, &ctx
->aio_handlers
, node
) {
151 /* If there aren't pending AIO operations, don't invoke callbacks.
152 * Otherwise, if there are no AIO requests, qemu_aio_wait() would
155 if (!node
->deleted
&& node
->io_flush
) {
156 if (node
->io_flush(node
->e
) == 0) {
161 if (!node
->deleted
&& node
->io_notify
) {
162 events
[count
++] = event_notifier_get_handle(node
->e
);
166 ctx
->walking_handlers
--;
168 /* No AIO operations? Get us out of here */
173 /* wait until next event */
175 int timeout
= blocking
? INFINITE
: 0;
176 int ret
= WaitForMultipleObjects(count
, events
, FALSE
, timeout
);
178 /* if we have any signaled events, dispatch event */
179 if ((DWORD
) (ret
- WAIT_OBJECT_0
) >= count
) {
185 /* we have to walk very carefully in case
186 * qemu_aio_set_fd_handler is called while we're walking */
187 node
= QLIST_FIRST(&ctx
->aio_handlers
);
191 ctx
->walking_handlers
++;
193 if (!node
->deleted
&&
194 event_notifier_get_handle(node
->e
) == events
[ret
- WAIT_OBJECT_0
] &&
196 node
->io_notify(node
->e
);
201 node
= QLIST_NEXT(node
, node
);
203 ctx
->walking_handlers
--;
205 if (!ctx
->walking_handlers
&& tmp
->deleted
) {
206 QLIST_REMOVE(tmp
, node
);