]> git.proxmox.com Git - mirror_qemu.git/blob - aio-win32.c
aio: make AioContexts GSources
[mirror_qemu.git] / aio-win32.c
1 /*
2 * QEMU aio implementation
3 *
4 * Copyright IBM Corp., 2008
5 * Copyright Red Hat Inc., 2012
6 *
7 * Authors:
8 * Anthony Liguori <aliguori@us.ibm.com>
9 * Paolo Bonzini <pbonzini@redhat.com>
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2. See
12 * the COPYING file in the top-level directory.
13 *
14 * Contributions after 2012-01-13 are licensed under the terms of the
15 * GNU GPL, version 2 or (at your option) any later version.
16 */
17
18 #include "qemu-common.h"
19 #include "block.h"
20 #include "qemu-queue.h"
21 #include "qemu_socket.h"
22
/* One registered event notifier, kept on AioContext.aio_handlers. */
struct AioHandler {
    EventNotifier *e;                        /* the notifier being watched */
    EventNotifierHandler *io_notify;         /* invoked when e is signaled; NULL means "remove" */
    AioFlushEventNotifierHandler *io_flush;  /* nonzero return means AIO is outstanding on e */
    GPollFD pfd;                             /* registered with the AioContext GSource for glib polling */
    int deleted;                             /* set instead of freeing while the list is being walked */
    QLIST_ENTRY(AioHandler) node;            /* linkage in ctx->aio_handlers */
};
31
32 void aio_set_event_notifier(AioContext *ctx,
33 EventNotifier *e,
34 EventNotifierHandler *io_notify,
35 AioFlushEventNotifierHandler *io_flush)
36 {
37 AioHandler *node;
38
39 QLIST_FOREACH(node, &ctx->aio_handlers, node) {
40 if (node->e == e && !node->deleted) {
41 break;
42 }
43 }
44
45 /* Are we deleting the fd handler? */
46 if (!io_notify) {
47 if (node) {
48 g_source_remove_poll(&ctx->source, &node->pfd);
49
50 /* If the lock is held, just mark the node as deleted */
51 if (ctx->walking_handlers) {
52 node->deleted = 1;
53 node->pfd.revents = 0;
54 } else {
55 /* Otherwise, delete it for real. We can't just mark it as
56 * deleted because deleted nodes are only cleaned up after
57 * releasing the walking_handlers lock.
58 */
59 QLIST_REMOVE(node, node);
60 g_free(node);
61 }
62 }
63 } else {
64 if (node == NULL) {
65 /* Alloc and insert if it's not already there */
66 node = g_malloc0(sizeof(AioHandler));
67 node->e = e;
68 node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
69 node->pfd.events = G_IO_IN;
70 QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
71
72 g_source_add_poll(&ctx->source, &node->pfd);
73 }
74 /* Update handler with latest information */
75 node->io_notify = io_notify;
76 node->io_flush = io_flush;
77 }
78 }
79
80 bool aio_pending(AioContext *ctx)
81 {
82 AioHandler *node;
83
84 QLIST_FOREACH(node, &ctx->aio_handlers, node) {
85 if (node->pfd.revents && node->io_notify) {
86 return true;
87 }
88 }
89
90 return false;
91 }
92
/* Run one iteration of the event loop for @ctx.
 *
 * Dispatches queued bottom halves and any event-notifier callbacks whose
 * events are already pending; if @blocking is true and no progress has been
 * made yet, waits with WaitForMultipleObjects until a registered notifier
 * handle is signaled.
 *
 * Returns true if at least one callback ran (progress was made).
 */
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool busy, progress;
    int count;

    progress = false;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for qemu_aio_wait loops).
     */
    if (aio_bh_poll(ctx)) {
        blocking = false;
        progress = true;
    }

    /*
     * Then dispatch any pending callbacks from the GSource.
     *
     * We have to walk very carefully in case qemu_aio_set_fd_handler is
     * called while we're walking.  walking_handlers is incremented around
     * each visit so concurrent removals only mark nodes deleted instead of
     * freeing them under us.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;

        ctx->walking_handlers++;

        if (node->pfd.revents && node->io_notify) {
            /* Clear revents before the callback so an event raised during
             * the callback is not accidentally swallowed. */
            node->pfd.revents = 0;
            node->io_notify(node->e);
            progress = true;
        }

        /* Advance first: the current node may be freed just below. */
        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        /* Reap nodes that were marked deleted while the list was walked;
         * only safe once no walker (including nested ones) remains. */
        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    if (progress && !blocking) {
        return true;
    }

    ctx->walking_handlers++;

    /* fill fd sets */
    busy = false;
    count = 0;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        /* If there aren't pending AIO operations, don't invoke callbacks.
         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
         * wait indefinitely.
         */
        if (!node->deleted && node->io_flush) {
            if (node->io_flush(node->e) == 0) {
                continue;
            }
            busy = true;
        }
        if (!node->deleted && node->io_notify) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    ctx->walking_handlers--;

    /* No AIO operations? Get us out of here */
    if (!busy) {
        return progress;
    }

    /* wait until next event */
    for (;;) {
        int timeout = blocking ? INFINITE : 0;
        int ret = WaitForMultipleObjects(count, events, FALSE, timeout);

        /* if we have any signaled events, dispatch event */
        /* NOTE(review): count is signed here, so it is promoted to DWORD in
         * this comparison; that is benign for count >= 0 but worth an
         * explicit (DWORD) cast — confirm before changing. */
        if ((DWORD) (ret - WAIT_OBJECT_0) >= count) {
            /* Timeout, abandoned wait, or WAIT_FAILED: nothing (more) to
             * dispatch, stop polling. */
            break;
        }

        /* A handle was signaled; subsequent iterations must not block. */
        blocking = false;

        /* we have to walk very carefully in case
         * qemu_aio_set_fd_handler is called while we're walking */
        node = QLIST_FIRST(&ctx->aio_handlers);
        while (node) {
            AioHandler *tmp;

            ctx->walking_handlers++;

            /* Match the signaled handle back to its handler by comparing
             * the notifier's HANDLE against the waited-on slot. */
            if (!node->deleted &&
                event_notifier_get_handle(node->e) == events[ret - WAIT_OBJECT_0] &&
                node->io_notify) {
                node->io_notify(node->e);
                progress = true;
            }

            /* Advance first: the current node may be freed just below. */
            tmp = node;
            node = QLIST_NEXT(node, node);

            ctx->walking_handlers--;

            /* Reap deferred-deleted nodes once no walker remains. */
            if (!ctx->walking_handlers && tmp->deleted) {
                QLIST_REMOVE(tmp, node);
                g_free(tmp);
            }
        }
    }

    return progress;
}