/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
#include "qapi/error.h"

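/*
 * One AioHandler exists per registered SOCKET or EventNotifier.  Socket
 * handlers use io_read/io_write and are bound to ctx->notifier's HANDLE
 * via WSAEventSelect(); event-notifier handlers use io_notify and wait on
 * the notifier's own HANDLE.  "deleted" defers removal while the handler
 * list is being walked (see aio_dispatch_handlers).
 */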
struct AioHandler {
    EventNotifier *e;
    IOHandler *io_read;
    IOHandler *io_write;
    EventNotifierHandler *io_notify;
    GPollFD pfd;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    /* fd is a SOCKET in our case */
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        HANDLE event;

        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
        }

        /* Update the handler with the latest information before computing
         * pfd.events, so that a freshly allocated node (whose callbacks
         * are still NULL at this point) gets the correct event mask.
         */
        node->opaque = opaque;
        node->io_read = io_read;
        node->io_write = io_write;
        node->is_external = is_external;

        node->pfd.events = 0;
        if (node->io_read) {
            node->pfd.events |= G_IO_IN;
        }
        if (node->io_write) {
            node->pfd.events |= G_IO_OUT;
        }

        node->e = &ctx->notifier;
        event = event_notifier_get_handle(&ctx->notifier);
        WSAEventSelect(node->pfd.fd, event,
                       FD_READ | FD_ACCEPT | FD_CLOSE |
                       FD_CONNECT | FD_WRITE | FD_OOB);
    }

    aio_notify(ctx);
}
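
/*
 * Usage sketch (hypothetical caller, illustration only -- the "my_*"
 * names are not part of QEMU): passing non-NULL callbacks registers or
 * updates a handler for a SOCKET; passing NULL for both io_read and
 * io_write removes it again.
 */
#if 0
static void my_read_cb(void *opaque)
{
    /* runs in the AioContext thread when the socket is readable */
}

static void my_example(AioContext *ctx, int sockfd, void *opaque)
{
    /* register a read handler */
    aio_set_fd_handler(ctx, sockfd, false, my_read_cb, NULL, NULL, opaque);

    /* later: remove it */
    aio_set_fd_handler(ctx, sockfd, false, NULL, NULL, NULL, NULL);
}
#endif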

void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    /* Not implemented */
}

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            bool is_external,
                            EventNotifierHandler *io_notify,
                            AioPollFn *io_poll)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the notifier handler? */
    if (!io_notify) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->e = e;
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            node->is_external = is_external;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
    }

    aio_notify(ctx);
}
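
/*
 * Sketch (hypothetical, illustration only): a callback attached with
 * aio_set_event_notifier() runs in the AioContext thread after another
 * thread calls event_notifier_set(); the callback normally clears the
 * notifier itself.
 */
#if 0
static void my_notify_cb(EventNotifier *e)
{
    event_notifier_test_and_clear(e);
    /* ... react to the wakeup ... */
}

static void my_attach(AioContext *ctx, EventNotifier *e)
{
    aio_set_event_notifier(ctx, e, false, my_notify_cb, NULL);
}
#endif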

void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    /* Not implemented */
}

bool aio_prepare(AioContext *ctx)
{
    /* tv0 is static, hence zero-filled: the select() below polls the
     * sockets and returns immediately rather than blocking.
     */
    static struct timeval tv0;
    AioHandler *node;
    bool have_select_revents = false;
    fd_set rfds, wfds;

    /* fill fd sets */
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->io_read) {
            FD_SET((SOCKET)node->pfd.fd, &rfds);
        }
        if (node->io_write) {
            FD_SET((SOCKET)node->pfd.fd, &wfds);
        }
    }

    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
            node->pfd.revents = 0;
            if (FD_ISSET(node->pfd.fd, &rfds)) {
                node->pfd.revents |= G_IO_IN;
                have_select_revents = true;
            }

            if (FD_ISSET(node->pfd.fd, &wfds)) {
                node->pfd.revents |= G_IO_OUT;
                have_select_revents = true;
            }
        }
    }

    return have_select_revents;
}

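/*
 * Called from the AioContext's GSource check callback (aio_ctx_check() in
 * async.c) to decide whether aio_dispatch() has work to do: any handler
 * with pending revents and a matching callback counts.
 */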
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.revents && node->io_notify) {
            return true;
        }

        if ((node->pfd.revents & G_IO_IN) && node->io_read) {
            return true;
        }
        if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
            return true;
        }
    }

    return false;
}

static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;
        int revents = node->pfd.revents;

        ctx->walking_handlers++;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    return progress;
}
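
/*
 * The walking_handlers counter is what makes the deferred-deletion scheme
 * above safe: while it is non-zero, aio_set_fd_handler() and
 * aio_set_event_notifier() only set node->deleted instead of freeing the
 * node, so the QLIST_NEXT() in the loop never follows a dangling pointer;
 * the marked nodes are freed once the counter drops back to zero.
 */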

bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
{
    bool progress;

    progress = aio_bh_poll(ctx);
    if (dispatch_fds) {
        progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    }
    progress |= timerlistgroup_run_timers(&ctx->tlg);
    return progress;
}

bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool progress, have_select_revents, first;
    int count;
    int timeout;

    aio_context_acquire(ctx);
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    have_select_revents = aio_prepare(ctx);

    ctx->walking_handlers++;

    /* fill fd sets */
    count = 0;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify
            && aio_node_check(ctx, node->is_external)) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    ctx->walking_handlers--;
    first = true;

    /* ctx->notifier is always registered.  */
    assert(count > 0);

    /* Multiple iterations, all of them non-blocking except the first,
     * may be necessary to process all pending events.  After the first
     * WaitForMultipleObjects call ctx->notify_me will be decremented.
     */
    do {
        HANDLE event;
        int ret;

        timeout = blocking && !have_select_revents
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        if (timeout) {
            aio_context_release(ctx);
        }
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        if (blocking) {
            assert(first);
            atomic_sub(&ctx->notify_me, 2);
        }
        if (timeout) {
            aio_context_acquire(ctx);
        }

        if (first) {
            aio_notify_accept(ctx);
            progress |= aio_bh_poll(ctx);
            first = false;
        }

        /* if we have any signaled events, dispatch event */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            event = events[ret - WAIT_OBJECT_0];
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            break;
        }

        have_select_revents = false;
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    } while (count > 0);

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    aio_context_release(ctx);
    return progress;
}
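
/*
 * Sketch (hypothetical driver, illustration only): callers typically spin
 * on aio_poll() until some completion flag flipped by a callback becomes
 * true, e.g. while waiting for in-flight I/O to drain.
 */
#if 0
static void my_wait_for_done(AioContext *ctx, bool *done)
{
    while (!*done) {
        aio_poll(ctx, true);    /* block until at least one event fires */
    }
}
#endif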

void aio_context_setup(AioContext *ctx)
{
}

void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    error_setg(errp, "AioContext polling is not implemented on Windows");
}