/* util/aio-win32.c */

/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/main-loop.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
#include "qapi/error.h"
#include "qemu/rcu_queue.h"

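/*
 * An AioHandler covers either a socket registered with
 * aio_set_fd_handler() (io_read/io_write, driven by WSAEventSelect on
 * ctx->notifier's event) or an EventNotifier registered with
 * aio_set_event_notifier() (io_notify, waited on directly).
 */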
struct AioHandler {
    EventNotifier *e;
    IOHandler *io_read;
    IOHandler *io_write;
    EventNotifierHandler *io_notify;
    GPollFD pfd;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

static void aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
{
    /*
     * If the GSource is in the process of being destroyed then
     * g_source_remove_poll() causes an assertion failure.  Skip
     * removal in that case, because glib cleans up its state during
     * destruction anyway.
     */
    if (!g_source_is_destroyed(&ctx->source)) {
        g_source_remove_poll(&ctx->source, &node->pfd);
    }

    /* If aio_poll is in progress, just mark the node as deleted */
    if (qemu_lockcnt_count(&ctx->list_lock)) {
        node->deleted = 1;
        node->pfd.revents = 0;
    } else {
        /* Otherwise, delete it for real.  We can't just mark it as
         * deleted because deleted nodes are only cleaned up after
         * releasing the list_lock.
         */
        QLIST_REMOVE(node, node);
        g_free(node);
    }
}

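/*
 * Add, update or remove the handlers for a socket.  Passing NULL for
 * both io_read and io_write removes any existing handler.
 */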
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    /* fd is a SOCKET in our case */
    AioHandler *old_node;
    AioHandler *node = NULL;

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(old_node, &ctx->aio_handlers, node) {
        if (old_node->pfd.fd == fd && !old_node->deleted) {
            break;
        }
    }

    if (io_read || io_write) {
        HANDLE event;
        long bitmask = 0;

        /* Alloc and insert if it's not already there */
        node = g_new0(AioHandler, 1);
        node->pfd.fd = fd;

        node->pfd.events = 0;
        if (node->io_read) {
            node->pfd.events |= G_IO_IN;
        }
        if (node->io_write) {
            node->pfd.events |= G_IO_OUT;
        }

        node->e = &ctx->notifier;

        /* Update handler with latest information */
        node->opaque = opaque;
        node->io_read = io_read;
        node->io_write = io_write;
        node->is_external = is_external;

        if (io_read) {
            bitmask |= FD_READ | FD_ACCEPT | FD_CLOSE;
        }

        if (io_write) {
            bitmask |= FD_WRITE | FD_CONNECT;
        }

        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
        event = event_notifier_get_handle(&ctx->notifier);
        WSAEventSelect(node->pfd.fd, event, bitmask);
    }
    if (old_node) {
        aio_remove_fd_handler(ctx, old_node);
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    /* Not implemented */
}

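/*
 * Add, update or remove the callback for an EventNotifier.  The
 * notifier's HANDLE is also registered with the GSource so the glib
 * main loop can poll it.
 */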
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            bool is_external,
                            EventNotifierHandler *io_notify,
                            AioPollFn *io_poll)
{
    AioHandler *node;

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_notify) {
        if (node) {
            aio_remove_fd_handler(ctx, node);
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->e = e;
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            node->is_external = is_external;
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    /* Not implemented */
}

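/*
 * Poll all registered sockets with a zero-timeout select() and stash
 * the results in pfd.revents.  Returns true if any socket is ready,
 * so that the caller knows not to block.
 */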
bool aio_prepare(AioContext *ctx)
{
    static struct timeval tv0;
    AioHandler *node;
    bool have_select_revents = false;
    fd_set rfds, wfds;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    /* fill fd sets */
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->io_read) {
            FD_SET ((SOCKET)node->pfd.fd, &rfds);
        }
        if (node->io_write) {
            FD_SET ((SOCKET)node->pfd.fd, &wfds);
        }
    }

    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
            node->pfd.revents = 0;
            if (FD_ISSET(node->pfd.fd, &rfds)) {
                node->pfd.revents |= G_IO_IN;
                have_select_revents = true;
            }

            if (FD_ISSET(node->pfd.fd, &wfds)) {
                node->pfd.revents |= G_IO_OUT;
                have_select_revents = true;
            }
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return have_select_revents;
}

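/* Return true if any registered handler has an event to dispatch.  */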
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->pfd.revents && node->io_notify) {
            result = true;
            break;
        }

        if ((node->pfd.revents & G_IO_IN) && node->io_read) {
            result = true;
            break;
        }
        if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
            result = true;
            break;
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return result;
}

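/*
 * Invoke the callbacks of handlers whose HANDLE was signalled or whose
 * revents were set by aio_prepare(), and reap nodes that were marked
 * as deleted during the walk.
 */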
static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;
    AioHandler *tmp;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        int revents = node->pfd.revents;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        if (node->deleted) {
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    return progress;
}

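/* Run bottom halves, any ready handlers and expired timers, without blocking.  */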
void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    qemu_lockcnt_dec(&ctx->list_lock);
    timerlistgroup_run_timers(&ctx->tlg);
}

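/*
 * Wait for events and dispatch them.  With blocking == true, only the
 * first WaitForMultipleObjects call uses the timeout computed from the
 * timer lists; the loop then keeps iterating without blocking until no
 * signalled handle remains.  Returns true if any callback or timer
 * made progress.
 */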
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool progress, have_select_revents, first;
    int count;
    int timeout;

    /*
     * There cannot be two concurrent aio_poll calls for the same AioContext (or
     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
     *
     * aio_poll() may only be called in the AioContext's thread. iohandler_ctx
     * is special in that it runs in the main thread, but that thread's context
     * is qemu_aio_context.
     */
    assert(in_aio_context_home_thread(ctx == iohandler_get_aio_context() ?
                                      qemu_get_aio_context() : ctx));
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) + 2);
        /*
         * Write ctx->notify_me before computing the timeout
         * (reading bottom half flags, etc.).  Pairs with
         * smp_mb in aio_notify().
         */
        smp_mb();
    }

    qemu_lockcnt_inc(&ctx->list_lock);
    have_select_revents = aio_prepare(ctx);

    /* fill fd sets */
    count = 0;
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify
            && aio_node_check(ctx, node->is_external)) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    first = true;

    /* ctx->notifier is always registered.  */
    assert(count > 0);

    /* Multiple iterations, all of them non-blocking except the first,
     * may be necessary to process all pending events.  After the first
     * WaitForMultipleObjects call ctx->notify_me will be decremented.
     */
    do {
        HANDLE event;
        int ret;

        timeout = blocking && !have_select_revents
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        if (blocking) {
            assert(first);
            qatomic_store_release(&ctx->notify_me,
                                  qatomic_read(&ctx->notify_me) - 2);
            aio_notify_accept(ctx);
        }

        if (first) {
            progress |= aio_bh_poll(ctx);
            first = false;
        }

        /* if we have any signaled events, dispatch event */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            event = events[ret - WAIT_OBJECT_0];
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            break;
        }

        have_select_revents = false;
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    } while (count > 0);

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);
    return progress;
}

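/*
 * The remaining AioContext hooks are stubs on Windows; in particular,
 * adaptive polling is not implemented (see aio_context_set_poll_params).
 */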
void aio_context_setup(AioContext *ctx)
{
}

void aio_context_destroy(AioContext *ctx)
{
}

void aio_context_use_g_source(AioContext *ctx)
{
}

void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    if (max_ns) {
        error_setg(errp, "AioContext polling is not implemented on Windows");
    }
}

void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                Error **errp)
{
}