/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
18 | #include "qemu-common.h" | |
19 | #include "block.h" | |
72cf2d4f | 20 | #include "qemu-queue.h" |
a76bab49 AL |
21 | #include "qemu_socket.h" |
22 | ||
/*
 * Per-notifier bookkeeping for an AioContext on Windows.
 *
 * One AioHandler is kept on ctx->aio_handlers for every registered
 * EventNotifier.  Nodes are never removed while the list is being
 * walked; instead they are flagged via 'deleted' and reaped once
 * walking_handlers drops to zero (see aio_set_event_notifier/aio_poll).
 */
struct AioHandler {
    EventNotifier *e;                       /* the notifier being watched */
    EventNotifierHandler *io_notify;        /* callback invoked when e fires; NULL means unregistered */
    AioFlushEventNotifierHandler *io_flush; /* nonzero return = requests in flight, keep polling */
    GPollFD pfd;                            /* GSource poll descriptor; fd holds e's Win32 HANDLE */
    int deleted;                            /* deferred-removal flag, set while list is being walked */
    QLIST_ENTRY(AioHandler) node;           /* linkage in ctx->aio_handlers */
};
31 | ||
/*
 * Register, update, or unregister an event notifier on @ctx.
 *
 * @io_notify != NULL: (re)register @e, creating a node if needed and
 *                     refreshing its callbacks.
 * @io_notify == NULL: unregister @e.  If a list walk is in progress
 *                     (ctx->walking_handlers != 0) the node is only
 *                     marked deleted and reaped later by the walker;
 *                     otherwise it is unlinked and freed immediately.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            EventNotifierHandler *io_notify,
                            AioFlushEventNotifierHandler *io_flush)
{
    AioHandler *node;

    /* Find an existing, live node for this notifier (deleted nodes are
     * skipped so a re-registration during a walk creates a fresh node). */
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_notify) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0; /* don't let a walker dispatch it */
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_malloc0(sizeof(AioHandler));
            node->e = e;
            /* Expose the Win32 HANDLE through the GPollFD so the GSource
             * machinery can poll it. */
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
        node->io_flush = io_flush;
    }
}
75 | ||
cd9ba1eb PB |
76 | bool aio_pending(AioContext *ctx) |
77 | { | |
78 | AioHandler *node; | |
79 | ||
80 | QLIST_FOREACH(node, &ctx->aio_handlers, node) { | |
f42b2207 | 81 | if (node->pfd.revents && node->io_notify) { |
cd9ba1eb PB |
82 | return true; |
83 | } | |
84 | } | |
85 | ||
86 | return false; | |
87 | } | |
88 | ||
/*
 * Run one iteration of the AIO event loop for @ctx.
 *
 * Order of operations:
 *   1. Run any queued bottom halves.
 *   2. Dispatch handlers whose GPollFD already has revents (GSource path).
 *   3. Poll io_flush callbacks to see whether any AIO is outstanding;
 *      if none, return without waiting.
 *   4. WaitForMultipleObjects on the notifier handles, dispatching each
 *      signaled handler, until no more events are signaled.
 *
 * @blocking: if true, step 4 may wait indefinitely (unless earlier steps
 *            already made progress, in which case we return early).
 * Returns true if at least one callback was invoked.
 */
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    /* Win32 caps one wait call at MAXIMUM_WAIT_OBJECTS handles. */
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool busy, progress;
    int count;

    progress = false;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for qemu_aio_wait loops).
     */
    if (aio_bh_poll(ctx)) {
        blocking = false;
        progress = true;
    }

    /*
     * Then dispatch any pending callbacks from the GSource.
     *
     * We have to walk very carefully in case qemu_aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;

        /* Hold the "lock" across the callback so concurrent unregistration
         * only marks nodes deleted instead of freeing them under us. */
        ctx->walking_handlers++;

        if (node->pfd.revents && node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);
            progress = true;
        }

        /* Advance before reaping: tmp may be freed below. */
        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        /* Reap deferred deletions once no walker (including us) remains. */
        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    if (progress && !blocking) {
        return true;
    }

    ctx->walking_handlers++;

    /* fill fd sets */
    busy = false;
    count = 0;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        /* If there aren't pending AIO operations, don't invoke callbacks.
         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
         * wait indefinitely.
         */
        if (!node->deleted && node->io_flush) {
            if (node->io_flush(node->e) == 0) {
                continue;
            }
            busy = true;
        }
        if (!node->deleted && node->io_notify) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    ctx->walking_handlers--;

    /* No AIO operations?  Get us out of here */
    if (!busy) {
        return progress;
    }

    /* wait until next event */
    for (;;) {
        int timeout = blocking ? INFINITE : 0;
        int ret = WaitForMultipleObjects(count, events, FALSE, timeout);

        /* if we have any signaled events, dispatch event */
        /* The unsigned comparison also catches WAIT_TIMEOUT and WAIT_FAILED,
         * both of which fall outside [WAIT_OBJECT_0, WAIT_OBJECT_0 + count). */
        if ((DWORD) (ret - WAIT_OBJECT_0) >= count) {
            break;
        }

        /* First wait may block; subsequent iterations only drain what is
         * already signaled. */
        blocking = false;

        /* we have to walk very carefully in case
         * qemu_aio_set_fd_handler is called while we're walking */
        node = QLIST_FIRST(&ctx->aio_handlers);
        while (node) {
            AioHandler *tmp;

            ctx->walking_handlers++;

            /* Match the signaled handle back to its handler by HANDLE,
             * skipping nodes deleted or unregistered since the scan. */
            if (!node->deleted &&
                event_notifier_get_handle(node->e) == events[ret - WAIT_OBJECT_0] &&
                node->io_notify) {
                node->io_notify(node->e);
                progress = true;
            }

            tmp = node;
            node = QLIST_NEXT(node, node);

            ctx->walking_handlers--;

            if (!ctx->walking_handlers && tmp->deleted) {
                QLIST_REMOVE(tmp, node);
                g_free(tmp);
            }
        }
    }

    return progress;
}