/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
17 | ||
18 | #include "qemu-common.h" | |
19 | #include "block/block.h" | |
20 | #include "qemu/queue.h" | |
21 | #include "qemu/sockets.h" | |
22 | ||
struct AioHandler {
    EventNotifier *e;
    IOHandler *io_read;
    IOHandler *io_write;
    EventNotifierHandler *io_notify;
    GPollFD pfd;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque)
{
    /* fd is a SOCKET in our case */
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        HANDLE event;

        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
        }

        node->pfd.events = 0;
        if (io_read) {
            node->pfd.events |= G_IO_IN;
        }
        if (io_write) {
            node->pfd.events |= G_IO_OUT;
        }
84 | ||
85 | node->e = &ctx->notifier; | |
86 | ||
87 | /* Update handler with latest information */ | |
88 | node->opaque = opaque; | |
89 | node->io_read = io_read; | |
90 | node->io_write = io_write; | |
91 | node->is_external = is_external; | |
92 | ||
93 | event = event_notifier_get_handle(&ctx->notifier); | |
94 | WSAEventSelect(node->pfd.fd, event, | |
95 | FD_READ | FD_ACCEPT | FD_CLOSE | | |
96 | FD_CONNECT | FD_WRITE | FD_OOB); | |
97 | } | |
98 | ||
99 | aio_notify(ctx); | |
100 | } | |
101 | ||
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            bool is_external,
                            EventNotifierHandler *io_notify)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_notify) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->e = e;
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            node->is_external = is_external;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
    }

    aio_notify(ctx);
}

bool aio_prepare(AioContext *ctx)
{
    static struct timeval tv0;
    AioHandler *node;
    bool have_select_revents = false;
    fd_set rfds, wfds;

    /* fill fd sets */
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->io_read) {
            FD_SET((SOCKET)node->pfd.fd, &rfds);
        }
        if (node->io_write) {
            FD_SET((SOCKET)node->pfd.fd, &wfds);
        }
    }

    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
            node->pfd.revents = 0;
            if (FD_ISSET(node->pfd.fd, &rfds)) {
                node->pfd.revents |= G_IO_IN;
                have_select_revents = true;
            }

            if (FD_ISSET(node->pfd.fd, &wfds)) {
                node->pfd.revents |= G_IO_OUT;
                have_select_revents = true;
            }
        }
    }

    return have_select_revents;
}

bool aio_pending(AioContext *ctx)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.revents && node->io_notify) {
            return true;
        }

        if ((node->pfd.revents & G_IO_IN) && node->io_read) {
            return true;
        }
        if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
            return true;
        }
    }

    return false;
}

static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;
        int revents = node->pfd.revents;

        ctx->walking_handlers++;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    return progress;
}

bool aio_dispatch(AioContext *ctx)
{
    bool progress;

    progress = aio_bh_poll(ctx);
    progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    progress |= timerlistgroup_run_timers(&ctx->tlg);
    return progress;
}

bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool progress, have_select_revents, first;
    int count;
    int timeout;

    aio_context_acquire(ctx);
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    have_select_revents = aio_prepare(ctx);

    ctx->walking_handlers++;

    /* fill the events array with the notifiers' HANDLEs */
    count = 0;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify
            && aio_node_check(ctx, node->is_external)) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    ctx->walking_handlers--;
    first = true;

    /* ctx->notifier is always registered.  */
    assert(count > 0);

    /* Multiple iterations, all of them non-blocking except the first,
     * may be necessary to process all pending events.  After the first
     * WaitForMultipleObjects call ctx->notify_me will be decremented.
     */
    do {
        HANDLE event;
        int ret;

        timeout = blocking && !have_select_revents
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        if (timeout) {
            aio_context_release(ctx);
        }
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        if (blocking) {
            assert(first);
            atomic_sub(&ctx->notify_me, 2);
        }
        if (timeout) {
            aio_context_acquire(ctx);
        }

        if (first) {
            aio_notify_accept(ctx);
            progress |= aio_bh_poll(ctx);
            first = false;
        }

        /* if we have any signaled events, dispatch event */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            event = events[ret - WAIT_OBJECT_0];
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            break;
        }

        have_select_revents = false;
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    } while (count > 0);

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    aio_context_release(ctx);
    return progress;
}

void aio_context_setup(AioContext *ctx, Error **errp)
{
}