/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
17 | ||
18 | #include "qemu-common.h" | |
737e150e | 19 | #include "block/block.h" |
1de7afc9 PB |
20 | #include "qemu/queue.h" |
21 | #include "qemu/sockets.h" | |
a76bab49 | 22 | |
f42b2207 PB |
23 | struct AioHandler { |
24 | EventNotifier *e; | |
25 | EventNotifierHandler *io_notify; | |
cd9ba1eb | 26 | GPollFD pfd; |
a76bab49 | 27 | int deleted; |
72cf2d4f | 28 | QLIST_ENTRY(AioHandler) node; |
a76bab49 AL |
29 | }; |
30 | ||
f42b2207 PB |
31 | void aio_set_event_notifier(AioContext *ctx, |
32 | EventNotifier *e, | |
33 | EventNotifierHandler *io_notify, | |
34 | AioFlushEventNotifierHandler *io_flush) | |
a76bab49 AL |
35 | { |
36 | AioHandler *node; | |
37 | ||
a915f4bc | 38 | QLIST_FOREACH(node, &ctx->aio_handlers, node) { |
f42b2207 PB |
39 | if (node->e == e && !node->deleted) { |
40 | break; | |
41 | } | |
a76bab49 AL |
42 | } |
43 | ||
a76bab49 | 44 | /* Are we deleting the fd handler? */ |
f42b2207 | 45 | if (!io_notify) { |
a76bab49 | 46 | if (node) { |
e3713e00 PB |
47 | g_source_remove_poll(&ctx->source, &node->pfd); |
48 | ||
a76bab49 | 49 | /* If the lock is held, just mark the node as deleted */ |
cd9ba1eb | 50 | if (ctx->walking_handlers) { |
a76bab49 | 51 | node->deleted = 1; |
cd9ba1eb PB |
52 | node->pfd.revents = 0; |
53 | } else { | |
a76bab49 AL |
54 | /* Otherwise, delete it for real. We can't just mark it as |
55 | * deleted because deleted nodes are only cleaned up after | |
56 | * releasing the walking_handlers lock. | |
57 | */ | |
72cf2d4f | 58 | QLIST_REMOVE(node, node); |
7267c094 | 59 | g_free(node); |
a76bab49 AL |
60 | } |
61 | } | |
62 | } else { | |
63 | if (node == NULL) { | |
64 | /* Alloc and insert if it's not already there */ | |
7267c094 | 65 | node = g_malloc0(sizeof(AioHandler)); |
f42b2207 PB |
66 | node->e = e; |
67 | node->pfd.fd = (uintptr_t)event_notifier_get_handle(e); | |
68 | node->pfd.events = G_IO_IN; | |
a915f4bc | 69 | QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node); |
e3713e00 PB |
70 | |
71 | g_source_add_poll(&ctx->source, &node->pfd); | |
a76bab49 AL |
72 | } |
73 | /* Update handler with latest information */ | |
f42b2207 | 74 | node->io_notify = io_notify; |
a76bab49 | 75 | } |
7ed2b24c PB |
76 | |
77 | aio_notify(ctx); | |
9958c351 PB |
78 | } |
79 | ||
cd9ba1eb PB |
80 | bool aio_pending(AioContext *ctx) |
81 | { | |
82 | AioHandler *node; | |
83 | ||
84 | QLIST_FOREACH(node, &ctx->aio_handlers, node) { | |
f42b2207 | 85 | if (node->pfd.revents && node->io_notify) { |
cd9ba1eb PB |
86 | return true; |
87 | } | |
88 | } | |
89 | ||
90 | return false; | |
91 | } | |
92 | ||
7c0628b2 | 93 | bool aio_poll(AioContext *ctx, bool blocking) |
a76bab49 | 94 | { |
9eb0bfca | 95 | AioHandler *node; |
f42b2207 | 96 | HANDLE events[MAXIMUM_WAIT_OBJECTS + 1]; |
164a101f | 97 | bool progress; |
f42b2207 | 98 | int count; |
7c0628b2 PB |
99 | |
100 | progress = false; | |
a76bab49 | 101 | |
8febfa26 KW |
102 | /* |
103 | * If there are callbacks left that have been queued, we need to call then. | |
bcdc1857 PB |
104 | * Do not call select in this case, because it is possible that the caller |
105 | * does not need a complete flush (as is the case for qemu_aio_wait loops). | |
8febfa26 | 106 | */ |
a915f4bc | 107 | if (aio_bh_poll(ctx)) { |
7c0628b2 PB |
108 | blocking = false; |
109 | progress = true; | |
110 | } | |
111 | ||
cd9ba1eb PB |
112 | /* |
113 | * Then dispatch any pending callbacks from the GSource. | |
114 | * | |
115 | * We have to walk very carefully in case qemu_aio_set_fd_handler is | |
116 | * called while we're walking. | |
117 | */ | |
118 | node = QLIST_FIRST(&ctx->aio_handlers); | |
119 | while (node) { | |
120 | AioHandler *tmp; | |
cd9ba1eb PB |
121 | |
122 | ctx->walking_handlers++; | |
123 | ||
f42b2207 PB |
124 | if (node->pfd.revents && node->io_notify) { |
125 | node->pfd.revents = 0; | |
126 | node->io_notify(node->e); | |
164a101f SH |
127 | |
128 | /* aio_notify() does not count as progress */ | |
129 | if (node->opaque != &ctx->notifier) { | |
130 | progress = true; | |
131 | } | |
cd9ba1eb PB |
132 | } |
133 | ||
134 | tmp = node; | |
135 | node = QLIST_NEXT(node, node); | |
136 | ||
137 | ctx->walking_handlers--; | |
138 | ||
139 | if (!ctx->walking_handlers && tmp->deleted) { | |
140 | QLIST_REMOVE(tmp, node); | |
141 | g_free(tmp); | |
142 | } | |
143 | } | |
144 | ||
7c0628b2 | 145 | if (progress && !blocking) { |
bcdc1857 | 146 | return true; |
bafbd6a1 | 147 | } |
8febfa26 | 148 | |
a915f4bc | 149 | ctx->walking_handlers++; |
a76bab49 | 150 | |
9eb0bfca | 151 | /* fill fd sets */ |
f42b2207 | 152 | count = 0; |
a915f4bc | 153 | QLIST_FOREACH(node, &ctx->aio_handlers, node) { |
f42b2207 PB |
154 | if (!node->deleted && node->io_notify) { |
155 | events[count++] = event_notifier_get_handle(node->e); | |
9eb0bfca PB |
156 | } |
157 | } | |
a76bab49 | 158 | |
a915f4bc | 159 | ctx->walking_handlers--; |
a76bab49 | 160 | |
164a101f SH |
161 | /* early return if we only have the aio_notify() fd */ |
162 | if (count == 1) { | |
7c0628b2 | 163 | return progress; |
9eb0bfca | 164 | } |
a76bab49 | 165 | |
9eb0bfca | 166 | /* wait until next event */ |
b022b4a4 | 167 | while (count > 0) { |
f42b2207 PB |
168 | int timeout = blocking ? INFINITE : 0; |
169 | int ret = WaitForMultipleObjects(count, events, FALSE, timeout); | |
170 | ||
171 | /* if we have any signaled events, dispatch event */ | |
172 | if ((DWORD) (ret - WAIT_OBJECT_0) >= count) { | |
173 | break; | |
174 | } | |
175 | ||
176 | blocking = false; | |
9eb0bfca | 177 | |
9eb0bfca PB |
178 | /* we have to walk very carefully in case |
179 | * qemu_aio_set_fd_handler is called while we're walking */ | |
a915f4bc | 180 | node = QLIST_FIRST(&ctx->aio_handlers); |
9eb0bfca PB |
181 | while (node) { |
182 | AioHandler *tmp; | |
183 | ||
a915f4bc | 184 | ctx->walking_handlers++; |
2db2bfc0 | 185 | |
9eb0bfca | 186 | if (!node->deleted && |
f42b2207 PB |
187 | event_notifier_get_handle(node->e) == events[ret - WAIT_OBJECT_0] && |
188 | node->io_notify) { | |
189 | node->io_notify(node->e); | |
164a101f SH |
190 | |
191 | /* aio_notify() does not count as progress */ | |
192 | if (node->opaque != &ctx->notifier) { | |
193 | progress = true; | |
194 | } | |
a76bab49 AL |
195 | } |
196 | ||
9eb0bfca PB |
197 | tmp = node; |
198 | node = QLIST_NEXT(node, node); | |
199 | ||
a915f4bc | 200 | ctx->walking_handlers--; |
2db2bfc0 | 201 | |
a915f4bc | 202 | if (!ctx->walking_handlers && tmp->deleted) { |
9eb0bfca PB |
203 | QLIST_REMOVE(tmp, node); |
204 | g_free(tmp); | |
205 | } | |
a76bab49 | 206 | } |
b022b4a4 PB |
207 | |
208 | /* Try again, but only call each handler once. */ | |
209 | events[ret - WAIT_OBJECT_0] = events[--count]; | |
9eb0bfca | 210 | } |
bcdc1857 | 211 | |
164a101f | 212 | return progress; |
a76bab49 | 213 | } |