/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

struct QEMUBH {
    AioContext *ctx;
    QEMUBHFunc *cb;
    void *opaque;
    QEMUBH *next;
    bool scheduled;
    bool idle;
    bool deleted;
};

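/* Allocate a new bottom half for @ctx and atomically insert it at the head of
 * the context's BH list.  The BH starts out unscheduled.  A minimal usage
 * sketch (my_cb and my_state are illustrative names, not part of this file):
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, my_state);
 *     qemu_bh_schedule(bh);    ... my_cb(my_state) runs once in ctx's loop
 *     qemu_bh_delete(bh);      ... unlinked and freed later by aio_bh_poll
 */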
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    return bh;
}

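/* Run a bottom half's callback immediately, regardless of whether it is
 * currently scheduled.  In this file it is only used by aio_bh_poll.
 */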
void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* aio_bh_poll must not be called concurrently with itself */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    ctx->walking_bh++;

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        next = bh->next;
        /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
         * implicit memory barrier ensures that the callback sees all writes
         * done by the scheduling thread.  It also ensures that the scheduling
         * thread sees the zero before bh->cb has run, and thus will call
         * aio_notify again if necessary.
         */
        if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
            /* Idle BHs and the notify BH don't count as progress */
            if (!bh->idle && bh != ctx->notify_dummy_bh) {
                ret = 1;
            }
            bh->idle = 0;
            aio_bh_call(bh);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
    }

    return ret;
}

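/* Schedule a BH as "idle": it does not force the event loop to iterate
 * immediately and is only guaranteed to run within roughly 10ms (see
 * aio_compute_timeout).  Unlike qemu_bh_schedule, this does not call
 * aio_notify.
 */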
void qemu_bh_schedule_idle(QEMUBH *bh)
{
    bh->idle = 1;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in the aio_bh_poll.
     */
    atomic_mb_set(&bh->scheduled, 1);
}

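/* Schedule a BH to run during the next iteration of its AioContext.  Safe to
 * call from any thread; if the BH was not already scheduled, the context is
 * woken up with aio_notify.
 */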
void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    ctx = bh->ctx;
    bh->idle = 0;
    /* The memory barrier implicit in atomic_xchg makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    if (atomic_xchg(&bh->scheduled, 1) == 0) {
        aio_notify(ctx);
    }
}


/* This function is asynchronous: it only clears the scheduled flag, so it can
 * race with a concurrent aio_bh_poll that has already dequeued the BH.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}

/* This function is asynchronous: the bottom half is only unlinked and freed
 * later, by aio_bh_poll, once no thread is walking the BH list.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}

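/* Compute how long (in nanoseconds) the event loop may block: 0 if a non-idle
 * BH or an expired timer is pending, at most 10ms if only idle BHs are
 * pending, otherwise the nearest timer deadline (-1 means block forever).
 */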
int64_t
aio_compute_timeout(AioContext *ctx)
{
    int64_t deadline;
    int timeout = -1;
    QEMUBH *bh;

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

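/* GSource callbacks: these let an AioContext be driven by a GLib main loop.
 * prepare() computes the poll timeout and sets the notify_me flag so that
 * aio_notify knows a poll is in progress; check() clears it and reports
 * whether any BH, AIO handler or timer is ready; dispatch() then runs them.
 */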
static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    atomic_or(&ctx->notify_me, 1);

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;

    atomic_and(&ctx->notify_me, ~1);
    aio_notify_accept(ctx);

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            return true;
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;

    qemu_bh_delete(ctx->notify_dummy_bh);
    thread_pool_free(ctx->thread_pool);

    qemu_mutex_lock(&ctx->bh_lock);
    while (ctx->first_bh) {
        QEMUBH *next = ctx->first_bh->next;

        /* qemu_bh_delete() must have been called on BHs in this AioContext */
        assert(ctx->first_bh->deleted);

        g_free(ctx->first_bh);
        ctx->first_bh = next;
    }
    qemu_mutex_unlock(&ctx->bh_lock);

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
    event_notifier_cleanup(&ctx->notifier);
    rfifolock_destroy(&ctx->lock);
    qemu_mutex_destroy(&ctx->bh_lock);
    timerlistgroup_deinit(&ctx->tlg);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}

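/* Return the context's worker thread pool, creating it on first use. */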
ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

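/* Wake up an AioContext that may be blocked in poll or in a GLib main loop.
 * The write to the event notifier is skipped unless some thread has
 * advertised, via ctx->notify_me, that it is about to block or is blocked in
 * polling.
 */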
void aio_notify(AioContext *ctx)
{
    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
     */
    smp_mb();
    if (ctx->notify_me) {
        event_notifier_set(&ctx->notifier);
        atomic_mb_set(&ctx->notified, true);
    }
}

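/* Consume a pending aio_notify wake-up, clearing the event notifier so the
 * next poll does not spuriously report it as ready.
 */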
void aio_notify_accept(AioContext *ctx)
{
    if (atomic_xchg(&ctx->notified, false)) {
        event_notifier_test_and_clear(&ctx->notifier);
    }
}

static void aio_timerlist_notify(void *opaque)
{
    aio_notify(opaque);
}

static void aio_rfifolock_cb(void *opaque)
{
    AioContext *ctx = opaque;

    /* Kick owner thread in case they are blocked in aio_poll() */
    qemu_bh_schedule(ctx->notify_dummy_bh);
}

static void notify_dummy_bh(void *opaque)
{
    /* Do nothing, we were invoked just to force the event loop to iterate */
}

static void event_notifier_dummy_cb(EventNotifier *e)
{
}

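/* Create and initialise a new AioContext.  On failure the partially
 * constructed GSource is destroyed and NULL is returned, with @errp set.
 * A minimal usage sketch (error handling elided by passing &error_abort):
 *
 *     AioContext *ctx = aio_context_new(&error_abort);
 *     ...
 *     aio_context_unref(ctx);
 */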
AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;
    Error *local_err = NULL;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    aio_context_setup(ctx, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           (EventNotifierHandler *)
                           event_notifier_dummy_cb);
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->notify_dummy_bh = aio_bh_new(ctx, notify_dummy_bh, NULL);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

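/* aio_context_acquire/release take the context's recursive FIFO lock so that
 * a thread other than the owner can temporarily operate on the context; the
 * rfifolock callback above kicks the owner out of a blocking aio_poll().
 */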
void aio_context_acquire(AioContext *ctx)
{
    rfifolock_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    rfifolock_unlock(&ctx->lock);
}