/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "block/raw-aio.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

struct QEMUBH {
    AioContext *ctx;
    QEMUBHFunc *cb;
    void *opaque;
    QEMUBH *next;
    bool scheduled;
    bool idle;
    bool deleted;
};

QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into the list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    return bh;
}
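
/*
 * Usage sketch (illustration only, not part of the original file; "my_cb",
 * "MyState" and "my_state" are hypothetical names):
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;    // hypothetical per-job state
 *         // ... deferred work runs here, in the event-loop thread ...
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, my_state);
 *     qemu_bh_schedule(bh);   // my_cb runs on the next aio_bh_poll()
 *     qemu_bh_delete(bh);     // memory is reclaimed later by aio_bh_poll()
 */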

void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* Multiple invocations of aio_bh_poll must not run concurrently */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    ctx->walking_bh++;

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        next = bh->next;
        /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
         * implicit memory barrier ensures that the callback sees all writes
         * done by the scheduling thread.  It also ensures that the scheduling
         * thread sees the zero before bh->cb has run, and thus will call
         * aio_notify again if necessary.
         */
        if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
            /* Idle BHs and the notify BH don't count as progress */
            if (!bh->idle && bh != ctx->notify_dummy_bh) {
                ret = 1;
            }
            bh->idle = 0;
            aio_bh_call(bh);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
    }

    return ret;
}
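
/*
 * For orientation, a minimal sketch of how an event-loop iteration would
 * drive this function (a simplification of what aio_dispatch()/aio_poll()
 * actually do; "loop_iteration" is a hypothetical name):
 *
 *     static bool loop_iteration(AioContext *ctx)
 *     {
 *         bool progress = false;
 *         progress |= aio_bh_poll(ctx);   // run every scheduled BH once
 *         // ... then poll file descriptors and run expired timers ...
 *         return progress;
 *     }
 */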

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    bh->idle = 1;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in aio_bh_poll.
     */
    atomic_mb_set(&bh->scheduled, 1);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    ctx = bh->ctx;
    bh->idle = 0;
    /* The memory barrier implicit in atomic_xchg makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    if (atomic_xchg(&bh->scheduled, 1) == 0) {
        aio_notify(ctx);
    }
}
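
/*
 * The barrier pairing described above is what makes the following
 * cross-thread pattern safe (a sketch; "job" is a hypothetical structure
 * holding a result and its BH):
 *
 *     // Worker thread:
 *     job->result = compute();     // plain write ...
 *     qemu_bh_schedule(job->bh);   // ... published by the atomic_xchg
 *
 *     // Event-loop thread: the paired atomic_xchg(&bh->scheduled, 0) in
 *     // aio_bh_poll() guarantees the callback observes job->result.
 */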

/* This function is async: if the bottom half has already been dequeued by
 * aio_bh_poll, its callback will still run this one time.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}

/* This function is async.  The bottom half is actually removed and freed
 * later, at the end of aio_bh_poll.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    int64_t deadline;
    int timeout = -1;
    QEMUBH *bh;

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}
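
/*
 * Worked example, assuming one idle BH pending and the nearest timer
 * deadline 50 ms away: the idle BH caps the timeout at 10000000 ns (10 ms),
 * so qemu_soonest_timeout(10000000, 50000000) yields 10000000 ns.  Any
 * scheduled non-idle BH would short-circuit all of this and return 0.
 */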

static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    atomic_or(&ctx->notify_me, 1);

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;

    atomic_and(&ctx->notify_me, ~1);
    aio_notify_accept(ctx);

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            return true;
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;

    qemu_bh_delete(ctx->notify_dummy_bh);
    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

    qemu_mutex_lock(&ctx->bh_lock);
    while (ctx->first_bh) {
        QEMUBH *next = ctx->first_bh->next;

        /* qemu_bh_delete() must have been called on BHs in this AioContext */
        assert(ctx->first_bh->deleted);

        g_free(ctx->first_bh);
        ctx->first_bh = next;
    }
    qemu_mutex_unlock(&ctx->bh_lock);

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
    event_notifier_cleanup(&ctx->notifier);
    rfifolock_destroy(&ctx->lock);
    qemu_mutex_destroy(&ctx->bh_lock);
    timerlistgroup_deinit(&ctx->tlg);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}
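
/*
 * Integration sketch: the returned GSource can be attached to a GLib main
 * context so that GLib's loop drives this AioContext.  The caller owns the
 * reference taken above:
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);   // the main context now holds its own reference
 */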

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}
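
/*
 * Usage sketch (see block/thread-pool.h for the actual API; "worker_fn",
 * "arg", "done_cb" and "opaque" are hypothetical names):
 *
 *     ThreadPool *pool = aio_get_thread_pool(ctx);
 *     thread_pool_submit_aio(pool, worker_fn, arg, done_cb, opaque);
 *     // worker_fn runs in a pool thread; done_cb runs back in ctx
 */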

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init();
        laio_attach_aio_context(ctx->linux_aio, ctx);
    }
    return ctx->linux_aio;
}
#endif

void aio_notify(AioContext *ctx)
{
    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
     */
    smp_mb();
    if (ctx->notify_me) {
        event_notifier_set(&ctx->notifier);
        atomic_mb_set(&ctx->notified, true);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    if (atomic_xchg(&ctx->notified, false)) {
        event_notifier_test_and_clear(&ctx->notifier);
    }
}
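
/*
 * The notify_me/notified handshake, step by step (a sketch of the existing
 * interaction between threads, not new code):
 *
 *     // Waiter (event loop):            // Notifier (any thread):
 *     atomic_or(&ctx->notify_me, 1);
 *                                        qemu_bh_schedule(bh);
 *                                        //   smp_mb(); notify_me != 0,
 *                                        //   so event_notifier_set() fires
 *     // poll() wakes up
 *     atomic_and(&ctx->notify_me, ~1);
 *     aio_notify_accept(ctx);            // clears the pending notification
 */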

static void aio_timerlist_notify(void *opaque)
{
    aio_notify(opaque);
}

static void aio_rfifolock_cb(void *opaque)
{
    AioContext *ctx = opaque;

    /* Kick the owner thread in case it is blocked in aio_poll() */
    qemu_bh_schedule(ctx->notify_dummy_bh);
}

static void notify_dummy_bh(void *opaque)
{
    /* Do nothing, we were invoked just to force the event loop to iterate */
}

static void event_notifier_dummy_cb(EventNotifier *e)
{
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           (EventNotifierHandler *)
                           event_notifier_dummy_cb);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->notify_dummy_bh = aio_bh_new(ctx, notify_dummy_bh, NULL);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}
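
/*
 * Creation sketch with error propagation (standard QAPI Error usage;
 * error_report_err() is declared in qemu/error-report.h):
 *
 *     Error *local_err = NULL;
 *     AioContext *ctx = aio_context_new(&local_err);
 *     if (!ctx) {
 *         error_report_err(local_err);
 *         return -1;
 *     }
 */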

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    rfifolock_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    rfifolock_unlock(&ctx->lock);
}