/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"
#include "qemu/coroutine.h"

#include <libaio.h>

/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 * tunable by the guest. If we get more outstanding requests at a time
 * than this, we will get EAGAIN from io_submit, which is communicated to
 * the guest as an I/O error.
 */
#define MAX_EVENTS 128

struct qemu_laiocb {
    BlockAIOCB common;
    Coroutine *co;
    LinuxAioState *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(qemu_laiocb) next;
};

typedef struct {
    int plugged;
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, qemu_laiocb) pending;
} LaioQueue;

struct LinuxAioState {
    AioContext *aio_context;

    io_context_t ctx;
    EventNotifier e;

    /* I/O queue for submitting in batches. Protected by AioContext lock. */
    LaioQueue io_q;

    /* I/O completion processing. Only runs in I/O thread. */
    QEMUBH *completion_bh;
    int event_idx;
    int event_max;
};

static void ioq_submit(LinuxAioState *s);

static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}

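/*
 * Informative example (not from the original source): a successful 4 KiB
 * read completes with res = 4096 and res2 = 0, so io_event_ret() returns
 * 4096, which qemu_laio_process_completion() compares against
 * laiocb->nbytes; a failed request instead carries a negative errno
 * (e.g. -EIO) in the result.
 */
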
/*
 * Completes an AIO request (calls the callback and frees the ACB).
 */
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                                  laiocb->qiov->size - ret);
            } else {
                ret = -ENOSPC;
            }
        }
    }

    laiocb->ret = ret;
    if (laiocb->co) {
        /* If the coroutine is already entered it must be in ioq_submit() and
         * will notice laiocb->ret has been filled in when it eventually runs
         * later. Coroutines cannot be entered recursively so avoid doing
         * that!
         */
        if (!qemu_coroutine_entered(laiocb->co)) {
            aio_co_wake(laiocb->co);
        }
    } else {
        laiocb->common.cb(laiocb->common.opaque, ret);
        qemu_aio_unref(laiocb);
    }
}

/**
 * aio_ring buffer which is shared between userspace and kernel.
 *
 * This was copied from linux/fs/aio.c; a common header does not exist,
 * but AIO has existed for ages so we assume the ABI is stable.
 */
struct aio_ring {
    unsigned id;    /* kernel internal index number */
    unsigned nr;    /* number of io_events */
    unsigned head;  /* Written to by userland or by kernel. */
    unsigned tail;

    unsigned magic;
    unsigned compat_features;
    unsigned incompat_features;
    unsigned header_length; /* size of aio_ring */

    struct io_event io_events[0];
};

/**
 * io_getevents_peek:
 * @ctx: AIO context
 * @events: pointer to the events array, output value
 *
 * Returns the number of completed events and sets a pointer
 * to the events array. This function does not update the internal
 * ring buffer; it only reads head and tail. When @events has been
 * processed, io_getevents_commit() must be called.
 */
static inline unsigned int io_getevents_peek(io_context_t ctx,
                                             struct io_event **events)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;
    unsigned int head = ring->head, tail = ring->tail;
    unsigned int nr;

    nr = tail >= head ? tail - head : ring->nr - head;
    *events = ring->io_events + head;
    /* To avoid speculative loads of s->events[i] before observing tail.
       Paired with smp_wmb() inside linux/fs/aio.c: aio_complete(). */
    smp_rmb();

    return nr;
}

/**
 * io_getevents_commit:
 * @ctx: AIO context
 * @nr: the number of events by which head should be advanced
 *
 * Advances the head of the ring buffer.
 */
static inline void io_getevents_commit(io_context_t ctx, unsigned int nr)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;

    if (nr) {
        ring->head = (ring->head + nr) % ring->nr;
    }
}

/**
 * io_getevents_advance_and_peek:
 * @ctx: AIO context
 * @events: pointer to the events array, output value
 * @nr: the number of events by which head should be advanced
 *
 * Advances the head of the ring buffer and returns the number of
 * elements left to process.
 */
static inline unsigned int
io_getevents_advance_and_peek(io_context_t ctx,
                              struct io_event **events,
                              unsigned int nr)
{
    io_getevents_commit(ctx, nr);
    return io_getevents_peek(ctx, events);
}

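/*
 * Informative example (not from the original source) of the wrap-around
 * handling above: with ring->nr = 128, head = 120 and tail = 8,
 * io_getevents_peek() first returns the 8 events in slots 120..127; after
 * io_getevents_commit(ctx, 8) advances head to (120 + 8) % 128 = 0, the
 * next peek returns the remaining 8 events in slots 0..7. This is why
 * qemu_laio_process_completions() below calls
 * io_getevents_advance_and_peek() in a loop until it returns 0.
 */
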
/**
 * qemu_laio_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests and invokes their callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll(). In order to do this,
 * indices are kept in LinuxAioState. The function schedules the completion BH
 * so it can be called again in a nested event loop. When there are no events
 * left to complete, the BH is canceled.
 */
static void qemu_laio_process_completions(LinuxAioState *s)
{
    struct io_event *events;

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    while ((s->event_max = io_getevents_advance_and_peek(s->ctx, &events,
                                                         s->event_idx))) {
        for (s->event_idx = 0; s->event_idx < s->event_max; ) {
            struct iocb *iocb = events[s->event_idx].obj;
            struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

            laiocb->ret = io_event_ret(&events[s->event_idx]);

            /* Change counters one-by-one because we can be nested. */
            s->io_q.in_flight--;
            s->event_idx++;
            qemu_laio_process_completion(laiocb);
        }
    }

    qemu_bh_cancel(s->completion_bh);

    /* If we are nested, we have to notify the level above that we are done
     * by setting event_max to zero; the upper level will then jump out of
     * its own `for` loop. If we are the last level, all counters drop to
     * zero. */
    s->event_max = 0;
    s->event_idx = 0;
}

static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
{
    qemu_laio_process_completions(s);

    aio_context_acquire(s->aio_context);
    if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
    aio_context_release(s->aio_context);
}

static void qemu_laio_completion_bh(void *opaque)
{
    LinuxAioState *s = opaque;

    qemu_laio_process_completions_and_submit(s);
}

static void qemu_laio_completion_cb(EventNotifier *e)
{
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_laio_process_completions_and_submit(s);
    }
}

static bool qemu_laio_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    LinuxAioState *s = container_of(e, LinuxAioState, e);
    struct io_event *events;

    if (!io_getevents_peek(s->ctx, &events)) {
        return false;
    }

    qemu_laio_process_completions_and_submit(s);
    return true;
}

static void laio_cancel(BlockAIOCB *blockacb)
{
    struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
    struct io_event event;
    int ret;

    if (laiocb->ret != -EINPROGRESS) {
        return;
    }
    ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
    laiocb->ret = -ECANCELED;
    if (ret != 0) {
        /* iocb is not cancelled; cb will be called by the event loop later */
        return;
    }

    laiocb->common.cb(laiocb->common.opaque, laiocb->ret);
}

static const AIOCBInfo laio_aiocb_info = {
    .aiocb_size         = sizeof(struct qemu_laiocb),
    .cancel_async       = laio_cancel,
};

static void ioq_init(LaioQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->pending);
    io_q->plugged = 0;
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}

static void ioq_submit(LinuxAioState *s)
{
    int ret, len;
    struct qemu_laiocb *aiocb;
    struct iocb *iocbs[MAX_EVENTS];
    QSIMPLEQ_HEAD(, qemu_laiocb) completed;

    do {
        if (s->io_q.in_flight >= MAX_EVENTS) {
            break;
        }
        len = 0;
        QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
            iocbs[len++] = &aiocb->iocb;
            if (s->io_q.in_flight + len >= MAX_EVENTS) {
                break;
            }
        }

        ret = io_submit(s->ctx, len, iocbs);
        if (ret == -EAGAIN) {
            break;
        }
        if (ret < 0) {
            /* Fail the first request, retry the rest */
            aiocb = QSIMPLEQ_FIRST(&s->io_q.pending);
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.pending, next);
            s->io_q.in_queue--;
            aiocb->ret = ret;
            qemu_laio_process_completion(aiocb);
            continue;
        }

        s->io_q.in_flight += ret;
        s->io_q.in_queue -= ret;
        aiocb = container_of(iocbs[ret - 1], struct qemu_laiocb, iocb);
        QSIMPLEQ_SPLIT_AFTER(&s->io_q.pending, aiocb, next, &completed);
    } while (ret == len && !QSIMPLEQ_EMPTY(&s->io_q.pending));
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /* We can try to complete something right away if there are
         * still requests in flight. */
        qemu_laio_process_completions(s);
        /*
         * Even if we have completed everything (in_flight == 0), the queue
         * can still have pending requests (in_queue > 0). We do not attempt
         * to repeat submission, in order to avoid an I/O hang. The reason is
         * simple: s->e is still set and the completion callback will be
         * called shortly, so all pending requests will be submitted from
         * there.
         */
    }
}

void laio_io_plug(BlockDriverState *bs, LinuxAioState *s)
{
    s->io_q.plugged++;
}

void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s)
{
    assert(s->io_q.plugged);
    if (--s->io_q.plugged == 0 &&
        !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}

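/*
 * Informative sketch (not from the original source) of the plug/unplug
 * batching contract, for a driver holding a LinuxAioState *s:
 *
 *     laio_io_plug(bs, s);
 *     ... laio_co_submit() calls accumulate in s->io_q.pending, unless
 *         in_flight + in_queue reaches MAX_EVENTS, which forces a flush ...
 *     laio_io_unplug(bs, s);   // submits the queued batch via ioq_submit()
 */
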
static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
                          int type)
{
    LinuxAioState *s = laiocb->ctx;
    struct iocb *iocbs = &laiocb->iocb;
    QEMUIOVector *qiov = laiocb->qiov;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    /* Currently the Linux kernel does not support other operations */
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                __func__, type);
        return -EIO;
    }
    io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));

    QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
    s->io_q.in_queue++;
    if (!s->io_q.blocked &&
        (!s->io_q.plugged ||
         s->io_q.in_flight + s->io_q.in_queue >= MAX_EVENTS)) {
        ioq_submit(s);
    }

    return 0;
}

int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
                                uint64_t offset, QEMUIOVector *qiov, int type)
{
    int ret;
    struct qemu_laiocb laiocb = {
        .co         = qemu_coroutine_self(),
        .nbytes     = qiov->size,
        .ctx        = s,
        .ret        = -EINPROGRESS,
        .is_read    = (type == QEMU_AIO_READ),
        .qiov       = qiov,
    };

    ret = laio_do_submit(fd, &laiocb, offset, type);
    if (ret < 0) {
        return ret;
    }

    if (laiocb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return laiocb.ret;
}

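/*
 * Informative sketch (hypothetical caller, not from this file): a raw block
 * driver running in coroutine context would issue a read roughly as
 *
 *     ret = laio_co_submit(bs, s, fd, offset, qiov, QEMU_AIO_READ);
 *
 * The coroutine yields inside laio_co_submit() unless the request completed
 * during submission, and qemu_laio_process_completion() later wakes it with
 * the final return value in laiocb.ret.
 */
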
BlockAIOCB *laio_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
                        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
                        BlockCompletionFunc *cb, void *opaque, int type)
{
    struct qemu_laiocb *laiocb;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;
    int ret;

    laiocb = qemu_aio_get(&laio_aiocb_info, bs, cb, opaque);
    laiocb->nbytes = nb_sectors * BDRV_SECTOR_SIZE;
    laiocb->ctx = s;
    laiocb->ret = -EINPROGRESS;
    laiocb->is_read = (type == QEMU_AIO_READ);
    laiocb->qiov = qiov;

    ret = laio_do_submit(fd, laiocb, offset, type);
    if (ret < 0) {
        qemu_aio_unref(laiocb);
        return NULL;
    }

    return &laiocb->common;
}

void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
    aio_set_event_notifier(old_context, &s->e, false, NULL, NULL);
    qemu_bh_delete(s->completion_bh);
    s->aio_context = NULL;
}

void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
    aio_set_event_notifier(new_context, &s->e, false,
                           qemu_laio_completion_cb,
                           qemu_laio_poll_cb);
}

LinuxAioState *laio_init(void)
{
    LinuxAioState *s;

    s = g_malloc0(sizeof(*s));
    if (event_notifier_init(&s->e, false) < 0) {
        goto out_free_state;
    }

    if (io_setup(MAX_EVENTS, &s->ctx) != 0) {
        goto out_close_efd;
    }

    ioq_init(&s->io_q);

    return s;

out_close_efd:
    event_notifier_cleanup(&s->e);
out_free_state:
    g_free(s);
    return NULL;
}

void laio_cleanup(LinuxAioState *s)
{
    event_notifier_cleanup(&s->e);

    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n",
                __func__, &s->ctx);
    }
    g_free(s);
}
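
/*
 * Informative lifecycle sketch (not part of the original file), assuming an
 * AioContext *ctx obtained elsewhere (e.g. from an IOThread):
 *
 *     LinuxAioState *s = laio_init();
 *     if (s) {
 *         laio_attach_aio_context(s, ctx);
 *         ... submit I/O via laio_co_submit() or laio_submit() ...
 *         laio_detach_aio_context(s, ctx);
 *         laio_cleanup(s);
 *     }
 */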