/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"
#include "qemu/coroutine.h"

#include <libaio.h>

/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 * tunable by the guest. If we get more outstanding requests at a time
 * than this we will get EAGAIN from io_submit which is communicated to
 * the guest as an I/O error.
 */
#define MAX_EVENTS 128

struct qemu_laiocb {
    BlockAIOCB common;
    Coroutine *co;
    LinuxAioState *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(qemu_laiocb) next;
};

typedef struct {
    int plugged;
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, qemu_laiocb) pending;
} LaioQueue;

struct LinuxAioState {
    AioContext *aio_context;

    io_context_t ctx;
    EventNotifier e;

    /* I/O queue for batched submission */
    LaioQueue io_q;

    /* I/O completion processing */
    QEMUBH *completion_bh;
    struct io_event events[MAX_EVENTS];
    int event_idx;
    int event_max;
};

static void ioq_submit(LinuxAioState *s);

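/*
 * Combine the res2 (high) and res (low) fields of an io_event into a single
 * signed value: the number of bytes transferred on success, or a negative
 * errno on failure.
 */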
static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}

/*
 * Completes an AIO request (calls the callback and frees the ACB).
 */
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                                  laiocb->qiov->size - ret);
            } else {
                ret = -ENOSPC;
            }
        }
    }

    laiocb->ret = ret;
    if (laiocb->co) {
        qemu_coroutine_enter(laiocb->co);
    } else {
        laiocb->common.cb(laiocb->common.opaque, ret);
        qemu_aio_unref(laiocb);
    }
}

/* The completion BH fetches completed I/O requests and invokes their
 * callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll(). In order to do this,
 * the completion events array and index are kept in LinuxAioState. The BH
 * reschedules itself as long as there are completions pending so it will
 * either be called again in a nested event loop or will be called after all
 * events have been completed. When there are no events left to complete, the
 * BH returns without rescheduling.
 */
static void qemu_laio_completion_bh(void *opaque)
{
    LinuxAioState *s = opaque;

    /* Fetch more completion events when empty */
    if (s->event_idx == s->event_max) {
        do {
            struct timespec ts = { 0 };
            s->event_max = io_getevents(s->ctx, MAX_EVENTS, MAX_EVENTS,
                                        s->events, &ts);
        } while (s->event_max == -EINTR);

        s->event_idx = 0;
        if (s->event_max <= 0) {
            s->event_max = 0;
            return; /* no more events */
        }
        s->io_q.in_flight -= s->event_max;
    }

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    /* Process completion events */
    while (s->event_idx < s->event_max) {
        struct iocb *iocb = s->events[s->event_idx].obj;
        struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

        laiocb->ret = io_event_ret(&s->events[s->event_idx]);
        s->event_idx++;

        qemu_laio_process_completion(laiocb);
    }

    if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }

    qemu_bh_cancel(s->completion_bh);
}

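/*
 * Event notifier callback: the eventfd was signalled by a completion, so
 * clear it and process any finished requests.
 */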
static void qemu_laio_completion_cb(EventNotifier *e)
{
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_laio_completion_bh(s);
    }
}

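/*
 * Attempt to cancel a request. If the kernel cannot cancel it (the common
 * case once it is in flight), mark it -ECANCELED and let the normal
 * completion path invoke the callback later.
 */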
static void laio_cancel(BlockAIOCB *blockacb)
{
    struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
    struct io_event event;
    int ret;

    if (laiocb->ret != -EINPROGRESS) {
        return;
    }
    ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
    laiocb->ret = -ECANCELED;
    if (ret != 0) {
        /* iocb is not cancelled, cb will be called by the event loop later */
        return;
    }

    laiocb->common.cb(laiocb->common.opaque, laiocb->ret);
}

static const AIOCBInfo laio_aiocb_info = {
    .aiocb_size = sizeof(struct qemu_laiocb),
    .cancel_async = laio_cancel,
};

static void ioq_init(LaioQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->pending);
    io_q->plugged = 0;
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}

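/*
 * Submit pending requests in batches, capping in_flight at MAX_EVENTS.
 * On -EAGAIN the remainder stays queued and the queue is marked blocked, so
 * callers stop submitting until completions drain the ring.
 */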
static void ioq_submit(LinuxAioState *s)
{
    int ret, len;
    struct qemu_laiocb *aiocb;
    struct iocb *iocbs[MAX_EVENTS];
    QSIMPLEQ_HEAD(, qemu_laiocb) completed;

    do {
        if (s->io_q.in_flight >= MAX_EVENTS) {
            break;
        }
        len = 0;
        QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
            iocbs[len++] = &aiocb->iocb;
            if (s->io_q.in_flight + len >= MAX_EVENTS) {
                break;
            }
        }

        ret = io_submit(s->ctx, len, iocbs);
        if (ret == -EAGAIN) {
            break;
        }
        if (ret < 0) {
            abort();
        }

        s->io_q.in_flight += ret;
        s->io_q.in_queue -= ret;
        aiocb = container_of(iocbs[ret - 1], struct qemu_laiocb, iocb);
        QSIMPLEQ_SPLIT_AFTER(&s->io_q.pending, aiocb, next, &completed);
    } while (ret == len && !QSIMPLEQ_EMPTY(&s->io_q.pending));
    s->io_q.blocked = (s->io_q.in_queue > 0);
}

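/*
 * laio_io_plug()/laio_io_unplug() bracket a burst of submissions: while
 * plugged, requests accumulate in io_q.pending and are submitted in one
 * batch on the final unplug (or earlier, once enough requests queue up to
 * fill the ring). Typical call pattern (illustrative sketch, not code from
 * this file):
 *
 *     laio_io_plug(bs, s);
 *     ... queue several requests via laio_co_submit()/laio_submit() ...
 *     laio_io_unplug(bs, s);
 */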
void laio_io_plug(BlockDriverState *bs, LinuxAioState *s)
{
    s->io_q.plugged++;
}

void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s)
{
    assert(s->io_q.plugged);
    if (--s->io_q.plugged == 0 &&
        !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}

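/*
 * Prepare the iocb for the given request type, queue it, and kick off
 * submission unless batching (plugged) or a full ring (blocked) defers it.
 */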
static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
                          int type)
{
    LinuxAioState *s = laiocb->ctx;
    struct iocb *iocbs = &laiocb->iocb;
    QEMUIOVector *qiov = laiocb->qiov;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    /* Currently the Linux kernel does not support other operations */
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                __func__, type);
        return -EIO;
    }
    io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));

    QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
    s->io_q.in_queue++;
    if (!s->io_q.blocked &&
        (!s->io_q.plugged ||
         s->io_q.in_flight + s->io_q.in_queue >= MAX_EVENTS)) {
        ioq_submit(s);
    }

    return 0;
}

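/*
 * Coroutine interface: the request lives on the caller's stack; the
 * coroutine yields after queueing and is re-entered by
 * qemu_laio_process_completion() once the result is in laiocb.ret.
 */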
int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
                                uint64_t offset, QEMUIOVector *qiov, int type)
{
    int ret;
    struct qemu_laiocb laiocb = {
        .co = qemu_coroutine_self(),
        .nbytes = qiov->size,
        .ctx = s,
        .is_read = (type == QEMU_AIO_READ),
        .qiov = qiov,
    };

    ret = laio_do_submit(fd, &laiocb, offset, type);
    if (ret < 0) {
        return ret;
    }

    qemu_coroutine_yield();
    return laiocb.ret;
}

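/*
 * Callback interface: allocates an ACB, converts the sector-based request
 * to a byte offset, and returns NULL if the request could not be queued.
 */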
BlockAIOCB *laio_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque, int type)
{
    struct qemu_laiocb *laiocb;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;
    int ret;

    laiocb = qemu_aio_get(&laio_aiocb_info, bs, cb, opaque);
    laiocb->nbytes = nb_sectors * BDRV_SECTOR_SIZE;
    laiocb->ctx = s;
    laiocb->ret = -EINPROGRESS;
    laiocb->is_read = (type == QEMU_AIO_READ);
    laiocb->qiov = qiov;

    ret = laio_do_submit(fd, laiocb, offset, type);
    if (ret < 0) {
        qemu_aio_unref(laiocb);
        return NULL;
    }

    return &laiocb->common;
}

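/*
 * Unregister the completion notifier and delete the BH from the old
 * AioContext; laio_attach_aio_context() re-creates them in the new one.
 */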
void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
    aio_set_event_notifier(old_context, &s->e, false, NULL);
    qemu_bh_delete(s->completion_bh);
}

void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
    aio_set_event_notifier(new_context, &s->e, false,
                           qemu_laio_completion_cb);
}

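/*
 * Allocate a LinuxAioState: create the eventfd used for completion
 * notification and a kernel AIO context sized for MAX_EVENTS in-flight
 * requests. Returns NULL on failure.
 */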
LinuxAioState *laio_init(void)
{
    LinuxAioState *s;

    s = g_malloc0(sizeof(*s));
    if (event_notifier_init(&s->e, false) < 0) {
        goto out_free_state;
    }

    if (io_setup(MAX_EVENTS, &s->ctx) != 0) {
        goto out_close_efd;
    }

    ioq_init(&s->io_q);

    return s;

out_close_efd:
    event_notifier_cleanup(&s->e);
out_free_state:
    g_free(s);
    return NULL;
}

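/* Release the eventfd and kernel AIO context allocated by laio_init(). */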
void laio_cleanup(LinuxAioState *s)
{
    event_notifier_cleanup(&s->e);

    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n",
                __func__, &s->ctx);
    }
    g_free(s);
}