/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
#include "qemu/cutils.h"
#include "trace.h"
#ifdef CONFIG_EPOLL_CREATE1
#include <sys/epoll.h>
#endif

struct AioHandler
{
    GPollFD pfd;
    IOHandler *io_read;
    IOHandler *io_write;
    AioPollFn *io_poll;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

#ifdef CONFIG_EPOLL_CREATE1

/* The fd number threshold to switch to epoll */
#define EPOLL_ENABLE_THRESHOLD 64
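
/* Stop using epoll for this context and fall back to ppoll.  Marks epoll as
 * unavailable so aio_epoll_check_poll() will not try to re-enable it.
 */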
static void aio_epoll_disable(AioContext *ctx)
{
    ctx->epoll_available = false;
    if (!ctx->epoll_enabled) {
        return;
    }
    ctx->epoll_enabled = false;
    close(ctx->epollfd);
}
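
/* Translate GLib poll flags (G_IO_*) into the corresponding epoll bits */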
static inline int epoll_events_from_pfd(int pfd_events)
{
    return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
           (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
           (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
           (pfd_events & G_IO_ERR ? EPOLLERR : 0);
}
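
/* Register every active handler with the epoll instance.  Returns true and
 * switches the context to epoll mode on success; returns false if any
 * epoll_ctl() call fails, in which case the caller falls back to ppoll.
 */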
static bool aio_epoll_try_enable(AioContext *ctx)
{
    AioHandler *node;
    struct epoll_event event;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        int r;
        if (node->deleted || !node->pfd.events) {
            continue;
        }
        event.events = epoll_events_from_pfd(node->pfd.events);
        event.data.ptr = node;
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
        if (r) {
            return false;
        }
    }
    ctx->epoll_enabled = true;
    return true;
}
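
/* Keep the epoll registration for @node in sync with its pfd.events after
 * aio_set_fd_handler() has changed it.  Any epoll_ctl() failure disables
 * epoll for this context.
 */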
static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
    struct epoll_event event;
    int r;
    int ctl;

    if (!ctx->epoll_enabled) {
        return;
    }
    if (!node->pfd.events) {
        ctl = EPOLL_CTL_DEL;
    } else {
        event.data.ptr = node;
        event.events = epoll_events_from_pfd(node->pfd.events);
        ctl = is_new ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
    }

    r = epoll_ctl(ctx->epollfd, ctl, node->pfd.fd, &event);
    if (r) {
        aio_epoll_disable(ctx);
    }
}
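
/* Wait for events via the epoll instance.  qemu_poll_ns() is called first so
 * the nanosecond-granularity timeout is honored on the epollfd itself;
 * epoll_wait() then collects the individual fd events.
 */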
static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    AioHandler *node;
    int i, ret = 0;
    struct epoll_event events[128];

    assert(npfd == 1);
    assert(pfds[0].fd == ctx->epollfd);
    if (timeout > 0) {
        ret = qemu_poll_ns(pfds, npfd, timeout);
    }
    if (timeout <= 0 || ret > 0) {
        ret = epoll_wait(ctx->epollfd, events,
                         sizeof(events) / sizeof(events[0]),
                         timeout);
        if (ret <= 0) {
            goto out;
        }
        for (i = 0; i < ret; i++) {
            int ev = events[i].events;
            node = events[i].data.ptr;
            node->pfd.revents = (ev & EPOLLIN ? G_IO_IN : 0) |
                (ev & EPOLLOUT ? G_IO_OUT : 0) |
                (ev & EPOLLHUP ? G_IO_HUP : 0) |
                (ev & EPOLLERR ? G_IO_ERR : 0);
        }
    }
out:
    return ret;
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    /* Fall back to ppoll when external clients are disabled. */
    return !aio_external_disabled(ctx) && ctx->epoll_enabled;
}
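
/* Decide whether this aio_poll() iteration can use epoll.  Once the number
 * of polled fds reaches EPOLL_ENABLE_THRESHOLD, try to switch the context
 * to epoll mode permanently.
 */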
static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    if (!ctx->epoll_available) {
        return false;
    }
    if (aio_epoll_enabled(ctx)) {
        return true;
    }
    if (npfd >= EPOLL_ENABLE_THRESHOLD) {
        if (aio_epoll_try_enable(ctx)) {
            return true;
        } else {
            aio_epoll_disable(ctx);
        }
    }
    return false;
}

#else

static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
}

static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    assert(false);
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    return false;
}

static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    return false;
}

#endif

static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd)
            if (!node->deleted)
                return node;
    }

    return NULL;
}
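
/* Add, update or remove the handler for @fd.  Passing NULL for io_read,
 * io_write and io_poll removes an existing handler.
 */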
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    AioHandler *node;
    bool is_new = false;
    bool deleted = false;

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write && !io_poll) {
        if (node == NULL) {
            return;
        }

        g_source_remove_poll(&ctx->source, &node->pfd);

        /* If the lock is held, just mark the node as deleted */
        if (ctx->walking_handlers) {
            node->deleted = 1;
            node->pfd.revents = 0;
        } else {
            /* Otherwise, delete it for real.  We can't just mark it as
             * deleted because deleted nodes are only cleaned up after
             * releasing the walking_handlers lock.
             */
            QLIST_REMOVE(node, node);
            deleted = true;
        }

        if (!node->io_poll) {
            ctx->poll_disable_cnt--;
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
            is_new = true;

            ctx->poll_disable_cnt += !io_poll;
        } else {
            ctx->poll_disable_cnt += !io_poll - !node->io_poll;
        }

        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_poll = io_poll;
        node->opaque = opaque;
        node->is_external = is_external;

        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
    }

    aio_epoll_update(ctx, node, is_new);
    aio_notify(ctx);

    if (deleted) {
        g_free(node);
    }
}
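
/*
 * Example (hypothetical caller; sockfd, my_read_cb and my_state are names
 * invented for illustration): watch a socket for readability, then remove
 * the handler again:
 *
 *     aio_set_fd_handler(ctx, sockfd, true, my_read_cb, NULL, NULL, my_state);
 *     ...
 *     aio_set_fd_handler(ctx, sockfd, true, NULL, NULL, NULL, NULL);
 */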

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external,
                       (IOHandler *)io_read, NULL, io_poll, notifier);
}

bool aio_prepare(AioContext *ctx)
{
    return false;
}

bool aio_pending(AioContext *ctx)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read &&
            aio_node_check(ctx, node->is_external)) {
            return true;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write &&
            aio_node_check(ctx, node->is_external)) {
            return true;
        }
    }

    return false;
}

/*
 * Note that dispatch_fds == false has the side-effect of postponing the
 * freeing of deleted handlers.
 */
bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
{
    AioHandler *node = NULL;
    bool progress = false;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for aio_poll loops).
     */
    if (aio_bh_poll(ctx)) {
        progress = true;
    }

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    if (dispatch_fds) {
        node = QLIST_FIRST(&ctx->aio_handlers);
    }
    while (node) {
        AioHandler *tmp;
        int revents;

        ctx->walking_handlers++;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;

        if (!node->deleted &&
            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
            aio_node_check(ctx, node->is_external) &&
            node->io_read) {
            node->io_read(node->opaque);

            /* aio_notify() does not count as progress */
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }
        if (!node->deleted &&
            (revents & (G_IO_OUT | G_IO_ERR)) &&
            aio_node_check(ctx, node->is_external) &&
            node->io_write) {
            node->io_write(node->opaque);
            progress = true;
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    /* Run our timers */
    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}

/* These thread-local variables are used only in a small part of aio_poll
 * around the call to the poll() system call.  In particular they are not
 * used while aio_poll is performing callbacks, which makes it much easier
 * to think about reentrancy!
 *
 * Stack-allocated arrays would be perfect but they have size limitations;
 * heap allocation is expensive enough that we want to reuse arrays across
 * calls to aio_poll().  And because poll() has to be called without holding
 * any lock, the arrays cannot be stored in AioContext.  Thread-local data
 * has none of the disadvantages of these three options.
 */
static __thread GPollFD *pollfds;
static __thread AioHandler **nodes;
static __thread unsigned npfd, nalloc;
static __thread Notifier pollfds_cleanup_notifier;

static void pollfds_cleanup(Notifier *n, void *unused)
{
    g_assert(npfd == 0);
    g_free(pollfds);
    g_free(nodes);
    nalloc = 0;
}
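
/* Append @node to the thread-local pollfds[]/nodes[] arrays, doubling their
 * capacity when full.  The arrays are freed at thread exit via
 * pollfds_cleanup_notifier.
 */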
static void add_pollfd(AioHandler *node)
{
    if (npfd == nalloc) {
        if (nalloc == 0) {
            pollfds_cleanup_notifier.notify = pollfds_cleanup;
            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
            nalloc = 8;
        } else {
            g_assert(nalloc <= INT_MAX);
            nalloc *= 2;
        }
        pollfds = g_renew(GPollFD, pollfds, nalloc);
        nodes = g_renew(AioHandler *, nodes, nalloc);
    }
    nodes[npfd] = node;
    pollfds[npfd] = (GPollFD) {
        .fd = node->pfd.fd,
        .events = node->pfd.events,
    };
    npfd++;
}

/* run_poll_handlers:
 * @ctx: the AioContext
 * @max_ns: maximum time to poll for, in nanoseconds
 *
 * Polls for a given time.
 *
 * Note that ctx->notify_me must be non-zero so this function can detect
 * aio_notify().
 *
 * Note that the caller must have incremented ctx->walking_handlers.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
{
    bool progress = false;
    int64_t end_time;

    assert(ctx->notify_me);
    assert(ctx->walking_handlers > 0);
    assert(ctx->poll_disable_cnt == 0);

    trace_run_poll_handlers_begin(ctx, max_ns);

    end_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + max_ns;

    do {
        AioHandler *node;

        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
            if (!node->deleted && node->io_poll &&
                node->io_poll(node->opaque)) {
                progress = true;
            }

            /* Caller handles freeing deleted nodes.  Don't do it here. */
        }
    } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time);

    trace_run_poll_handlers_end(ctx, progress);

    return progress;
}
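
/*
 * An io_poll callback checks for pending work without blocking and returns
 * true when it finds some.  A minimal sketch (hypothetical device; MyDev and
 * my_ring_has_entries are invented for illustration):
 *
 *     static bool my_dev_poll(void *opaque)
 *     {
 *         MyDev *dev = opaque;
 *
 *         return my_ring_has_entries(&dev->ring);
 *     }
 */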

/* try_poll_mode:
 * @ctx: the AioContext
 * @blocking: polling is only attempted when blocking is true
 *
 * If blocking is true then ctx->notify_me must be non-zero so this function
 * can detect aio_notify().
 *
 * Note that the caller must have incremented ctx->walking_handlers.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool try_poll_mode(AioContext *ctx, bool blocking)
{
    if (blocking && ctx->poll_max_ns && ctx->poll_disable_cnt == 0) {
        /* See qemu_soonest_timeout() uint64_t hack */
        int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx),
                             (uint64_t)ctx->poll_max_ns);

        if (max_ns) {
            if (run_poll_handlers(ctx, max_ns)) {
                return true;
            }
        }
    }

    return false;
}

bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    int i;
    int ret = 0;
    bool progress;
    int64_t timeout;

    aio_context_acquire(ctx);
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    ctx->walking_handlers++;

    if (try_poll_mode(ctx, blocking)) {
        progress = true;
    } else {
        assert(npfd == 0);

        /* fill pollfds */
        if (!aio_epoll_enabled(ctx)) {
            QLIST_FOREACH(node, &ctx->aio_handlers, node) {
                if (!node->deleted && node->pfd.events
                    && aio_node_check(ctx, node->is_external)) {
                    add_pollfd(node);
                }
            }
        }

        timeout = blocking ? aio_compute_timeout(ctx) : 0;

        /* wait until next event */
        if (timeout) {
            aio_context_release(ctx);
        }
        if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
            AioHandler epoll_handler;

            epoll_handler.pfd.fd = ctx->epollfd;
            epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR;
            npfd = 0;
            add_pollfd(&epoll_handler);
            ret = aio_epoll(ctx, pollfds, npfd, timeout);
        } else {
            ret = qemu_poll_ns(pollfds, npfd, timeout);
        }
        if (timeout) {
            aio_context_acquire(ctx);
        }
    }

    if (blocking) {
        atomic_sub(&ctx->notify_me, 2);
    }

    aio_notify_accept(ctx);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        for (i = 0; i < npfd; i++) {
            nodes[i]->pfd.revents = pollfds[i].revents;
        }
    }

    npfd = 0;
    ctx->walking_handlers--;

    /* Run dispatch even if there were no readable fds to run timers */
    if (aio_dispatch(ctx, ret > 0)) {
        progress = true;
    }

    aio_context_release(ctx);

    return progress;
}
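
/*
 * Typical use (hypothetical caller): drive the context from a dedicated
 * event loop thread, blocking until work arrives:
 *
 *     while (!shutting_down) {
 *         aio_poll(ctx, true);
 *     }
 */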

void aio_context_setup(AioContext *ctx)
{
    /* TODO remove this in final patch submission */
    if (getenv("QEMU_AIO_POLL_MAX_NS")) {
        fprintf(stderr, "The QEMU_AIO_POLL_MAX_NS environment variable has "
                "been replaced with -object iothread,poll-max-ns=NUM\n");
        exit(1);
    }

#ifdef CONFIG_EPOLL_CREATE1
    assert(!ctx->epollfd);
    ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
    if (ctx->epollfd == -1) {
        fprintf(stderr, "Failed to create epoll instance: %s", strerror(errno));
        ctx->epoll_available = false;
    } else {
        ctx->epoll_available = true;
    }
#endif
}

void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns, Error **errp)
{
    /* No thread synchronization here, it doesn't matter if an incorrect poll
     * timeout is used once.
     */
    ctx->poll_max_ns = max_ns;

    aio_notify(ctx);
}