util/fdmon-poll.c
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * poll(2) file descriptor monitoring
 *
 * Uses ppoll(2) when available, g_poll() otherwise.
 */

#include "qemu/osdep.h"
#include "aio-posix.h"
#include "qemu/rcu_queue.h"

/*
 * These thread-local variables are used only in fdmon_poll_wait() around the
 * call to the poll() system call. In particular they are not used while
 * aio_poll is performing callbacks, which makes it much easier to think about
 * reentrancy!
 *
 * Stack-allocated arrays would be perfect but they have size limitations;
 * heap allocation is expensive enough that we want to reuse arrays across
 * calls to aio_poll(). And because poll() has to be called without holding
 * any lock, the arrays cannot be stored in AioContext. Thread-local data
 * has none of the disadvantages of these three options.
 */
static __thread GPollFD *pollfds;
static __thread AioHandler **nodes;
static __thread unsigned npfd, nalloc;
static __thread Notifier pollfds_cleanup_notifier;

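/* Thread-exit notifier callback (registered in add_pollfd() on first use):
 * releases the thread-local arrays once no fds are in flight. */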
static void pollfds_cleanup(Notifier *n, void *unused)
{
    g_assert(npfd == 0);
    g_free(pollfds);
    g_free(nodes);
    nalloc = 0;
}

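/*
 * Append one handler to the thread-local pollfds[]/nodes[] arrays, growing
 * them geometrically (8, 16, 32, ...) as needed. The cleanup notifier is
 * registered the first time an fd is added so the arrays are freed when the
 * thread exits.
 */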
static void add_pollfd(AioHandler *node)
{
    if (npfd == nalloc) {
        if (nalloc == 0) {
            pollfds_cleanup_notifier.notify = pollfds_cleanup;
            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
            nalloc = 8;
        } else {
            g_assert(nalloc <= INT_MAX);
            nalloc *= 2;
        }
        pollfds = g_renew(GPollFD, pollfds, nalloc);
        nodes = g_renew(AioHandler *, nodes, nalloc);
    }
    nodes[npfd] = node;
    pollfds[npfd] = (GPollFD) {
        .fd = node->pfd.fd,
        .events = node->pfd.events,
    };
    npfd++;
}

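/*
 * Build pollfds[] from the current ctx->aio_handlers list (walked under RCU),
 * poll for up to @timeout nanoseconds, and move handlers with pending revents
 * onto @ready_list. When there are enough fds for epoll(7) to pay off, the
 * wait is delegated to the upgraded fd monitor instead.
 */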
static int fdmon_poll_wait(AioContext *ctx, AioHandlerList *ready_list,
                           int64_t timeout)
{
    AioHandler *node;
    int ret;

    assert(npfd == 0);

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!QLIST_IS_INSERTED(node, node_deleted) && node->pfd.events) {
            add_pollfd(node);
        }
    }

    /* epoll(7) is faster above a certain number of fds */
    if (fdmon_epoll_try_upgrade(ctx, npfd)) {
        npfd = 0; /* we won't need pollfds[], reset npfd */
        return ctx->fdmon_ops->wait(ctx, ready_list, timeout);
    }

    ret = qemu_poll_ns(pollfds, npfd, timeout);
    if (ret > 0) {
        int i;

        for (i = 0; i < npfd; i++) {
            int revents = pollfds[i].revents;

            if (revents) {
                aio_add_ready_handler(ready_list, nodes[i], revents);
            }
        }
    }

    npfd = 0;
    return ret;
}

static void fdmon_poll_update(AioContext *ctx,
                              AioHandler *old_node,
                              AioHandler *new_node)
{
    /* Do nothing, AioHandler already contains the state we'll need */
}

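/*
 * ->update is a no-op because this implementation rebuilds pollfds[] from
 * ctx->aio_handlers on every wait, so handler changes are picked up
 * automatically the next time fdmon_poll_wait() runs.
 */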
const FDMonOps fdmon_poll_ops = {
    .update = fdmon_poll_update,
    .wait = fdmon_poll_wait,
    .need_wait = aio_poll_disabled,
};