/*
 * qemu-aio.h — from qemu.git, blob 2ed6ad3723f72260b15559e22bfa7c6c10b25061
 * (gitweb navigation residue removed; embedded line numbers stripped)
 */
/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
13
14 #ifndef QEMU_AIO_H
15 #define QEMU_AIO_H
16
17 #include "qemu-common.h"
18 #include "event_notifier.h"
19
typedef struct BlockDriverAIOCB BlockDriverAIOCB;

/* Completion callback for an asynchronous I/O request.  @opaque is the
 * pointer the caller registered with the request; @ret is the request's
 * result (presumably 0 on success, negative errno on error — confirm
 * against the block drivers that invoke it). */
typedef void BlockDriverCompletionFunc(void *opaque, int ret);

/* Describes one family of AIO control blocks: how to cancel a pending
 * request, how large each control block is, and the head of a free list
 * of released AIOCBs kept for reuse (linked via BlockDriverAIOCB.next). */
typedef struct AIOPool {
    void (*cancel)(BlockDriverAIOCB *acb);  /* cancel a pending request */
    int aiocb_size;                         /* allocation size of one AIOCB */
    BlockDriverAIOCB *free_aiocb;           /* head of the free list */
} AIOPool;
28
/* Common header of every asynchronous I/O control block.  Obtained from
 * qemu_aio_get() and returned with qemu_aio_release(); concrete request
 * types presumably embed this as their first member, with the full
 * allocation size given by pool->aiocb_size — confirm in the drivers. */
struct BlockDriverAIOCB {
    AIOPool *pool;                  /* pool this AIOCB was allocated from */
    BlockDriverState *bs;           /* device the request targets */
    BlockDriverCompletionFunc *cb;  /* invoked when the request completes */
    void *opaque;                   /* caller context, passed to cb */
    BlockDriverAIOCB *next;         /* link in the pool's free list */
};
36
/* Get an AIO control block from @pool, recording @bs, @cb and @opaque for
 * completion.  The return value is the request-type-specific structure of
 * pool->aiocb_size bytes (hence void *).  Pair with qemu_aio_release(). */
void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
                   BlockDriverCompletionFunc *cb, void *opaque);

/* Give an AIOCB obtained from qemu_aio_get() back to its pool. */
void qemu_aio_release(void *p);
40
/* Opaque per-file-descriptor handler state, defined in the implementation. */
typedef struct AioHandler AioHandler;

/* Callback invoked when a bottom half is dispatched. */
typedef void QEMUBHFunc(void *opaque);

/* Read/write readiness callback registered with qemu_aio_set_fd_handler(). */
typedef void IOHandler(void *opaque);
44
/* A mini event loop; currently holds the bottom halves registered
 * against it (see aio_context_new()). */
typedef struct AioContext {
    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* A simple lock used to protect the first_bh list, and ensure that
     * no callbacks are removed while we're walking and dispatching callbacks.
     */
    int walking_bh;
} AioContext;
54
/* Returns 1 if there are still outstanding AIO requests; 0 otherwise.
 * Variant of AioFlushHandler that receives the EventNotifier it was
 * registered with (see qemu_aio_set_event_notifier()). */
typedef int (AioFlushEventNotifierHandler)(EventNotifier *e);
57
/**
 * aio_context_new: Allocate a new AioContext.
 *
 * AioContexts provide a mini event-loop that can be waited on synchronously.
 * They also provide bottom halves, a service to execute a piece of code
 * as soon as possible.
 */
AioContext *aio_context_new(void);
66
/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 *
 * @ctx: The AioContext the bottom half is attached to.
 * @cb: Callback invoked when the bottom half runs.
 * @opaque: Passed unchanged to @cb.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
75
/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * These are internal functions used by the QEMU main loop.
 *
 * NOTE(review): exact semantics inferred from the names — confirm in the
 * implementation: aio_bh_poll() presumably dispatches pending bottom
 * halves and reports whether any ran; aio_bh_update_timeout() presumably
 * shrinks *timeout when a bottom half is pending so the main loop wakes
 * up in time to run it.
 */
int aio_bh_poll(AioContext *ctx);
void aio_bh_update_timeout(AioContext *ctx, uint32_t *timeout);
83
/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);
97
/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);
110
/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);
121
/* Flush any pending AIO operation.  This function will block until all
 * outstanding AIO operations have been completed or cancelled. */
void qemu_aio_flush(void);
125
/* Wait for a single AIO completion to occur.  This function will wait
 * until a single AIO event has completed and it will ensure something
 * has moved before returning.  This can issue new pending aio as
 * result of executing I/O completion or bh callbacks.
 *
 * Return whether there is still any pending AIO operation. */
bool qemu_aio_wait(void);
133
#ifdef CONFIG_POSIX
/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
typedef int (AioFlushHandler)(void *opaque);

/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler2.  Unlike qemu_set_fd_handler2, these callbacks will
 * be invoked when using either qemu_aio_wait() or qemu_aio_flush().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 *
 * Passing NULL handlers presumably unregisters the descriptor, mirroring
 * qemu_set_fd_handler2 — confirm in the implementation.
 */
void qemu_aio_set_fd_handler(int fd,
                             IOHandler *io_read,
                             IOHandler *io_write,
                             AioFlushHandler *io_flush,
                             void *opaque);
#endif
151
/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using either qemu_aio_wait() or
 * qemu_aio_flush().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void qemu_aio_set_event_notifier(EventNotifier *notifier,
                                 EventNotifierHandler *io_read,
                                 AioFlushEventNotifierHandler *io_flush);
162
163 #endif