/* qemu-aio.h — from qemu.git (git.proxmox.com mirror); gitweb navigation residue removed */
1 /*
2 * QEMU aio implementation
3 *
4 * Copyright IBM, Corp. 2008
5 *
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14 #ifndef QEMU_AIO_H
15 #define QEMU_AIO_H
16
17 #include "qemu-common.h"
18 #include "qemu-queue.h"
19 #include "event_notifier.h"
20
typedef struct BlockDriverAIOCB BlockDriverAIOCB;

/* Completion callback invoked when an asynchronous request finishes.
 * @opaque: caller's cookie passed to qemu_aio_get.
 * @ret:    0 (or a byte count) on success, negative errno on failure —
 *          NOTE(review): sign convention presumed from QEMU practice, confirm
 *          against block.c.
 */
typedef void BlockDriverCompletionFunc(void *opaque, int ret);

/* Per-driver pool of AIO control blocks.  qemu_aio_get allocates AIOCBs of
 * aiocb_size bytes from it and qemu_aio_release returns them; free_aiocb
 * appears to anchor the recycled-AIOCB free list (chained via
 * BlockDriverAIOCB.next — confirm in the implementation).
 */
typedef struct AIOPool {
    void (*cancel)(BlockDriverAIOCB *acb);  /* cancel a pending request */
    int aiocb_size;                         /* size in bytes of one AIOCB, including driver data */
    BlockDriverAIOCB *free_aiocb;           /* head of the free list */
} AIOPool;
29
/* Common header embedded at the start of every asynchronous I/O control
 * block; driver-specific AIOCBs extend it (total size is the pool's
 * aiocb_size).
 */
struct BlockDriverAIOCB {
    AIOPool *pool;                  /* pool this AIOCB was allocated from */
    BlockDriverState *bs;           /* block device the request targets */
    BlockDriverCompletionFunc *cb;  /* completion callback */
    void *opaque;                   /* caller cookie passed to cb */
    BlockDriverAIOCB *next;         /* next AIOCB — presumably the pool free-list link, verify in implementation */
};

/* Allocate an AIOCB from @pool and fill in the common fields above.
 * Returns a pointer sized per pool->aiocb_size; the caller treats it as its
 * driver-specific AIOCB type.
 */
void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
                   BlockDriverCompletionFunc *cb, void *opaque);

/* Return an AIOCB previously obtained with qemu_aio_get to its pool. */
void qemu_aio_release(void *p);
41
typedef struct AioHandler AioHandler;   /* opaque; defined by the aio implementation */
typedef void QEMUBHFunc(void *opaque);  /* bottom-half callback */
typedef void IOHandler(void *opaque);   /* fd read/write readiness callback */
45
/* A mini event loop: a set of fd/event-notifier handlers plus bottom
 * halves, pollable both synchronously (aio_poll) and via GLib (the
 * embedded GSource, see aio_get_g_source).
 */
typedef struct AioContext {
    /* Embedded GSource so the context can be attached to a GLib main loop;
     * must stay the first member so the implementation can cast between
     * GSource* and AioContext* — NOTE(review): presumed from the embedding,
     * confirm in aio_get_g_source's implementation.
     */
    GSource source;

    /* The list of registered AIO handlers */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* This is a simple lock used to protect the aio_handlers list.
     * Specifically, it's used to ensure that no callbacks are removed while
     * we're walking and dispatching callbacks.
     */
    int walking_handlers;

    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* A simple lock used to protect the first_bh list, and ensure that
     * no callbacks are removed while we're walking and dispatching callbacks.
     */
    int walking_bh;

    /* Used for aio_notify. */
    EventNotifier notifier;
} AioContext;

/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
typedef int (AioFlushEventNotifierHandler)(EventNotifier *e);
72
/**
 * aio_context_new: Allocate a new AioContext.
 *
 * AioContexts provide a mini event-loop that can be waited on synchronously.
 * They also provide bottom halves, a service to execute a piece of code
 * as soon as possible.
 */
AioContext *aio_context_new(void);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);
97
/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * @ctx: the AioContext the bottom half belongs to.
 * @cb: callback invoked when the bottom half runs.
 * @opaque: cookie passed to @cb.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe. The #QEMUBH structure
 * is opaque and must be allocated prior to its use.  Free it with
 * qemu_bh_delete.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_wait to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_wait again very soon,
 * or go through another iteration of the GLib main loop. Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * These are internal functions used by the QEMU main loop.
 */
int aio_bh_poll(AioContext *ctx);
128
/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked. This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet. While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex. This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new. It also implies canceling the bottom half if it was
 * scheduled.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);
166
/* Flush any pending AIO operation. This function will block until all
 * outstanding AIO operations have been completed or cancelled. */
void aio_flush(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Make progress on pending AIO work. This can issue new pending
 * aio as a result of executing I/O completion or bh callbacks.
 *
 * If there is no pending AIO operation or completion (bottom half),
 * return false. If there are pending bottom halves, return true.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking. If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 *
 * If @blocking is false, this function will also return false if the
 * function cannot make any progress without blocking.
 */
bool aio_poll(AioContext *ctx, bool blocking);
194
#ifdef CONFIG_POSIX
/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
typedef int (AioFlushHandler)(void *opaque);

/* Register a file descriptor and associated callbacks. Behaves very similarly
 * to qemu_set_fd_handler2. Unlike qemu_set_fd_handler2, these callbacks will
 * be invoked when using either qemu_aio_wait() or qemu_aio_flush().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 *
 * NOTE(review): passing NULL handlers presumably unregisters @fd, mirroring
 * qemu_set_fd_handler2 — confirm in the implementation.
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioFlushHandler *io_flush,
                        void *opaque);
#endif
213
/* Register an event notifier and associated callbacks. Behaves very similarly
 * to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks
 * will be invoked when using either qemu_aio_wait() or qemu_aio_flush().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read,
                            AioFlushEventNotifierHandler *io_flush);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);
230
/* Functions to operate on the main QEMU AioContext. */

/* Main-context counterpart of aio_flush: block until all outstanding AIO
 * operations of the main context have completed or been cancelled. */
void qemu_aio_flush(void);

/* Run one iteration of the main-context event loop; return value semantics
 * presumably mirror aio_poll — confirm against the implementation. */
bool qemu_aio_wait(void);

/* Main-context counterpart of aio_set_event_notifier. */
void qemu_aio_set_event_notifier(EventNotifier *notifier,
                                 EventNotifierHandler *io_read,
                                 AioFlushEventNotifierHandler *io_flush);

#ifdef CONFIG_POSIX
/* Main-context counterpart of aio_set_fd_handler. */
void qemu_aio_set_fd_handler(int fd,
                             IOHandler *io_read,
                             IOHandler *io_write,
                             AioFlushHandler *io_flush,
                             void *opaque);
#endif
246
247 #endif