/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct Coroutine;
struct ThreadPool;
struct LinuxAioState;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access. */
    QemuRecMutex lock;

    /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * accessed with atomic primitives.  If this field is 0, everything
     * (file descriptors, bottom halves, timers) will be re-evaluated
     * before the next blocking poll(), thus the event_notifier_set call
     * can be skipped.  If it is non-zero, you may need to wake up a
     * concurrent aio_poll or the glib main event loop, making
     * event_notifier_set necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock to protect concurrent additions and deletions of QEMUBHs and
     * AioHandlers, and to ensure that no callbacks are removed while we're
     * walking and dispatching them.
     */
    QemuLockCnt list_lock;

    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    /* State for native Linux AIO.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LinuxAioState *linux_aio;
#endif

    /* TimerLists for calling timers - one per clock type.  Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;
    bool epoll_enabled;
    bool epoll_available;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously.  It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Note that this is separate from bdrv_drained_begin/bdrv_drained_end.  A
 * thread still has to call those to avoid being interrupted by the guest.
 *
 * Bottom halves, timers and callbacks can be created or removed without
 * acquiring the AioContext.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);

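/* Example (an illustrative sketch, not part of the API contract): a thread
 * that does not want to be interrupted while it drives the loop by hand can
 * bracket aio_poll() with acquire/release; "done" is a hypothetical
 * condition maintained by the caller.
 *
 *     aio_context_acquire(ctx);
 *     while (!done) {
 *         aio_poll(ctx, true);
 *     }
 *     aio_context_release(ctx);
 */
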
/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 */
void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

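/* Example (hedged sketch): defer a piece of work to the event loop without
 * keeping a QEMUBH handle around; "my_cb" and "my_state" are placeholder
 * names, not part of this API.
 *
 *     aio_bh_schedule_oneshot(ctx, my_cb, my_state);
 */
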
/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

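/* Example (illustrative only; "my_bh_cb" and "my_state" are hypothetical
 * caller names): the usual lifecycle of a long-lived bottom half is
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, my_state);
 *     qemu_bh_schedule(bh);    // my_bh_cb(my_state) runs from the loop
 *     ...
 *     qemu_bh_delete(bh);      // cancel and free once no longer needed
 */
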
/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because, for example, scheduling
 * a bottom half already calls it.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Execute the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.  Note that
 * aio_bh_poll() must not be called concurrently from multiple threads.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * Deletion is asynchronous: the bottom half is not freed immediately, but
 * only once the event loop has finished with it.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing pending AIO work.  This can issue new AIO
 * requests as a result of executing I/O completion or bottom half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);

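/* Example (hedged sketch): a common drain-style pattern keeps iterating
 * until the caller's own completion condition holds; "in_flight" is a
 * hypothetical counter maintained by the caller's completion callbacks.
 *
 *     while (in_flight > 0) {
 *         aio_poll(ctx, true);   // block until at least one event fires
 *     }
 */
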
/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque);

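/* Example (illustrative, not prescriptive): register a read handler for a
 * socket; "sockfd", "read_ready" and "s" are hypothetical caller state.
 * Passing NULL for all callbacks unregisters the descriptor again.
 *
 *     aio_set_fd_handler(ctx, sockfd,
 *                        true,          // is_external: guest-driven source
 *                        read_ready,    // io_read
 *                        NULL,          // io_write: writes not of interest
 *                        NULL,          // io_poll
 *                        s);            // opaque passed back to read_ready
 */
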
/* Set polling begin/end callbacks for a file descriptor that has already been
 * registered with aio_set_fd_handler.  Do nothing if the file descriptor is
 * not registered.
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end);

/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll);

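/* Example (a sketch under assumptions; "n" and "notifier_ready" are
 * hypothetical): tie an EventNotifier into the loop, and later detach it
 * by passing NULL handlers.
 *
 *     EventNotifier n;
 *     event_notifier_init(&n, 0);
 *     aio_set_event_notifier(ctx, &n, false, notifier_ready, NULL);
 *     ...
 *     aio_set_event_notifier(ctx, &n, false, NULL, NULL);
 *     event_notifier_cleanup(&n);
 */
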
/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init.  Use that
 * unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_tl(ts, ctx->tlg.tl[type], scale, cb, opaque);
}

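/* Example (hedged sketch): arm a timer on @ctx that fires 100ms from now;
 * "timer_cb" and "s" are placeholder names.
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  timer_cb, s);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 *     ...
 *     timer_del(t);
 *     timer_free(t);
 */
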
/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable the further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    atomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    int old;

    old = atomic_fetch_dec(&ctx->external_disable_cnt);
    assert(old > 0);
    if (old == 1) {
        /* Kick event loop so it re-arms file descriptors */
        aio_notify(ctx);
    }
}

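/* Example (illustrative pattern): external clients are typically disabled
 * around a drained section and re-enabled afterwards.  The calls nest, so
 * every aio_disable_external() must be paired with one aio_enable_external().
 *
 *     aio_disable_external(ctx);
 *     ...                          // guest-driven fds are ignored here
 *     aio_enable_external(ctx);    // kicks the loop to re-arm the fds
 */
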
/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return atomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check whether a node with the given is_external flag may be polled by @ctx
 * at this moment.  Return true if polling it is allowed.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !atomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active.  In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, struct Coroutine *co);

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context.  The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(struct Coroutine *co);

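/* Example (hedged sketch): a completion callback running in an arbitrary
 * thread can hand control back to a coroutine that has yielded; "co" and
 * "iothread_ctx" are hypothetical caller state.
 *
 *     aio_co_wake(co);                     // resume where it last ran
 *     // or, to choose the context explicitly:
 *     aio_co_schedule(iothread_ctx, co);
 */
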
/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, struct Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from another thread it will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

/**
 * @ctx: the aio context
 *
 * Return whether we are running in the I/O thread that manages @ctx.
 */
static inline bool aio_context_in_iothread(AioContext *ctx)
{
    return ctx == qemu_get_current_aio_context();
}

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);

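/* Example (illustrative values only): enable adaptive busy polling up to
 * 32 microseconds, doubling the window on growth and halving it on shrink;
 * error handling via &error_abort is an assumption made for brevity.
 *
 *     aio_context_set_poll_params(ctx, 32 * 1000, 2, 2, &error_abort);
 */
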
#endif