/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
#include "qemu/coroutine-core.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"
#include "block/graph-lock.h"
#include "hw/qdev-core.h"


typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct ThreadPool;
struct LinuxAioState;
struct LuringState;

/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);

/* Callbacks for file descriptor monitoring implementations */
typedef struct {
    /*
     * update:
     * @ctx: the AioContext
     * @old_node: the existing handler or NULL if this file descriptor is being
     *            monitored for the first time
     * @new_node: the new handler or NULL if this file descriptor is being
     *            removed
     *
     * Add/remove/modify a monitored file descriptor.
     *
     * Called with ctx->list_lock acquired.
     */
    void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);

    /*
     * wait:
     * @ctx: the AioContext
     * @ready_list: list for handlers that become ready
     * @timeout: maximum duration to wait, in nanoseconds
     *
     * Wait for file descriptors to become ready and place them on ready_list.
     *
     * Called with ctx->list_lock incremented but not locked.
     *
     * Returns: number of ready file descriptors.
     */
    int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);

    /*
     * need_wait:
     * @ctx: the AioContext
     *
     * Tell aio_poll() when to stop userspace polling early because ->wait()
     * has fds ready.
     *
     * File descriptor monitoring implementations that cannot poll fd readiness
     * from userspace should use aio_poll_disabled() here. This ensures that
     * file descriptors are not starved by handlers that frequently make
     * progress via userspace polling.
     *
     * Returns: true if ->wait() should be called, false otherwise.
     */
    bool (*need_wait)(AioContext *ctx);
} FDMonOps;

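/*
 * Illustrative sketch (not part of this header): the general shape of an
 * FDMonOps implementation. The fdmon_noop_* names are hypothetical; the real
 * implementations live in util/fdmon-*.c (poll, epoll, io_uring).
 *
 *   static void fdmon_noop_update(AioContext *ctx, AioHandler *old_node,
 *                                 AioHandler *new_node)
 *   {
 *       // record the addition/removal/modification of the monitored fd
 *   }
 *
 *   static int fdmon_noop_wait(AioContext *ctx, AioHandlerList *ready_list,
 *                              int64_t timeout)
 *   {
 *       // block for up to @timeout ns, queue ready handlers on @ready_list
 *       return 0;   // number of ready file descriptors
 *   }
 *
 *   static const FDMonOps fdmon_noop_ops = {
 *       .update    = fdmon_noop_update,
 *       .wait      = fdmon_noop_wait,
 *       .need_wait = aio_poll_disabled,   // cannot poll readiness in userspace
 *   };
 */
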
/*
 * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
 * scheduled BHs are not processed until the next aio_bh_poll() call. All
 * active aio_bh_poll() calls chain their slices together in a list, so that
 * nested aio_bh_poll() calls process all scheduled bottom halves.
 */
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
    BHList bh_list;
    QSIMPLEQ_ENTRY(BHListSlice) next;
};
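
/*
 * A simplified sketch of the slicing described above (illustrative only; the
 * real logic lives in util/async.c and also handles flags and reentrancy):
 *
 *   int aio_bh_poll(AioContext *ctx)
 *   {
 *       BHListSlice slice;
 *       BHListSlice *s;
 *       int ret = 0;
 *
 *       // Carve off the BHs scheduled so far; later qemu_bh_schedule() calls
 *       // land on ctx->bh_list and wait for the next aio_bh_poll().
 *       QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
 *       QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);
 *
 *       // Nested aio_bh_poll() calls chain their own slices onto the
 *       // context, so this loop drains every slice that is still queued.
 *       while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
 *           // ...dequeue and invoke each BH on s->bh_list, counting ret...
 *           QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
 *       }
 *       return ret;
 *   }
 */
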
typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access. */
    QemuRecMutex lock;

    /*
     * Keep track of readers and writers of the block layer graph.
     * This is essential to avoid performing additions and removals
     * of nodes and edges of the block graph while some
     * other thread is traversing it.
     */
    BdrvGraphRWlock *bdrv_graph;

    /* The list of registered AIO handlers. Protected by ctx->list_lock. */
    AioHandlerList aio_handlers;

    /* The list of AIO handlers to be deleted. Protected by ctx->list_lock. */
    AioHandlerList deleted_aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * only written from the AioContext home thread, or under the BQL in
     * the case of the main AioContext. However, it is read from any
     * thread so it is still accessed with atomic primitives.
     *
     * If this field is 0, everything (file descriptors, bottom halves,
     * timers) will be re-evaluated before the next blocking poll() or
     * io_uring wait; therefore, the event_notifier_set call can be
     * skipped. If it is non-zero, you may need to wake up a concurrent
     * aio_poll or the glib main event loop, making event_notifier_set
     * necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch. It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock that protects concurrent QEMUBH and AioHandler adders and
     * deleters, and ensures that no callbacks are removed while we're
     * walking and dispatching them.
     */
    QemuLockCnt list_lock;

    /* Bottom Halves pending aio_bh_poll() processing */
    BHList bh_list;

    /* Chained BH list slices for each nested aio_bh_poll() call */
    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls. When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events. False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way. For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    int thread_pool_min;
    int thread_pool_max;
    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
    struct LuringState *linux_io_uring;

    /* State for file descriptor monitoring using Linux io_uring */
    struct io_uring fdmon_io_uring;
    AioHandlerSList submit_list;
#endif

    /* TimerLists for calling timers - one per clock type. Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* AIO engine parameters */
    int64_t aio_max_batch;  /* maximum number of requests in a batch */

    /*
     * List of handlers participating in userspace polling. Protected by
     * ctx->list_lock. Iterated and modified mostly by the event loop thread
     * from aio_poll() with ctx->list_lock incremented. aio_set_fd_handler()
     * only touches the list to delete nodes if ctx->list_lock's count is zero.
     */
    AioHandlerList poll_aio_handlers;

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;

    const FDMonOps *fdmon_ops;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously. It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

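/*
 * Illustrative sketch (not part of this header): creating an AioContext and
 * letting the GLib main loop drive it via its GSource. error_report_err()
 * assumes "qapi/error.h" is available; aio_get_g_source() is declared further
 * down in this header.
 *
 *   Error *local_err = NULL;
 *   AioContext *ctx = aio_context_new(&local_err);
 *   if (!ctx) {
 *       error_report_err(local_err);
 *       return;
 *   }
 *
 *   GSource *src = aio_get_g_source(ctx);
 *   g_source_attach(src, NULL);      // run it from the default GLib main loop
 *   g_source_unref(src);
 *
 *   // ...
 *   aio_context_unref(ctx);          // drop the creator's reference
 */
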
/* Take ownership of the AioContext. If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll(). Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Note that this is separate from bdrv_drained_begin/bdrv_drained_end. A
 * thread still has to call those to avoid being interrupted by the guest.
 *
 * Bottom halves, timers and callbacks can be created or removed without
 * acquiring the AioContext.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);

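/*
 * Illustrative sketch (not part of this header): the ownership pattern for a
 * thread that needs to manipulate objects tied to an AioContext it does not
 * run itself. The state being protected is hypothetical.
 *
 *   aio_context_acquire(ctx);
 *   // ...modify timers, schedule work, touch state bound to @ctx...
 *   aio_context_release(ctx);
 */
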
/**
 * aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will
 * run only once and as soon as possible.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                                  const char *name);

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 *
 * A convenience wrapper for aio_bh_schedule_oneshot_full() that uses cb as the
 * name string.
 */
#define aio_bh_schedule_oneshot(ctx, cb, opaque) \
    aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)))

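/*
 * Illustrative sketch (not part of this header): scheduling a one-shot bottom
 * half. The callback name and opaque type are hypothetical.
 *
 *   static void my_oneshot_cb(void *opaque)
 *   {
 *       MyState *s = opaque;
 *       // runs once in @ctx's thread, then the BH is freed automatically
 *       s->pending = false;
 *   }
 *
 *   aio_bh_schedule_oneshot(ctx, my_oneshot_cb, s);
 */
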
/**
 * aio_bh_new_full: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe. The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 *
 * @name: A human-readable identifier for debugging purposes.
 * @reentrancy_guard: A guard set when entering a cb to prevent
 *                    device-reentrancy issues
 */
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name, MemReentrancyGuard *reentrancy_guard);

/**
 * aio_bh_new: Allocate a new bottom half structure
 *
 * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
 * string.
 */
#define aio_bh_new(ctx, cb, opaque) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), NULL)

/**
 * aio_bh_new_guarded: Allocate a new bottom half structure with a
 * reentrancy_guard
 *
 * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
 * string.
 */
#define aio_bh_new_guarded(ctx, cb, opaque, guard) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), guard)

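/*
 * Illustrative sketch (not part of this header): the typical lifecycle of a
 * persistent bottom half. The names are hypothetical; qemu_bh_schedule() and
 * qemu_bh_delete() are declared further down in this header.
 *
 *   static void my_bh_cb(void *opaque)
 *   {
 *       MyState *s = opaque;
 *       // invoked from @ctx's event loop each time the BH is scheduled
 *   }
 *
 *   s->bh = aio_bh_new(ctx, my_bh_cb, s);    // allocate once
 *   ...
 *   qemu_bh_schedule(s->bh);                 // request a callback
 *   ...
 *   qemu_bh_delete(s->bh);                   // cancel and free when done
 */
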
/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop. Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration(). Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting. This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Executes callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop. Note that
 * aio_bh_poll() must not be called concurrently with itself on the same
 * AioContext.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked. This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet. While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex. This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new. It also implies canceling the bottom half if it was
 * scheduled.
 * Deletion is asynchronous: the memory is only released once the event
 * loop has finished with the bottom half.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing pending AIO work. This can issue new AIO
 * requests as a result of executing I/O completion or bottom half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers. If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking. If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool no_coroutine_fn aio_poll(AioContext *ctx, bool blocking);

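/*
 * Illustrative sketch (not part of this header): synchronously waiting for an
 * asynchronous operation to finish by driving the event loop. The @done flag
 * is hypothetical and would be set by a completion callback;
 * qemu_get_current_aio_context() is declared further down in this header.
 *
 *   while (!done) {
 *       aio_poll(qemu_get_current_aio_context(), true);
 *   }
 */
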
/* Register a file descriptor and associated callbacks. Behaves very similarly
 * to qemu_set_fd_handler. Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque);

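/*
 * Illustrative sketch (not part of this header): monitoring a file descriptor
 * for readability. The handler name and state are hypothetical; passing NULL
 * for all callbacks removes the handler again.
 *
 *   static void my_fd_read(void *opaque)
 *   {
 *       MyState *s = opaque;
 *       // read from s->fd and process the data
 *   }
 *
 *   aio_set_fd_handler(ctx, s->fd, my_fd_read, NULL, NULL, NULL, s);
 *   ...
 *   aio_set_fd_handler(ctx, s->fd, NULL, NULL, NULL, NULL, NULL);   // unregister
 */
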
/* Register an event notifier and associated callbacks. Behaves very similarly
 * to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks
 * will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready);

/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier. Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

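/*
 * Illustrative sketch (not part of this header): hooking an EventNotifier into
 * an AioContext. event_notifier_init() comes from "qemu/event_notifier.h"
 * (already included above); the handler name and state are hypothetical.
 *
 *   static void my_notifier_read(EventNotifier *e)
 *   {
 *       MyState *s = container_of(e, MyState, notifier);
 *       event_notifier_test_and_clear(e);
 *       // ...handle the event...
 *   }
 *
 *   event_notifier_init(&s->notifier, 0);
 *   aio_set_event_notifier(ctx, &s->notifier, my_notifier_read, NULL, NULL);
 *   ...
 *   aio_set_event_notifier(ctx, &s->notifier, NULL, NULL, NULL);   // unregister
 */
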
/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Setup the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/* Setup the LuringState bound to this AioContext */
struct LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);

/* Return the LuringState bound to this AioContext */
struct LuringState *aio_get_linux_io_uring(AioContext *ctx);

/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use that unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}

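/*
 * Illustrative sketch (not part of this header): a dynamically allocated timer
 * that fires 100ms from now. timer_mod(), qemu_clock_get_ms(), timer_del() and
 * timer_free() come from "qemu/timer.h" (already included above); the callback
 * name and state are hypothetical.
 *
 *   static void my_timer_cb(void *opaque)
 *   {
 *       MyState *s = opaque;
 *       // runs in @ctx's thread when the timer expires
 *   }
 *
 *   s->timer = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS, my_timer_cb, s);
 *   timer_mod(s->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 *   ...
 *   timer_del(s->timer);
 *   timer_free(s->timer);
 */
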
/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}

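/*
 * Illustrative sketch (not part of this header): the caller-allocated variant,
 * with the QEMUTimer embedded in a hypothetical state structure.
 *
 *   struct MyState {
 *       QEMUTimer timer;
 *       // ...
 *   };
 *
 *   aio_timer_init(ctx, &s->timer, QEMU_CLOCK_VIRTUAL, SCALE_NS, my_timer_cb, s);
 *   timer_mod(&s->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 100000);
 */
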
/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active. In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, Coroutine *co);

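/*
 * Illustrative sketch (not part of this header): starting a coroutine in
 * another AioContext (for example an IOThread's). qemu_coroutine_create() is
 * assumed to come from the coroutine headers; the entry point and state are
 * hypothetical.
 *
 *   static void coroutine_fn my_co_entry(void *opaque)
 *   {
 *       // runs in the event loop of the AioContext it was scheduled on
 *   }
 *
 *   Coroutine *co = qemu_coroutine_create(my_co_entry, s);
 *   aio_co_schedule(iothread_ctx, co);
 */
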
/**
 * aio_co_reschedule_self:
 * @new_ctx: the new context
 *
 * Move the currently running coroutine to new_ctx. If the coroutine is already
 * running in new_ctx, do nothing.
 */
void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context. The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(Coroutine *co);

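/*
 * Illustrative sketch (not part of this header): a coroutine that yields until
 * a completion callback wakes it again. The request structure and the
 * start_async_operation() helper are hypothetical; qemu_coroutine_self() and
 * qemu_coroutine_yield() are assumed to come from the coroutine headers.
 *
 *   // In the coroutine:
 *   req->co = qemu_coroutine_self();
 *   start_async_operation(req);
 *   qemu_coroutine_yield();              // resumes here after aio_co_wake()
 *
 *   // In the completion callback:
 *   aio_co_wake(req->co);
 */
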
/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext. If
 * called from the main thread or with the "big QEMU lock" taken it
 * will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

void qemu_set_current_aio_context(AioContext *ctx);

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/* Used internally, do not call outside AioContext code */
void aio_context_use_g_source(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);

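/*
 * Illustrative sketch (not part of this header): enabling busy polling with a
 * 32768ns budget (the iothread object's default) and leaving grow/shrink at 0.
 * Passing 0 for @max_ns disables polling entirely. error_abort assumes
 * "qapi/error.h" is available.
 *
 *   aio_context_set_poll_params(ctx, 32768, 0, 0, &error_abort);
 */
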
/**
 * aio_context_set_aio_params:
 * @ctx: the aio context
 * @max_batch: maximum number of requests in a batch, 0 means that the
 *             engine will use its default
 */
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                Error **errp);

/**
 * aio_context_set_thread_pool_params:
 * @ctx: the aio context
 * @min: min number of threads to have readily available in the thread pool
 * @max: max number of threads the thread pool can contain
 */
void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp);
#endif