/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
#include "qemu/coroutine.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

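/*
 * Usage sketch (illustrative only; MyAIOCB and its fields are hypothetical):
 * a driver embeds BlockAIOCB as the first member of its own AIOCB type,
 * registers its size in an AIOCBInfo, and allocates instances with
 * qemu_aio_get().
 *
 *     typedef struct MyAIOCB {
 *         BlockAIOCB common;                  // must be the first member
 *         int result;
 *     } MyAIOCB;
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .aiocb_size = sizeof(MyAIOCB),
 *     };
 *
 *     static void my_request_done(MyAIOCB *acb)
 *     {
 *         acb->common.cb(acb->common.opaque, acb->result);
 *         qemu_aio_unref(acb);                // drop qemu_aio_get()'s reference
 *     }
 */
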
typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct Coroutine;
struct ThreadPool;
struct LinuxAioState;
struct LuringState;

/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);

/* Callbacks for file descriptor monitoring implementations */
typedef struct {
    /*
     * update:
     * @ctx: the AioContext
     * @old_node: the existing handler or NULL if this file descriptor is being
     *            monitored for the first time
     * @new_node: the new handler or NULL if this file descriptor is being
     *            removed
     *
     * Add/remove/modify a monitored file descriptor.
     *
     * Called with ctx->list_lock acquired.
     */
    void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);

    /*
     * wait:
     * @ctx: the AioContext
     * @ready_list: list for handlers that become ready
     * @timeout: maximum duration to wait, in nanoseconds
     *
     * Wait for file descriptors to become ready and place them on ready_list.
     *
     * Called with ctx->list_lock incremented but not locked.
     *
     * Returns: number of ready file descriptors.
     */
    int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);

    /*
     * need_wait:
     * @ctx: the AioContext
     *
     * Tell aio_poll() when to stop userspace polling early because ->wait()
     * has fds ready.
     *
     * File descriptor monitoring implementations that cannot poll fd readiness
     * from userspace should use aio_poll_disabled() here.  This ensures that
     * file descriptors are not starved by handlers that frequently make
     * progress via userspace polling.
     *
     * Returns: true if ->wait() should be called, false otherwise.
     */
    bool (*need_wait)(AioContext *ctx);
} FDMonOps;

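/*
 * Illustrative skeleton of a monitoring backend (hypothetical; real
 * implementations live elsewhere in the tree and access AioHandler
 * internals not visible from this header):
 *
 *     static void fdmon_example_update(AioContext *ctx, AioHandler *old_node,
 *                                      AioHandler *new_node)
 *     {
 *         // translate the handler change into backend-specific bookkeeping
 *     }
 *
 *     static int fdmon_example_wait(AioContext *ctx,
 *                                   AioHandlerList *ready_list,
 *                                   int64_t timeout)
 *     {
 *         return 0;    // block up to @timeout ns, fill ready_list
 *     }
 *
 *     static const FDMonOps fdmon_example_ops = {
 *         .update    = fdmon_example_update,
 *         .wait      = fdmon_example_wait,
 *         .need_wait = aio_poll_disabled,   // cannot poll from userspace
 *     };
 */
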
/*
 * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
 * scheduled BHs are not processed until the next aio_bh_poll() call.  All
 * active aio_bh_poll() calls chain their slices together in a list, so that
 * nested aio_bh_poll() calls process all scheduled bottom halves.
 */
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
    BHList bh_list;
    QSIMPLEQ_ENTRY(BHListSlice) next;
};

typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access.  */
    QemuRecMutex lock;

    /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
    AioHandlerList aio_handlers;

    /* The list of AIO handlers to be deleted.  Protected by ctx->list_lock. */
    AioHandlerList deleted_aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * only written from the AioContext home thread, or under the BQL in
     * the case of the main AioContext.  However, it is read from any
     * thread so it is still accessed with atomic primitives.
     *
     * If this field is 0, everything (file descriptors, bottom halves,
     * timers) will be re-evaluated before the next blocking poll() or
     * io_uring wait; therefore, the event_notifier_set call can be
     * skipped.  If it is non-zero, you may need to wake up a concurrent
     * aio_poll or the glib main event loop, making event_notifier_set
     * necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock that protects concurrent QEMUBH and AioHandler adders and
     * deleters, and ensures that no callbacks are removed while we're
     * walking and dispatching them.
     */
    QemuLockCnt list_lock;

    /* Bottom Halves pending aio_bh_poll() processing */
    BHList bh_list;

    /* Chained BH list slices for each nested aio_bh_poll() call */
    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    int thread_pool_min;
    int thread_pool_max;
    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    /*
     * State for native Linux AIO.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
    /*
     * State for Linux io_uring.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LuringState *linux_io_uring;

    /* State for file descriptor monitoring using Linux io_uring */
    struct io_uring fdmon_io_uring;
    AioHandlerSList submit_list;
#endif

    /* TimerLists for calling timers - one per clock type.  Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* AIO engine parameters */
    int64_t aio_max_batch;  /* maximum number of requests in a batch */

    /*
     * List of handlers participating in userspace polling.  Protected by
     * ctx->list_lock.  Iterated and modified mostly by the event loop thread
     * from aio_poll() with ctx->list_lock incremented.  aio_set_fd_handler()
     * only touches the list to delete nodes if ctx->list_lock's count is zero.
     */
    AioHandlerList poll_aio_handlers;

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;

    const FDMonOps *fdmon_ops;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * AioContext provides a mini event loop that can be waited on synchronously.
 * It also provides bottom halves, a service to execute a piece of code
 * as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

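/*
 * Sketch (illustrative): create a context, run one non-blocking iteration,
 * and drop the reference.  Assumes qapi/error.h's error_fatal for brevity.
 *
 *     AioContext *ctx = aio_context_new(&error_fatal);
 *     aio_poll(ctx, false);
 *     aio_context_unref(ctx);
 */
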
/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Note that this is separate from bdrv_drained_begin/bdrv_drained_end.  A
 * thread still has to call those to avoid being interrupted by the guest.
 *
 * Bottom halves, timers and callbacks can be created or removed without
 * acquiring the AioContext.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);

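/*
 * Typical pattern when touching objects bound to an AioContext owned by
 * another thread (illustrative sketch; my_timer and deadline_ns are
 * hypothetical):
 *
 *     aio_context_acquire(ctx);
 *     timer_mod(&my_timer, deadline_ns);    // my_timer is attached to ctx
 *     aio_context_release(ctx);
 */
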
/**
 * aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will
 * run only once and as soon as possible.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                                  const char *name);

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 *
 * A convenience wrapper for aio_bh_schedule_oneshot_full() that uses cb as the
 * name string.
 */
#define aio_bh_schedule_oneshot(ctx, cb, opaque) \
    aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)))

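/*
 * Example (illustrative; report_done and MyJob are hypothetical).  The BH
 * is freed automatically after it has run once:
 *
 *     static void report_done(void *opaque)
 *     {
 *         MyJob *job = opaque;
 *         job->done = true;
 *     }
 *
 *     aio_bh_schedule_oneshot(ctx, report_done, job);
 */
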
/**
 * aio_bh_new_full: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name);

/**
 * aio_bh_new: Allocate a new bottom half structure
 *
 * A convenience wrapper for aio_bh_new_full() that uses cb as the name
 * string.
 */
#define aio_bh_new(ctx, cb, opaque) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)))

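/*
 * Example BH lifecycle (illustrative; my_cb, MyState and s are hypothetical):
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         s->pending = false;
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, s);
 *     qemu_bh_schedule(bh);    // my_cb(s) runs from ctx's event loop
 *     qemu_bh_delete(bh);      // later: cancel if pending, then free
 */
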
/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Execute the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * These are internal functions used by the QEMU main loop.
 * Note that aio_bh_poll() must not be called concurrently with itself
 * on the same AioContext.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * This function is asynchronous: the bottom half is only actually freed
 * once the event loop is finished with it.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing AIO work.  This can issue new pending
 * aio as a result of executing I/O completion or bh callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);

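/*
 * Sketch of synchronously waiting for a condition (illustrative; the done
 * flag is hypothetical and would be set by a completion callback):
 *
 *     while (!done) {
 *         aio_poll(ctx, true);
 *     }
 */
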
/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque);

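/*
 * Example (illustrative; my_read_cb, MyServer and handle_client_data are
 * hypothetical).  Passing NULL for all handlers unregisters the fd:
 *
 *     static void my_read_cb(void *opaque)
 *     {
 *         MyServer *s = opaque;
 *         handle_client_data(s);
 *     }
 *
 *     aio_set_fd_handler(ctx, s->fd, true, my_read_cb, NULL, NULL, NULL, s);
 *     aio_set_fd_handler(ctx, s->fd, true, NULL, NULL, NULL, NULL, NULL);
 */
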
/* Set polling begin/end callbacks for a file descriptor that has already been
 * registered with aio_set_fd_handler.  Do nothing if the file descriptor is
 * not registered.
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end);

/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready);

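/*
 * Example (illustrative; my_notifier_cb is hypothetical): wire an
 * EventNotifier into the event loop and clear it when it fires.
 *
 *     static void my_notifier_cb(EventNotifier *n)
 *     {
 *         event_notifier_test_and_clear(n);
 *     }
 *
 *     EventNotifier e;
 *     event_notifier_init(&e, 0);
 *     aio_set_event_notifier(ctx, &e, false, my_notifier_cb, NULL, NULL);
 */
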
/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Setup the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/* Setup the LuringState bound to this AioContext */
struct LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);

/* Return the LuringState bound to this AioContext */
struct LuringState *aio_get_linux_io_uring(AioContext *ctx);

/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use that unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}

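/*
 * Example (illustrative; my_timer_cb and fired are hypothetical): arm a
 * timer that fires 100 ms from now on the realtime clock.
 *
 *     static void my_timer_cb(void *opaque)
 *     {
 *         bool *fired = opaque;
 *         *fired = true;
 *     }
 *
 *     bool fired = false;
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  my_timer_cb, &fired);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 */
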
/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable the further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    qatomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    int old;

    old = qatomic_fetch_dec(&ctx->external_disable_cnt);
    assert(old > 0);
    if (old == 1) {
        /* Kick event loop so it re-arms file descriptors */
        aio_notify(ctx);
    }
}

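/*
 * Illustrative pairing (do_quiesced_work is hypothetical).  The counter
 * nests, so each disable needs a matching enable:
 *
 *     aio_disable_external(ctx);
 *     do_quiesced_work(ctx);
 *     aio_enable_external(ctx);
 */
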
/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return qatomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check if the node's is_external flag is okay to be polled by the ctx at this
 * moment.  True means green light.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !qatomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active.  In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, struct Coroutine *co);

/**
 * aio_co_reschedule_self:
 * @new_ctx: the new context
 *
 * Move the currently running coroutine to new_ctx.  If the coroutine is
 * already running in new_ctx, do nothing.
 */
void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context.  The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(struct Coroutine *co);

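/*
 * Sketch (illustrative; co is a hypothetical Coroutine pointer): a coroutine
 * parks itself and is later woken once the event it waits for has happened.
 *
 *     // in the coroutine:
 *     qemu_coroutine_yield();
 *
 *     // elsewhere, when the coroutine should resume:
 *     aio_co_wake(co);
 */
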
/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, struct Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from the main thread or with the "big QEMU lock" taken it
 * will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

void qemu_set_current_aio_context(AioContext *ctx);

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/* Used internally, do not call outside AioContext code */
void aio_context_use_g_source(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);

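/*
 * Example (illustrative): allow busy polling for up to 32 microseconds,
 * doubling or halving the polling window as it succeeds or fails.  Assumes
 * qapi/error.h's error_abort for brevity.
 *
 *     aio_context_set_poll_params(ctx, 32 * 1000, 2, 2, &error_abort);
 */
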
/**
 * aio_context_set_aio_params:
 * @ctx: the aio context
 * @max_batch: maximum number of requests in a batch, 0 means that the
 *             engine will use its default
 */
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                Error **errp);

/**
 * aio_context_set_thread_pool_params:
 * @ctx: the aio context
 * @min: min number of threads to have readily available in the thread pool
 * @max: max number of threads the thread pool can contain
 */
void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp);
#endif