1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Stefan Reiter <s.reiter@proxmox.com>
3 Date: Wed, 27 May 2020 11:33:21 +0200
4 Subject: [PATCH] savevm-async: flush IOThread-drives async before entering
7 By flushing all drives where it's possible to do so before entering the
8 blocking part (where the VM is stopped), we can reduce the time spent in
9 said part for every disk that has an IOThread (other drives cannot be
10 flushed async anyway).
12 Suggested-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
13 Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
15 savevm-async.c | 23 +++++++++++++++++++++++
16 1 file changed, 23 insertions(+)
18 diff --git a/savevm-async.c b/savevm-async.c
19 index 2894c94233..4ce83a0691 100644
22 @@ -253,6 +253,8 @@ static void coroutine_fn process_savevm_co(void *opaque)
26 + BdrvNextIterator it;
27 + BlockDriverState *bs = NULL;
29 ret = qemu_file_get_error(snap_state.file);
31 @@ -288,6 +290,27 @@ static void coroutine_fn process_savevm_co(void *opaque)
35 + /* If a drive runs in an IOThread we can flush it async, and only
36 + * need to sync-flush whatever IO happens between now and
37 + * vm_stop_force_state. bdrv_next can only be called from main AioContext,
38 + * so move there now and after every flush.
40 + aio_co_reschedule_self(qemu_get_aio_context());
41 + for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
42 + /* target has BDRV_O_NO_FLUSH, no sense calling bdrv_flush on it */
43 + if (bs == blk_bs(snap_state.target)) {
47 + AioContext *bs_ctx = bdrv_get_aio_context(bs);
48 + if (bs_ctx != qemu_get_aio_context()) {
49 + DPRINTF("savevm: async flushing drive %s\n", bs->filename);
50 + aio_co_reschedule_self(bs_ctx);
52 + aio_co_reschedule_self(qemu_get_aio_context());
56 qemu_bh_schedule(snap_state.finalize_bh);