From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Stefan Reiter <s.reiter@proxmox.com>
Date: Wed, 27 May 2020 11:33:22 +0200
Subject: [PATCH] savevm-async: add debug timing prints

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
[ Thomas: guard variable declaration by DEBUG #ifdef ]
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
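Note, not part of the commit message: the prints added here rely on the
DPRINTF macro and its DEBUG_SAVEVM_STATE guard that already exist in
savevm-async.c. As a rough sketch of that pattern (the exact definition
in the file may differ; the "savevm-async: " prefix is an assumption):

    #include <stdio.h>  /* for printf */

    /* Presumed debug-macro pattern, for reference only. Define
     * DEBUG_SAVEVM_STATE to compile the timing prints in; otherwise
     * DPRINTF expands to a no-op and its arguments vanish at
     * preprocessing time. */
    /* #define DEBUG_SAVEVM_STATE */
    #ifdef DEBUG_SAVEVM_STATE
    #define DPRINTF(fmt, ...) \
        do { printf("savevm-async: " fmt, ## __VA_ARGS__); } while (0)
    #else
    #define DPRINTF(fmt, ...) \
        do { } while (0)
    #endif

Assuming that pattern, it also explains why only the int64_t start-time
declarations below need an #ifdef guard while the DPRINTF calls do not:
with the no-op variant, the start_time arguments are discarded before
compilation.
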
 savevm-async.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/savevm-async.c b/savevm-async.c
index 4ce83a0691..0388cebbe9 100644
--- a/savevm-async.c
+++ b/savevm-async.c
@@ -202,6 +202,10 @@ static void process_savevm_finalize(void *opaque)
     AioContext *iohandler_ctx = iohandler_get_aio_context();
     MigrationState *ms = migrate_get_current();
 
+#ifdef DEBUG_SAVEVM_STATE
+    int64_t start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+#endif
+
     qemu_bh_delete(snap_state.finalize_bh);
     snap_state.finalize_bh = NULL;
     snap_state.co = NULL;
@@ -226,6 +230,8 @@ static void process_savevm_finalize(void *opaque)
     }
 
     DPRINTF("state saving complete\n");
+    DPRINTF("timing: process_savevm_finalize (state saving) took %ld ms\n",
+            qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start_time);
 
     /* clear migration state */
     migrate_set_state(&ms->state, MIGRATION_STATUS_SETUP,
@@ -247,6 +253,9 @@ static void process_savevm_finalize(void *opaque)
         vm_start();
         snap_state.saved_vm_running = false;
     }
+
+    DPRINTF("timing: process_savevm_finalize (full) took %ld ms\n",
+            qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start_time);
 }
 
 static void coroutine_fn process_savevm_co(void *opaque)
@@ -256,6 +265,10 @@ static void coroutine_fn process_savevm_co(void *opaque)
     BdrvNextIterator it;
     BlockDriverState *bs = NULL;
 
+#ifdef DEBUG_SAVEVM_STATE
+    int64_t start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+#endif
+
     ret = qemu_file_get_error(snap_state.file);
     if (ret < 0) {
         save_snapshot_error("qemu_savevm_state_setup failed");
@@ -290,6 +303,12 @@ static void coroutine_fn process_savevm_co(void *opaque)
         }
     }
 
+    DPRINTF("timing: process_savevm_co took %ld ms\n",
+            qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start_time);
+
+#ifdef DEBUG_SAVEVM_STATE
+    int64_t start_time_flush = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+#endif
     /* If a drive runs in an IOThread we can flush it async, and only
      * need to sync-flush whatever IO happens between now and
      * vm_stop_force_state. bdrv_next can only be called from main AioContext,
@@ -311,6 +330,9 @@ static void coroutine_fn process_savevm_co(void *opaque)
         }
     }
 
+    DPRINTF("timing: async flushing took %ld ms\n",
+            qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start_time_flush);
+
     qemu_bh_schedule(snap_state.finalize_bh);
 }
 
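Reviewer note, not part of the patch: all three timers follow the same
wall-clock pattern, one qemu_clock_get_ms(QEMU_CLOCK_REALTIME) reading
before the work and a printed delta after it. In isolation the pattern
looks like this sketch, where do_work() and t0 are hypothetical
stand-ins:

    #ifdef DEBUG_SAVEVM_STATE
    int64_t t0 = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); /* wall clock, ms */
    #endif
    do_work(); /* hypothetical: the section being timed */
    DPRINTF("timing: work took %ld ms\n",
            qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - t0);

Reading the resulting prints: "process_savevm_co" covers the coroutine
from entry up to the start of the flush phase, "async flushing" covers
only the asynchronous flush of IOThread-backed drives that follows, and
the two "process_savevm_finalize" prints share one start_time taken at
entry, so "(state saving)" measures up to "state saving complete" while
"(full)" measures the whole finalize handler including cleanup.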