From f255c2a02017dbefc3bcc786762b1de3c5aa5c89 Mon Sep 17 00:00:00 2001
From: Wolfgang Bumiller <w.bumiller@proxmox.com>
Date: Tue, 8 Nov 2016 11:13:06 +0100
Subject: [PATCH 46/46] convert savevm-async to threads

---
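Note on the write path: the savevm state is now produced by a plain worker
thread, but blk_co_pwritev() still has to run in coroutine context.
block_state_writev_buffer() below therefore bounces each buffer into a
coroutine and polls the target's AioContext until that coroutine reports
completion. A condensed sketch of the pattern, using the names introduced by
this patch (an illustration against the block API in this tree, not
standalone compilable code; qiov and pos are as in the function below):

    /* Runs in coroutine context; reports the result via rwco->ret. */
    static void block_state_write_entry(void *opaque)
    {
        BlkRwCo *rwco = opaque;
        rwco->ret = blk_co_pwritev(snap_state.target, rwco->offset,
                                   rwco->qiov->size, rwco->qiov, 0);
    }

    /* Called from the savevm thread: start the coroutine, then drive the
     * target's AioContext until rwco.ret changes from NOT_DONE. */
    BlkRwCo rwco = { .offset = pos, .qiov = &qiov, .ret = NOT_DONE };
    AioContext *ctx = blk_get_aio_context(snap_state.target);

    aio_context_acquire(ctx);
    qemu_coroutine_enter(qemu_coroutine_create(&block_state_write_entry, &rwco));
    while (rwco.ret == NOT_DONE) {
        aio_poll(ctx, true);    /* block until some I/O progress is made */
    }
    aio_context_release(ctx);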
 savevm-async.c | 143 +++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 88 insertions(+), 55 deletions(-)

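Note on completion: the worker thread does not tear down the snapshot state
itself. It schedules a bottom half and exits, and the main loop joins the
thread before cleaning up. Outline of how the pieces added below fit together
(names from the patch; not standalone code):

    /* qmp_savevm_start(): create the cleanup BH first, then the thread. */
    snap_state.cleanup_bh = qemu_bh_new(process_savevm_cleanup, &snap_state);
    qemu_thread_create(&snap_state.thread, "savevm-async",
                       process_savevm_thread, NULL, QEMU_THREAD_JOINABLE);

    /* End of process_savevm_thread(), still holding the iothread mutex: */
    qemu_bh_schedule(snap_state.cleanup_bh);
    qemu_mutex_unlock_iothread();

    /* process_savevm_cleanup() then runs in the main loop: it deletes the
     * BH, temporarily drops the iothread mutex around qemu_thread_join()
     * so the worker can exit, and only then calls save_snapshot_cleanup()
     * and restarts the VM if it was running before the snapshot. */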
diff --git a/savevm-async.c b/savevm-async.c
index 05b5b19..c4afc3b 100644
--- a/savevm-async.c
+++ b/savevm-async.c
@@ -48,6 +48,8 @@ static struct SnapshotState {
     int saved_vm_running;
     QEMUFile *file;
     int64_t total_time;
+    QEMUBH *cleanup_bh;
+    QemuThread thread;
 } snap_state;
 
 SaveVMInfo *qmp_query_savevm(Error **errp)
@@ -135,19 +137,6 @@ static void save_snapshot_error(const char *fmt, ...)
     g_free (msg);
 
     snap_state.state = SAVE_STATE_ERROR;
-
-    save_snapshot_cleanup();
-}
-
-static void save_snapshot_completed(void)
-{
-    DPRINTF("save_snapshot_completed\n");
-
-    if (save_snapshot_cleanup() < 0) {
-        snap_state.state = SAVE_STATE_ERROR;
-    } else {
-        snap_state.state = SAVE_STATE_COMPLETED;
-    }
 }
 
 static int block_state_close(void *opaque)
@@ -156,36 +145,76 @@ static int block_state_close(void *opaque)
     return blk_flush(snap_state.target);
 }
 
+typedef struct BlkRwCo {
+    int64_t offset;
+    QEMUIOVector *qiov;
+    int ret;
+} BlkRwCo;
+
+static void block_state_write_entry(void *opaque) {
+    BlkRwCo *rwco = opaque;
+    rwco->ret = blk_co_pwritev(snap_state.target, rwco->offset, rwco->qiov->size,
+                               rwco->qiov, 0);
+}
+
 static ssize_t block_state_writev_buffer(void *opaque, struct iovec *iov,
                                          int iovcnt, int64_t pos)
 {
-    int ret;
     QEMUIOVector qiov;
+    AioContext *aio_context;
+    Coroutine *co;
+    BlkRwCo rwco;
+
+    assert(pos == snap_state.bs_pos);
+    rwco = (BlkRwCo) {
+        .offset = pos,
+        .qiov = &qiov,
+        .ret = NOT_DONE,
+    };
 
     qemu_iovec_init_external(&qiov, iov, iovcnt);
-    ret = blk_co_pwritev(snap_state.target, pos, qiov.size, &qiov, 0);
-    if (ret < 0) {
-        return ret;
+
+    aio_context = blk_get_aio_context(snap_state.target);
+    aio_context_acquire(aio_context);
+    co = qemu_coroutine_create(&block_state_write_entry, &rwco);
+    qemu_coroutine_enter(co);
+    while (rwco.ret == NOT_DONE) {
+        aio_poll(aio_context, true);
     }
+    aio_context_release(aio_context);
+
     snap_state.bs_pos += qiov.size;
     return qiov.size;
 }
 
-static int store_and_stop(void) {
-    if (global_state_store()) {
-        save_snapshot_error("Error saving global state");
-        return 1;
+static void process_savevm_cleanup(void *opaque)
+{
+    int ret;
+    qemu_bh_delete(snap_state.cleanup_bh);
+    snap_state.cleanup_bh = NULL;
+    qemu_mutex_unlock_iothread();
+    qemu_thread_join(&snap_state.thread);
+    qemu_mutex_lock_iothread();
+    ret = save_snapshot_cleanup();
+    if (ret < 0) {
+        save_snapshot_error("save_snapshot_cleanup error %d", ret);
+    } else if (snap_state.state == SAVE_STATE_ACTIVE) {
+        snap_state.state = SAVE_STATE_COMPLETED;
+    } else {
+        save_snapshot_error("process_savevm_cleanup: invalid state: %d",
+                            snap_state.state);
     }
-    if (runstate_is_running()) {
-        vm_stop(RUN_STATE_SAVE_VM);
+    if (snap_state.saved_vm_running) {
+        vm_start();
+        snap_state.saved_vm_running = false;
     }
-    return 0;
 }
 
-static void process_savevm_co(void *opaque)
+static void *process_savevm_thread(void *opaque)
 {
     int ret;
     int64_t maxlen;
+
     MigrationParams params = {
         .blk = 0,
         .shared = 0
@@ -193,14 +222,15 @@ static void process_savevm_co(void *opaque)
 
     snap_state.state = SAVE_STATE_ACTIVE;
 
-    qemu_mutex_unlock_iothread();
+    rcu_register_thread();
+
     qemu_savevm_state_header(snap_state.file);
     ret = qemu_savevm_state_begin(snap_state.file, &params);
-    qemu_mutex_lock_iothread();
 
     if (ret < 0) {
         save_snapshot_error("qemu_savevm_state_begin failed");
-        return;
+        rcu_unregister_thread();
+        return NULL;
     }
 
     while (snap_state.state == SAVE_STATE_ACTIVE) {
@@ -209,41 +239,43 @@ static void process_savevm_co(void *opaque)
         qemu_savevm_state_pending(snap_state.file, 0, &pend_nonpost, &pend_post);
         pending_size = pend_post + pend_nonpost;
 
-        if (pending_size) {
-            ret = qemu_savevm_state_iterate(snap_state.file, false);
-            if (ret < 0) {
-                save_snapshot_error("qemu_savevm_state_iterate error %d", ret);
-                break;
-            }
-            DPRINTF("savevm inerate pending size %lu ret %d\n", pending_size, ret);
+        maxlen = blk_getlength(snap_state.target) - 30*1024*1024;
+
+        if (pending_size > 400000 && snap_state.bs_pos + pending_size < maxlen) {
+            qemu_mutex_lock_iothread();
+            ret = qemu_savevm_state_iterate(snap_state.file, false);
+            if (ret < 0) {
+                save_snapshot_error("qemu_savevm_state_iterate error %d", ret);
+                break;
+            }
+            qemu_mutex_unlock_iothread();
+            DPRINTF("savevm inerate pending size %lu ret %d\n", pending_size, ret);
         } else {
-            DPRINTF("done iterating\n");
-            if (store_and_stop())
+            qemu_mutex_lock_iothread();
+            qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
+            ret = global_state_store();
+            if (ret) {
+                save_snapshot_error("global_state_store error %d", ret);
+                break;
+            }
+            ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
+            if (ret < 0) {
+                save_snapshot_error("vm_stop_force_state error %d", ret);
                 break;
+            }
             DPRINTF("savevm inerate finished\n");
             qemu_savevm_state_complete_precopy(snap_state.file, false);
+            qemu_savevm_state_cleanup();
             DPRINTF("save complete\n");
-            save_snapshot_completed();
             break;
         }
-
-        /* stop the VM if we get to the end of available space,
-         * or if pending_size is just a few MB
-         */
-        maxlen = blk_getlength(snap_state.target) - 30*1024*1024;
-        if ((pending_size < 100000) ||
-            ((snap_state.bs_pos + pending_size) >= maxlen)) {
-            if (store_and_stop())
-                break;
-        }
     }
 
-    if(snap_state.state == SAVE_STATE_CANCELLED) {
-        save_snapshot_completed();
-        Error *errp = NULL;
-        qmp_savevm_end(&errp);
-    }
+    qemu_bh_schedule(snap_state.cleanup_bh);
+    qemu_mutex_unlock_iothread();
 
+    rcu_unregister_thread();
+    return NULL;
 }
 
 static const QEMUFileOps block_file_ops = {
@@ -306,8 +338,9 @@ void qmp_savevm_start(bool has_statefile, const char *statefile, Error **errp)
     error_setg(&snap_state.blocker, "block device is in use by savevm");
     blk_op_block_all(snap_state.target, snap_state.blocker);
 
-    Coroutine *co = qemu_coroutine_create(process_savevm_co, NULL);
-    qemu_coroutine_enter(co);
+    snap_state.cleanup_bh = qemu_bh_new(process_savevm_cleanup, &snap_state);
+    qemu_thread_create(&snap_state.thread, "savevm-async", process_savevm_thread,
+                       NULL, QEMU_THREAD_JOINABLE);
 
     return;
 
-- 
2.1.4