From c80c9d6e7365d83bae020de4862cc9825374b88c Mon Sep 17 00:00:00 2001
From: Wolfgang Bumiller <w.bumiller@proxmox.com>
Date: Tue, 8 Nov 2016 11:13:06 +0100
Subject: [PATCH 46/49] convert savevm-async to threads

---
 savevm-async.c | 144 +++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 88 insertions(+), 56 deletions(-)

diff --git a/savevm-async.c b/savevm-async.c
index 3adf89fdb2..9f839faab5 100644
--- a/savevm-async.c
+++ b/savevm-async.c
@@ -48,6 +48,8 @@ static struct SnapshotState {
     int saved_vm_running;
     QEMUFile *file;
     int64_t total_time;
+    QEMUBH *cleanup_bh;
+    QemuThread thread;
 } snap_state;
 
 SaveVMInfo *qmp_query_savevm(Error **errp)
@@ -135,19 +137,6 @@ static void save_snapshot_error(const char *fmt, ...)
     g_free (msg);
 
     snap_state.state = SAVE_STATE_ERROR;
-
-    save_snapshot_cleanup();
-}
-
-static void save_snapshot_completed(void)
-{
-    DPRINTF("save_snapshot_completed\n");
-
-    if (save_snapshot_cleanup() < 0) {
-        snap_state.state = SAVE_STATE_ERROR;
-    } else {
-        snap_state.state = SAVE_STATE_COMPLETED;
-    }
 }
 
 static int block_state_close(void *opaque)
@@ -156,51 +145,90 @@ static int block_state_close(void *opaque)
     return blk_flush(snap_state.target);
 }
 
+typedef struct BlkRwCo {
+    int64_t offset;
+    QEMUIOVector *qiov;
+    int ret;
+} BlkRwCo;
+
+static void block_state_write_entry(void *opaque) {
+    BlkRwCo *rwco = opaque;
+    rwco->ret = blk_co_pwritev(snap_state.target, rwco->offset, rwco->qiov->size,
+                               rwco->qiov, 0);
+}
+
 static ssize_t block_state_writev_buffer(void *opaque, struct iovec *iov,
                                          int iovcnt, int64_t pos)
 {
-    int ret;
     QEMUIOVector qiov;
+    AioContext *aio_context;
+    Coroutine *co;
+    BlkRwCo rwco;
+
+    assert(pos == snap_state.bs_pos);
+    rwco = (BlkRwCo) {
+        .offset = pos,
+        .qiov = &qiov,
+        .ret = NOT_DONE,
+    };
 
     qemu_iovec_init_external(&qiov, iov, iovcnt);
-    ret = blk_co_pwritev(snap_state.target, pos, qiov.size, &qiov, 0);
-    if (ret < 0) {
-        return ret;
+
+    aio_context = blk_get_aio_context(snap_state.target);
+    aio_context_acquire(aio_context);
+    co = qemu_coroutine_create(&block_state_write_entry, &rwco);
+    qemu_coroutine_enter(co);
+    while (rwco.ret == NOT_DONE) {
+        aio_poll(aio_context, true);
     }
+    aio_context_release(aio_context);
+
     snap_state.bs_pos += qiov.size;
     return qiov.size;
 }
 
-static int store_and_stop(void) {
-    if (global_state_store()) {
-        save_snapshot_error("Error saving global state");
-        return 1;
+static void process_savevm_cleanup(void *opaque)
+{
+    int ret;
+    qemu_bh_delete(snap_state.cleanup_bh);
+    snap_state.cleanup_bh = NULL;
+    qemu_mutex_unlock_iothread();
+    qemu_thread_join(&snap_state.thread);
+    qemu_mutex_lock_iothread();
+    ret = save_snapshot_cleanup();
+    if (ret < 0) {
+        save_snapshot_error("save_snapshot_cleanup error %d", ret);
+    } else if (snap_state.state == SAVE_STATE_ACTIVE) {
+        snap_state.state = SAVE_STATE_COMPLETED;
+    } else {
+        save_snapshot_error("process_savevm_cleanup: invalid state: %d",
+                            snap_state.state);
     }
-    if (runstate_is_running()) {
-        vm_stop(RUN_STATE_SAVE_VM);
+    if (snap_state.saved_vm_running) {
+        vm_start();
+        snap_state.saved_vm_running = false;
     }
-    return 0;
 }
 
-static void process_savevm_co(void *opaque)
+static void *process_savevm_thread(void *opaque)
 {
     int ret;
     int64_t maxlen;
+
     MigrationParams params = {
         .blk = 0,
         .shared = 0
     };
 
-    snap_state.state = SAVE_STATE_ACTIVE;
+    rcu_register_thread();
 
-    qemu_mutex_unlock_iothread();
     qemu_savevm_state_header(snap_state.file);
     ret = qemu_savevm_state_begin(snap_state.file, &params);
-    qemu_mutex_lock_iothread();
 
     if (ret < 0) {
         save_snapshot_error("qemu_savevm_state_begin failed");
-        return;
+        rcu_unregister_thread();
+        return NULL;
     }
 
     while (snap_state.state == SAVE_STATE_ACTIVE) {
@@ -209,41 +237,43 @@ static void process_savevm_co(void *opaque)
         qemu_savevm_state_pending(snap_state.file, 0, &pend_nonpost, &pend_post);
         pending_size = pend_post + pend_nonpost;
 
-        if (pending_size) {
-            ret = qemu_savevm_state_iterate(snap_state.file, false);
-            if (ret < 0) {
-                save_snapshot_error("qemu_savevm_state_iterate error %d", ret);
-                break;
-            }
-            DPRINTF("savevm inerate pending size %lu ret %d\n", pending_size, ret);
+        maxlen = blk_getlength(snap_state.target) - 30*1024*1024;
+
+        if (pending_size > 400000 && snap_state.bs_pos + pending_size < maxlen) {
+            qemu_mutex_lock_iothread();
+            ret = qemu_savevm_state_iterate(snap_state.file, false);
+            if (ret < 0) {
+                save_snapshot_error("qemu_savevm_state_iterate error %d", ret);
+                break;
+            }
+            qemu_mutex_unlock_iothread();
+            DPRINTF("savevm inerate pending size %lu ret %d\n", pending_size, ret);
         } else {
-            DPRINTF("done iterating\n");
-            if (store_and_stop())
+            qemu_mutex_lock_iothread();
+            qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
+            ret = global_state_store();
+            if (ret) {
+                save_snapshot_error("global_state_store error %d", ret);
+                break;
+            }
+            ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
+            if (ret < 0) {
+                save_snapshot_error("vm_stop_force_state error %d", ret);
                 break;
+            }
             DPRINTF("savevm inerate finished\n");
             qemu_savevm_state_complete_precopy(snap_state.file, false);
+            qemu_savevm_state_cleanup();
             DPRINTF("save complete\n");
-            save_snapshot_completed();
             break;
         }
-
-        /* stop the VM if we get to the end of available space,
-         * or if pending_size is just a few MB
-         */
-        maxlen = blk_getlength(snap_state.target) - 30*1024*1024;
-        if ((pending_size < 100000) ||
-            ((snap_state.bs_pos + pending_size) >= maxlen)) {
-            if (store_and_stop())
-                break;
-        }
     }
 
-    if(snap_state.state == SAVE_STATE_CANCELLED) {
-        save_snapshot_completed();
-        Error *errp = NULL;
-        qmp_savevm_end(&errp);
-    }
+    qemu_bh_schedule(snap_state.cleanup_bh);
+    qemu_mutex_unlock_iothread();
 
+    rcu_unregister_thread();
+    return NULL;
 }
 
 static const QEMUFileOps block_file_ops = {
@@ -306,8 +336,10 @@ void qmp_savevm_start(bool has_statefile, const char *statefile, Error **errp)
     error_setg(&snap_state.blocker, "block device is in use by savevm");
     blk_op_block_all(snap_state.target, snap_state.blocker);
 
-    Coroutine *co = qemu_coroutine_create(process_savevm_co, NULL);
-    qemu_coroutine_enter(co);
+    snap_state.state = SAVE_STATE_ACTIVE;
+    snap_state.cleanup_bh = qemu_bh_new(process_savevm_cleanup, &snap_state);
+    qemu_thread_create(&snap_state.thread, "savevm-async", process_savevm_thread,
+                       NULL, QEMU_THREAD_JOINABLE);
 
     return;
 
-- 
2.11.0
