From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Wolfgang Bumiller <w.bumiller@proxmox.com>
Date: Tue, 8 Nov 2016 11:13:06 +0100
Subject: [PATCH] convert savevm-async to threads

---
 savevm-async.c | 143 +++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 87 insertions(+), 56 deletions(-)

diff --git a/savevm-async.c b/savevm-async.c
index 897134ab5a..96523c88ae 100644
--- a/savevm-async.c
+++ b/savevm-async.c
@@ -43,6 +43,8 @@ static struct SnapshotState {
     int saved_vm_running;
     QEMUFile *file;
     int64_t total_time;
+    QEMUBH *cleanup_bh;
+    QemuThread thread;
 } snap_state;
 
 SaveVMInfo *qmp_query_savevm(Error **errp)
@@ -130,19 +132,6 @@ static void save_snapshot_error(const char *fmt, ...)
     g_free (msg);
 
     snap_state.state = SAVE_STATE_ERROR;
-
-    save_snapshot_cleanup();
-}
-
-static void save_snapshot_completed(void)
-{
-    DPRINTF("save_snapshot_completed\n");
-
-    if (save_snapshot_cleanup() < 0) {
-        snap_state.state = SAVE_STATE_ERROR;
-    } else {
-        snap_state.state = SAVE_STATE_COMPLETED;
-    }
 }
 
 static int block_state_close(void *opaque)
@@ -151,48 +140,86 @@ static int block_state_close(void *opaque)
     return blk_flush(snap_state.target);
 }
 
+typedef struct BlkRwCo {
+    int64_t offset;
+    QEMUIOVector *qiov;
+    int ret;
+} BlkRwCo;
+
+static void block_state_write_entry(void *opaque) {
+    BlkRwCo *rwco = opaque;
+    rwco->ret = blk_co_pwritev(snap_state.target, rwco->offset, rwco->qiov->size,
+                               rwco->qiov, 0);
+}
+
 static ssize_t block_state_writev_buffer(void *opaque, struct iovec *iov,
                                          int iovcnt, int64_t pos)
 {
-    int ret;
     QEMUIOVector qiov;
+    AioContext *aio_context;
+    Coroutine *co;
+    BlkRwCo rwco;
+
+    assert(pos == snap_state.bs_pos);
+    rwco = (BlkRwCo) {
+        .offset = pos,
+        .qiov = &qiov,
+        .ret = NOT_DONE,
+    };
 
     qemu_iovec_init_external(&qiov, iov, iovcnt);
-    ret = blk_co_pwritev(snap_state.target, pos, qiov.size, &qiov, 0);
-    if (ret < 0) {
-        return ret;
+
+    aio_context = blk_get_aio_context(snap_state.target);
+    aio_context_acquire(aio_context);
+    co = qemu_coroutine_create(&block_state_write_entry, &rwco);
+    qemu_coroutine_enter(co);
+    while (rwco.ret == NOT_DONE) {
+        aio_poll(aio_context, true);
     }
+    aio_context_release(aio_context);
+
     snap_state.bs_pos += qiov.size;
     return qiov.size;
 }
 
-static int store_and_stop(void) {
-    if (global_state_store()) {
-        save_snapshot_error("Error saving global state");
-        return 1;
+static void process_savevm_cleanup(void *opaque)
+{
+    int ret;
+    qemu_bh_delete(snap_state.cleanup_bh);
+    snap_state.cleanup_bh = NULL;
+    qemu_mutex_unlock_iothread();
+    qemu_thread_join(&snap_state.thread);
+    qemu_mutex_lock_iothread();
+    ret = save_snapshot_cleanup();
+    if (ret < 0) {
+        save_snapshot_error("save_snapshot_cleanup error %d", ret);
+    } else if (snap_state.state == SAVE_STATE_ACTIVE) {
+        snap_state.state = SAVE_STATE_COMPLETED;
+    } else {
+        save_snapshot_error("process_savevm_cleanup: invalid state: %d",
+                            snap_state.state);
     }
-    if (runstate_is_running()) {
-        vm_stop(RUN_STATE_SAVE_VM);
+    if (snap_state.saved_vm_running) {
+        vm_start();
+        snap_state.saved_vm_running = false;
     }
-    return 0;
 }
 
-static void process_savevm_co(void *opaque)
+static void *process_savevm_thread(void *opaque)
 {
     int ret;
     int64_t maxlen;
 
-    snap_state.state = SAVE_STATE_ACTIVE;
+    rcu_register_thread();
 
-    qemu_mutex_unlock_iothread();
     qemu_savevm_state_header(snap_state.file);
     qemu_savevm_state_setup(snap_state.file);
     ret = qemu_file_get_error(snap_state.file);
-    qemu_mutex_lock_iothread();
 
     if (ret < 0) {
         save_snapshot_error("qemu_savevm_state_setup failed");
-        return;
+        rcu_unregister_thread();
+        return NULL;
     }
 
     while (snap_state.state == SAVE_STATE_ACTIVE) {
@@ -201,17 +228,30 @@ static void process_savevm_co(void *opaque)
         qemu_savevm_state_pending(snap_state.file, 0, &pend_nonpost, &pend_post);
         pending_size = pend_post + pend_nonpost;
 
-        if (pending_size) {
-            ret = qemu_savevm_state_iterate(snap_state.file, false);
-            if (ret < 0) {
-                save_snapshot_error("qemu_savevm_state_iterate error %d", ret);
-                break;
-            }
-            DPRINTF("savevm inerate pending size %lu ret %d\n", pending_size, ret);
+        maxlen = blk_getlength(snap_state.target) - 30*1024*1024;
+
+        if (pending_size > 400000 && snap_state.bs_pos + pending_size < maxlen) {
+            qemu_mutex_lock_iothread();
+            ret = qemu_savevm_state_iterate(snap_state.file, false);
+            if (ret < 0) {
+                save_snapshot_error("qemu_savevm_state_iterate error %d", ret);
+                break;
+            }
+            qemu_mutex_unlock_iothread();
+            DPRINTF("savevm inerate pending size %lu ret %d\n", pending_size, ret);
         } else {
-            DPRINTF("done iterating\n");
-            if (store_and_stop())
+            qemu_mutex_lock_iothread();
+            qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
+            ret = global_state_store();
+            if (ret) {
+                save_snapshot_error("global_state_store error %d", ret);
                 break;
+            }
+            ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
+            if (ret < 0) {
+                save_snapshot_error("vm_stop_force_state error %d", ret);
+                break;
+            }
             DPRINTF("savevm inerate finished\n");
             /* upstream made the return value here inconsistent
              * (-1 instead of 'ret' in one case and 0 after flush which can
@@ -223,28 +263,17 @@ static void process_savevm_co(void *opaque)
                 save_snapshot_error("qemu_savevm_state_iterate error %d", ret);
                 break;
             }
+            qemu_savevm_state_cleanup();
             DPRINTF("save complete\n");
-            save_snapshot_completed();
             break;
         }
-
-        /* stop the VM if we get to the end of available space,
-         * or if pending_size is just a few MB
-         */
-        maxlen = blk_getlength(snap_state.target) - 30*1024*1024;
-        if ((pending_size < 100000) ||
-            ((snap_state.bs_pos + pending_size) >= maxlen)) {
-            if (store_and_stop())
-                break;
-        }
     }
 
-    if(snap_state.state == SAVE_STATE_CANCELLED) {
-        save_snapshot_completed();
-        Error *errp = NULL;
-        qmp_savevm_end(&errp);
-    }
+    qemu_bh_schedule(snap_state.cleanup_bh);
+    qemu_mutex_unlock_iothread();
 
+    rcu_unregister_thread();
+    return NULL;
 }
 
 static const QEMUFileOps block_file_ops = {
@@ -307,8 +336,10 @@ void qmp_savevm_start(bool has_statefile, const char *statefile, Error **errp)
     error_setg(&snap_state.blocker, "block device is in use by savevm");
     blk_op_block_all(snap_state.target, snap_state.blocker);
 
-    Coroutine *co = qemu_coroutine_create(process_savevm_co, NULL);
-    qemu_coroutine_enter(co);
+    snap_state.state = SAVE_STATE_ACTIVE;
+    snap_state.cleanup_bh = qemu_bh_new(process_savevm_cleanup, &snap_state);
+    qemu_thread_create(&snap_state.thread, "savevm-async", process_savevm_thread,
+                       NULL, QEMU_THREAD_JOINABLE);
 
     return;
 
-- 
2.11.0