1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Wolfgang Bumiller <w.bumiller@proxmox.com>
3 Date: Tue, 8 Nov 2016 11:13:06 +0100
4 Subject: [PATCH] convert savevm-async to threads
7 savevm-async.c | 143 +++++++++++++++++++++++++++++++++++----------------------
8 1 file changed, 87 insertions(+), 56 deletions(-)
10 diff --git a/savevm-async.c b/savevm-async.c
11 index 897134ab5a..96523c88ae 100644
14 @@ -43,6 +43,8 @@ static struct SnapshotState {
22 SaveVMInfo *qmp_query_savevm(Error **errp)
23 @@ -130,19 +132,6 @@ static void save_snapshot_error(const char *fmt, ...)
26 snap_state.state = SAVE_STATE_ERROR;
28 - save_snapshot_cleanup();
31 -static void save_snapshot_completed(void)
33 - DPRINTF("save_snapshot_completed\n");
35 - if (save_snapshot_cleanup() < 0) {
36 - snap_state.state = SAVE_STATE_ERROR;
38 - snap_state.state = SAVE_STATE_COMPLETED;
42 static int block_state_close(void *opaque)
43 @@ -151,48 +140,86 @@ static int block_state_close(void *opaque)
44 return blk_flush(snap_state.target);
47 +typedef struct BlkRwCo {
53 +static void block_state_write_entry(void *opaque)
53 +{
54 + BlkRwCo *rwco = opaque;
55 + rwco->ret = blk_co_pwritev(snap_state.target, rwco->offset, rwco->qiov->size,
59 static ssize_t block_state_writev_buffer(void *opaque, struct iovec *iov,
60 int iovcnt, int64_t pos)
64 + AioContext *aio_context;
68 + assert(pos == snap_state.bs_pos);
75 qemu_iovec_init_external(&qiov, iov, iovcnt);
76 - ret = blk_co_pwritev(snap_state.target, pos, qiov.size, &qiov, 0);
80 + aio_context = blk_get_aio_context(snap_state.target);
81 + aio_context_acquire(aio_context);
82 + co = qemu_coroutine_create(&block_state_write_entry, &rwco);
83 + qemu_coroutine_enter(co);
84 + while (rwco.ret == NOT_DONE) {
85 + aio_poll(aio_context, true);
87 + aio_context_release(aio_context);
89 snap_state.bs_pos += qiov.size;
93 -static int store_and_stop(void) {
94 - if (global_state_store()) {
95 - save_snapshot_error("Error saving global state");
97 +static void process_savevm_cleanup(void *opaque)
100 + qemu_bh_delete(snap_state.cleanup_bh);
101 + snap_state.cleanup_bh = NULL;
102 + qemu_mutex_unlock_iothread();
103 + qemu_thread_join(&snap_state.thread);
104 + qemu_mutex_lock_iothread();
105 + ret = save_snapshot_cleanup();
107 + save_snapshot_error("save_snapshot_cleanup error %d", ret);
108 + } else if (snap_state.state == SAVE_STATE_ACTIVE) {
109 + snap_state.state = SAVE_STATE_COMPLETED;
111 + save_snapshot_error("process_savevm_cleanup: invalid state: %d",
114 - if (runstate_is_running()) {
115 - vm_stop(RUN_STATE_SAVE_VM);
116 + if (snap_state.saved_vm_running) {
118 + snap_state.saved_vm_running = false;
123 -static void process_savevm_co(void *opaque)
124 +static void *process_savevm_thread(void *opaque)
129 - snap_state.state = SAVE_STATE_ACTIVE;
130 + rcu_register_thread();
132 - qemu_mutex_unlock_iothread();
133 qemu_savevm_state_header(snap_state.file);
134 qemu_savevm_state_setup(snap_state.file);
135 ret = qemu_file_get_error(snap_state.file);
136 - qemu_mutex_lock_iothread();
139 save_snapshot_error("qemu_savevm_state_setup failed");
141 + rcu_unregister_thread();
145 while (snap_state.state == SAVE_STATE_ACTIVE) {
146 @@ -201,17 +228,30 @@ static void process_savevm_co(void *opaque)
147 qemu_savevm_state_pending(snap_state.file, 0, &pend_nonpost, &pend_post);
148 pending_size = pend_post + pend_nonpost;
150 - if (pending_size) {
151 - ret = qemu_savevm_state_iterate(snap_state.file, false);
153 - save_snapshot_error("qemu_savevm_state_iterate error %d", ret);
156 - DPRINTF("savevm inerate pending size %lu ret %d\n", pending_size, ret);
157 + maxlen = blk_getlength(snap_state.target) - 30*1024*1024;
159 + if (pending_size > 400000 && snap_state.bs_pos + pending_size < maxlen) {
160 + qemu_mutex_lock_iothread();
161 + ret = qemu_savevm_state_iterate(snap_state.file, false);
163 + save_snapshot_error("qemu_savevm_state_iterate error %d", ret);
166 + qemu_mutex_unlock_iothread();
167 +            DPRINTF("savevm iterate pending size %lu ret %d\n", pending_size, ret);
169 - DPRINTF("done iterating\n");
170 - if (store_and_stop())
171 + qemu_mutex_lock_iothread();
172 + qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
173 + ret = global_state_store();
175 + save_snapshot_error("global_state_store error %d", ret);
178 + ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
180 + save_snapshot_error("vm_stop_force_state error %d", ret);
183 DPRINTF("savevm inerate finished\n");
184 /* upstream made the return value here inconsistent
185 * (-1 instead of 'ret' in one case and 0 after flush which can
186 @@ -223,28 +263,17 @@ static void process_savevm_co(void *opaque)
187 save_snapshot_error("qemu_savevm_state_iterate error %d", ret);
190 + qemu_savevm_state_cleanup();
191 DPRINTF("save complete\n");
192 - save_snapshot_completed();
196 - /* stop the VM if we get to the end of available space,
197 - * or if pending_size is just a few MB
199 - maxlen = blk_getlength(snap_state.target) - 30*1024*1024;
200 - if ((pending_size < 100000) ||
201 - ((snap_state.bs_pos + pending_size) >= maxlen)) {
202 - if (store_and_stop())
207 - if(snap_state.state == SAVE_STATE_CANCELLED) {
208 - save_snapshot_completed();
209 - Error *errp = NULL;
210 - qmp_savevm_end(&errp);
212 + qemu_bh_schedule(snap_state.cleanup_bh);
213 + qemu_mutex_unlock_iothread();
215 + rcu_unregister_thread();
219 static const QEMUFileOps block_file_ops = {
220 @@ -307,8 +336,10 @@ void qmp_savevm_start(bool has_statefile, const char *statefile, Error **errp)
221 error_setg(&snap_state.blocker, "block device is in use by savevm");
222 blk_op_block_all(snap_state.target, snap_state.blocker);
224 - Coroutine *co = qemu_coroutine_create(process_savevm_co, NULL);
225 - qemu_coroutine_enter(co);
226 + snap_state.state = SAVE_STATE_ACTIVE;
227 + snap_state.cleanup_bh = qemu_bh_new(process_savevm_cleanup, &snap_state);
228 + qemu_thread_create(&snap_state.thread, "savevm-async", process_savevm_thread,
229 + NULL, QEMU_THREAD_JOINABLE);