1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Stefan Reiter <s.reiter@proxmox.com>
3 Date: Mon, 28 Sep 2020 13:40:51 +0200
4 Subject: [PATCH] PVE-Backup: Don't block on finishing and cleanup
7 proxmox_backup_co_finish is already async, but previously we would wait
8 for the coroutine using block_on_coroutine_fn(). Avoid this by
9 scheduling pvebackup_co_complete_stream (and thus pvebackup_co_cleanup)
10 as a real coroutine when calling from pvebackup_complete_cb. This is ok,
11 since complete_stream uses the backup_mutex internally to synchronize,
12 and other streams can happily continue writing in the meantime anyway.
14 To accommodate, backup_mutex is converted to a CoMutex. This means
15 converting every user to a coroutine. This is not just useful here, but
16 will come in handy once this series[0] is merged, and QMP calls can be
17 yield-able coroutines too. Then we can also finally get rid of
18 block_on_coroutine_fn.
20 Cases of aio_context_acquire/release from within what is now a coroutine
21 are changed to aio_co_reschedule_self, which works since a running
22 coroutine always holds the aio lock for the context it is running in.
24 job_cancel_sync is called from a BH since it can't be run from a
25 coroutine (uses AIO_WAIT_WHILE internally).
27 Same thing for create_backup_jobs, which is converted to a BH too.
29 To communicate the finishing state, a new property is introduced to
30 query-backup: 'finishing'. A new state is explicitly not used, since
31 that would break compatibility with older qemu-server versions.
33 Also fix create_backup_jobs:
35 No more weird bool returns, just the standard "errp" format used
36 everywhere else too. With this, if backup_job_create fails, the error
37 message is actually returned over QMP and can be shown to the user.
39 To facilitate correct cleanup on such an error, we call
40 create_backup_jobs as a bottom half directly from pvebackup_co_prepare.
41 This additionally allows us to actually hold the backup_mutex during
44 Also add a job_cancel_sync before job_unref, since a job must be in
45 STATUS_NULL to be deleted by unref, which could trigger an assert
48 [0] https://lists.gnu.org/archive/html/qemu-devel/2020-09/msg03515.html
50 Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
51 Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
52 [FE: add new force parameter to job_cancel_sync calls]
53 Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
55 pve-backup.c | 212 +++++++++++++++++++++++++++----------------
56 qapi/block-core.json | 5 +-
57 2 files changed, 138 insertions(+), 79 deletions(-)
59 diff --git a/pve-backup.c b/pve-backup.c
60 index b5fb844434..88268bb586 100644
63 @@ -33,7 +33,9 @@ const char *PBS_BITMAP_NAME = "pbs-incremental-dirty-bitmap";
65 static struct PVEBackupState {
67 - // Everithing accessed from qmp_backup_query command is protected using lock
68 + // Everything accessed from qmp_backup_query command is protected using
69 + // this lock. Do NOT hold this lock for long times, as it is sometimes
70 + // acquired from coroutines, and thus any wait time may block the guest.
74 @@ -47,20 +49,22 @@ static struct PVEBackupState {
83 ProxmoxBackupHandle *pbs;
86 - QemuMutex backup_mutex;
87 + CoMutex backup_mutex;
88 CoMutex dump_callback_mutex;
91 static void pvebackup_init(void)
93 qemu_mutex_init(&backup_state.stat.lock);
94 - qemu_mutex_init(&backup_state.backup_mutex);
95 + qemu_co_mutex_init(&backup_state.backup_mutex);
96 qemu_co_mutex_init(&backup_state.dump_callback_mutex);
99 @@ -72,6 +76,7 @@ typedef struct PVEBackupDevInfo {
103 + int completed_ret; // INT_MAX if not completed
104 char targetfile[PATH_MAX];
105 BdrvDirtyBitmap *bitmap;
106 BlockDriverState *target;
107 @@ -227,12 +232,12 @@ pvebackup_co_dump_vma_cb(
110 // assumes the caller holds backup_mutex
111 -static void coroutine_fn pvebackup_co_cleanup(void *unused)
112 +static void coroutine_fn pvebackup_co_cleanup(void)
114 assert(qemu_in_coroutine());
116 qemu_mutex_lock(&backup_state.stat.lock);
117 - backup_state.stat.end_time = time(NULL);
118 + backup_state.stat.finishing = true;
119 qemu_mutex_unlock(&backup_state.stat.lock);
121 if (backup_state.vmaw) {
122 @@ -261,35 +266,29 @@ static void coroutine_fn pvebackup_co_cleanup(void *unused)
124 g_list_free(backup_state.di_list);
125 backup_state.di_list = NULL;
127 + qemu_mutex_lock(&backup_state.stat.lock);
128 + backup_state.stat.end_time = time(NULL);
129 + backup_state.stat.finishing = false;
130 + qemu_mutex_unlock(&backup_state.stat.lock);
133 -// assumes the caller holds backup_mutex
134 -static void coroutine_fn pvebackup_complete_stream(void *opaque)
135 +static void coroutine_fn pvebackup_co_complete_stream(void *opaque)
137 PVEBackupDevInfo *di = opaque;
138 + int ret = di->completed_ret;
140 - bool error_or_canceled = pvebackup_error_or_canceled();
142 - if (backup_state.vmaw) {
143 - vma_writer_close_stream(backup_state.vmaw, di->dev_id);
144 + qemu_mutex_lock(&backup_state.stat.lock);
145 + bool starting = backup_state.stat.starting;
146 + qemu_mutex_unlock(&backup_state.stat.lock);
148 + /* in 'starting' state, no tasks have been run yet, meaning we can (and
149 + * must) skip all cleanup, as we don't know what has and hasn't been
150 + * initialized yet. */
154 - if (backup_state.pbs && !error_or_canceled) {
155 - Error *local_err = NULL;
156 - proxmox_backup_co_close_image(backup_state.pbs, di->dev_id, &local_err);
157 - if (local_err != NULL) {
158 - pvebackup_propagate_error(local_err);
163 -static void pvebackup_complete_cb(void *opaque, int ret)
165 - assert(!qemu_in_coroutine());
167 - PVEBackupDevInfo *di = opaque;
169 - qemu_mutex_lock(&backup_state.backup_mutex);
170 + qemu_co_mutex_lock(&backup_state.backup_mutex);
173 Error *local_err = NULL;
174 @@ -301,7 +300,19 @@ static void pvebackup_complete_cb(void *opaque, int ret)
176 assert(di->target == NULL);
178 - block_on_coroutine_fn(pvebackup_complete_stream, di);
179 + bool error_or_canceled = pvebackup_error_or_canceled();
181 + if (backup_state.vmaw) {
182 + vma_writer_close_stream(backup_state.vmaw, di->dev_id);
185 + if (backup_state.pbs && !error_or_canceled) {
186 + Error *local_err = NULL;
187 + proxmox_backup_co_close_image(backup_state.pbs, di->dev_id, &local_err);
188 + if (local_err != NULL) {
189 + pvebackup_propagate_error(local_err);
193 // remove self from job list
194 backup_state.di_list = g_list_remove(backup_state.di_list, di);
195 @@ -310,21 +321,46 @@ static void pvebackup_complete_cb(void *opaque, int ret)
197 /* call cleanup if we're the last job */
198 if (!g_list_first(backup_state.di_list)) {
199 - block_on_coroutine_fn(pvebackup_co_cleanup, NULL);
200 + pvebackup_co_cleanup();
203 - qemu_mutex_unlock(&backup_state.backup_mutex);
204 + qemu_co_mutex_unlock(&backup_state.backup_mutex);
207 -static void pvebackup_cancel(void)
208 +static void pvebackup_complete_cb(void *opaque, int ret)
210 - assert(!qemu_in_coroutine());
211 + PVEBackupDevInfo *di = opaque;
212 + di->completed_ret = ret;
215 + * Schedule stream cleanup in async coroutine. close_image and finish might
216 + * take a while, so we can't block on them here. This way it also doesn't
217 + * matter if we're already running in a coroutine or not.
218 + * Note: di is a pointer to an entry in the global backup_state struct, so
221 + Coroutine *co = qemu_coroutine_create(pvebackup_co_complete_stream, di);
222 + aio_co_enter(qemu_get_aio_context(), co);
226 + * job_cancel(_sync) does not like to be called from coroutines, so defer to
227 + * main loop processing via a bottom half.
229 +static void job_cancel_bh(void *opaque) {
230 + CoCtxData *data = (CoCtxData*)opaque;
231 + Job *job = (Job*)data->data;
232 + job_cancel_sync(job, true);
233 + aio_co_enter(data->ctx, data->co);
236 +static void coroutine_fn pvebackup_co_cancel(void *opaque)
238 Error *cancel_err = NULL;
239 error_setg(&cancel_err, "backup canceled");
240 pvebackup_propagate_error(cancel_err);
242 - qemu_mutex_lock(&backup_state.backup_mutex);
243 + qemu_co_mutex_lock(&backup_state.backup_mutex);
245 if (backup_state.vmaw) {
246 /* make sure vma writer does not block anymore */
247 @@ -342,28 +378,22 @@ static void pvebackup_cancel(void)
248 ((PVEBackupDevInfo *)bdi->data)->job :
251 - /* ref the job before releasing the mutex, just to be safe */
253 - WITH_JOB_LOCK_GUARD() {
254 - job_ref_locked(&cancel_job->job);
257 + .ctx = qemu_get_current_aio_context(),
258 + .co = qemu_coroutine_self(),
259 + .data = &cancel_job->job,
261 + aio_bh_schedule_oneshot(data.ctx, job_cancel_bh, &data);
262 + qemu_coroutine_yield();
265 - /* job_cancel_sync may enter the job, so we need to release the
266 - * backup_mutex to avoid deadlock */
267 - qemu_mutex_unlock(&backup_state.backup_mutex);
270 - WITH_JOB_LOCK_GUARD() {
271 - job_cancel_sync_locked(&cancel_job->job, true);
272 - job_unref_locked(&cancel_job->job);
275 + qemu_co_mutex_unlock(&backup_state.backup_mutex);
278 void qmp_backup_cancel(Error **errp)
280 - pvebackup_cancel();
281 + block_on_coroutine_fn(pvebackup_co_cancel, NULL);
284 // assumes the caller holds backup_mutex
285 @@ -416,10 +446,18 @@ static int coroutine_fn pvebackup_co_add_config(
289 -static bool create_backup_jobs(void) {
291 + * backup_job_create can *not* be run from a coroutine (and requires an
292 + * acquired AioContext), so this can't either.
293 + * The caller is responsible that backup_mutex is held nonetheless.
295 +static void create_backup_jobs_bh(void *opaque) {
297 assert(!qemu_in_coroutine());
299 + CoCtxData *data = (CoCtxData*)opaque;
300 + Error **errp = (Error**)data->data;
302 Error *local_err = NULL;
304 /* create job transaction to synchronize bitmap commit and cancel all
305 @@ -455,24 +493,19 @@ static bool create_backup_jobs(void) {
307 aio_context_release(aio_context);
309 - if (!job || local_err != NULL) {
310 - Error *create_job_err = NULL;
311 - error_setg(&create_job_err, "backup_job_create failed: %s",
312 - local_err ? error_get_pretty(local_err) : "null");
315 - pvebackup_propagate_error(create_job_err);
316 + if (!job || local_err) {
317 + error_setg(errp, "backup_job_create failed: %s",
318 + local_err ? error_get_pretty(local_err) : "null");
324 bdrv_unref(di->target);
328 - bool errors = pvebackup_error_or_canceled();
332 l = backup_state.di_list;
334 PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
335 @@ -485,13 +518,15 @@ static bool create_backup_jobs(void) {
338 WITH_JOB_LOCK_GUARD() {
339 + job_cancel_sync_locked(&di->job->job, true);
340 job_unref_locked(&di->job->job);
348 + aio_co_enter(data->ctx, data->co);
351 typedef struct QmpBackupTask {
352 @@ -528,11 +563,12 @@ typedef struct QmpBackupTask {
356 -// assumes the caller holds backup_mutex
357 static void coroutine_fn pvebackup_co_prepare(void *opaque)
359 assert(qemu_in_coroutine());
361 + qemu_co_mutex_lock(&backup_state.backup_mutex);
363 QmpBackupTask *task = opaque;
365 task->result = NULL; // just to be sure
366 @@ -553,8 +589,9 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
367 const char *firewall_name = "qemu-server.fw";
369 if (backup_state.di_list) {
370 - error_set(task->errp, ERROR_CLASS_GENERIC_ERROR,
371 + error_set(task->errp, ERROR_CLASS_GENERIC_ERROR,
372 "previous backup not finished");
373 + qemu_co_mutex_unlock(&backup_state.backup_mutex);
377 @@ -621,6 +658,8 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
382 + di->completed_ret = INT_MAX;
386 @@ -852,6 +891,8 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
387 backup_state.stat.dirty = total - backup_state.stat.reused;
388 backup_state.stat.transferred = 0;
389 backup_state.stat.zero_bytes = 0;
390 + backup_state.stat.finishing = false;
391 + backup_state.stat.starting = true;
393 qemu_mutex_unlock(&backup_state.stat.lock);
395 @@ -866,6 +907,33 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
396 uuid_info->UUID = uuid_str;
398 task->result = uuid_info;
400 + /* Run create_backup_jobs_bh outside of coroutine (in BH) but keep
401 + * backup_mutex locked. This is fine, a CoMutex can be held across yield
402 + * points, and we'll release it as soon as the BH reschedules us.
404 + CoCtxData waker = {
405 + .co = qemu_coroutine_self(),
406 + .ctx = qemu_get_current_aio_context(),
407 + .data = &local_err,
409 + aio_bh_schedule_oneshot(waker.ctx, create_backup_jobs_bh, &waker);
410 + qemu_coroutine_yield();
413 + error_propagate(task->errp, local_err);
417 + qemu_co_mutex_unlock(&backup_state.backup_mutex);
419 + qemu_mutex_lock(&backup_state.stat.lock);
420 + backup_state.stat.starting = false;
421 + qemu_mutex_unlock(&backup_state.stat.lock);
423 + /* start the first job in the transaction */
424 + job_txn_start_seq(backup_state.txn);
429 @@ -888,6 +956,7 @@ err:
432 g_list_free(di_list);
433 + backup_state.di_list = NULL;
437 @@ -908,6 +977,8 @@ err:
442 + qemu_co_mutex_unlock(&backup_state.backup_mutex);
446 @@ -961,24 +1032,8 @@ UuidInfo *qmp_backup(
450 - qemu_mutex_lock(&backup_state.backup_mutex);
452 block_on_coroutine_fn(pvebackup_co_prepare, &task);
454 - if (*errp == NULL) {
455 - bool errors = create_backup_jobs();
456 - qemu_mutex_unlock(&backup_state.backup_mutex);
459 - /* start the first job in the transaction
460 - * note: this might directly enter the job, so we need to do this
461 - * after unlocking the backup_mutex */
462 - job_txn_start_seq(backup_state.txn);
465 - qemu_mutex_unlock(&backup_state.backup_mutex);
471 @@ -1030,6 +1085,7 @@ BackupStatus *qmp_query_backup(Error **errp)
472 info->transferred = backup_state.stat.transferred;
473 info->has_reused = true;
474 info->reused = backup_state.stat.reused;
475 + info->finishing = backup_state.stat.finishing;
477 qemu_mutex_unlock(&backup_state.stat.lock);
479 diff --git a/qapi/block-core.json b/qapi/block-core.json
480 index 7fde927621..bf559c6d52 100644
481 --- a/qapi/block-core.json
482 +++ b/qapi/block-core.json
483 @@ -770,12 +770,15 @@
485 # @uuid: uuid for this backup job
487 +# @finishing: if status='active' and finishing=true, then the backup process is
488 +# waiting for the target to finish.
491 { 'struct': 'BackupStatus',
492 'data': {'*status': 'str', '*errmsg': 'str', '*total': 'int', '*dirty': 'int',
493 '*transferred': 'int', '*zero-bytes': 'int', '*reused': 'int',
494 '*start-time': 'int', '*end-time': 'int',
495 - '*backup-file': 'str', '*uuid': 'str' } }
496 + '*backup-file': 'str', '*uuid': 'str', 'finishing': 'bool' } }