]> git.proxmox.com Git - pve-qemu.git/blob - debian/patches/pve/0039-PVE-Backup-Don-t-block-on-finishing-and-cleanup-crea.patch
update submodule and patches to 7.2.0
[pve-qemu.git] / debian / patches / pve / 0039-PVE-Backup-Don-t-block-on-finishing-and-cleanup-crea.patch
1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Stefan Reiter <s.reiter@proxmox.com>
3 Date: Mon, 28 Sep 2020 13:40:51 +0200
4 Subject: [PATCH] PVE-Backup: Don't block on finishing and cleanup
5 create_backup_jobs
6
7 proxmox_backup_co_finish is already async, but previously we would wait
8 for the coroutine using block_on_coroutine_fn(). Avoid this by
9 scheduling pvebackup_co_complete_stream (and thus pvebackup_co_cleanup)
10 as a real coroutine when calling from pvebackup_complete_cb. This is ok,
11 since complete_stream uses the backup_mutex internally to synchronize,
12 and other streams can happily continue writing in the meantime anyway.
13
14 To accommodate, backup_mutex is converted to a CoMutex. This means
15 converting every user to a coroutine. This is not just useful here, but
16 will come in handy once this series[0] is merged, and QMP calls can be
17 yield-able coroutines too. Then we can also finally get rid of
18 block_on_coroutine_fn.
19
20 Cases of aio_context_acquire/release from within what is now a coroutine
21 are changed to aio_co_reschedule_self, which works since a running
22 coroutine always holds the aio lock for the context it is running in.
23
24 job_cancel_sync is called from a BH since it can't be run from a
25 coroutine (uses AIO_WAIT_WHILE internally).
26
27 Same thing for create_backup_jobs, which is converted to a BH too.
28
29 To communicate the finishing state, a new property is introduced to
30 query-backup: 'finishing'. A new state is explicitly not used, since
31 that would break compatibility with older qemu-server versions.
32
33 Also fix create_backup_jobs:
34
35 No more weird bool returns, just the standard "errp" format used
36 everywhere else too. With this, if backup_job_create fails, the error
37 message is actually returned over QMP and can be shown to the user.
38
39 To facilitate correct cleanup on such an error, we call
40 create_backup_jobs as a bottom half directly from pvebackup_co_prepare.
41 This additionally allows us to actually hold the backup_mutex during
42 operation.
43
44 Also add a job_cancel_sync before job_unref, since a job must be in
45 STATUS_NULL to be deleted by unref, which could trigger an assert
46 before.
47
48 [0] https://lists.gnu.org/archive/html/qemu-devel/2020-09/msg03515.html
49
50 Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
51 Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
52 [FE: add new force parameter to job_cancel_sync calls]
53 Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
54 ---
55 pve-backup.c | 212 +++++++++++++++++++++++++++----------------
56 qapi/block-core.json | 5 +-
57 2 files changed, 138 insertions(+), 79 deletions(-)
58
59 diff --git a/pve-backup.c b/pve-backup.c
60 index b5fb844434..88268bb586 100644
61 --- a/pve-backup.c
62 +++ b/pve-backup.c
63 @@ -33,7 +33,9 @@ const char *PBS_BITMAP_NAME = "pbs-incremental-dirty-bitmap";
64
65 static struct PVEBackupState {
66 struct {
67 - // Everithing accessed from qmp_backup_query command is protected using lock
68 + // Everything accessed from qmp_backup_query command is protected using
69 + // this lock. Do NOT hold this lock for long times, as it is sometimes
70 + // acquired from coroutines, and thus any wait time may block the guest.
71 QemuMutex lock;
72 Error *error;
73 time_t start_time;
74 @@ -47,20 +49,22 @@ static struct PVEBackupState {
75 size_t reused;
76 size_t zero_bytes;
77 GList *bitmap_list;
78 + bool finishing;
79 + bool starting;
80 } stat;
81 int64_t speed;
82 VmaWriter *vmaw;
83 ProxmoxBackupHandle *pbs;
84 GList *di_list;
85 JobTxn *txn;
86 - QemuMutex backup_mutex;
87 + CoMutex backup_mutex;
88 CoMutex dump_callback_mutex;
89 } backup_state;
90
91 static void pvebackup_init(void)
92 {
93 qemu_mutex_init(&backup_state.stat.lock);
94 - qemu_mutex_init(&backup_state.backup_mutex);
95 + qemu_co_mutex_init(&backup_state.backup_mutex);
96 qemu_co_mutex_init(&backup_state.dump_callback_mutex);
97 }
98
99 @@ -72,6 +76,7 @@ typedef struct PVEBackupDevInfo {
100 size_t size;
101 uint64_t block_size;
102 uint8_t dev_id;
103 + int completed_ret; // INT_MAX if not completed
104 char targetfile[PATH_MAX];
105 BdrvDirtyBitmap *bitmap;
106 BlockDriverState *target;
107 @@ -227,12 +232,12 @@ pvebackup_co_dump_vma_cb(
108 }
109
110 // assumes the caller holds backup_mutex
111 -static void coroutine_fn pvebackup_co_cleanup(void *unused)
112 +static void coroutine_fn pvebackup_co_cleanup(void)
113 {
114 assert(qemu_in_coroutine());
115
116 qemu_mutex_lock(&backup_state.stat.lock);
117 - backup_state.stat.end_time = time(NULL);
118 + backup_state.stat.finishing = true;
119 qemu_mutex_unlock(&backup_state.stat.lock);
120
121 if (backup_state.vmaw) {
122 @@ -261,35 +266,29 @@ static void coroutine_fn pvebackup_co_cleanup(void *unused)
123
124 g_list_free(backup_state.di_list);
125 backup_state.di_list = NULL;
126 +
127 + qemu_mutex_lock(&backup_state.stat.lock);
128 + backup_state.stat.end_time = time(NULL);
129 + backup_state.stat.finishing = false;
130 + qemu_mutex_unlock(&backup_state.stat.lock);
131 }
132
133 -// assumes the caller holds backup_mutex
134 -static void coroutine_fn pvebackup_complete_stream(void *opaque)
135 +static void coroutine_fn pvebackup_co_complete_stream(void *opaque)
136 {
137 PVEBackupDevInfo *di = opaque;
138 + int ret = di->completed_ret;
139
140 - bool error_or_canceled = pvebackup_error_or_canceled();
141 -
142 - if (backup_state.vmaw) {
143 - vma_writer_close_stream(backup_state.vmaw, di->dev_id);
144 + qemu_mutex_lock(&backup_state.stat.lock);
145 + bool starting = backup_state.stat.starting;
146 + qemu_mutex_unlock(&backup_state.stat.lock);
147 + if (starting) {
148 + /* in 'starting' state, no tasks have been run yet, meaning we can (and
149 + * must) skip all cleanup, as we don't know what has and hasn't been
150 + * initialized yet. */
151 + return;
152 }
153
154 - if (backup_state.pbs && !error_or_canceled) {
155 - Error *local_err = NULL;
156 - proxmox_backup_co_close_image(backup_state.pbs, di->dev_id, &local_err);
157 - if (local_err != NULL) {
158 - pvebackup_propagate_error(local_err);
159 - }
160 - }
161 -}
162 -
163 -static void pvebackup_complete_cb(void *opaque, int ret)
164 -{
165 - assert(!qemu_in_coroutine());
166 -
167 - PVEBackupDevInfo *di = opaque;
168 -
169 - qemu_mutex_lock(&backup_state.backup_mutex);
170 + qemu_co_mutex_lock(&backup_state.backup_mutex);
171
172 if (ret < 0) {
173 Error *local_err = NULL;
174 @@ -301,7 +300,19 @@ static void pvebackup_complete_cb(void *opaque, int ret)
175
176 assert(di->target == NULL);
177
178 - block_on_coroutine_fn(pvebackup_complete_stream, di);
179 + bool error_or_canceled = pvebackup_error_or_canceled();
180 +
181 + if (backup_state.vmaw) {
182 + vma_writer_close_stream(backup_state.vmaw, di->dev_id);
183 + }
184 +
185 + if (backup_state.pbs && !error_or_canceled) {
186 + Error *local_err = NULL;
187 + proxmox_backup_co_close_image(backup_state.pbs, di->dev_id, &local_err);
188 + if (local_err != NULL) {
189 + pvebackup_propagate_error(local_err);
190 + }
191 + }
192
193 // remove self from job list
194 backup_state.di_list = g_list_remove(backup_state.di_list, di);
195 @@ -310,21 +321,46 @@ static void pvebackup_complete_cb(void *opaque, int ret)
196
197 /* call cleanup if we're the last job */
198 if (!g_list_first(backup_state.di_list)) {
199 - block_on_coroutine_fn(pvebackup_co_cleanup, NULL);
200 + pvebackup_co_cleanup();
201 }
202
203 - qemu_mutex_unlock(&backup_state.backup_mutex);
204 + qemu_co_mutex_unlock(&backup_state.backup_mutex);
205 }
206
207 -static void pvebackup_cancel(void)
208 +static void pvebackup_complete_cb(void *opaque, int ret)
209 {
210 - assert(!qemu_in_coroutine());
211 + PVEBackupDevInfo *di = opaque;
212 + di->completed_ret = ret;
213 +
214 + /*
215 + * Schedule stream cleanup in async coroutine. close_image and finish might
216 + * take a while, so we can't block on them here. This way it also doesn't
217 + * matter if we're already running in a coroutine or not.
218 + * Note: di is a pointer to an entry in the global backup_state struct, so
219 + * it stays valid.
220 + */
221 + Coroutine *co = qemu_coroutine_create(pvebackup_co_complete_stream, di);
222 + aio_co_enter(qemu_get_aio_context(), co);
223 +}
224 +
225 +/*
226 + * job_cancel(_sync) does not like to be called from coroutines, so defer to
227 + * main loop processing via a bottom half.
228 + */
229 +static void job_cancel_bh(void *opaque) {
230 + CoCtxData *data = (CoCtxData*)opaque;
231 + Job *job = (Job*)data->data;
232 + job_cancel_sync(job, true);
233 + aio_co_enter(data->ctx, data->co);
234 +}
235
236 +static void coroutine_fn pvebackup_co_cancel(void *opaque)
237 +{
238 Error *cancel_err = NULL;
239 error_setg(&cancel_err, "backup canceled");
240 pvebackup_propagate_error(cancel_err);
241
242 - qemu_mutex_lock(&backup_state.backup_mutex);
243 + qemu_co_mutex_lock(&backup_state.backup_mutex);
244
245 if (backup_state.vmaw) {
246 /* make sure vma writer does not block anymore */
247 @@ -342,28 +378,22 @@ static void pvebackup_cancel(void)
248 ((PVEBackupDevInfo *)bdi->data)->job :
249 NULL;
250
251 - /* ref the job before releasing the mutex, just to be safe */
252 if (cancel_job) {
253 - WITH_JOB_LOCK_GUARD() {
254 - job_ref_locked(&cancel_job->job);
255 - }
256 + CoCtxData data = {
257 + .ctx = qemu_get_current_aio_context(),
258 + .co = qemu_coroutine_self(),
259 + .data = &cancel_job->job,
260 + };
261 + aio_bh_schedule_oneshot(data.ctx, job_cancel_bh, &data);
262 + qemu_coroutine_yield();
263 }
264
265 - /* job_cancel_sync may enter the job, so we need to release the
266 - * backup_mutex to avoid deadlock */
267 - qemu_mutex_unlock(&backup_state.backup_mutex);
268 -
269 - if (cancel_job) {
270 - WITH_JOB_LOCK_GUARD() {
271 - job_cancel_sync_locked(&cancel_job->job, true);
272 - job_unref_locked(&cancel_job->job);
273 - }
274 - }
275 + qemu_co_mutex_unlock(&backup_state.backup_mutex);
276 }
277
278 void qmp_backup_cancel(Error **errp)
279 {
280 - pvebackup_cancel();
281 + block_on_coroutine_fn(pvebackup_co_cancel, NULL);
282 }
283
284 // assumes the caller holds backup_mutex
285 @@ -416,10 +446,18 @@ static int coroutine_fn pvebackup_co_add_config(
286 goto out;
287 }
288
289 -static bool create_backup_jobs(void) {
290 +/*
291 + * backup_job_create can *not* be run from a coroutine (and requires an
292 + * acquired AioContext), so this can't either.
293 + * The caller is responsible that backup_mutex is held nonetheless.
294 + */
295 +static void create_backup_jobs_bh(void *opaque) {
296
297 assert(!qemu_in_coroutine());
298
299 + CoCtxData *data = (CoCtxData*)opaque;
300 + Error **errp = (Error**)data->data;
301 +
302 Error *local_err = NULL;
303
304 /* create job transaction to synchronize bitmap commit and cancel all
305 @@ -455,24 +493,19 @@ static bool create_backup_jobs(void) {
306
307 aio_context_release(aio_context);
308
309 - if (!job || local_err != NULL) {
310 - Error *create_job_err = NULL;
311 - error_setg(&create_job_err, "backup_job_create failed: %s",
312 - local_err ? error_get_pretty(local_err) : "null");
313 + di->job = job;
314
315 - pvebackup_propagate_error(create_job_err);
316 + if (!job || local_err) {
317 + error_setg(errp, "backup_job_create failed: %s",
318 + local_err ? error_get_pretty(local_err) : "null");
319 break;
320 }
321
322 - di->job = job;
323 -
324 bdrv_unref(di->target);
325 di->target = NULL;
326 }
327
328 - bool errors = pvebackup_error_or_canceled();
329 -
330 - if (errors) {
331 + if (*errp) {
332 l = backup_state.di_list;
333 while (l) {
334 PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
335 @@ -485,13 +518,15 @@ static bool create_backup_jobs(void) {
336
337 if (di->job) {
338 WITH_JOB_LOCK_GUARD() {
339 + job_cancel_sync_locked(&di->job->job, true);
340 job_unref_locked(&di->job->job);
341 }
342 }
343 }
344 }
345
346 - return errors;
347 + /* return */
348 + aio_co_enter(data->ctx, data->co);
349 }
350
351 typedef struct QmpBackupTask {
352 @@ -528,11 +563,12 @@ typedef struct QmpBackupTask {
353 UuidInfo *result;
354 } QmpBackupTask;
355
356 -// assumes the caller holds backup_mutex
357 static void coroutine_fn pvebackup_co_prepare(void *opaque)
358 {
359 assert(qemu_in_coroutine());
360
361 + qemu_co_mutex_lock(&backup_state.backup_mutex);
362 +
363 QmpBackupTask *task = opaque;
364
365 task->result = NULL; // just to be sure
366 @@ -553,8 +589,9 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
367 const char *firewall_name = "qemu-server.fw";
368
369 if (backup_state.di_list) {
370 - error_set(task->errp, ERROR_CLASS_GENERIC_ERROR,
371 + error_set(task->errp, ERROR_CLASS_GENERIC_ERROR,
372 "previous backup not finished");
373 + qemu_co_mutex_unlock(&backup_state.backup_mutex);
374 return;
375 }
376
377 @@ -621,6 +658,8 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
378 }
379 di->size = size;
380 total += size;
381 +
382 + di->completed_ret = INT_MAX;
383 }
384
385 uuid_generate(uuid);
386 @@ -852,6 +891,8 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
387 backup_state.stat.dirty = total - backup_state.stat.reused;
388 backup_state.stat.transferred = 0;
389 backup_state.stat.zero_bytes = 0;
390 + backup_state.stat.finishing = false;
391 + backup_state.stat.starting = true;
392
393 qemu_mutex_unlock(&backup_state.stat.lock);
394
395 @@ -866,6 +907,33 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque)
396 uuid_info->UUID = uuid_str;
397
398 task->result = uuid_info;
399 +
400 + /* Run create_backup_jobs_bh outside of coroutine (in BH) but keep
401 + * backup_mutex locked. This is fine, a CoMutex can be held across yield
402 + * points, and we'll release it as soon as the BH reschedules us.
403 + */
404 + CoCtxData waker = {
405 + .co = qemu_coroutine_self(),
406 + .ctx = qemu_get_current_aio_context(),
407 + .data = &local_err,
408 + };
409 + aio_bh_schedule_oneshot(waker.ctx, create_backup_jobs_bh, &waker);
410 + qemu_coroutine_yield();
411 +
412 + if (local_err) {
413 + error_propagate(task->errp, local_err);
414 + goto err;
415 + }
416 +
417 + qemu_co_mutex_unlock(&backup_state.backup_mutex);
418 +
419 + qemu_mutex_lock(&backup_state.stat.lock);
420 + backup_state.stat.starting = false;
421 + qemu_mutex_unlock(&backup_state.stat.lock);
422 +
423 + /* start the first job in the transaction */
424 + job_txn_start_seq(backup_state.txn);
425 +
426 return;
427
428 err_mutex:
429 @@ -888,6 +956,7 @@ err:
430 g_free(di);
431 }
432 g_list_free(di_list);
433 + backup_state.di_list = NULL;
434
435 if (devs) {
436 g_strfreev(devs);
437 @@ -908,6 +977,8 @@ err:
438 }
439
440 task->result = NULL;
441 +
442 + qemu_co_mutex_unlock(&backup_state.backup_mutex);
443 return;
444 }
445
446 @@ -961,24 +1032,8 @@ UuidInfo *qmp_backup(
447 .errp = errp,
448 };
449
450 - qemu_mutex_lock(&backup_state.backup_mutex);
451 -
452 block_on_coroutine_fn(pvebackup_co_prepare, &task);
453
454 - if (*errp == NULL) {
455 - bool errors = create_backup_jobs();
456 - qemu_mutex_unlock(&backup_state.backup_mutex);
457 -
458 - if (!errors) {
459 - /* start the first job in the transaction
460 - * note: this might directly enter the job, so we need to do this
461 - * after unlocking the backup_mutex */
462 - job_txn_start_seq(backup_state.txn);
463 - }
464 - } else {
465 - qemu_mutex_unlock(&backup_state.backup_mutex);
466 - }
467 -
468 return task.result;
469 }
470
471 @@ -1030,6 +1085,7 @@ BackupStatus *qmp_query_backup(Error **errp)
472 info->transferred = backup_state.stat.transferred;
473 info->has_reused = true;
474 info->reused = backup_state.stat.reused;
475 + info->finishing = backup_state.stat.finishing;
476
477 qemu_mutex_unlock(&backup_state.stat.lock);
478
479 diff --git a/qapi/block-core.json b/qapi/block-core.json
480 index 7fde927621..bf559c6d52 100644
481 --- a/qapi/block-core.json
482 +++ b/qapi/block-core.json
483 @@ -770,12 +770,15 @@
484 #
485 # @uuid: uuid for this backup job
486 #
487 +# @finishing: if status='active' and finishing=true, then the backup process is
488 +# waiting for the target to finish.
489 +#
490 ##
491 { 'struct': 'BackupStatus',
492 'data': {'*status': 'str', '*errmsg': 'str', '*total': 'int', '*dirty': 'int',
493 '*transferred': 'int', '*zero-bytes': 'int', '*reused': 'int',
494 '*start-time': 'int', '*end-time': 'int',
495 - '*backup-file': 'str', '*uuid': 'str' } }
496 + '*backup-file': 'str', '*uuid': 'str', 'finishing': 'bool' } }
497
498 ##
499 # @BackupFormat: