]>
Commit | Line | Data |
---|---|---|
d333327a SR |
1 | From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 |
2 | From: Stefan Reiter <s.reiter@proxmox.com> | |
3 | Date: Mon, 28 Sep 2020 13:40:51 +0200 | |
0c893fd8 SR |
4 | Subject: [PATCH] PVE-Backup: Don't block on finishing and cleanup |
5 | create_backup_jobs | |
d333327a SR |
6 | |
7 | proxmox_backup_co_finish is already async, but previously we would wait | |
8 | for the coroutine using block_on_coroutine_fn(). Avoid this by | |
9 | scheduling pvebackup_co_complete_stream (and thus pvebackup_co_cleanup) | |
10 | as a real coroutine when calling from pvebackup_complete_cb. This is ok, | |
11 | since complete_stream uses the backup_mutex internally to synchronize, | |
12 | and other streams can happily continue writing in the meantime anyway. | |
13 | ||
14 | To accommodate, backup_mutex is converted to a CoMutex. This means | |
15 | converting every user to a coroutine. This is not just useful here, but | |
16 | will come in handy once this series[0] is merged, and QMP calls can be | |
17 | yield-able coroutines too. Then we can also finally get rid of | |
18 | block_on_coroutine_fn. | |
19 | ||
20 | Cases of aio_context_acquire/release from within what is now a coroutine | |
21 | are changed to aio_co_reschedule_self, which works since a running | |
22 | coroutine always holds the aio lock for the context it is running in. | |
23 | ||
72ae34ec SR |
24 | job_cancel_sync is called from a BH since it can't be run from a |
25 | coroutine (uses AIO_WAIT_WHILE internally). | |
d333327a | 26 | |
72ae34ec | 27 | Same thing for create_backup_jobs, which is converted to a BH too. |
d333327a SR |
28 | |
29 | To communicate the finishing state, a new property is introduced to | |
30 | query-backup: 'finishing'. A new state is explicitly not used, since | |
31 | that would break compatibility with older qemu-server versions. | |
32 | ||
0c893fd8 SR |
33 | Also fix create_backup_jobs: |
34 | ||
35 | No more weird bool returns, just the standard "errp" format used | |
36 | everywhere else too. With this, if backup_job_create fails, the error | |
37 | message is actually returned over QMP and can be shown to the user. | |
38 | ||
39 | To facilitate correct cleanup on such an error, we call | |
40 | create_backup_jobs as a bottom half directly from pvebackup_co_prepare. | |
41 | This additionally allows us to actually hold the backup_mutex during | |
42 | operation. | |
43 | ||
44 | Also add a job_cancel_sync before job_unref, since a job must be in | |
45 | STATUS_NULL to be deleted by unref, which could trigger an assert | |
46 | before. | |
47 | ||
d333327a | 48 | [0] https://lists.gnu.org/archive/html/qemu-devel/2020-09/msg03515.html |
72ae34ec SR |
49 | |
50 | Signed-off-by: Stefan Reiter <s.reiter@proxmox.com> | |
ddbf7a87 | 51 | Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com> |
d03e1b3c FE |
52 | [FE: add new force parameter to job_cancel_sync calls] |
53 | Signed-off-by: Fiona Ebner <f.ebner@proxmox.com> | |
d333327a | 54 | --- |
d03e1b3c | 55 | pve-backup.c | 212 +++++++++++++++++++++++++++---------------- |
72ae34ec | 56 | qapi/block-core.json | 5 +- |
d03e1b3c | 57 | 2 files changed, 138 insertions(+), 79 deletions(-) |
d333327a | 58 | |
d333327a | 59 | diff --git a/pve-backup.c b/pve-backup.c |
b64c4dec | 60 | index 629da3e6c7..1da9dd9edc 100644 |
d333327a SR |
61 | --- a/pve-backup.c |
62 | +++ b/pve-backup.c | |
bf251437 | 63 | @@ -35,7 +35,9 @@ const char *PBS_BITMAP_NAME = "pbs-incremental-dirty-bitmap"; |
d333327a SR |
64 | |
65 | static struct PVEBackupState { | |
66 | struct { | |
67 | - // Everithing accessed from qmp_backup_query command is protected using lock | |
68 | + // Everything accessed from qmp_backup_query command is protected using | |
69 | + // this lock. Do NOT hold this lock for long times, as it is sometimes | |
70 | + // acquired from coroutines, and thus any wait time may block the guest. | |
71 | QemuMutex lock; | |
72 | Error *error; | |
73 | time_t start_time; | |
bf251437 | 74 | @@ -49,20 +51,22 @@ static struct PVEBackupState { |
d333327a SR |
75 | size_t reused; |
76 | size_t zero_bytes; | |
77 | GList *bitmap_list; | |
78 | + bool finishing; | |
0c893fd8 | 79 | + bool starting; |
d333327a SR |
80 | } stat; |
81 | int64_t speed; | |
82 | VmaWriter *vmaw; | |
83 | ProxmoxBackupHandle *pbs; | |
84 | GList *di_list; | |
85 | JobTxn *txn; | |
86 | - QemuMutex backup_mutex; | |
87 | + CoMutex backup_mutex; | |
88 | CoMutex dump_callback_mutex; | |
89 | } backup_state; | |
90 | ||
91 | static void pvebackup_init(void) | |
92 | { | |
93 | qemu_mutex_init(&backup_state.stat.lock); | |
94 | - qemu_mutex_init(&backup_state.backup_mutex); | |
95 | + qemu_co_mutex_init(&backup_state.backup_mutex); | |
96 | qemu_co_mutex_init(&backup_state.dump_callback_mutex); | |
97 | } | |
98 | ||
bf251437 | 99 | @@ -74,6 +78,7 @@ typedef struct PVEBackupDevInfo { |
d333327a SR |
100 | size_t size; |
101 | uint64_t block_size; | |
102 | uint8_t dev_id; | |
103 | + int completed_ret; // INT_MAX if not completed | |
104 | char targetfile[PATH_MAX]; | |
105 | BdrvDirtyBitmap *bitmap; | |
106 | BlockDriverState *target; | |
bf251437 | 107 | @@ -229,12 +234,12 @@ pvebackup_co_dump_vma_cb( |
d333327a SR |
108 | } |
109 | ||
110 | // assumes the caller holds backup_mutex | |
111 | -static void coroutine_fn pvebackup_co_cleanup(void *unused) | |
112 | +static void coroutine_fn pvebackup_co_cleanup(void) | |
113 | { | |
114 | assert(qemu_in_coroutine()); | |
115 | ||
116 | qemu_mutex_lock(&backup_state.stat.lock); | |
117 | - backup_state.stat.end_time = time(NULL); | |
118 | + backup_state.stat.finishing = true; | |
119 | qemu_mutex_unlock(&backup_state.stat.lock); | |
120 | ||
121 | if (backup_state.vmaw) { | |
bf251437 | 122 | @@ -263,35 +268,29 @@ static void coroutine_fn pvebackup_co_cleanup(void *unused) |
d333327a SR |
123 | |
124 | g_list_free(backup_state.di_list); | |
125 | backup_state.di_list = NULL; | |
126 | + | |
127 | + qemu_mutex_lock(&backup_state.stat.lock); | |
128 | + backup_state.stat.end_time = time(NULL); | |
129 | + backup_state.stat.finishing = false; | |
130 | + qemu_mutex_unlock(&backup_state.stat.lock); | |
131 | } | |
132 | ||
133 | -// assumes the caller holds backup_mutex | |
134 | -static void coroutine_fn pvebackup_complete_stream(void *opaque) | |
135 | +static void coroutine_fn pvebackup_co_complete_stream(void *opaque) | |
136 | { | |
137 | PVEBackupDevInfo *di = opaque; | |
138 | + int ret = di->completed_ret; | |
d333327a | 139 | |
0c893fd8 SR |
140 | - bool error_or_canceled = pvebackup_error_or_canceled(); |
141 | - | |
142 | - if (backup_state.vmaw) { | |
143 | - vma_writer_close_stream(backup_state.vmaw, di->dev_id); | |
144 | + qemu_mutex_lock(&backup_state.stat.lock); | |
145 | + bool starting = backup_state.stat.starting; | |
146 | + qemu_mutex_unlock(&backup_state.stat.lock); | |
147 | + if (starting) { | |
148 | + /* in 'starting' state, no tasks have been run yet, meaning we can (and | |
149 | + * must) skip all cleanup, as we don't know what has and hasn't been | |
150 | + * initialized yet. */ | |
151 | + return; | |
d333327a | 152 | } |
0c893fd8 SR |
153 | |
154 | - if (backup_state.pbs && !error_or_canceled) { | |
155 | - Error *local_err = NULL; | |
156 | - proxmox_backup_co_close_image(backup_state.pbs, di->dev_id, &local_err); | |
157 | - if (local_err != NULL) { | |
158 | - pvebackup_propagate_error(local_err); | |
159 | - } | |
160 | - } | |
d333327a SR |
161 | -} |
162 | - | |
163 | -static void pvebackup_complete_cb(void *opaque, int ret) | |
164 | -{ | |
165 | - assert(!qemu_in_coroutine()); | |
166 | - | |
167 | - PVEBackupDevInfo *di = opaque; | |
168 | - | |
169 | - qemu_mutex_lock(&backup_state.backup_mutex); | |
0c893fd8 SR |
170 | + qemu_co_mutex_lock(&backup_state.backup_mutex); |
171 | ||
172 | if (ret < 0) { | |
173 | Error *local_err = NULL; | |
bf251437 | 174 | @@ -303,7 +302,19 @@ static void pvebackup_complete_cb(void *opaque, int ret) |
0c893fd8 SR |
175 | |
176 | assert(di->target == NULL); | |
177 | ||
d333327a | 178 | - block_on_coroutine_fn(pvebackup_complete_stream, di); |
0c893fd8 SR |
179 | + bool error_or_canceled = pvebackup_error_or_canceled(); |
180 | + | |
181 | + if (backup_state.vmaw) { | |
182 | + vma_writer_close_stream(backup_state.vmaw, di->dev_id); | |
183 | + } | |
184 | + | |
185 | + if (backup_state.pbs && !error_or_canceled) { | |
186 | + Error *local_err = NULL; | |
187 | + proxmox_backup_co_close_image(backup_state.pbs, di->dev_id, &local_err); | |
188 | + if (local_err != NULL) { | |
189 | + pvebackup_propagate_error(local_err); | |
190 | + } | |
191 | + } | |
d333327a SR |
192 | |
193 | // remove self from job list | |
194 | backup_state.di_list = g_list_remove(backup_state.di_list, di); | |
bf251437 | 195 | @@ -312,21 +323,46 @@ static void pvebackup_complete_cb(void *opaque, int ret) |
d333327a SR |
196 | |
197 | /* call cleanup if we're the last job */ | |
198 | if (!g_list_first(backup_state.di_list)) { | |
199 | - block_on_coroutine_fn(pvebackup_co_cleanup, NULL); | |
200 | + pvebackup_co_cleanup(); | |
201 | } | |
202 | ||
203 | - qemu_mutex_unlock(&backup_state.backup_mutex); | |
204 | + qemu_co_mutex_unlock(&backup_state.backup_mutex); | |
205 | } | |
206 | ||
207 | -static void pvebackup_cancel(void) | |
208 | +static void pvebackup_complete_cb(void *opaque, int ret) | |
209 | { | |
72ae34ec | 210 | - assert(!qemu_in_coroutine()); |
d333327a SR |
211 | + PVEBackupDevInfo *di = opaque; |
212 | + di->completed_ret = ret; | |
4fd0fa7f | 213 | + |
d333327a SR |
214 | + /* |
215 | + * Schedule stream cleanup in async coroutine. close_image and finish might | |
72ae34ec SR |
216 | + * take a while, so we can't block on them here. This way it also doesn't |
217 | + * matter if we're already running in a coroutine or not. | |
d333327a SR |
218 | + * Note: di is a pointer to an entry in the global backup_state struct, so |
219 | + * it stays valid. | |
220 | + */ | |
221 | + Coroutine *co = qemu_coroutine_create(pvebackup_co_complete_stream, di); | |
72ae34ec SR |
222 | + aio_co_enter(qemu_get_aio_context(), co); |
223 | +} | |
0c893fd8 | 224 | + |
72ae34ec SR |
225 | +/* |
226 | + * job_cancel(_sync) does not like to be called from coroutines, so defer to | |
227 | + * main loop processing via a bottom half. | |
228 | + */ | |
229 | +static void job_cancel_bh(void *opaque) { | |
230 | + CoCtxData *data = (CoCtxData*)opaque; | |
231 | + Job *job = (Job*)data->data; | |
4567474e | 232 | + job_cancel_sync(job, true); |
72ae34ec | 233 | + aio_co_enter(data->ctx, data->co); |
d333327a | 234 | +} |
4fd0fa7f | 235 | |
d333327a SR |
236 | +static void coroutine_fn pvebackup_co_cancel(void *opaque) |
237 | +{ | |
238 | Error *cancel_err = NULL; | |
239 | error_setg(&cancel_err, "backup canceled"); | |
240 | pvebackup_propagate_error(cancel_err); | |
241 | ||
242 | - qemu_mutex_lock(&backup_state.backup_mutex); | |
243 | + qemu_co_mutex_lock(&backup_state.backup_mutex); | |
244 | ||
245 | if (backup_state.vmaw) { | |
246 | /* make sure vma writer does not block anymore */ | |
bf251437 | 247 | @@ -344,28 +380,22 @@ static void pvebackup_cancel(void) |
d333327a SR |
248 | ((PVEBackupDevInfo *)bdi->data)->job : |
249 | NULL; | |
250 | ||
251 | - /* ref the job before releasing the mutex, just to be safe */ | |
252 | if (cancel_job) { | |
d03e1b3c FE |
253 | - WITH_JOB_LOCK_GUARD() { |
254 | - job_ref_locked(&cancel_job->job); | |
255 | - } | |
72ae34ec SR |
256 | + CoCtxData data = { |
257 | + .ctx = qemu_get_current_aio_context(), | |
258 | + .co = qemu_coroutine_self(), | |
259 | + .data = &cancel_job->job, | |
260 | + }; | |
261 | + aio_bh_schedule_oneshot(data.ctx, job_cancel_bh, &data); | |
262 | + qemu_coroutine_yield(); | |
d333327a SR |
263 | } |
264 | ||
265 | - /* job_cancel_sync may enter the job, so we need to release the | |
266 | - * backup_mutex to avoid deadlock */ | |
267 | - qemu_mutex_unlock(&backup_state.backup_mutex); | |
268 | - | |
269 | - if (cancel_job) { | |
d03e1b3c FE |
270 | - WITH_JOB_LOCK_GUARD() { |
271 | - job_cancel_sync_locked(&cancel_job->job, true); | |
272 | - job_unref_locked(&cancel_job->job); | |
273 | - } | |
d333327a SR |
274 | - } |
275 | + qemu_co_mutex_unlock(&backup_state.backup_mutex); | |
276 | } | |
277 | ||
278 | void qmp_backup_cancel(Error **errp) | |
279 | { | |
280 | - pvebackup_cancel(); | |
281 | + block_on_coroutine_fn(pvebackup_co_cancel, NULL); | |
282 | } | |
283 | ||
284 | // assumes the caller holds backup_mutex | |
bf251437 | 285 | @@ -418,10 +448,18 @@ static int coroutine_fn pvebackup_co_add_config( |
d333327a SR |
286 | goto out; |
287 | } | |
288 | ||
0c893fd8 | 289 | -static bool create_backup_jobs(void) { |
d333327a SR |
290 | +/* |
291 | + * backup_job_create can *not* be run from a coroutine (and requires an | |
292 | + * acquired AioContext), so this can't either. | |
0c893fd8 | 293 | + * The caller is responsible that backup_mutex is held nonetheless. |
d333327a | 294 | + */ |
0c893fd8 | 295 | +static void create_backup_jobs_bh(void *opaque) { |
d333327a SR |
296 | |
297 | assert(!qemu_in_coroutine()); | |
0c893fd8 SR |
298 | |
299 | + CoCtxData *data = (CoCtxData*)opaque; | |
300 | + Error **errp = (Error**)data->data; | |
301 | + | |
302 | Error *local_err = NULL; | |
303 | ||
304 | /* create job transaction to synchronize bitmap commit and cancel all | |
bf251437 | 305 | @@ -457,24 +495,19 @@ static bool create_backup_jobs(void) { |
0c893fd8 SR |
306 | |
307 | aio_context_release(aio_context); | |
308 | ||
309 | - if (!job || local_err != NULL) { | |
310 | - Error *create_job_err = NULL; | |
311 | - error_setg(&create_job_err, "backup_job_create failed: %s", | |
4fd0fa7f | 312 | - local_err ? error_get_pretty(local_err) : "null"); |
0c893fd8 | 313 | + di->job = job; |
4fd0fa7f TL |
314 | |
315 | - pvebackup_propagate_error(create_job_err); | |
0c893fd8 SR |
316 | + if (!job || local_err) { |
317 | + error_setg(errp, "backup_job_create failed: %s", | |
4fd0fa7f | 318 | + local_err ? error_get_pretty(local_err) : "null"); |
0c893fd8 SR |
319 | break; |
320 | } | |
321 | ||
322 | - di->job = job; | |
323 | - | |
324 | bdrv_unref(di->target); | |
325 | di->target = NULL; | |
326 | } | |
327 | ||
328 | - bool errors = pvebackup_error_or_canceled(); | |
329 | - | |
330 | - if (errors) { | |
331 | + if (*errp) { | |
332 | l = backup_state.di_list; | |
333 | while (l) { | |
334 | PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data; | |
bf251437 | 335 | @@ -487,13 +520,15 @@ static bool create_backup_jobs(void) { |
0c893fd8 SR |
336 | |
337 | if (di->job) { | |
d03e1b3c FE |
338 | WITH_JOB_LOCK_GUARD() { |
339 | + job_cancel_sync_locked(&di->job->job, true); | |
340 | job_unref_locked(&di->job->job); | |
341 | } | |
0c893fd8 SR |
342 | } |
343 | } | |
344 | } | |
345 | ||
346 | - return errors; | |
347 | + /* return */ | |
348 | + aio_co_enter(data->ctx, data->co); | |
349 | } | |
350 | ||
351 | typedef struct QmpBackupTask { | |
bf251437 | 352 | @@ -522,11 +557,12 @@ typedef struct QmpBackupTask { |
d333327a SR |
353 | UuidInfo *result; |
354 | } QmpBackupTask; | |
355 | ||
356 | -// assumes the caller holds backup_mutex | |
357 | static void coroutine_fn pvebackup_co_prepare(void *opaque) | |
358 | { | |
359 | assert(qemu_in_coroutine()); | |
360 | ||
361 | + qemu_co_mutex_lock(&backup_state.backup_mutex); | |
362 | + | |
363 | QmpBackupTask *task = opaque; | |
364 | ||
365 | task->result = NULL; // just to be sure | |
bf251437 | 366 | @@ -547,8 +583,9 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque) |
72ae34ec SR |
367 | const char *firewall_name = "qemu-server.fw"; |
368 | ||
369 | if (backup_state.di_list) { | |
370 | - error_set(task->errp, ERROR_CLASS_GENERIC_ERROR, | |
371 | + error_set(task->errp, ERROR_CLASS_GENERIC_ERROR, | |
372 | "previous backup not finished"); | |
373 | + qemu_co_mutex_unlock(&backup_state.backup_mutex); | |
374 | return; | |
375 | } | |
376 | ||
bf251437 | 377 | @@ -615,6 +652,8 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque) |
d333327a SR |
378 | } |
379 | di->size = size; | |
380 | total += size; | |
381 | + | |
382 | + di->completed_ret = INT_MAX; | |
383 | } | |
384 | ||
385 | uuid_generate(uuid); | |
bf251437 | 386 | @@ -846,6 +885,8 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque) |
d333327a SR |
387 | backup_state.stat.dirty = total - backup_state.stat.reused; |
388 | backup_state.stat.transferred = 0; | |
389 | backup_state.stat.zero_bytes = 0; | |
390 | + backup_state.stat.finishing = false; | |
0c893fd8 | 391 | + backup_state.stat.starting = true; |
d333327a SR |
392 | |
393 | qemu_mutex_unlock(&backup_state.stat.lock); | |
394 | ||
bf251437 | 395 | @@ -860,6 +901,33 @@ static void coroutine_fn pvebackup_co_prepare(void *opaque) |
d333327a SR |
396 | uuid_info->UUID = uuid_str; |
397 | ||
398 | task->result = uuid_info; | |
399 | + | |
0c893fd8 SR |
400 | + /* Run create_backup_jobs_bh outside of coroutine (in BH) but keep |
401 | + * backup_mutex locked. This is fine, a CoMutex can be held across yield | |
402 | + * points, and we'll release it as soon as the BH reschedules us. | |
403 | + */ | |
404 | + CoCtxData waker = { | |
405 | + .co = qemu_coroutine_self(), | |
406 | + .ctx = qemu_get_current_aio_context(), | |
407 | + .data = &local_err, | |
408 | + }; | |
409 | + aio_bh_schedule_oneshot(waker.ctx, create_backup_jobs_bh, &waker); | |
410 | + qemu_coroutine_yield(); | |
411 | + | |
412 | + if (local_err) { | |
413 | + error_propagate(task->errp, local_err); | |
414 | + goto err; | |
415 | + } | |
416 | + | |
d333327a | 417 | + qemu_co_mutex_unlock(&backup_state.backup_mutex); |
0c893fd8 SR |
418 | + |
419 | + qemu_mutex_lock(&backup_state.stat.lock); | |
420 | + backup_state.stat.starting = false; | |
421 | + qemu_mutex_unlock(&backup_state.stat.lock); | |
422 | + | |
423 | + /* start the first job in the transaction */ | |
424 | + job_txn_start_seq(backup_state.txn); | |
425 | + | |
d333327a SR |
426 | return; |
427 | ||
428 | err_mutex: | |
bf251437 | 429 | @@ -882,6 +950,7 @@ err: |
0c893fd8 SR |
430 | g_free(di); |
431 | } | |
432 | g_list_free(di_list); | |
433 | + backup_state.di_list = NULL; | |
434 | ||
435 | if (devs) { | |
436 | g_strfreev(devs); | |
bf251437 | 437 | @@ -902,6 +971,8 @@ err: |
d333327a SR |
438 | } |
439 | ||
440 | task->result = NULL; | |
441 | + | |
442 | + qemu_co_mutex_unlock(&backup_state.backup_mutex); | |
443 | return; | |
444 | } | |
445 | ||
bf251437 | 446 | @@ -947,24 +1018,8 @@ UuidInfo *qmp_backup( |
d333327a SR |
447 | .errp = errp, |
448 | }; | |
449 | ||
450 | - qemu_mutex_lock(&backup_state.backup_mutex); | |
451 | - | |
452 | block_on_coroutine_fn(pvebackup_co_prepare, &task); | |
453 | ||
0c893fd8 SR |
454 | - if (*errp == NULL) { |
455 | - bool errors = create_backup_jobs(); | |
d333327a | 456 | - qemu_mutex_unlock(&backup_state.backup_mutex); |
0c893fd8 SR |
457 | - |
458 | - if (!errors) { | |
d333327a SR |
459 | - /* start the first job in the transaction |
460 | - * note: this might directly enter the job, so we need to do this | |
461 | - * after unlocking the backup_mutex */ | |
0c893fd8 SR |
462 | - job_txn_start_seq(backup_state.txn); |
463 | - } | |
d333327a SR |
464 | - } else { |
465 | - qemu_mutex_unlock(&backup_state.backup_mutex); | |
0c893fd8 SR |
466 | - } |
467 | - | |
d333327a | 468 | return task.result; |
0c893fd8 SR |
469 | } |
470 | ||
bf251437 | 471 | @@ -1012,6 +1067,7 @@ BackupStatus *qmp_query_backup(Error **errp) |
d333327a SR |
472 | info->transferred = backup_state.stat.transferred; |
473 | info->has_reused = true; | |
474 | info->reused = backup_state.stat.reused; | |
475 | + info->finishing = backup_state.stat.finishing; | |
476 | ||
477 | qemu_mutex_unlock(&backup_state.stat.lock); | |
478 | ||
479 | diff --git a/qapi/block-core.json b/qapi/block-core.json | |
bf251437 | 480 | index 130d5f065f..43838212e3 100644 |
d333327a SR |
481 | --- a/qapi/block-core.json |
482 | +++ b/qapi/block-core.json | |
bf251437 | 483 | @@ -865,12 +865,15 @@ |
d333327a SR |
484 | # |
485 | # @uuid: uuid for this backup job | |
486 | # | |
487 | +# @finishing: if status='active' and finishing=true, then the backup process is | |
488 | +# waiting for the target to finish. | |
489 | +# | |
490 | ## | |
491 | { 'struct': 'BackupStatus', | |
492 | 'data': {'*status': 'str', '*errmsg': 'str', '*total': 'int', '*dirty': 'int', | |
493 | '*transferred': 'int', '*zero-bytes': 'int', '*reused': 'int', | |
494 | '*start-time': 'int', '*end-time': 'int', | |
495 | - '*backup-file': 'str', '*uuid': 'str' } } | |
496 | + '*backup-file': 'str', '*uuid': 'str', 'finishing': 'bool' } } | |
497 | ||
498 | ## | |
499 | # @BackupFormat: |