git.proxmox.com Git - pve-qemu.git/blob - debian/patches/pve/0021-PVE-Deprecated-adding-old-vma-files.patch
update patches for v4.0.0
[pve-qemu.git] / debian / patches / pve / 0021-PVE-Deprecated-adding-old-vma-files.patch
1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Wolfgang Bumiller <w.bumiller@proxmox.com>
3 Date: Mon, 7 Aug 2017 08:51:16 +0200
4 Subject: [PATCH] PVE: [Deprecated] adding old vma files
5
6 TODO: Move to using a libvma block backend
7 Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
8 ---
9 Makefile | 3 +-
10 Makefile.objs | 1 +
11 block/backup.c | 103 +++--
12 block/replication.c | 1 +
13 blockdev.c | 208 +++++----
14 include/block/block_int.h | 4 +
15 job.c | 3 +-
16 vma-reader.c | 857 ++++++++++++++++++++++++++++++++++++++
17 vma-writer.c | 771 ++++++++++++++++++++++++++++++++++
18 vma.c | 756 +++++++++++++++++++++++++++++++++
19 vma.h | 150 +++++++
20 11 files changed, 2752 insertions(+), 105 deletions(-)
21 create mode 100644 vma-reader.c
22 create mode 100644 vma-writer.c
23 create mode 100644 vma.c
24 create mode 100644 vma.h
25
26 diff --git a/Makefile b/Makefile
27 index 04a0d45050..0b5a8353ea 100644
28 --- a/Makefile
29 +++ b/Makefile
30 @@ -419,7 +419,7 @@ dummy := $(call unnest-vars,, \
31
32 include $(SRC_PATH)/tests/Makefile.include
33
34 -all: $(DOCS) $(if $(BUILD_DOCS),sphinxdocs) $(TOOLS) $(HELPERS-y) recurse-all modules
35 +all: $(DOCS) $(if $(BUILD_DOCS),sphinxdocs) $(TOOLS) vma$(EXESUF) $(HELPERS-y) recurse-all modules
36
37 qemu-version.h: FORCE
38 $(call quiet-command, \
39 @@ -509,6 +509,7 @@ qemu-img.o: qemu-img-cmds.h
40 qemu-img$(EXESUF): qemu-img.o $(authz-obj-y) $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
41 qemu-nbd$(EXESUF): qemu-nbd.o $(authz-obj-y) $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
42 qemu-io$(EXESUF): qemu-io.o $(authz-obj-y) $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
43 +vma$(EXESUF): vma.o vma-reader.o $(authz-obj-y) $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
44
45 qemu-bridge-helper$(EXESUF): qemu-bridge-helper.o $(COMMON_LDADDS)
46
47 diff --git a/Makefile.objs b/Makefile.objs
48 index 559486973a..9477a23ca2 100644
49 --- a/Makefile.objs
50 +++ b/Makefile.objs
51 @@ -18,6 +18,7 @@ block-obj-y += block.o blockjob.o job.o
52 block-obj-y += block/ scsi/
53 block-obj-y += qemu-io-cmds.o
54 block-obj-$(CONFIG_REPLICATION) += replication.o
55 +block-obj-y += vma-writer.o
56
57 block-obj-m = block/
58
59 diff --git a/block/backup.c b/block/backup.c
60 index 51c36d291b..18598fd491 100644
61 --- a/block/backup.c
62 +++ b/block/backup.c
63 @@ -41,6 +41,7 @@ typedef struct BackupBlockJob {
64 /* bitmap for sync=incremental */
65 BdrvDirtyBitmap *sync_bitmap;
66 MirrorSyncMode sync_mode;
67 + BackupDumpFunc *dump_cb;
68 BlockdevOnError on_source_error;
69 BlockdevOnError on_target_error;
70 CoRwlock flush_rwlock;
71 @@ -130,12 +131,20 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
72 }
73
74 if (qemu_iovec_is_zero(&qiov)) {
75 - ret = blk_co_pwrite_zeroes(job->target, start,
76 - qiov.size, write_flags | BDRV_REQ_MAY_UNMAP);
77 + if (job->dump_cb) {
78 + ret = job->dump_cb(job->common.job.opaque, job->target, start, qiov.size, NULL);
79 + } else {
80 + ret = blk_co_pwrite_zeroes(job->target, start,
81 + qiov.size, write_flags | BDRV_REQ_MAY_UNMAP);
82 + }
83 } else {
84 - ret = blk_co_pwritev(job->target, start,
85 - qiov.size, &qiov, write_flags |
86 - (job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0));
87 + if (job->dump_cb) {
88 + ret = job->dump_cb(job->common.job.opaque, job->target, start, qiov.size, *bounce_buffer);
89 + } else {
90 + ret = blk_co_pwritev(job->target, start,
91 + qiov.size, &qiov, write_flags |
92 + (job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0));
93 + }
94 }
95 if (ret < 0) {
96 trace_backup_do_cow_write_fail(job, start, ret);
97 @@ -213,7 +222,11 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
98 trace_backup_do_cow_process(job, start);
99
100 if (job->use_copy_range) {
101 - ret = backup_cow_with_offload(job, start, end, is_write_notifier);
102 + if (job->dump_cb) {
103 + ret = -1;
104 + } else {
105 + ret = backup_cow_with_offload(job, start, end, is_write_notifier);
106 + }
107 if (ret < 0) {
108 job->use_copy_range = false;
109 }
110 @@ -297,7 +310,9 @@ static void backup_abort(Job *job)
111 static void backup_clean(Job *job)
112 {
113 BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
114 - assert(s->target);
115 + if (!s->target) {
116 + return;
117 + }
118 blk_unref(s->target);
119 s->target = NULL;
120 }
121 @@ -306,7 +321,9 @@ static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
122 {
123 BackupBlockJob *s = container_of(job, BackupBlockJob, common);
124
125 - blk_set_aio_context(s->target, aio_context);
126 + if (s->target) {
127 + blk_set_aio_context(s->target, aio_context);
128 + }
129 }
130
131 void backup_do_checkpoint(BlockJob *job, Error **errp)
132 @@ -347,9 +364,11 @@ static BlockErrorAction backup_error_action(BackupBlockJob *job,
133 if (read) {
134 return block_job_error_action(&job->common, job->on_source_error,
135 true, error);
136 - } else {
137 + } else if (job->target) {
138 return block_job_error_action(&job->common, job->on_target_error,
139 false, error);
140 + } else {
141 + return BLOCK_ERROR_ACTION_REPORT;
142 }
143 }
144
145 @@ -571,6 +590,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
146 BlockdevOnError on_source_error,
147 BlockdevOnError on_target_error,
148 int creation_flags,
149 + BackupDumpFunc *dump_cb,
150 BlockCompletionFunc *cb, void *opaque,
151 int pause_count,
152 JobTxn *txn, Error **errp)
153 @@ -581,7 +601,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
154 int ret;
155
156 assert(bs);
157 - assert(target);
158 + assert(target || dump_cb);
159
160 if (bs == target) {
161 error_setg(errp, "Source and target cannot be the same");
162 @@ -594,13 +614,13 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
163 return NULL;
164 }
165
166 - if (!bdrv_is_inserted(target)) {
167 + if (target && !bdrv_is_inserted(target)) {
168 error_setg(errp, "Device is not inserted: %s",
169 bdrv_get_device_name(target));
170 return NULL;
171 }
172
173 - if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
174 + if (target && compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
175 error_setg(errp, "Compression is not supported for this drive %s",
176 bdrv_get_device_name(target));
177 return NULL;
178 @@ -610,7 +630,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
179 return NULL;
180 }
181
182 - if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
183 + if (target && bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
184 return NULL;
185 }
186
187 @@ -650,15 +670,18 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
188 goto error;
189 }
190
191 - /* The target must match the source in size, so no resize here either */
192 - job->target = blk_new(BLK_PERM_WRITE,
193 - BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
194 - BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
195 - ret = blk_insert_bs(job->target, target, errp);
196 - if (ret < 0) {
197 - goto error;
198 + if (target) {
199 + /* The target must match the source in size, so no resize here either */
200 + job->target = blk_new(BLK_PERM_WRITE,
201 + BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
202 + BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
203 + ret = blk_insert_bs(job->target, target, errp);
204 + if (ret < 0) {
205 + goto error;
206 + }
207 }
208
209 + job->dump_cb = dump_cb;
210 job->on_source_error = on_source_error;
211 job->on_target_error = on_target_error;
212 job->sync_mode = sync_mode;
213 @@ -669,6 +692,9 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
214 /* Detect image-fleecing (and similar) schemes */
215 job->serialize_target_writes = bdrv_chain_contains(target, bs);
216
217 + if (!target) {
218 + goto use_default_cluster_size;
219 + }
220 /* If there is no backing file on the target, we cannot rely on COW if our
221 * backup cluster size is smaller than the target cluster size. Even for
222 * targets with a backing file, try to avoid COW if possible. */
223 @@ -693,18 +719,35 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
224 /* Not fatal; just trudge on ahead. */
225 job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
226 } else {
227 - job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
228 + use_default_cluster_size:
229 + ret = bdrv_get_info(bs, &bdi);
230 + if (ret < 0) {
231 + job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
232 + } else {
233 + /* round down to nearest BACKUP_CLUSTER_SIZE_DEFAULT */
234 + job->cluster_size = (bdi.cluster_size / BACKUP_CLUSTER_SIZE_DEFAULT) * BACKUP_CLUSTER_SIZE_DEFAULT;
235 + if (job->cluster_size == 0) {
236 + /* but we can't go below it */
237 + job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
238 + }
239 + }
240 + }
241 + if (target) {
242 + job->use_copy_range = true;
243 + job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
244 + blk_get_max_transfer(job->target));
245 + job->copy_range_size = MAX(job->cluster_size,
246 + QEMU_ALIGN_UP(job->copy_range_size,
247 + job->cluster_size));
248 + } else {
249 + job->use_copy_range = false;
250 }
251 - job->use_copy_range = true;
252 - job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
253 - blk_get_max_transfer(job->target));
254 - job->copy_range_size = MAX(job->cluster_size,
255 - QEMU_ALIGN_UP(job->copy_range_size,
256 - job->cluster_size));
257
258 - /* Required permissions are already taken with target's blk_new() */
259 - block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
260 - &error_abort);
261 + if (target) {
262 + /* Required permissions are already taken with target's blk_new() */
263 + block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
264 + &error_abort);
265 + }
266 job->len = len;
267 job->common.job.pause_count += pause_count;
268
269 diff --git a/block/replication.c b/block/replication.c
270 index 0a265db1b5..e85c62ba9c 100644
271 --- a/block/replication.c
272 +++ b/block/replication.c
273 @@ -543,6 +543,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
274 0, MIRROR_SYNC_MODE_NONE, NULL, false,
275 BLOCKDEV_ON_ERROR_REPORT,
276 BLOCKDEV_ON_ERROR_REPORT, JOB_INTERNAL,
277 + NULL,
278 backup_job_completed, bs, 0, NULL, &local_err);
279 if (local_err) {
280 error_propagate(errp, local_err);
281 diff --git a/blockdev.c b/blockdev.c
282 index 9210494b47..f8ce285caa 100644
283 --- a/blockdev.c
284 +++ b/blockdev.c
285 @@ -31,7 +31,6 @@
286 */
287
288 #include "qemu/osdep.h"
289 -#include "qemu/uuid.h"
290 #include "sysemu/block-backend.h"
291 #include "sysemu/blockdev.h"
292 #include "hw/block/block.h"
293 @@ -63,6 +62,7 @@
294 #include "qemu/cutils.h"
295 #include "qemu/help_option.h"
296 #include "qemu/throttle-options.h"
297 +#include "vma.h"
298
299 static QTAILQ_HEAD(, BlockDriverState) monitor_bdrv_states =
300 QTAILQ_HEAD_INITIALIZER(monitor_bdrv_states);
301 @@ -3160,15 +3160,14 @@ out:
302 static struct PVEBackupState {
303 Error *error;
304 bool cancel;
305 - QemuUUID uuid;
306 + uuid_t uuid;
307 char uuid_str[37];
308 int64_t speed;
309 time_t start_time;
310 time_t end_time;
311 char *backup_file;
312 - Object *vmaobj;
313 + VmaWriter *vmaw;
314 GList *di_list;
315 - size_t next_job;
316 size_t total;
317 size_t transferred;
318 size_t zero_bytes;
319 @@ -3187,6 +3186,71 @@ typedef struct PVEBackupDevInfo {
320
321 static void pvebackup_run_next_job(void);
322
323 +static int pvebackup_dump_cb(void *opaque, BlockBackend *target,
324 + uint64_t start, uint64_t bytes,
325 + const void *pbuf)
326 +{
327 + const uint64_t size = bytes;
328 + const unsigned char *buf = pbuf;
329 + PVEBackupDevInfo *di = opaque;
330 +
331 + if (backup_state.cancel) {
332 + return size; // return success
333 + }
334 +
335 + uint64_t cluster_num = start / VMA_CLUSTER_SIZE;
336 + if ((cluster_num * VMA_CLUSTER_SIZE) != start) {
337 + if (!backup_state.error) {
338 + error_setg(&backup_state.error,
339 + "got unaligned write inside backup dump "
340 + "callback (sector %" PRIu64 ")", start);
341 + }
342 + return -1; // not aligned to cluster size
343 + }
344 +
345 + int ret = -1;
346 +
347 + if (backup_state.vmaw) {
348 + size_t zero_bytes = 0;
349 + uint64_t remaining = size;
350 + while (remaining > 0) {
351 + ret = vma_writer_write(backup_state.vmaw, di->dev_id, cluster_num,
352 + buf, &zero_bytes);
353 + ++cluster_num;
354 + if (buf) {
355 + buf += VMA_CLUSTER_SIZE;
356 + }
357 + if (ret < 0) {
358 + if (!backup_state.error) {
359 + vma_writer_error_propagate(backup_state.vmaw, &backup_state.error);
360 + }
361 + if (di->bs && di->bs->job) {
362 + job_cancel(&di->bs->job->job, true);
363 + }
364 + break;
365 + } else {
366 + backup_state.zero_bytes += zero_bytes;
367 + if (remaining >= VMA_CLUSTER_SIZE) {
368 + backup_state.transferred += VMA_CLUSTER_SIZE;
369 + remaining -= VMA_CLUSTER_SIZE;
370 + } else {
371 + backup_state.transferred += remaining;
372 + remaining = 0;
373 + }
374 + }
375 + }
376 + } else {
377 + if (!buf) {
378 + backup_state.zero_bytes += size;
379 + }
380 + backup_state.transferred += size;
381 + }
382 +
383 + // Note: always return success, because we want that writes succeed anyways.
384 +
385 + return size;
386 +}
387 +
388 static void pvebackup_cleanup(void)
389 {
390 qemu_mutex_lock(&backup_state.backup_mutex);
391 @@ -3198,9 +3262,11 @@ static void pvebackup_cleanup(void)
392
393 backup_state.end_time = time(NULL);
394
395 - if (backup_state.vmaobj) {
396 - object_unparent(backup_state.vmaobj);
397 - backup_state.vmaobj = NULL;
398 + if (backup_state.vmaw) {
399 + Error *local_err = NULL;
400 + vma_writer_close(backup_state.vmaw, &local_err);
401 + error_propagate(&backup_state.error, local_err);
402 + backup_state.vmaw = NULL;
403 }
404
405 g_list_free(backup_state.di_list);
406 @@ -3208,6 +3274,13 @@ static void pvebackup_cleanup(void)
407 qemu_mutex_unlock(&backup_state.backup_mutex);
408 }
409
410 +static void coroutine_fn backup_close_vma_stream(void *opaque)
411 +{
412 + PVEBackupDevInfo *di = opaque;
413 +
414 + vma_writer_close_stream(backup_state.vmaw, di->dev_id);
415 +}
416 +
417 static void pvebackup_complete_cb(void *opaque, int ret)
418 {
419 // This always runs in the main loop
420 @@ -3224,9 +3297,9 @@ static void pvebackup_complete_cb(void *opaque, int ret)
421 di->bs = NULL;
422 di->target = NULL;
423
424 - if (backup_state.vmaobj) {
425 - object_unparent(backup_state.vmaobj);
426 - backup_state.vmaobj = NULL;
427 + if (backup_state.vmaw) {
428 + Coroutine *co = qemu_coroutine_create(backup_close_vma_stream, di);
429 + qemu_coroutine_enter(co);
430 }
431
432 // remove self from job queue
433 @@ -3254,14 +3327,9 @@ static void pvebackup_cancel(void *opaque)
434 error_setg(&backup_state.error, "backup cancelled");
435 }
436
437 - if (backup_state.vmaobj) {
438 - Error *err;
439 + if (backup_state.vmaw) {
440 /* make sure vma writer does not block anymore */
441 - if (!object_set_props(backup_state.vmaobj, &err, "blocked", "yes", NULL)) {
442 - if (err) {
443 - error_report_err(err);
444 - }
445 - }
446 + vma_writer_set_error(backup_state.vmaw, "backup cancelled");
447 }
448
449 GList *l = backup_state.di_list;
450 @@ -3292,18 +3360,14 @@ void qmp_backup_cancel(Error **errp)
451 Coroutine *co = qemu_coroutine_create(pvebackup_cancel, NULL);
452 qemu_coroutine_enter(co);
453
454 - while (backup_state.vmaobj) {
455 - /* FIXME: Find something better for this */
456 + while (backup_state.vmaw) {
457 + /* vma writer use main aio context */
458 aio_poll(qemu_get_aio_context(), true);
459 }
460 }
461
462 -void vma_object_add_config_file(Object *obj, const char *name,
463 - const char *contents, size_t len,
464 - Error **errp);
465 static int config_to_vma(const char *file, BackupFormat format,
466 - Object *vmaobj,
467 - const char *backup_dir,
468 + const char *backup_dir, VmaWriter *vmaw,
469 Error **errp)
470 {
471 char *cdata = NULL;
472 @@ -3317,7 +3381,12 @@ static int config_to_vma(const char *file, BackupFormat format,
473 char *basename = g_path_get_basename(file);
474
475 if (format == BACKUP_FORMAT_VMA) {
476 - vma_object_add_config_file(vmaobj, basename, cdata, clen, errp);
477 + if (vma_writer_add_config(vmaw, basename, cdata, clen) != 0) {
478 + error_setg(errp, "unable to add %s config data to vma archive", file);
479 + g_free(cdata);
480 + g_free(basename);
481 + return 1;
482 + }
483 } else if (format == BACKUP_FORMAT_DIR) {
484 char config_path[PATH_MAX];
485 snprintf(config_path, PATH_MAX, "%s/%s", backup_dir, basename);
486 @@ -3334,28 +3403,30 @@ static int config_to_vma(const char *file, BackupFormat format,
487 return 0;
488 }
489
490 +bool job_should_pause(Job *job);
491 static void pvebackup_run_next_job(void)
492 {
493 qemu_mutex_lock(&backup_state.backup_mutex);
494
495 - GList *next = g_list_nth(backup_state.di_list, backup_state.next_job);
496 - while (next) {
497 - PVEBackupDevInfo *di = (PVEBackupDevInfo *)next->data;
498 - backup_state.next_job++;
499 + GList *l = backup_state.di_list;
500 + while (l) {
501 + PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
502 + l = g_list_next(l);
503 if (!di->completed && di->bs && di->bs->job) {
504 BlockJob *job = di->bs->job;
505 AioContext *aio_context = blk_get_aio_context(job->blk);
506 aio_context_acquire(aio_context);
507 qemu_mutex_unlock(&backup_state.backup_mutex);
508 - if (backup_state.error || backup_state.cancel) {
509 - job_cancel_sync(job);
510 - } else {
511 - job_resume(job);
512 + if (job_should_pause(&job->job)) {
513 + if (backup_state.error || backup_state.cancel) {
514 + job_cancel_sync(&job->job);
515 + } else {
516 + job_resume(&job->job);
517 + }
518 }
519 aio_context_release(aio_context);
520 return;
521 }
522 - next = g_list_next(next);
523 }
524 qemu_mutex_unlock(&backup_state.backup_mutex);
525
526 @@ -3366,7 +3437,7 @@ static void pvebackup_run_next_job(void)
527 UuidInfo *qmp_backup(const char *backup_file, bool has_format,
528 BackupFormat format,
529 bool has_config_file, const char *config_file,
530 - bool has_firewall_file, const char *firewall_file,
531 + bool has_firewall_file, const char *firewall_file,
532 bool has_devlist, const char *devlist,
533 bool has_speed, int64_t speed, Error **errp)
534 {
535 @@ -3374,7 +3445,8 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
536 BlockDriverState *bs = NULL;
537 const char *backup_dir = NULL;
538 Error *local_err = NULL;
539 - QemuUUID uuid;
540 + uuid_t uuid;
541 + VmaWriter *vmaw = NULL;
542 gchar **devs = NULL;
543 GList *di_list = NULL;
544 GList *l;
545 @@ -3386,7 +3458,7 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
546 backup_state.backup_mutex_initialized = true;
547 }
548
549 - if (backup_state.di_list || backup_state.vmaobj) {
550 + if (backup_state.di_list) {
551 error_set(errp, ERROR_CLASS_GENERIC_ERROR,
552 "previous backup not finished");
553 return NULL;
554 @@ -3461,40 +3533,28 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
555 total += size;
556 }
557
558 - qemu_uuid_generate(&uuid);
559 + uuid_generate(uuid);
560
561 if (format == BACKUP_FORMAT_VMA) {
562 - char uuidstr[UUID_FMT_LEN+1];
563 - qemu_uuid_unparse(&uuid, uuidstr);
564 - uuidstr[UUID_FMT_LEN] = 0;
565 - backup_state.vmaobj =
566 - object_new_with_props("vma", object_get_objects_root(),
567 - "vma-backup-obj", &local_err,
568 - "filename", backup_file,
569 - "uuid", uuidstr,
570 - NULL);
571 - if (!backup_state.vmaobj) {
572 + vmaw = vma_writer_create(backup_file, uuid, &local_err);
573 + if (!vmaw) {
574 if (local_err) {
575 error_propagate(errp, local_err);
576 }
577 goto err;
578 }
579
580 + /* register all devices for vma writer */
581 l = di_list;
582 while (l) {
583 - QDict *options = qdict_new();
584 -
585 PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
586 l = g_list_next(l);
587
588 const char *devname = bdrv_get_device_name(di->bs);
589 - snprintf(di->targetfile, PATH_MAX, "vma-backup-obj/%s.raw", devname);
590 -
591 - qdict_put(options, "driver", qstring_from_str("vma-drive"));
592 - qdict_put(options, "size", qint_from_int(di->size));
593 - di->target = bdrv_open(di->targetfile, NULL, options, BDRV_O_RDWR, &local_err);
594 - if (!di->target) {
595 - error_propagate(errp, local_err);
596 + di->dev_id = vma_writer_register_stream(vmaw, devname, di->size);
597 + if (di->dev_id <= 0) {
598 + error_set(errp, ERROR_CLASS_GENERIC_ERROR,
599 + "register_stream failed");
600 goto err;
601 }
602 }
603 @@ -3535,14 +3595,14 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
604
605 /* add configuration file to archive */
606 if (has_config_file) {
607 - if(config_to_vma(config_file, format, backup_state.vmaobj, backup_dir, errp) != 0) {
608 + if (config_to_vma(config_file, format, backup_dir, vmaw, errp) != 0) {
609 goto err;
610 }
611 }
612
613 /* add firewall file to archive */
614 if (has_firewall_file) {
615 - if(config_to_vma(firewall_file, format, backup_state.vmaobj, backup_dir, errp) != 0) {
616 + if (config_to_vma(firewall_file, format, backup_dir, vmaw, errp) != 0) {
617 goto err;
618 }
619 }
620 @@ -3565,12 +3625,13 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
621 }
622 backup_state.backup_file = g_strdup(backup_file);
623
624 - memcpy(&backup_state.uuid, &uuid, sizeof(uuid));
625 - qemu_uuid_unparse(&uuid, backup_state.uuid_str);
626 + backup_state.vmaw = vmaw;
627 +
628 + uuid_copy(backup_state.uuid, uuid);
629 + uuid_unparse_lower(uuid, backup_state.uuid_str);
630
631 qemu_mutex_lock(&backup_state.backup_mutex);
632 backup_state.di_list = di_list;
633 - backup_state.next_job = 0;
634
635 backup_state.total = total;
636 backup_state.transferred = 0;
637 @@ -3581,21 +3642,21 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
638 while (l) {
639 PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
640 l = g_list_next(l);
641 -
642 job = backup_job_create(NULL, di->bs, di->target, speed, MIRROR_SYNC_MODE_FULL, NULL,
643 false, BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
644 JOB_DEFAULT,
645 - pvebackup_complete_cb, di, 2, NULL, &local_err);
646 - if (di->target) {
647 - bdrv_unref(di->target);
648 - di->target = NULL;
649 - }
650 + pvebackup_dump_cb, pvebackup_complete_cb, di,
651 + 1, NULL, &local_err);
652 if (!job || local_err != NULL) {
653 error_setg(&backup_state.error, "backup_job_create failed");
654 pvebackup_cancel(NULL);
655 } else {
656 job_start(&job->job);
657 }
658 + if (di->target) {
659 + bdrv_unref(di->target);
660 + di->target = NULL;
661 + }
662 }
663
664 qemu_mutex_unlock(&backup_state.backup_mutex);
665 @@ -3631,9 +3692,10 @@ err:
666 g_strfreev(devs);
667 }
668
669 - if (backup_state.vmaobj) {
670 - object_unparent(backup_state.vmaobj);
671 - backup_state.vmaobj = NULL;
672 + if (vmaw) {
673 + Error *err = NULL;
674 + vma_writer_close(vmaw, &err);
675 + unlink(backup_file);
676 }
677
678 if (backup_dir) {
679 @@ -4086,7 +4148,7 @@ static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn,
680 job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
681 backup->sync, bmap, backup->compress,
682 backup->on_source_error, backup->on_target_error,
683 - job_flags, NULL, NULL, 0, txn, &local_err);
684 + job_flags, NULL, NULL, NULL, 0, txn, &local_err);
685 bdrv_unref(target_bs);
686 if (local_err != NULL) {
687 error_propagate(errp, local_err);
688 @@ -4196,7 +4258,7 @@ BlockJob *do_blockdev_backup(BlockdevBackup *backup, JobTxn *txn,
689 job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
690 backup->sync, bmap, backup->compress,
691 backup->on_source_error, backup->on_target_error,
692 - job_flags, NULL, NULL, 0, txn, &local_err);
693 + job_flags, NULL, NULL, NULL, 0, txn, &local_err);
694 if (local_err != NULL) {
695 error_propagate(errp, local_err);
696 }
697 diff --git a/include/block/block_int.h b/include/block/block_int.h
698 index b409e02be8..fd1828cd70 100644
699 --- a/include/block/block_int.h
700 +++ b/include/block/block_int.h
701 @@ -61,6 +61,9 @@
702
703 #define BLOCK_PROBE_BUF_SIZE 512
704
705 +typedef int BackupDumpFunc(void *opaque, BlockBackend *be,
706 + uint64_t offset, uint64_t bytes, const void *buf);
707 +
708 enum BdrvTrackedRequestType {
709 BDRV_TRACKED_READ,
710 BDRV_TRACKED_WRITE,
711 @@ -1156,6 +1159,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
712 BlockdevOnError on_source_error,
713 BlockdevOnError on_target_error,
714 int creation_flags,
715 + BackupDumpFunc *dump_cb,
716 BlockCompletionFunc *cb, void *opaque,
717 int pause_count,
718 JobTxn *txn, Error **errp);
719 diff --git a/job.c b/job.c
720 index 86161bd9f3..114640688a 100644
721 --- a/job.c
722 +++ b/job.c
723 @@ -249,7 +249,8 @@ static bool job_started(Job *job)
724 return job->co;
725 }
726
727 -static bool job_should_pause(Job *job)
728 +bool job_should_pause(Job *job);
729 +bool job_should_pause(Job *job)
730 {
731 return job->pause_count > 0;
732 }
733 diff --git a/vma-reader.c b/vma-reader.c
734 new file mode 100644
735 index 0000000000..2b1d1cdab3
736 --- /dev/null
737 +++ b/vma-reader.c
738 @@ -0,0 +1,857 @@
739 +/*
740 + * VMA: Virtual Machine Archive
741 + *
742 + * Copyright (C) 2012 Proxmox Server Solutions
743 + *
744 + * Authors:
745 + * Dietmar Maurer (dietmar@proxmox.com)
746 + *
747 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
748 + * See the COPYING file in the top-level directory.
749 + *
750 + */
751 +
752 +#include "qemu/osdep.h"
753 +#include <glib.h>
754 +#include <uuid/uuid.h>
755 +
756 +#include "qemu-common.h"
757 +#include "qemu/timer.h"
758 +#include "qemu/ratelimit.h"
759 +#include "vma.h"
760 +#include "block/block.h"
761 +#include "sysemu/block-backend.h"
762 +
763 +static unsigned char zero_vma_block[VMA_BLOCK_SIZE];
764 +
765 +typedef struct VmaRestoreState {
766 + BlockBackend *target;
767 + bool write_zeroes;
768 + unsigned long *bitmap;
769 + int bitmap_size;
770 +} VmaRestoreState;
771 +
772 +struct VmaReader {
773 + int fd;
774 + GChecksum *md5csum;
775 + GHashTable *blob_hash;
776 + unsigned char *head_data;
777 + VmaDeviceInfo devinfo[256];
778 + VmaRestoreState rstate[256];
779 + GList *cdata_list;
780 + guint8 vmstate_stream;
781 + uint32_t vmstate_clusters;
782 + /* to show restore percentage if run with -v */
783 + time_t start_time;
784 + int64_t cluster_count;
785 + int64_t clusters_read;
786 + int64_t zero_cluster_data;
787 + int64_t partial_zero_cluster_data;
788 + int clusters_read_per;
789 +};
790 +
791 +static guint
792 +g_int32_hash(gconstpointer v)
793 +{
794 + return *(const uint32_t *)v;
795 +}
796 +
797 +static gboolean
798 +g_int32_equal(gconstpointer v1, gconstpointer v2)
799 +{
800 + return *((const uint32_t *)v1) == *((const uint32_t *)v2);
801 +}
802 +
803 +static int vma_reader_get_bitmap(VmaRestoreState *rstate, int64_t cluster_num)
804 +{
805 + assert(rstate);
806 + assert(rstate->bitmap);
807 +
808 + unsigned long val, idx, bit;
809 +
810 + idx = cluster_num / BITS_PER_LONG;
811 +
812 + assert(rstate->bitmap_size > idx);
813 +
814 + bit = cluster_num % BITS_PER_LONG;
815 + val = rstate->bitmap[idx];
816 +
817 + return !!(val & (1UL << bit));
818 +}
819 +
820 +static void vma_reader_set_bitmap(VmaRestoreState *rstate, int64_t cluster_num,
821 + int dirty)
822 +{
823 + assert(rstate);
824 + assert(rstate->bitmap);
825 +
826 + unsigned long val, idx, bit;
827 +
828 + idx = cluster_num / BITS_PER_LONG;
829 +
830 + assert(rstate->bitmap_size > idx);
831 +
832 + bit = cluster_num % BITS_PER_LONG;
833 + val = rstate->bitmap[idx];
834 + if (dirty) {
835 + if (!(val & (1UL << bit))) {
836 + val |= 1UL << bit;
837 + }
838 + } else {
839 + if (val & (1UL << bit)) {
840 + val &= ~(1UL << bit);
841 + }
842 + }
843 + rstate->bitmap[idx] = val;
844 +}
845 +
846 +typedef struct VmaBlob {
847 + uint32_t start;
848 + uint32_t len;
849 + void *data;
850 +} VmaBlob;
851 +
852 +static const VmaBlob *get_header_blob(VmaReader *vmar, uint32_t pos)
853 +{
854 + assert(vmar);
855 + assert(vmar->blob_hash);
856 +
857 + return g_hash_table_lookup(vmar->blob_hash, &pos);
858 +}
859 +
860 +static const char *get_header_str(VmaReader *vmar, uint32_t pos)
861 +{
862 + const VmaBlob *blob = get_header_blob(vmar, pos);
863 + if (!blob) {
864 + return NULL;
865 + }
866 + const char *res = (char *)blob->data;
867 + if (res[blob->len-1] != '\0') {
868 + return NULL;
869 + }
870 + return res;
871 +}
872 +
873 +static ssize_t
874 +safe_read(int fd, unsigned char *buf, size_t count)
875 +{
876 + ssize_t n;
877 +
878 + do {
879 + n = read(fd, buf, count);
880 + } while (n < 0 && errno == EINTR);
881 +
882 + return n;
883 +}
884 +
885 +static ssize_t
886 +full_read(int fd, unsigned char *buf, size_t len)
887 +{
888 + ssize_t n;
889 + size_t total;
890 +
891 + total = 0;
892 +
893 + while (len > 0) {
894 + n = safe_read(fd, buf, len);
895 +
896 + if (n == 0) {
897 + return total;
898 + }
899 +
900 + if (n <= 0) {
901 + break;
902 + }
903 +
904 + buf += n;
905 + total += n;
906 + len -= n;
907 + }
908 +
909 + if (len) {
910 + return -1;
911 + }
912 +
913 + return total;
914 +}
915 +
916 +void vma_reader_destroy(VmaReader *vmar)
917 +{
918 + assert(vmar);
919 +
920 + if (vmar->fd >= 0) {
921 + close(vmar->fd);
922 + }
923 +
924 + if (vmar->cdata_list) {
925 + g_list_free(vmar->cdata_list);
926 + }
927 +
928 + int i;
929 + for (i = 1; i < 256; i++) {
930 + if (vmar->rstate[i].bitmap) {
931 + g_free(vmar->rstate[i].bitmap);
932 + }
933 + }
934 +
935 + if (vmar->md5csum) {
936 + g_checksum_free(vmar->md5csum);
937 + }
938 +
939 + if (vmar->blob_hash) {
940 + g_hash_table_destroy(vmar->blob_hash);
941 + }
942 +
943 + if (vmar->head_data) {
944 + g_free(vmar->head_data);
945 + }
946 +
947 + g_free(vmar);
948 +
949 +}
950 +
951 +static int vma_reader_read_head(VmaReader *vmar, Error **errp)
952 +{
953 + assert(vmar);
954 + assert(errp);
955 + assert(*errp == NULL);
956 +
957 + unsigned char md5sum[16];
958 + int i;
959 + int ret = 0;
960 +
961 + vmar->head_data = g_malloc(sizeof(VmaHeader));
962 +
963 + if (full_read(vmar->fd, vmar->head_data, sizeof(VmaHeader)) !=
964 + sizeof(VmaHeader)) {
965 + error_setg(errp, "can't read vma header - %s",
966 + errno ? g_strerror(errno) : "got EOF");
967 + return -1;
968 + }
969 +
970 + VmaHeader *h = (VmaHeader *)vmar->head_data;
971 +
972 + if (h->magic != VMA_MAGIC) {
973 + error_setg(errp, "not a vma file - wrong magic number");
974 + return -1;
975 + }
976 +
977 + uint32_t header_size = GUINT32_FROM_BE(h->header_size);
978 + int need = header_size - sizeof(VmaHeader);
979 + if (need <= 0) {
980 + error_setg(errp, "wrong vma header size %d", header_size);
981 + return -1;
982 + }
983 +
984 + vmar->head_data = g_realloc(vmar->head_data, header_size);
985 + h = (VmaHeader *)vmar->head_data;
986 +
987 + if (full_read(vmar->fd, vmar->head_data + sizeof(VmaHeader), need) !=
988 + need) {
989 + error_setg(errp, "can't read vma header data - %s",
990 + errno ? g_strerror(errno) : "got EOF");
991 + return -1;
992 + }
993 +
994 + memcpy(md5sum, h->md5sum, 16);
995 + memset(h->md5sum, 0, 16);
996 +
997 + g_checksum_reset(vmar->md5csum);
998 + g_checksum_update(vmar->md5csum, vmar->head_data, header_size);
999 + gsize csize = 16;
1000 + g_checksum_get_digest(vmar->md5csum, (guint8 *)(h->md5sum), &csize);
1001 +
1002 + if (memcmp(md5sum, h->md5sum, 16) != 0) {
1003 + error_setg(errp, "wrong vma header checksum");
1004 + return -1;
1005 + }
1006 +
1007 + /* we can modify header data after checksum verify */
1008 + h->header_size = header_size;
1009 +
1010 + h->version = GUINT32_FROM_BE(h->version);
1011 + if (h->version != 1) {
1012 + error_setg(errp, "wrong vma version %d", h->version);
1013 + return -1;
1014 + }
1015 +
1016 + h->ctime = GUINT64_FROM_BE(h->ctime);
1017 + h->blob_buffer_offset = GUINT32_FROM_BE(h->blob_buffer_offset);
1018 + h->blob_buffer_size = GUINT32_FROM_BE(h->blob_buffer_size);
1019 +
1020 + uint32_t bstart = h->blob_buffer_offset + 1;
1021 + uint32_t bend = h->blob_buffer_offset + h->blob_buffer_size;
1022 +
1023 + if (bstart <= sizeof(VmaHeader)) {
1024 + error_setg(errp, "wrong vma blob buffer offset %d",
1025 + h->blob_buffer_offset);
1026 + return -1;
1027 + }
1028 +
1029 + if (bend > header_size) {
1030 + error_setg(errp, "wrong vma blob buffer size %d/%d",
1031 + h->blob_buffer_offset, h->blob_buffer_size);
1032 + return -1;
1033 + }
1034 +
1035 + while ((bstart + 2) <= bend) {
1036 + uint32_t size = vmar->head_data[bstart] +
1037 + (vmar->head_data[bstart+1] << 8);
1038 + if ((bstart + size + 2) <= bend) {
1039 + VmaBlob *blob = g_new0(VmaBlob, 1);
1040 + blob->start = bstart - h->blob_buffer_offset;
1041 + blob->len = size;
1042 + blob->data = vmar->head_data + bstart + 2;
1043 + g_hash_table_insert(vmar->blob_hash, &blob->start, blob);
1044 + }
1045 + bstart += size + 2;
1046 + }
1047 +
1048 +
1049 + int count = 0;
1050 + for (i = 1; i < 256; i++) {
1051 + VmaDeviceInfoHeader *dih = &h->dev_info[i];
1052 + uint32_t devname_ptr = GUINT32_FROM_BE(dih->devname_ptr);
1053 + uint64_t size = GUINT64_FROM_BE(dih->size);
1054 + const char *devname = get_header_str(vmar, devname_ptr);
1055 +
1056 + if (size && devname) {
1057 + count++;
1058 + vmar->devinfo[i].size = size;
1059 + vmar->devinfo[i].devname = devname;
1060 +
1061 + if (strcmp(devname, "vmstate") == 0) {
1062 + vmar->vmstate_stream = i;
1063 + }
1064 + }
1065 + }
1066 +
1067 + for (i = 0; i < VMA_MAX_CONFIGS; i++) {
1068 + uint32_t name_ptr = GUINT32_FROM_BE(h->config_names[i]);
1069 + uint32_t data_ptr = GUINT32_FROM_BE(h->config_data[i]);
1070 +
1071 + if (!(name_ptr && data_ptr)) {
1072 + continue;
1073 + }
1074 + const char *name = get_header_str(vmar, name_ptr);
1075 + const VmaBlob *blob = get_header_blob(vmar, data_ptr);
1076 +
1077 + if (!(name && blob)) {
1078 + error_setg(errp, "vma contains invalid data pointers");
1079 + return -1;
1080 + }
1081 +
1082 + VmaConfigData *cdata = g_new0(VmaConfigData, 1);
1083 + cdata->name = name;
1084 + cdata->data = blob->data;
1085 + cdata->len = blob->len;
1086 +
1087 + vmar->cdata_list = g_list_append(vmar->cdata_list, cdata);
1088 + }
1089 +
1090 + return ret;
1091 +};
1092 +
1093 +VmaReader *vma_reader_create(const char *filename, Error **errp)
1094 +{
1095 + assert(filename);
1096 + assert(errp);
1097 +
1098 + VmaReader *vmar = g_new0(VmaReader, 1);
1099 +
1100 + if (strcmp(filename, "-") == 0) {
1101 + vmar->fd = dup(0);
1102 + } else {
1103 + vmar->fd = open(filename, O_RDONLY);
1104 + }
1105 +
1106 + if (vmar->fd < 0) {
1107 + error_setg(errp, "can't open file %s - %s\n", filename,
1108 + g_strerror(errno));
1109 + goto err;
1110 + }
1111 +
1112 + vmar->md5csum = g_checksum_new(G_CHECKSUM_MD5);
1113 + if (!vmar->md5csum) {
1114 + error_setg(errp, "can't allocate cmsum\n");
1115 + goto err;
1116 + }
1117 +
1118 + vmar->blob_hash = g_hash_table_new_full(g_int32_hash, g_int32_equal,
1119 + NULL, g_free);
1120 +
1121 + if (vma_reader_read_head(vmar, errp) < 0) {
1122 + goto err;
1123 + }
1124 +
1125 + return vmar;
1126 +
1127 +err:
1128 + if (vmar) {
1129 + vma_reader_destroy(vmar);
1130 + }
1131 +
1132 + return NULL;
1133 +}
1134 +
1135 +VmaHeader *vma_reader_get_header(VmaReader *vmar)
1136 +{
1137 + assert(vmar);
1138 + assert(vmar->head_data);
1139 +
1140 + return (VmaHeader *)(vmar->head_data);
1141 +}
1142 +
1143 +GList *vma_reader_get_config_data(VmaReader *vmar)
1144 +{
1145 + assert(vmar);
1146 + assert(vmar->head_data);
1147 +
1148 + return vmar->cdata_list;
1149 +}
1150 +
1151 +VmaDeviceInfo *vma_reader_get_device_info(VmaReader *vmar, guint8 dev_id)
1152 +{
1153 + assert(vmar);
1154 + assert(dev_id);
1155 +
1156 + if (vmar->devinfo[dev_id].size && vmar->devinfo[dev_id].devname) {
1157 + return &vmar->devinfo[dev_id];
1158 + }
1159 +
1160 + return NULL;
1161 +}
1162 +
1163 +static void allocate_rstate(VmaReader *vmar, guint8 dev_id,
1164 + BlockBackend *target, bool write_zeroes)
1165 +{
1166 + assert(vmar);
1167 + assert(dev_id);
1168 +
1169 + vmar->rstate[dev_id].target = target;
1170 + vmar->rstate[dev_id].write_zeroes = write_zeroes;
1171 +
1172 + int64_t size = vmar->devinfo[dev_id].size;
1173 +
1174 + int64_t bitmap_size = (size/BDRV_SECTOR_SIZE) +
1175 + (VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE) * BITS_PER_LONG - 1;
1176 + bitmap_size /= (VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE) * BITS_PER_LONG;
1177 +
1178 + vmar->rstate[dev_id].bitmap_size = bitmap_size;
1179 + vmar->rstate[dev_id].bitmap = g_new0(unsigned long, bitmap_size);
1180 +
1181 + vmar->cluster_count += size/VMA_CLUSTER_SIZE;
1182 +}
1183 +
1184 +int vma_reader_register_bs(VmaReader *vmar, guint8 dev_id, BlockBackend *target,
1185 + bool write_zeroes, Error **errp)
1186 +{
1187 + assert(vmar);
1188 + assert(target != NULL);
1189 + assert(dev_id);
1190 + assert(vmar->rstate[dev_id].target == NULL);
1191 +
1192 + int64_t size = blk_getlength(target);
1193 + int64_t size_diff = size - vmar->devinfo[dev_id].size;
1194 +
1195 + /* storage types can have different size restrictions, so it
1196 + * is not always possible to create an image with exact size.
1197 + * So we tolerate a size difference up to 4MB.
1198 + */
1199 + if ((size_diff < 0) || (size_diff > 4*1024*1024)) {
1200 + error_setg(errp, "vma_reader_register_bs for stream %s failed - "
1201 + "unexpected size %zd != %zd", vmar->devinfo[dev_id].devname,
1202 + size, vmar->devinfo[dev_id].size);
1203 + return -1;
1204 + }
1205 +
1206 + allocate_rstate(vmar, dev_id, target, write_zeroes);
1207 +
1208 + return 0;
1209 +}
1210 +
1211 +static ssize_t safe_write(int fd, void *buf, size_t count)
1212 +{
1213 + ssize_t n;
1214 +
1215 + do {
1216 + n = write(fd, buf, count);
1217 + } while (n < 0 && errno == EINTR);
1218 +
1219 + return n;
1220 +}
1221 +
1222 +static size_t full_write(int fd, void *buf, size_t len)
1223 +{
1224 + ssize_t n;
1225 + size_t total;
1226 +
1227 + total = 0;
1228 +
1229 + while (len > 0) {
1230 + n = safe_write(fd, buf, len);
1231 + if (n < 0) {
1232 + return n;
1233 + }
1234 + buf += n;
1235 + total += n;
1236 + len -= n;
1237 + }
1238 +
1239 + if (len) {
1240 + /* incomplete write ? */
1241 + return -1;
1242 + }
1243 +
1244 + return total;
1245 +}
1246 +
1247 +static int restore_write_data(VmaReader *vmar, guint8 dev_id,
1248 + BlockBackend *target, int vmstate_fd,
1249 + unsigned char *buf, int64_t sector_num,
1250 + int nb_sectors, Error **errp)
1251 +{
1252 + assert(vmar);
1253 +
1254 + if (dev_id == vmar->vmstate_stream) {
1255 + if (vmstate_fd >= 0) {
1256 + int len = nb_sectors * BDRV_SECTOR_SIZE;
1257 + int res = full_write(vmstate_fd, buf, len);
1258 + if (res < 0) {
1259 + error_setg(errp, "write vmstate failed %d", res);
1260 + return -1;
1261 + }
1262 + }
1263 + } else {
1264 + int res = blk_pwrite(target, sector_num * BDRV_SECTOR_SIZE, buf, nb_sectors * BDRV_SECTOR_SIZE, 0);
1265 + if (res < 0) {
1266 + error_setg(errp, "blk_pwrite to %s failed (%d)",
1267 + bdrv_get_device_name(blk_bs(target)), res);
1268 + return -1;
1269 + }
1270 + }
1271 + return 0;
1272 +}
1273 +
1274 +static int restore_extent(VmaReader *vmar, unsigned char *buf,
1275 + int extent_size, int vmstate_fd,
1276 + bool verbose, bool verify, Error **errp)
1277 +{
1278 + assert(vmar);
1279 + assert(buf);
1280 +
1281 + VmaExtentHeader *ehead = (VmaExtentHeader *)buf;
1282 + int start = VMA_EXTENT_HEADER_SIZE;
1283 + int i;
1284 +
1285 + for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
1286 + uint64_t block_info = GUINT64_FROM_BE(ehead->blockinfo[i]);
1287 + uint64_t cluster_num = block_info & 0xffffffff;
1288 + uint8_t dev_id = (block_info >> 32) & 0xff;
1289 + uint16_t mask = block_info >> (32+16);
1290 + int64_t max_sector;
1291 +
1292 + if (!dev_id) {
1293 + continue;
1294 + }
1295 +
1296 + VmaRestoreState *rstate = &vmar->rstate[dev_id];
1297 + BlockBackend *target = NULL;
1298 +
1299 + if (dev_id != vmar->vmstate_stream) {
1300 + target = rstate->target;
1301 + if (!verify && !target) {
1302 + error_setg(errp, "got wrong dev id %d", dev_id);
1303 + return -1;
1304 + }
1305 +
1306 + if (vma_reader_get_bitmap(rstate, cluster_num)) {
1307 + error_setg(errp, "found duplicated cluster %zd for stream %s",
1308 + cluster_num, vmar->devinfo[dev_id].devname);
1309 + return -1;
1310 + }
1311 + vma_reader_set_bitmap(rstate, cluster_num, 1);
1312 +
1313 + max_sector = vmar->devinfo[dev_id].size/BDRV_SECTOR_SIZE;
1314 + } else {
1315 + max_sector = G_MAXINT64;
1316 + if (cluster_num != vmar->vmstate_clusters) {
1317 + error_setg(errp, "found out of order vmstate data");
1318 + return -1;
1319 + }
1320 + vmar->vmstate_clusters++;
1321 + }
1322 +
1323 + vmar->clusters_read++;
1324 +
1325 + if (verbose) {
1326 + time_t duration = time(NULL) - vmar->start_time;
1327 + int percent = (vmar->clusters_read*100)/vmar->cluster_count;
1328 + if (percent != vmar->clusters_read_per) {
1329 + printf("progress %d%% (read %zd bytes, duration %zd sec)\n",
1330 + percent, vmar->clusters_read*VMA_CLUSTER_SIZE,
1331 + duration);
1332 + fflush(stdout);
1333 + vmar->clusters_read_per = percent;
1334 + }
1335 + }
1336 +
1337 + /* try to write whole clusters to speedup restore */
1338 + if (mask == 0xffff) {
1339 + if ((start + VMA_CLUSTER_SIZE) > extent_size) {
1340 + error_setg(errp, "short vma extent - too many blocks");
1341 + return -1;
1342 + }
1343 + int64_t sector_num = (cluster_num * VMA_CLUSTER_SIZE) /
1344 + BDRV_SECTOR_SIZE;
1345 + int64_t end_sector = sector_num +
1346 + VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE;
1347 +
1348 + if (end_sector > max_sector) {
1349 + end_sector = max_sector;
1350 + }
1351 +
1352 + if (end_sector <= sector_num) {
1353 + error_setg(errp, "got wrong block address - write beyond end");
1354 + return -1;
1355 + }
1356 +
1357 + if (!verify) {
1358 + int nb_sectors = end_sector - sector_num;
1359 + if (restore_write_data(vmar, dev_id, target, vmstate_fd,
1360 + buf + start, sector_num, nb_sectors,
1361 + errp) < 0) {
1362 + return -1;
1363 + }
1364 + }
1365 +
1366 + start += VMA_CLUSTER_SIZE;
1367 + } else {
1368 + int j;
1369 + int bit = 1;
1370 +
1371 + for (j = 0; j < 16; j++) {
1372 + int64_t sector_num = (cluster_num*VMA_CLUSTER_SIZE +
1373 + j*VMA_BLOCK_SIZE)/BDRV_SECTOR_SIZE;
1374 +
1375 + int64_t end_sector = sector_num +
1376 + VMA_BLOCK_SIZE/BDRV_SECTOR_SIZE;
1377 + if (end_sector > max_sector) {
1378 + end_sector = max_sector;
1379 + }
1380 +
1381 + if (mask & bit) {
1382 + if ((start + VMA_BLOCK_SIZE) > extent_size) {
1383 + error_setg(errp, "short vma extent - too many blocks");
1384 + return -1;
1385 + }
1386 +
1387 + if (end_sector <= sector_num) {
1388 + error_setg(errp, "got wrong block address - "
1389 + "write beyond end");
1390 + return -1;
1391 + }
1392 +
1393 + if (!verify) {
1394 + int nb_sectors = end_sector - sector_num;
1395 + if (restore_write_data(vmar, dev_id, target, vmstate_fd,
1396 + buf + start, sector_num,
1397 + nb_sectors, errp) < 0) {
1398 + return -1;
1399 + }
1400 + }
1401 +
1402 + start += VMA_BLOCK_SIZE;
1403 +
1404 + } else {
1405 +
1406 +
1407 + if (end_sector > sector_num) {
1408 + /* Todo: use bdrv_co_write_zeroes (but that need to
1409 + * be run inside coroutine?)
1410 + */
1411 + int nb_sectors = end_sector - sector_num;
1412 + int zero_size = BDRV_SECTOR_SIZE*nb_sectors;
1413 + vmar->zero_cluster_data += zero_size;
1414 + if (mask != 0) {
1415 + vmar->partial_zero_cluster_data += zero_size;
1416 + }
1417 +
1418 + if (rstate->write_zeroes && !verify) {
1419 + if (restore_write_data(vmar, dev_id, target, vmstate_fd,
1420 + zero_vma_block, sector_num,
1421 + nb_sectors, errp) < 0) {
1422 + return -1;
1423 + }
1424 + }
1425 + }
1426 + }
1427 +
1428 + bit = bit << 1;
1429 + }
1430 + }
1431 + }
1432 +
1433 + if (start != extent_size) {
1434 + error_setg(errp, "vma extent error - missing blocks");
1435 + return -1;
1436 + }
1437 +
1438 + return 0;
1439 +}
1440 +
1441 +static int vma_reader_restore_full(VmaReader *vmar, int vmstate_fd,
1442 + bool verbose, bool verify,
1443 + Error **errp)
1444 +{
1445 + assert(vmar);
1446 + assert(vmar->head_data);
1447 +
1448 + int ret = 0;
1449 + unsigned char buf[VMA_MAX_EXTENT_SIZE];
1450 + int buf_pos = 0;
1451 + unsigned char md5sum[16];
1452 + VmaHeader *h = (VmaHeader *)vmar->head_data;
1453 +
1454 + vmar->start_time = time(NULL);
1455 +
1456 + while (1) {
1457 + int bytes = full_read(vmar->fd, buf + buf_pos, sizeof(buf) - buf_pos);
1458 + if (bytes < 0) {
1459 + error_setg(errp, "read failed - %s", g_strerror(errno));
1460 + return -1;
1461 + }
1462 +
1463 + buf_pos += bytes;
1464 +
1465 + if (!buf_pos) {
1466 + break; /* EOF */
1467 + }
1468 +
1469 + if (buf_pos < VMA_EXTENT_HEADER_SIZE) {
1470 + error_setg(errp, "read short extent (%d bytes)", buf_pos);
1471 + return -1;
1472 + }
1473 +
1474 + VmaExtentHeader *ehead = (VmaExtentHeader *)buf;
1475 +
1476 + /* extract md5sum */
1477 + memcpy(md5sum, ehead->md5sum, sizeof(ehead->md5sum));
1478 + memset(ehead->md5sum, 0, sizeof(ehead->md5sum));
1479 +
1480 + g_checksum_reset(vmar->md5csum);
1481 + g_checksum_update(vmar->md5csum, buf, VMA_EXTENT_HEADER_SIZE);
1482 + gsize csize = 16;
1483 + g_checksum_get_digest(vmar->md5csum, ehead->md5sum, &csize);
1484 +
1485 + if (memcmp(md5sum, ehead->md5sum, 16) != 0) {
1486 + error_setg(errp, "wrong vma extent header chechsum");
1487 + return -1;
1488 + }
1489 +
1490 + if (memcmp(h->uuid, ehead->uuid, sizeof(ehead->uuid)) != 0) {
1491 + error_setg(errp, "wrong vma extent uuid");
1492 + return -1;
1493 + }
1494 +
1495 + if (ehead->magic != VMA_EXTENT_MAGIC || ehead->reserved1 != 0) {
1496 + error_setg(errp, "wrong vma extent header magic");
1497 + return -1;
1498 + }
1499 +
1500 + int block_count = GUINT16_FROM_BE(ehead->block_count);
1501 + int extent_size = VMA_EXTENT_HEADER_SIZE + block_count*VMA_BLOCK_SIZE;
1502 +
1503 + if (buf_pos < extent_size) {
1504 + error_setg(errp, "short vma extent (%d < %d)", buf_pos,
1505 + extent_size);
1506 + return -1;
1507 + }
1508 +
1509 + if (restore_extent(vmar, buf, extent_size, vmstate_fd, verbose,
1510 + verify, errp) < 0) {
1511 + return -1;
1512 + }
1513 +
1514 + if (buf_pos > extent_size) {
1515 + memmove(buf, buf + extent_size, buf_pos - extent_size);
1516 + buf_pos = buf_pos - extent_size;
1517 + } else {
1518 + buf_pos = 0;
1519 + }
1520 + }
1521 +
1522 + bdrv_drain_all();
1523 +
1524 + int i;
1525 + for (i = 1; i < 256; i++) {
1526 + VmaRestoreState *rstate = &vmar->rstate[i];
1527 + if (!rstate->target) {
1528 + continue;
1529 + }
1530 +
1531 + if (blk_flush(rstate->target) < 0) {
1532 + error_setg(errp, "vma blk_flush %s failed",
1533 + vmar->devinfo[i].devname);
1534 + return -1;
1535 + }
1536 +
1537 + if (vmar->devinfo[i].size &&
1538 + (strcmp(vmar->devinfo[i].devname, "vmstate") != 0)) {
1539 + assert(rstate->bitmap);
1540 +
1541 + int64_t cluster_num, end;
1542 +
1543 + end = (vmar->devinfo[i].size + VMA_CLUSTER_SIZE - 1) /
1544 + VMA_CLUSTER_SIZE;
1545 +
1546 + for (cluster_num = 0; cluster_num < end; cluster_num++) {
1547 + if (!vma_reader_get_bitmap(rstate, cluster_num)) {
1548 + error_setg(errp, "detected missing cluster %zd "
1549 + "for stream %s", cluster_num,
1550 + vmar->devinfo[i].devname);
1551 + return -1;
1552 + }
1553 + }
1554 + }
1555 + }
1556 +
1557 + if (verbose) {
1558 + if (vmar->clusters_read) {
1559 + printf("total bytes read %zd, sparse bytes %zd (%.3g%%)\n",
1560 + vmar->clusters_read*VMA_CLUSTER_SIZE,
1561 + vmar->zero_cluster_data,
1562 + (double)(100.0*vmar->zero_cluster_data)/
1563 + (vmar->clusters_read*VMA_CLUSTER_SIZE));
1564 +
1565 + int64_t datasize = vmar->clusters_read*VMA_CLUSTER_SIZE-vmar->zero_cluster_data;
1566 + if (datasize) { // this does not make sense for empty files
1567 + printf("space reduction due to 4K zero blocks %.3g%%\n",
1568 + (double)(100.0*vmar->partial_zero_cluster_data) / datasize);
1569 + }
1570 + } else {
1571 + printf("vma archive contains no image data\n");
1572 + }
1573 + }
1574 + return ret;
1575 +}
1576 +
1577 +int vma_reader_restore(VmaReader *vmar, int vmstate_fd, bool verbose,
1578 + Error **errp)
1579 +{
1580 + return vma_reader_restore_full(vmar, vmstate_fd, verbose, false, errp);
1581 +}
1582 +
1583 +int vma_reader_verify(VmaReader *vmar, bool verbose, Error **errp)
1584 +{
1585 + guint8 dev_id;
1586 +
1587 + for (dev_id = 1; dev_id < 255; dev_id++) {
1588 + if (vma_reader_get_device_info(vmar, dev_id)) {
1589 + allocate_rstate(vmar, dev_id, NULL, false);
1590 + }
1591 + }
1592 +
1593 + return vma_reader_restore_full(vmar, -1, verbose, true, errp);
1594 +}
1595 +
1596 diff --git a/vma-writer.c b/vma-writer.c
1597 new file mode 100644
1598 index 0000000000..fd9567634d
1599 --- /dev/null
1600 +++ b/vma-writer.c
1601 @@ -0,0 +1,771 @@
1602 +/*
1603 + * VMA: Virtual Machine Archive
1604 + *
1605 + * Copyright (C) 2012 Proxmox Server Solutions
1606 + *
1607 + * Authors:
1608 + * Dietmar Maurer (dietmar@proxmox.com)
1609 + *
1610 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
1611 + * See the COPYING file in the top-level directory.
1612 + *
1613 + */
1614 +
1615 +#include "qemu/osdep.h"
1616 +#include <glib.h>
1617 +#include <uuid/uuid.h>
1618 +
1619 +#include "vma.h"
1620 +#include "block/block.h"
1621 +#include "monitor/monitor.h"
1622 +#include "qemu/main-loop.h"
1623 +#include "qemu/coroutine.h"
1624 +#include "qemu/cutils.h"
1625 +
1626 +#define DEBUG_VMA 0
1627 +
1628 +#define DPRINTF(fmt, ...)\
1629 + do { if (DEBUG_VMA) { printf("vma: " fmt, ## __VA_ARGS__); } } while (0)
1630 +
1631 +#define WRITE_BUFFERS 5
1632 +#define HEADER_CLUSTERS 8
1633 +#define HEADERBUF_SIZE (VMA_CLUSTER_SIZE*HEADER_CLUSTERS)
1634 +
1635 +struct VmaWriter {
1636 + int fd;
1637 + FILE *cmd;
1638 + int status;
1639 + char errmsg[8192];
1640 + uuid_t uuid;
1641 + bool header_written;
1642 + bool closed;
1643 +
1644 + /* we always write extents */
1645 + unsigned char *outbuf;
1646 + int outbuf_pos; /* in bytes */
1647 + int outbuf_count; /* in VMA_BLOCKS */
1648 + uint64_t outbuf_block_info[VMA_BLOCKS_PER_EXTENT];
1649 +
1650 + unsigned char *headerbuf;
1651 +
1652 + GChecksum *md5csum;
1653 + CoMutex flush_lock;
1654 + Coroutine *co_writer;
1655 +
1656 + /* drive informations */
1657 + VmaStreamInfo stream_info[256];
1658 + guint stream_count;
1659 +
1660 + guint8 vmstate_stream;
1661 + uint32_t vmstate_clusters;
1662 +
1663 + /* header blob table */
1664 + char *header_blob_table;
1665 + uint32_t header_blob_table_size;
1666 + uint32_t header_blob_table_pos;
1667 +
1668 + /* store for config blobs */
1669 + uint32_t config_names[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
1670 + uint32_t config_data[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
1671 + uint32_t config_count;
1672 +};
1673 +
1674 +void vma_writer_set_error(VmaWriter *vmaw, const char *fmt, ...)
1675 +{
1676 + va_list ap;
1677 +
1678 + if (vmaw->status < 0) {
1679 + return;
1680 + }
1681 +
1682 + vmaw->status = -1;
1683 +
1684 + va_start(ap, fmt);
1685 + g_vsnprintf(vmaw->errmsg, sizeof(vmaw->errmsg), fmt, ap);
1686 + va_end(ap);
1687 +
1688 + DPRINTF("vma_writer_set_error: %s\n", vmaw->errmsg);
1689 +}
1690 +
1691 +static uint32_t allocate_header_blob(VmaWriter *vmaw, const char *data,
1692 + size_t len)
1693 +{
1694 + if (len > 65535) {
1695 + return 0;
1696 + }
1697 +
1698 + if (!vmaw->header_blob_table ||
1699 + (vmaw->header_blob_table_size <
1700 + (vmaw->header_blob_table_pos + len + 2))) {
1701 + int newsize = vmaw->header_blob_table_size + ((len + 2 + 511)/512)*512;
1702 +
1703 + vmaw->header_blob_table = g_realloc(vmaw->header_blob_table, newsize);
1704 + memset(vmaw->header_blob_table + vmaw->header_blob_table_size,
1705 + 0, newsize - vmaw->header_blob_table_size);
1706 + vmaw->header_blob_table_size = newsize;
1707 + }
1708 +
1709 + uint32_t cpos = vmaw->header_blob_table_pos;
1710 + vmaw->header_blob_table[cpos] = len & 255;
1711 + vmaw->header_blob_table[cpos+1] = (len >> 8) & 255;
1712 + memcpy(vmaw->header_blob_table + cpos + 2, data, len);
1713 + vmaw->header_blob_table_pos += len + 2;
1714 + return cpos;
1715 +}
1716 +
1717 +static uint32_t allocate_header_string(VmaWriter *vmaw, const char *str)
1718 +{
1719 + assert(vmaw);
1720 +
1721 + size_t len = strlen(str) + 1;
1722 +
1723 + return allocate_header_blob(vmaw, str, len);
1724 +}
1725 +
1726 +int vma_writer_add_config(VmaWriter *vmaw, const char *name, gpointer data,
1727 + gsize len)
1728 +{
1729 + assert(vmaw);
1730 + assert(!vmaw->header_written);
1731 + assert(vmaw->config_count < VMA_MAX_CONFIGS);
1732 + assert(name);
1733 + assert(data);
1734 +
1735 + gchar *basename = g_path_get_basename(name);
1736 + uint32_t name_ptr = allocate_header_string(vmaw, basename);
1737 + g_free(basename);
1738 +
1739 + if (!name_ptr) {
1740 + return -1;
1741 + }
1742 +
1743 + uint32_t data_ptr = allocate_header_blob(vmaw, data, len);
1744 + if (!data_ptr) {
1745 + return -1;
1746 + }
1747 +
1748 + vmaw->config_names[vmaw->config_count] = name_ptr;
1749 + vmaw->config_data[vmaw->config_count] = data_ptr;
1750 +
1751 + vmaw->config_count++;
1752 +
1753 + return 0;
1754 +}
1755 +
1756 +int vma_writer_register_stream(VmaWriter *vmaw, const char *devname,
1757 + size_t size)
1758 +{
1759 + assert(vmaw);
1760 + assert(devname);
1761 + assert(!vmaw->status);
1762 +
1763 + if (vmaw->header_written) {
1764 + vma_writer_set_error(vmaw, "vma_writer_register_stream: header "
1765 + "already written");
1766 + return -1;
1767 + }
1768 +
1769 + guint n = vmaw->stream_count + 1;
1770 +
1771 + /* we can have dev_ids form 1 to 255 (0 reserved)
1772 + * 255(-1) reseverd for safety
1773 + */
1774 + if (n > 254) {
1775 + vma_writer_set_error(vmaw, "vma_writer_register_stream: "
1776 + "too many drives");
1777 + return -1;
1778 + }
1779 +
1780 + if (size <= 0) {
1781 + vma_writer_set_error(vmaw, "vma_writer_register_stream: "
1782 + "got strange size %zd", size);
1783 + return -1;
1784 + }
1785 +
1786 + DPRINTF("vma_writer_register_stream %s %zu %d\n", devname, size, n);
1787 +
1788 + vmaw->stream_info[n].devname = g_strdup(devname);
1789 + vmaw->stream_info[n].size = size;
1790 +
1791 + vmaw->stream_info[n].cluster_count = (size + VMA_CLUSTER_SIZE - 1) /
1792 + VMA_CLUSTER_SIZE;
1793 +
1794 + vmaw->stream_count = n;
1795 +
1796 + if (strcmp(devname, "vmstate") == 0) {
1797 + vmaw->vmstate_stream = n;
1798 + }
1799 +
1800 + return n;
1801 +}
1802 +
1803 +static void vma_co_continue_write(void *opaque)
1804 +{
1805 + VmaWriter *vmaw = opaque;
1806 +
1807 + DPRINTF("vma_co_continue_write\n");
1808 + qemu_coroutine_enter(vmaw->co_writer);
1809 +}
1810 +
1811 +static ssize_t coroutine_fn
1812 +vma_queue_write(VmaWriter *vmaw, const void *buf, size_t bytes)
1813 +{
1814 + DPRINTF("vma_queue_write enter %zd\n", bytes);
1815 +
1816 + assert(vmaw);
1817 + assert(buf);
1818 + assert(bytes <= VMA_MAX_EXTENT_SIZE);
1819 +
1820 + size_t done = 0;
1821 + ssize_t ret;
1822 +
1823 + assert(vmaw->co_writer == NULL);
1824 +
1825 + vmaw->co_writer = qemu_coroutine_self();
1826 +
1827 + while (done < bytes) {
1828 + aio_set_fd_handler(qemu_get_aio_context(), vmaw->fd, false, NULL, vma_co_continue_write, NULL, vmaw);
1829 + qemu_coroutine_yield();
1830 + aio_set_fd_handler(qemu_get_aio_context(), vmaw->fd, false, NULL, NULL, NULL, NULL);
1831 + if (vmaw->status < 0) {
1832 + DPRINTF("vma_queue_write detected canceled backup\n");
1833 + done = -1;
1834 + break;
1835 + }
1836 + ret = write(vmaw->fd, buf + done, bytes - done);
1837 + if (ret > 0) {
1838 + done += ret;
1839 + DPRINTF("vma_queue_write written %zd %zd\n", done, ret);
1840 + } else if (ret < 0) {
1841 + if (errno == EAGAIN || errno == EWOULDBLOCK) {
1842 + /* try again */
1843 + } else {
1844 + vma_writer_set_error(vmaw, "vma_queue_write: write error - %s",
1845 + g_strerror(errno));
1846 + done = -1; /* always return failure for partial writes */
1847 + break;
1848 + }
1849 + } else if (ret == 0) {
1850 + /* should not happen - simply try again */
1851 + }
1852 + }
1853 +
1854 + vmaw->co_writer = NULL;
1855 +
1856 + return (done == bytes) ? bytes : -1;
1857 +}
1858 +
1859 +VmaWriter *vma_writer_create(const char *filename, uuid_t uuid, Error **errp)
1860 +{
1861 + const char *p;
1862 +
1863 + assert(sizeof(VmaHeader) == (4096 + 8192));
1864 + assert(G_STRUCT_OFFSET(VmaHeader, config_names) == 2044);
1865 + assert(G_STRUCT_OFFSET(VmaHeader, config_data) == 3068);
1866 + assert(G_STRUCT_OFFSET(VmaHeader, dev_info) == 4096);
1867 + assert(sizeof(VmaExtentHeader) == 512);
1868 +
1869 + VmaWriter *vmaw = g_new0(VmaWriter, 1);
1870 + vmaw->fd = -1;
1871 +
1872 + vmaw->md5csum = g_checksum_new(G_CHECKSUM_MD5);
1873 + if (!vmaw->md5csum) {
1874 + error_setg(errp, "can't allocate cmsum\n");
1875 + goto err;
1876 + }
1877 +
1878 + if (strstart(filename, "exec:", &p)) {
1879 + vmaw->cmd = popen(p, "w");
1880 + if (vmaw->cmd == NULL) {
1881 + error_setg(errp, "can't popen command '%s' - %s\n", p,
1882 + g_strerror(errno));
1883 + goto err;
1884 + }
1885 + vmaw->fd = fileno(vmaw->cmd);
1886 +
1887 + /* try to use O_NONBLOCK */
1888 + fcntl(vmaw->fd, F_SETFL, fcntl(vmaw->fd, F_GETFL)|O_NONBLOCK);
1889 +
1890 + } else {
1891 + struct stat st;
1892 + int oflags;
1893 + const char *tmp_id_str;
1894 +
1895 + if ((stat(filename, &st) == 0) && S_ISFIFO(st.st_mode)) {
1896 + oflags = O_NONBLOCK|O_WRONLY;
1897 + vmaw->fd = qemu_open(filename, oflags, 0644);
1898 + } else if (strstart(filename, "/dev/fdset/", &tmp_id_str)) {
1899 + oflags = O_NONBLOCK|O_WRONLY;
1900 + vmaw->fd = qemu_open(filename, oflags, 0644);
1901 + } else if (strstart(filename, "/dev/fdname/", &tmp_id_str)) {
1902 + vmaw->fd = monitor_get_fd(cur_mon, tmp_id_str, errp);
1903 + if (vmaw->fd < 0) {
1904 + goto err;
1905 + }
1906 + /* try to use O_NONBLOCK */
1907 + fcntl(vmaw->fd, F_SETFL, fcntl(vmaw->fd, F_GETFL)|O_NONBLOCK);
1908 + } else {
1909 + oflags = O_NONBLOCK|O_DIRECT|O_WRONLY|O_CREAT|O_EXCL;
1910 + vmaw->fd = qemu_open(filename, oflags, 0644);
1911 + }
1912 +
1913 + if (vmaw->fd < 0) {
1914 + error_setg(errp, "can't open file %s - %s\n", filename,
1915 + g_strerror(errno));
1916 + goto err;
1917 + }
1918 + }
1919 +
1920 + /* we use O_DIRECT, so we need to align IO buffers */
1921 +
1922 + vmaw->outbuf = qemu_memalign(512, VMA_MAX_EXTENT_SIZE);
1923 + vmaw->headerbuf = qemu_memalign(512, HEADERBUF_SIZE);
1924 +
1925 + vmaw->outbuf_count = 0;
1926 + vmaw->outbuf_pos = VMA_EXTENT_HEADER_SIZE;
1927 +
1928 + vmaw->header_blob_table_pos = 1; /* start at pos 1 */
1929 +
1930 + qemu_co_mutex_init(&vmaw->flush_lock);
1931 +
1932 + uuid_copy(vmaw->uuid, uuid);
1933 +
1934 + return vmaw;
1935 +
1936 +err:
1937 + if (vmaw) {
1938 + if (vmaw->cmd) {
1939 + pclose(vmaw->cmd);
1940 + } else if (vmaw->fd >= 0) {
1941 + close(vmaw->fd);
1942 + }
1943 +
1944 + if (vmaw->md5csum) {
1945 + g_checksum_free(vmaw->md5csum);
1946 + }
1947 +
1948 + g_free(vmaw);
1949 + }
1950 +
1951 + return NULL;
1952 +}
1953 +
1954 +static int coroutine_fn vma_write_header(VmaWriter *vmaw)
1955 +{
1956 + assert(vmaw);
1957 + unsigned char *buf = vmaw->headerbuf;
1958 + VmaHeader *head = (VmaHeader *)buf;
1959 +
1960 + int i;
1961 +
1962 + DPRINTF("VMA WRITE HEADER\n");
1963 +
1964 + if (vmaw->status < 0) {
1965 + return vmaw->status;
1966 + }
1967 +
1968 + memset(buf, 0, HEADERBUF_SIZE);
1969 +
1970 + head->magic = VMA_MAGIC;
1971 + head->version = GUINT32_TO_BE(1); /* v1 */
1972 + memcpy(head->uuid, vmaw->uuid, 16);
1973 +
1974 + time_t ctime = time(NULL);
1975 + head->ctime = GUINT64_TO_BE(ctime);
1976 +
1977 + for (i = 0; i < VMA_MAX_CONFIGS; i++) {
1978 + head->config_names[i] = GUINT32_TO_BE(vmaw->config_names[i]);
1979 + head->config_data[i] = GUINT32_TO_BE(vmaw->config_data[i]);
1980 + }
1981 +
1982 + /* 32 bytes per device (12 used currently) = 8192 bytes max */
1983 + for (i = 1; i <= 254; i++) {
1984 + VmaStreamInfo *si = &vmaw->stream_info[i];
1985 + if (si->size) {
1986 + assert(si->devname);
1987 + uint32_t devname_ptr = allocate_header_string(vmaw, si->devname);
1988 + if (!devname_ptr) {
1989 + return -1;
1990 + }
1991 + head->dev_info[i].devname_ptr = GUINT32_TO_BE(devname_ptr);
1992 + head->dev_info[i].size = GUINT64_TO_BE(si->size);
1993 + }
1994 + }
1995 +
1996 + uint32_t header_size = sizeof(VmaHeader) + vmaw->header_blob_table_size;
1997 + head->header_size = GUINT32_TO_BE(header_size);
1998 +
1999 + if (header_size > HEADERBUF_SIZE) {
2000 + return -1; /* just to be sure */
2001 + }
2002 +
2003 + uint32_t blob_buffer_offset = sizeof(VmaHeader);
2004 + memcpy(buf + blob_buffer_offset, vmaw->header_blob_table,
2005 + vmaw->header_blob_table_size);
2006 + head->blob_buffer_offset = GUINT32_TO_BE(blob_buffer_offset);
2007 + head->blob_buffer_size = GUINT32_TO_BE(vmaw->header_blob_table_pos);
2008 +
2009 + g_checksum_reset(vmaw->md5csum);
2010 + g_checksum_update(vmaw->md5csum, (const guchar *)buf, header_size);
2011 + gsize csize = 16;
2012 + g_checksum_get_digest(vmaw->md5csum, (guint8 *)(head->md5sum), &csize);
2013 +
2014 + return vma_queue_write(vmaw, buf, header_size);
2015 +}
2016 +
2017 +static int coroutine_fn vma_writer_flush(VmaWriter *vmaw)
2018 +{
2019 + assert(vmaw);
2020 +
2021 + int ret;
2022 + int i;
2023 +
2024 + if (vmaw->status < 0) {
2025 + return vmaw->status;
2026 + }
2027 +
2028 + if (!vmaw->header_written) {
2029 + vmaw->header_written = true;
2030 + ret = vma_write_header(vmaw);
2031 + if (ret < 0) {
2032 + vma_writer_set_error(vmaw, "vma_writer_flush: write header failed");
2033 + return ret;
2034 + }
2035 + }
2036 +
2037 + DPRINTF("VMA WRITE FLUSH %d %d\n", vmaw->outbuf_count, vmaw->outbuf_pos);
2038 +
2039 +
2040 + VmaExtentHeader *ehead = (VmaExtentHeader *)vmaw->outbuf;
2041 +
2042 + ehead->magic = VMA_EXTENT_MAGIC;
2043 + ehead->reserved1 = 0;
2044 +
2045 + for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
2046 + ehead->blockinfo[i] = GUINT64_TO_BE(vmaw->outbuf_block_info[i]);
2047 + }
2048 +
2049 + guint16 block_count = (vmaw->outbuf_pos - VMA_EXTENT_HEADER_SIZE) /
2050 + VMA_BLOCK_SIZE;
2051 +
2052 + ehead->block_count = GUINT16_TO_BE(block_count);
2053 +
2054 + memcpy(ehead->uuid, vmaw->uuid, sizeof(ehead->uuid));
2055 + memset(ehead->md5sum, 0, sizeof(ehead->md5sum));
2056 +
2057 + g_checksum_reset(vmaw->md5csum);
2058 + g_checksum_update(vmaw->md5csum, vmaw->outbuf, VMA_EXTENT_HEADER_SIZE);
2059 + gsize csize = 16;
2060 + g_checksum_get_digest(vmaw->md5csum, ehead->md5sum, &csize);
2061 +
2062 + int bytes = vmaw->outbuf_pos;
2063 + ret = vma_queue_write(vmaw, vmaw->outbuf, bytes);
2064 + if (ret != bytes) {
2065 + vma_writer_set_error(vmaw, "vma_writer_flush: failed write");
2066 + }
2067 +
2068 + vmaw->outbuf_count = 0;
2069 + vmaw->outbuf_pos = VMA_EXTENT_HEADER_SIZE;
2070 +
2071 + for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
2072 + vmaw->outbuf_block_info[i] = 0;
2073 + }
2074 +
2075 + return vmaw->status;
2076 +}
2077 +
2078 +static int vma_count_open_streams(VmaWriter *vmaw)
2079 +{
2080 + g_assert(vmaw != NULL);
2081 +
2082 + int i;
2083 + int open_drives = 0;
2084 + for (i = 0; i <= 255; i++) {
2085 + if (vmaw->stream_info[i].size && !vmaw->stream_info[i].finished) {
2086 + open_drives++;
2087 + }
2088 + }
2089 +
2090 + return open_drives;
2091 +}
2092 +
2093 +
2094 +/**
2095 + * You need to call this if the vma archive does not contain
2096 + * any data stream.
2097 + */
2098 +int coroutine_fn
2099 +vma_writer_flush_output(VmaWriter *vmaw)
2100 +{
2101 + qemu_co_mutex_lock(&vmaw->flush_lock);
2102 + int ret = vma_writer_flush(vmaw);
2103 + qemu_co_mutex_unlock(&vmaw->flush_lock);
2104 + if (ret < 0) {
2105 + vma_writer_set_error(vmaw, "vma_writer_flush_header failed");
2106 + }
2107 + return ret;
2108 +}
2109 +
2110 +/**
2111 + * all jobs should call this when there is no more data
2112 + * Returns: number of remaining stream (0 ==> finished)
2113 + */
2114 +int coroutine_fn
2115 +vma_writer_close_stream(VmaWriter *vmaw, uint8_t dev_id)
2116 +{
2117 + g_assert(vmaw != NULL);
2118 +
2119 + DPRINTF("vma_writer_set_status %d\n", dev_id);
2120 + if (!vmaw->stream_info[dev_id].size) {
2121 + vma_writer_set_error(vmaw, "vma_writer_close_stream: "
2122 + "no such stream %d", dev_id);
2123 + return -1;
2124 + }
2125 + if (vmaw->stream_info[dev_id].finished) {
2126 + vma_writer_set_error(vmaw, "vma_writer_close_stream: "
2127 + "stream already closed %d", dev_id);
2128 + return -1;
2129 + }
2130 +
2131 + vmaw->stream_info[dev_id].finished = true;
2132 +
2133 + int open_drives = vma_count_open_streams(vmaw);
2134 +
2135 + if (open_drives <= 0) {
2136 + DPRINTF("vma_writer_set_status all drives completed\n");
2137 + vma_writer_flush_output(vmaw);
2138 + }
2139 +
2140 + return open_drives;
2141 +}
2142 +
2143 +int vma_writer_get_status(VmaWriter *vmaw, VmaStatus *status)
2144 +{
2145 +    int i;
2146 +
2147 +    g_assert(vmaw != NULL);
2148 +
2149 +    if (status) {
2150 +        status->status = vmaw->status;
2151 +        g_strlcpy(status->errmsg, vmaw->errmsg, sizeof(status->errmsg));
2152 +        for (i = 0; i <= 255; i++) {
2153 +            status->stream_info[i] = vmaw->stream_info[i];
2154 +        }
2155 +
2156 +        uuid_unparse_lower(vmaw->uuid, status->uuid_str);
2157 +        /* keep inside the guard - status may be NULL */
2158 +        status->closed = vmaw->closed;
2159 +    }
2160 +
2161 +    return vmaw->status;
2162 +}
2163 +
2164 +static int vma_writer_get_buffer(VmaWriter *vmaw)
2165 +{
2166 + int ret = 0;
2167 +
2168 + qemu_co_mutex_lock(&vmaw->flush_lock);
2169 +
2170 + /* wait until buffer is available */
2171 + while (vmaw->outbuf_count >= (VMA_BLOCKS_PER_EXTENT - 1)) {
2172 + ret = vma_writer_flush(vmaw);
2173 + if (ret < 0) {
2174 + vma_writer_set_error(vmaw, "vma_writer_get_buffer: flush failed");
2175 + break;
2176 + }
2177 + }
2178 +
2179 + qemu_co_mutex_unlock(&vmaw->flush_lock);
2180 +
2181 + return ret;
2182 +}
2183 +
2184 +
2185 +int64_t coroutine_fn
2186 +vma_writer_write(VmaWriter *vmaw, uint8_t dev_id, int64_t cluster_num,
2187 + const unsigned char *buf, size_t *zero_bytes)
2188 +{
2189 + g_assert(vmaw != NULL);
2190 + g_assert(zero_bytes != NULL);
2191 +
2192 + *zero_bytes = 0;
2193 +
2194 + if (vmaw->status < 0) {
2195 + return vmaw->status;
2196 + }
2197 +
2198 + if (!dev_id || !vmaw->stream_info[dev_id].size) {
2199 + vma_writer_set_error(vmaw, "vma_writer_write: "
2200 + "no such stream %d", dev_id);
2201 + return -1;
2202 + }
2203 +
2204 + if (vmaw->stream_info[dev_id].finished) {
2205 + vma_writer_set_error(vmaw, "vma_writer_write: "
2206 + "stream already closed %d", dev_id);
2207 + return -1;
2208 + }
2209 +
2210 +
2211 + if (cluster_num >= (((uint64_t)1)<<32)) {
2212 + vma_writer_set_error(vmaw, "vma_writer_write: "
2213 + "cluster number out of range");
2214 + return -1;
2215 + }
2216 +
2217 + if (dev_id == vmaw->vmstate_stream) {
2218 + if (cluster_num != vmaw->vmstate_clusters) {
2219 + vma_writer_set_error(vmaw, "vma_writer_write: "
2220 + "non sequential vmstate write");
2221 + }
2222 + vmaw->vmstate_clusters++;
2223 + } else if (cluster_num >= vmaw->stream_info[dev_id].cluster_count) {
2224 + vma_writer_set_error(vmaw, "vma_writer_write: cluster number too big");
2225 + return -1;
2226 + }
2227 +
2228 + /* wait until buffer is available */
2229 + if (vma_writer_get_buffer(vmaw) < 0) {
2230 + vma_writer_set_error(vmaw, "vma_writer_write: "
2231 + "vma_writer_get_buffer failed");
2232 + return -1;
2233 + }
2234 +
2235 + DPRINTF("VMA WRITE %d %zd\n", dev_id, cluster_num);
2236 +
2237 + uint16_t mask = 0;
2238 +
2239 + if (buf) {
2240 + int i;
2241 + int bit = 1;
2242 + for (i = 0; i < 16; i++) {
2243 + const unsigned char *vmablock = buf + (i*VMA_BLOCK_SIZE);
2244 + if (!buffer_is_zero(vmablock, VMA_BLOCK_SIZE)) {
2245 + mask |= bit;
2246 + memcpy(vmaw->outbuf + vmaw->outbuf_pos, vmablock,
2247 + VMA_BLOCK_SIZE);
2248 + vmaw->outbuf_pos += VMA_BLOCK_SIZE;
2249 + } else {
2250 + DPRINTF("VMA WRITE %zd ZERO BLOCK %d\n", cluster_num, i);
2251 + vmaw->stream_info[dev_id].zero_bytes += VMA_BLOCK_SIZE;
2252 + *zero_bytes += VMA_BLOCK_SIZE;
2253 + }
2254 +
2255 + bit = bit << 1;
2256 + }
2257 + } else {
2258 + DPRINTF("VMA WRITE %zd ZERO CLUSTER\n", cluster_num);
2259 + vmaw->stream_info[dev_id].zero_bytes += VMA_CLUSTER_SIZE;
2260 + *zero_bytes += VMA_CLUSTER_SIZE;
2261 + }
2262 +
2263 + uint64_t block_info = ((uint64_t)mask) << (32+16);
2264 + block_info |= ((uint64_t)dev_id) << 32;
2265 + block_info |= (cluster_num & 0xffffffff);
2266 + vmaw->outbuf_block_info[vmaw->outbuf_count] = block_info;
2267 +
2268 + DPRINTF("VMA WRITE MASK %zd %zx\n", cluster_num, block_info);
2269 +
2270 + vmaw->outbuf_count++;
2271 +
2272 +    /** NOTE: We always write whole clusters, but we correctly set
2273 +     * transferred bytes. So transferred == size when everything
2274 +     * went OK.
2275 + */
2276 + size_t transferred = VMA_CLUSTER_SIZE;
2277 +
2278 + if (dev_id != vmaw->vmstate_stream) {
2279 + uint64_t last = (cluster_num + 1) * VMA_CLUSTER_SIZE;
2280 + if (last > vmaw->stream_info[dev_id].size) {
2281 + uint64_t diff = last - vmaw->stream_info[dev_id].size;
2282 + if (diff >= VMA_CLUSTER_SIZE) {
2283 + vma_writer_set_error(vmaw, "vma_writer_write: "
2284 + "read after last cluster");
2285 + return -1;
2286 + }
2287 + transferred -= diff;
2288 + }
2289 + }
2290 +
2291 + vmaw->stream_info[dev_id].transferred += transferred;
2292 +
2293 + return transferred;
2294 +}
2295 +
2296 +void vma_writer_error_propagate(VmaWriter *vmaw, Error **errp)
2297 +{
2298 +    if (vmaw->status < 0 && errp && *errp == NULL) {
2299 +        error_setg(errp, "%s", vmaw->errmsg);
2300 +    }
2301 +}
2302 +
2303 +int vma_writer_close(VmaWriter *vmaw, Error **errp)
2304 +{
2305 +    g_assert(vmaw != NULL);
2306 +
2307 +    int i;
2308 +
2309 +    while (vmaw->co_writer) {
2310 +        aio_poll(qemu_get_aio_context(), true);
2311 +    }
2312 +
2313 +    assert(vmaw->co_writer == NULL);
2314 +
2315 +    if (vmaw->cmd) {
2316 +        if (pclose(vmaw->cmd) < 0) {
2317 +            vma_writer_set_error(vmaw, "vma_writer_close: "
2318 +                                 "pclose failed - %s", g_strerror(errno));
2319 +        }
2320 +    } else {
2321 +        if (close(vmaw->fd) < 0) {
2322 +            vma_writer_set_error(vmaw, "vma_writer_close: "
2323 +                                 "close failed - %s", g_strerror(errno));
2324 +        }
2325 +    }
2326 +
2327 +    for (i = 0; i <= 255; i++) {
2328 +        VmaStreamInfo *si = &vmaw->stream_info[i];
2329 +        if (si->size) {
2330 +            if (!si->finished) {
2331 +                vma_writer_set_error(vmaw, "vma_writer_close: "
2332 +                                     "detected open stream '%s'", si->devname);
2333 +            } else if ((si->transferred != si->size) &&
2334 +                       (i != vmaw->vmstate_stream)) {
2335 +                vma_writer_set_error(vmaw, "vma_writer_close: "
2336 +                                     "incomplete stream '%s' (%zd != %zd)",
2337 +                                     si->devname, si->transferred, si->size);
2338 +            }
2339 +        }
2340 +    }
2341 +
2342 +    for (i = 0; i <= 255; i++) {
2343 +        vmaw->stream_info[i].finished = 1; /* mark as closed */
2344 +    }
2345 +
2346 +    vmaw->closed = 1;
2347 +
2348 +    if (vmaw->status < 0 && errp && *errp == NULL) {
2349 +        error_setg(errp, "%s", vmaw->errmsg);
2350 +    }
2351 +
2352 +    return vmaw->status;
2353 +}
2354 +
2355 +void vma_writer_destroy(VmaWriter *vmaw)
2356 +{
2357 + assert(vmaw);
2358 +
2359 + int i;
2360 +
2361 + for (i = 0; i <= 255; i++) {
2362 + if (vmaw->stream_info[i].devname) {
2363 + g_free(vmaw->stream_info[i].devname);
2364 + }
2365 + }
2366 +
2367 + if (vmaw->md5csum) {
2368 + g_checksum_free(vmaw->md5csum);
2369 + }
2370 +
2371 + g_free(vmaw);
2372 +}
2373 diff --git a/vma.c b/vma.c
2374 new file mode 100644
2375 index 0000000000..1b59fd1555
2376 --- /dev/null
2377 +++ b/vma.c
2378 @@ -0,0 +1,756 @@
2379 +/*
2380 + * VMA: Virtual Machine Archive
2381 + *
2382 + * Copyright (C) 2012-2013 Proxmox Server Solutions
2383 + *
2384 + * Authors:
2385 + * Dietmar Maurer (dietmar@proxmox.com)
2386 + *
2387 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
2388 + * See the COPYING file in the top-level directory.
2389 + *
2390 + */
2391 +
2392 +#include "qemu/osdep.h"
2393 +#include <glib.h>
2394 +
2395 +#include "vma.h"
2396 +#include "qemu-common.h"
2397 +#include "qemu/error-report.h"
2398 +#include "qemu/main-loop.h"
2399 +#include "qapi/qmp/qstring.h"
2400 +#include "sysemu/block-backend.h"
2401 +
2402 +static void help(void)
2403 +{
2404 + const char *help_msg =
2405 + "usage: vma command [command options]\n"
2406 + "\n"
2407 + "vma list <filename>\n"
2408 + "vma config <filename> [-c config]\n"
2409 + "vma create <filename> [-c config] pathname ...\n"
2410 + "vma extract <filename> [-r <fifo>] <targetdir>\n"
2411 + "vma verify <filename> [-v]\n"
2412 + ;
2413 +
2414 + printf("%s", help_msg);
2415 + exit(1);
2416 +}
2417 +
2418 +static const char *extract_devname(const char *path, char **devname, int index)
2419 +{
2420 + assert(path);
2421 +
2422 + const char *sep = strchr(path, '=');
2423 +
2424 + if (sep) {
2425 + *devname = g_strndup(path, sep - path);
2426 + path = sep + 1;
2427 + } else {
2428 + if (index >= 0) {
2429 + *devname = g_strdup_printf("disk%d", index);
2430 + } else {
2431 + *devname = NULL;
2432 + }
2433 + }
2434 +
2435 + return path;
2436 +}
2437 +
2438 +static void print_content(VmaReader *vmar)
2439 +{
2440 + assert(vmar);
2441 +
2442 + VmaHeader *head = vma_reader_get_header(vmar);
2443 +
2444 + GList *l = vma_reader_get_config_data(vmar);
2445 + while (l && l->data) {
2446 + VmaConfigData *cdata = (VmaConfigData *)l->data;
2447 + l = g_list_next(l);
2448 + printf("CFG: size: %d name: %s\n", cdata->len, cdata->name);
2449 + }
2450 +
2451 + int i;
2452 + VmaDeviceInfo *di;
2453 + for (i = 1; i < 255; i++) {
2454 + di = vma_reader_get_device_info(vmar, i);
2455 + if (di) {
2456 + if (strcmp(di->devname, "vmstate") == 0) {
2457 + printf("VMSTATE: dev_id=%d memory: %zd\n", i, di->size);
2458 + } else {
2459 + printf("DEV: dev_id=%d size: %zd devname: %s\n",
2460 + i, di->size, di->devname);
2461 + }
2462 + }
2463 + }
2464 + /* ctime is the last entry we print */
2465 + printf("CTIME: %s", ctime(&head->ctime));
2466 + fflush(stdout);
2467 +}
2468 +
2469 +static int list_content(int argc, char **argv)
2470 +{
2471 + int c, ret = 0;
2472 + const char *filename;
2473 +
2474 + for (;;) {
2475 + c = getopt(argc, argv, "h");
2476 + if (c == -1) {
2477 + break;
2478 + }
2479 + switch (c) {
2480 + case '?':
2481 + case 'h':
2482 + help();
2483 + break;
2484 + default:
2485 + g_assert_not_reached();
2486 + }
2487 + }
2488 +
2489 + /* Get the filename */
2490 + if ((optind + 1) != argc) {
2491 + help();
2492 + }
2493 + filename = argv[optind++];
2494 +
2495 + Error *errp = NULL;
2496 + VmaReader *vmar = vma_reader_create(filename, &errp);
2497 +
2498 + if (!vmar) {
2499 + g_error("%s", error_get_pretty(errp));
2500 + }
2501 +
2502 + print_content(vmar);
2503 +
2504 + vma_reader_destroy(vmar);
2505 +
2506 + return ret;
2507 +}
2508 +
2509 +typedef struct RestoreMap {
2510 + char *devname;
2511 + char *path;
2512 + char *format;
2513 + bool write_zero;
2514 +} RestoreMap;
2515 +
2516 +static int extract_content(int argc, char **argv)
2517 +{
2518 + int c, ret = 0;
2519 + int verbose = 0;
2520 + const char *filename;
2521 + const char *dirname;
2522 + const char *readmap = NULL;
2523 +
2524 + for (;;) {
2525 + c = getopt(argc, argv, "hvr:");
2526 + if (c == -1) {
2527 + break;
2528 + }
2529 + switch (c) {
2530 + case '?':
2531 + case 'h':
2532 + help();
2533 + break;
2534 + case 'r':
2535 + readmap = optarg;
2536 + break;
2537 + case 'v':
2538 + verbose = 1;
2539 + break;
2540 + default:
2541 + help();
2542 + }
2543 + }
2544 +
2545 + /* Get the filename */
2546 + if ((optind + 2) != argc) {
2547 + help();
2548 + }
2549 + filename = argv[optind++];
2550 + dirname = argv[optind++];
2551 +
2552 + Error *errp = NULL;
2553 + VmaReader *vmar = vma_reader_create(filename, &errp);
2554 +
2555 + if (!vmar) {
2556 + g_error("%s", error_get_pretty(errp));
2557 + }
2558 +
2559 + if (mkdir(dirname, 0777) < 0) {
2560 + g_error("unable to create target directory %s - %s",
2561 + dirname, g_strerror(errno));
2562 + }
2563 +
2564 + GList *l = vma_reader_get_config_data(vmar);
2565 + while (l && l->data) {
2566 + VmaConfigData *cdata = (VmaConfigData *)l->data;
2567 + l = g_list_next(l);
2568 + char *cfgfn = g_strdup_printf("%s/%s", dirname, cdata->name);
2569 + GError *err = NULL;
2570 + if (!g_file_set_contents(cfgfn, (gchar *)cdata->data, cdata->len,
2571 + &err)) {
2572 + g_error("unable to write file: %s", err->message);
2573 + }
2574 + }
2575 +
2576 + GHashTable *devmap = g_hash_table_new(g_str_hash, g_str_equal);
2577 +
2578 + if (readmap) {
2579 + print_content(vmar);
2580 +
2581 + FILE *map = fopen(readmap, "r");
2582 + if (!map) {
2583 + g_error("unable to open fifo %s - %s", readmap, g_strerror(errno));
2584 + }
2585 +
2586 + while (1) {
2587 + char inbuf[8192];
2588 + char *line = fgets(inbuf, sizeof(inbuf), map);
2589 + if (!line || line[0] == '\0' || !strcmp(line, "done\n")) {
2590 + break;
2591 + }
2592 + int len = strlen(line);
2593 + if (line[len - 1] == '\n') {
2594 + line[len - 1] = '\0';
2595 + if (len == 1) {
2596 + break;
2597 + }
2598 + }
2599 +
2600 + char *format = NULL;
2601 + if (strncmp(line, "format=", sizeof("format=")-1) == 0) {
2602 + format = line + sizeof("format=")-1;
2603 + char *colon = strchr(format, ':');
2604 + if (!colon) {
2605 + g_error("read map failed - found only a format ('%s')", inbuf);
2606 + }
2607 + format = g_strndup(format, colon - format);
2608 + line = colon+1;
2609 + }
2610 +
2611 + const char *path;
2612 + bool write_zero;
2613 + if (line[0] == '0' && line[1] == ':') {
2614 + path = line + 2;
2615 + write_zero = false;
2616 + } else if (line[0] == '1' && line[1] == ':') {
2617 + path = line + 2;
2618 + write_zero = true;
2619 + } else {
2620 + g_error("read map failed - parse error ('%s')", inbuf);
2621 + }
2622 +
2623 + char *devname = NULL;
2624 + path = extract_devname(path, &devname, -1);
2625 + if (!devname) {
2626 + g_error("read map failed - no dev name specified ('%s')",
2627 + inbuf);
2628 + }
2629 +
2630 + RestoreMap *map = g_new0(RestoreMap, 1);
2631 + map->devname = g_strdup(devname);
2632 + map->path = g_strdup(path);
2633 + map->format = format;
2634 + map->write_zero = write_zero;
2635 +
2636 + g_hash_table_insert(devmap, map->devname, map);
2637 +
2638 + };
2639 + }
2640 +
2641 + int i;
2642 + int vmstate_fd = -1;
2643 + guint8 vmstate_stream = 0;
2644 +
2645 + BlockBackend *blk = NULL;
2646 +
2647 + for (i = 1; i < 255; i++) {
2648 + VmaDeviceInfo *di = vma_reader_get_device_info(vmar, i);
2649 + if (di && (strcmp(di->devname, "vmstate") == 0)) {
2650 + vmstate_stream = i;
2651 + char *statefn = g_strdup_printf("%s/vmstate.bin", dirname);
2652 + vmstate_fd = open(statefn, O_WRONLY|O_CREAT|O_EXCL, 0644);
2653 + if (vmstate_fd < 0) {
2654 + g_error("create vmstate file '%s' failed - %s", statefn,
2655 + g_strerror(errno));
2656 + }
2657 + g_free(statefn);
2658 + } else if (di) {
2659 + char *devfn = NULL;
2660 + const char *format = NULL;
2661 + int flags = BDRV_O_RDWR | BDRV_O_NO_FLUSH;
2662 + bool write_zero = true;
2663 +
2664 + if (readmap) {
2665 + RestoreMap *map;
2666 + map = (RestoreMap *)g_hash_table_lookup(devmap, di->devname);
2667 + if (map == NULL) {
2668 + g_error("no device name mapping for %s", di->devname);
2669 + }
2670 + devfn = map->path;
2671 + format = map->format;
2672 + write_zero = map->write_zero;
2673 + } else {
2674 + devfn = g_strdup_printf("%s/tmp-disk-%s.raw",
2675 + dirname, di->devname);
2676 + printf("DEVINFO %s %zd\n", devfn, di->size);
2677 +
2678 + bdrv_img_create(devfn, "raw", NULL, NULL, NULL, di->size,
2679 + flags, true, &errp);
2680 + if (errp) {
2681 + g_error("can't create file %s: %s", devfn,
2682 + error_get_pretty(errp));
2683 + }
2684 +
2685 + /* Note: we created an empty file above, so there is no
2686 + * need to write zeroes (so we generate a sparse file)
2687 + */
2688 + write_zero = false;
2689 + }
2690 +
2691 + size_t devlen = strlen(devfn);
2692 + QDict *options = NULL;
2693 + if (format) {
2694 + /* explicit format from commandline */
2695 + options = qdict_new();
2696 + qdict_put(options, "driver", qstring_from_str(format));
2697 + } else if ((devlen > 4 && strcmp(devfn+devlen-4, ".raw") == 0) ||
2698 + strncmp(devfn, "/dev/", 5) == 0)
2699 + {
2700 + /* This part is now deprecated for PVE as well (just as qemu
2701 +                 * deprecated not specifying an explicit raw format, too).
2702 + */
2703 + /* explicit raw format */
2704 + options = qdict_new();
2705 + qdict_put(options, "driver", qstring_from_str("raw"));
2706 + }
2707 +
2708 +
2709 + if (errp || !(blk = blk_new_open(devfn, NULL, options, flags, &errp))) {
2710 + g_error("can't open file %s - %s", devfn,
2711 + error_get_pretty(errp));
2712 + }
2713 +
2714 + if (vma_reader_register_bs(vmar, i, blk, write_zero, &errp) < 0) {
2715 + g_error("%s", error_get_pretty(errp));
2716 + }
2717 +
2718 + if (!readmap) {
2719 + g_free(devfn);
2720 + }
2721 + }
2722 + }
2723 +
2724 + if (vma_reader_restore(vmar, vmstate_fd, verbose, &errp) < 0) {
2725 + g_error("restore failed - %s", error_get_pretty(errp));
2726 + }
2727 +
2728 + if (!readmap) {
2729 + for (i = 1; i < 255; i++) {
2730 + VmaDeviceInfo *di = vma_reader_get_device_info(vmar, i);
2731 + if (di && (i != vmstate_stream)) {
2732 + char *tmpfn = g_strdup_printf("%s/tmp-disk-%s.raw",
2733 + dirname, di->devname);
2734 + char *fn = g_strdup_printf("%s/disk-%s.raw",
2735 + dirname, di->devname);
2736 + if (rename(tmpfn, fn) != 0) {
2737 + g_error("rename %s to %s failed - %s",
2738 + tmpfn, fn, g_strerror(errno));
2739 + }
2740 + }
2741 + }
2742 + }
2743 +
2744 + vma_reader_destroy(vmar);
2745 +
2746 + blk_unref(blk);
2747 +
2748 + bdrv_close_all();
2749 +
2750 + return ret;
2751 +}
2752 +
2753 +static int verify_content(int argc, char **argv)
2754 +{
2755 + int c, ret = 0;
2756 + int verbose = 0;
2757 + const char *filename;
2758 +
2759 + for (;;) {
2760 + c = getopt(argc, argv, "hv");
2761 + if (c == -1) {
2762 + break;
2763 + }
2764 + switch (c) {
2765 + case '?':
2766 + case 'h':
2767 + help();
2768 + break;
2769 + case 'v':
2770 + verbose = 1;
2771 + break;
2772 + default:
2773 + help();
2774 + }
2775 + }
2776 +
2777 + /* Get the filename */
2778 + if ((optind + 1) != argc) {
2779 + help();
2780 + }
2781 + filename = argv[optind++];
2782 +
2783 + Error *errp = NULL;
2784 + VmaReader *vmar = vma_reader_create(filename, &errp);
2785 +
2786 + if (!vmar) {
2787 + g_error("%s", error_get_pretty(errp));
2788 + }
2789 +
2790 + if (verbose) {
2791 + print_content(vmar);
2792 + }
2793 +
2794 + if (vma_reader_verify(vmar, verbose, &errp) < 0) {
2795 + g_error("verify failed - %s", error_get_pretty(errp));
2796 + }
2797 +
2798 + vma_reader_destroy(vmar);
2799 +
2800 + bdrv_close_all();
2801 +
2802 + return ret;
2803 +}
2804 +
2805 +typedef struct BackupJob {
2806 + BlockBackend *target;
2807 + int64_t len;
2808 + VmaWriter *vmaw;
2809 + uint8_t dev_id;
2810 +} BackupJob;
2811 +
2812 +#define BACKUP_SECTORS_PER_CLUSTER (VMA_CLUSTER_SIZE / BDRV_SECTOR_SIZE)
2813 +
2814 +static void coroutine_fn backup_run_empty(void *opaque)
2815 +{
2816 + VmaWriter *vmaw = (VmaWriter *)opaque;
2817 +
2818 + vma_writer_flush_output(vmaw);
2819 +
2820 + Error *err = NULL;
2821 + if (vma_writer_close(vmaw, &err) != 0) {
2822 + g_warning("vma_writer_close failed %s", error_get_pretty(err));
2823 + }
2824 +}
2825 +
2826 +static void coroutine_fn backup_run(void *opaque)
2827 +{
2828 +    BackupJob *job = (BackupJob *)opaque;
2829 +    struct iovec iov;
2830 +    QEMUIOVector qiov;
2831 +
2832 +    int64_t start, end;
2833 +    int ret = 0;
2834 +
2835 +    unsigned char *buf = blk_blockalign(job->target, VMA_CLUSTER_SIZE);
2836 +
2837 +    start = 0;
2838 +    end = DIV_ROUND_UP(job->len / BDRV_SECTOR_SIZE,
2839 +                       BACKUP_SECTORS_PER_CLUSTER);
2840 +
2841 +    for (; start < end; start++) {
2842 +        iov.iov_base = buf;
2843 +        iov.iov_len = VMA_CLUSTER_SIZE;
2844 +        qemu_iovec_init_external(&qiov, &iov, 1);
2845 +
2846 +        ret = blk_co_preadv(job->target, start * VMA_CLUSTER_SIZE,
2847 +                            VMA_CLUSTER_SIZE, &qiov, 0);
2848 +        if (ret < 0) {
2849 +            vma_writer_set_error(job->vmaw, "read error");
2850 +            goto out;
2851 +        }
2852 +
2853 +        size_t zb = 0;
2854 +        if (vma_writer_write(job->vmaw, job->dev_id, start, buf, &zb) < 0) {
2855 +            vma_writer_set_error(job->vmaw, "backup_dump_cb vma_writer_write failed");
2856 +            goto out;
2857 +        }
2858 +    }
2859 +
2860 +out:
2861 +    qemu_vfree(buf); /* release bounce buffer on every exit path */
2862 +    if (vma_writer_close_stream(job->vmaw, job->dev_id) <= 0) {
2863 +        Error *err = NULL;
2864 +        if (vma_writer_close(job->vmaw, &err) != 0) {
2865 +            g_warning("vma_writer_close failed %s", error_get_pretty(err));
2866 +        }
2867 +    }
2868 +}
2869 +
2870 +static int create_archive(int argc, char **argv)
2871 +{
2872 + int i, c;
2873 + int verbose = 0;
2874 + const char *archivename;
2875 + GList *config_files = NULL;
2876 +
2877 + for (;;) {
2878 + c = getopt(argc, argv, "hvc:");
2879 + if (c == -1) {
2880 + break;
2881 + }
2882 + switch (c) {
2883 + case '?':
2884 + case 'h':
2885 + help();
2886 + break;
2887 + case 'c':
2888 + config_files = g_list_append(config_files, optarg);
2889 + break;
2890 + case 'v':
2891 + verbose = 1;
2892 + break;
2893 + default:
2894 + g_assert_not_reached();
2895 + }
2896 + }
2897 +
2898 +
2899 +    /* make sure we have an archive name */
2900 + if ((optind + 1) > argc) {
2901 + help();
2902 + }
2903 +
2904 + archivename = argv[optind++];
2905 +
2906 + uuid_t uuid;
2907 + uuid_generate(uuid);
2908 +
2909 + Error *local_err = NULL;
2910 + VmaWriter *vmaw = vma_writer_create(archivename, uuid, &local_err);
2911 +
2912 + if (vmaw == NULL) {
2913 + g_error("%s", error_get_pretty(local_err));
2914 + }
2915 +
2916 + GList *l = config_files;
2917 + while (l && l->data) {
2918 + char *name = l->data;
2919 + char *cdata = NULL;
2920 + gsize clen = 0;
2921 + GError *err = NULL;
2922 + if (!g_file_get_contents(name, &cdata, &clen, &err)) {
2923 + unlink(archivename);
2924 + g_error("Unable to read file: %s", err->message);
2925 + }
2926 +
2927 + if (vma_writer_add_config(vmaw, name, cdata, clen) != 0) {
2928 + unlink(archivename);
2929 + g_error("Unable to append config data %s (len = %zd)",
2930 + name, clen);
2931 + }
2932 + l = g_list_next(l);
2933 + }
2934 +
2935 + int devcount = 0;
2936 + while (optind < argc) {
2937 + const char *path = argv[optind++];
2938 + char *devname = NULL;
2939 + path = extract_devname(path, &devname, devcount++);
2940 +
2941 + Error *errp = NULL;
2942 + BlockBackend *target;
2943 +
2944 + target = blk_new_open(path, NULL, NULL, 0, &errp);
2945 + if (!target) {
2946 + unlink(archivename);
2947 + g_error("bdrv_open '%s' failed - %s", path, error_get_pretty(errp));
2948 + }
2949 + int64_t size = blk_getlength(target);
2950 + int dev_id = vma_writer_register_stream(vmaw, devname, size);
2951 + if (dev_id <= 0) {
2952 + unlink(archivename);
2953 + g_error("vma_writer_register_stream '%s' failed", devname);
2954 + }
2955 +
2956 + BackupJob *job = g_new0(BackupJob, 1);
2957 + job->len = size;
2958 + job->target = target;
2959 + job->vmaw = vmaw;
2960 + job->dev_id = dev_id;
2961 +
2962 + Coroutine *co = qemu_coroutine_create(backup_run, job);
2963 + qemu_coroutine_enter(co);
2964 + }
2965 +
2966 + VmaStatus vmastat;
2967 + int percent = 0;
2968 + int last_percent = -1;
2969 +
2970 + if (devcount) {
2971 + while (1) {
2972 + main_loop_wait(false);
2973 + vma_writer_get_status(vmaw, &vmastat);
2974 +
2975 + if (verbose) {
2976 +
2977 + uint64_t total = 0;
2978 + uint64_t transferred = 0;
2979 + uint64_t zero_bytes = 0;
2980 +
2981 + int i;
2982 + for (i = 0; i < 256; i++) {
2983 + if (vmastat.stream_info[i].size) {
2984 + total += vmastat.stream_info[i].size;
2985 + transferred += vmastat.stream_info[i].transferred;
2986 + zero_bytes += vmastat.stream_info[i].zero_bytes;
2987 + }
2988 + }
2989 + percent = (transferred*100)/total;
2990 + if (percent != last_percent) {
2991 + fprintf(stderr, "progress %d%% %zd/%zd %zd\n", percent,
2992 + transferred, total, zero_bytes);
2993 + fflush(stderr);
2994 +
2995 + last_percent = percent;
2996 + }
2997 + }
2998 +
2999 + if (vmastat.closed) {
3000 + break;
3001 + }
3002 + }
3003 + } else {
3004 + Coroutine *co = qemu_coroutine_create(backup_run_empty, vmaw);
3005 + qemu_coroutine_enter(co);
3006 + while (1) {
3007 + main_loop_wait(false);
3008 + vma_writer_get_status(vmaw, &vmastat);
3009 + if (vmastat.closed) {
3010 + break;
3011 + }
3012 + }
3013 + }
3014 +
3015 + bdrv_drain_all();
3016 +
3017 + vma_writer_get_status(vmaw, &vmastat);
3018 +
3019 + if (verbose) {
3020 + for (i = 0; i < 256; i++) {
3021 + VmaStreamInfo *si = &vmastat.stream_info[i];
3022 + if (si->size) {
3023 + fprintf(stderr, "image %s: size=%zd zeros=%zd saved=%zd\n",
3024 + si->devname, si->size, si->zero_bytes,
3025 + si->size - si->zero_bytes);
3026 + }
3027 + }
3028 + }
3029 +
3030 + if (vmastat.status < 0) {
3031 + unlink(archivename);
3032 + g_error("creating vma archive failed");
3033 + }
3034 +
3035 + return 0;
3036 +}
3037 +
3038 +static int dump_config(int argc, char **argv)
3039 +{
3040 + int c, ret = 0;
3041 + const char *filename;
3042 + const char *config_name = "qemu-server.conf";
3043 +
3044 + for (;;) {
3045 + c = getopt(argc, argv, "hc:");
3046 + if (c == -1) {
3047 + break;
3048 + }
3049 + switch (c) {
3050 + case '?':
3051 + case 'h':
3052 + help();
3053 + break;
3054 + case 'c':
3055 + config_name = optarg;
3056 + break;
3057 + default:
3058 + help();
3059 + }
3060 + }
3061 +
3062 + /* Get the filename */
3063 + if ((optind + 1) != argc) {
3064 + help();
3065 + }
3066 + filename = argv[optind++];
3067 +
3068 + Error *errp = NULL;
3069 + VmaReader *vmar = vma_reader_create(filename, &errp);
3070 +
3071 + if (!vmar) {
3072 + g_error("%s", error_get_pretty(errp));
3073 + }
3074 +
3075 + int found = 0;
3076 + GList *l = vma_reader_get_config_data(vmar);
3077 + while (l && l->data) {
3078 + VmaConfigData *cdata = (VmaConfigData *)l->data;
3079 + l = g_list_next(l);
3080 + if (strcmp(cdata->name, config_name) == 0) {
3081 + found = 1;
3082 + fwrite(cdata->data, cdata->len, 1, stdout);
3083 + break;
3084 + }
3085 + }
3086 +
3087 + vma_reader_destroy(vmar);
3088 +
3089 + bdrv_close_all();
3090 +
3091 + if (!found) {
3092 + fprintf(stderr, "unable to find configuration data '%s'\n", config_name);
3093 + return -1;
3094 + }
3095 +
3096 + return ret;
3097 +}
3098 +
3099 +int main(int argc, char **argv)
3100 +{
3101 + const char *cmdname;
3102 + Error *main_loop_err = NULL;
3103 +
3104 + error_set_progname(argv[0]);
3105 +
3106 + if (qemu_init_main_loop(&main_loop_err)) {
3107 + g_error("%s", error_get_pretty(main_loop_err));
3108 + }
3109 +
3110 + bdrv_init();
3111 +
3112 + if (argc < 2) {
3113 + help();
3114 + }
3115 +
3116 + cmdname = argv[1];
3117 + argc--; argv++;
3118 +
3119 +
3120 + if (!strcmp(cmdname, "list")) {
3121 + return list_content(argc, argv);
3122 + } else if (!strcmp(cmdname, "create")) {
3123 + return create_archive(argc, argv);
3124 + } else if (!strcmp(cmdname, "extract")) {
3125 + return extract_content(argc, argv);
3126 + } else if (!strcmp(cmdname, "verify")) {
3127 + return verify_content(argc, argv);
3128 + } else if (!strcmp(cmdname, "config")) {
3129 + return dump_config(argc, argv);
3130 + }
3131 +
3132 + help();
3133 + return 0;
3134 +}
3135 diff --git a/vma.h b/vma.h
3136 new file mode 100644
3137 index 0000000000..c895c97f6d
3138 --- /dev/null
3139 +++ b/vma.h
3140 @@ -0,0 +1,150 @@
3141 +/*
3142 + * VMA: Virtual Machine Archive
3143 + *
3144 + * Copyright (C) Proxmox Server Solutions
3145 + *
3146 + * Authors:
3147 + * Dietmar Maurer (dietmar@proxmox.com)
3148 + *
3149 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
3150 + * See the COPYING file in the top-level directory.
3151 + *
3152 + */
3153 +
3154 +#ifndef BACKUP_VMA_H
3155 +#define BACKUP_VMA_H
3156 +
3157 +#include <uuid/uuid.h>
3158 +#include "qapi/error.h"
3159 +#include "block/block.h"
3160 +
3161 +#define VMA_BLOCK_BITS 12
3162 +#define VMA_BLOCK_SIZE (1<<VMA_BLOCK_BITS)
3163 +#define VMA_CLUSTER_BITS (VMA_BLOCK_BITS+4)
3164 +#define VMA_CLUSTER_SIZE (1<<VMA_CLUSTER_BITS)
3165 +
3166 +#if VMA_CLUSTER_SIZE != 65536
3167 +#error unexpected cluster size
3168 +#endif
3169 +
3170 +#define VMA_EXTENT_HEADER_SIZE 512
3171 +#define VMA_BLOCKS_PER_EXTENT 59
3172 +#define VMA_MAX_CONFIGS 256
3173 +
3174 +#define VMA_MAX_EXTENT_SIZE \
3175 + (VMA_EXTENT_HEADER_SIZE+VMA_CLUSTER_SIZE*VMA_BLOCKS_PER_EXTENT)
3176 +#if VMA_MAX_EXTENT_SIZE != 3867136
3177 +#error unexpected VMA_EXTENT_SIZE
3178 +#endif
3179 +
3180 +/* File Format Definitions */
3181 +
3182 +#define VMA_MAGIC (GUINT32_TO_BE(('V'<<24)|('M'<<16)|('A'<<8)|0x00))
3183 +#define VMA_EXTENT_MAGIC (GUINT32_TO_BE(('V'<<24)|('M'<<16)|('A'<<8)|'E'))
3184 +
3185 +typedef struct VmaDeviceInfoHeader {
3186 + uint32_t devname_ptr; /* offset into blob_buffer table */
3187 + uint32_t reserved0;
3188 + uint64_t size; /* device size in bytes */
3189 + uint64_t reserved1;
3190 + uint64_t reserved2;
3191 +} VmaDeviceInfoHeader;
3192 +
3193 +typedef struct VmaHeader {
3194 + uint32_t magic;
3195 + uint32_t version;
3196 + unsigned char uuid[16];
3197 + int64_t ctime;
3198 + unsigned char md5sum[16];
3199 +
3200 + uint32_t blob_buffer_offset;
3201 + uint32_t blob_buffer_size;
3202 + uint32_t header_size;
3203 +
3204 + unsigned char reserved[1984];
3205 +
3206 + uint32_t config_names[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
3207 + uint32_t config_data[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
3208 +
3209 + uint32_t reserved1;
3210 +
3211 + VmaDeviceInfoHeader dev_info[256];
3212 +} VmaHeader;
3213 +
3214 +typedef struct VmaExtentHeader {
3215 + uint32_t magic;
3216 + uint16_t reserved1;
3217 + uint16_t block_count;
3218 + unsigned char uuid[16];
3219 + unsigned char md5sum[16];
3220 + uint64_t blockinfo[VMA_BLOCKS_PER_EXTENT];
3221 +} VmaExtentHeader;
3222 +
3223 +/* functions/definitions to read/write vma files */
3224 +
3225 +typedef struct VmaReader VmaReader;
3226 +
3227 +typedef struct VmaWriter VmaWriter;
3228 +
3229 +typedef struct VmaConfigData {
3230 + const char *name;
3231 + const void *data;
3232 + uint32_t len;
3233 +} VmaConfigData;
3234 +
3235 +typedef struct VmaStreamInfo {
3236 + uint64_t size;
3237 + uint64_t cluster_count;
3238 + uint64_t transferred;
3239 + uint64_t zero_bytes;
3240 + int finished;
3241 + char *devname;
3242 +} VmaStreamInfo;
3243 +
3244 +typedef struct VmaStatus {
3245 + int status;
3246 + bool closed;
3247 + char errmsg[8192];
3248 + char uuid_str[37];
3249 + VmaStreamInfo stream_info[256];
3250 +} VmaStatus;
3251 +
3252 +typedef struct VmaDeviceInfo {
3253 + uint64_t size; /* device size in bytes */
3254 + const char *devname;
3255 +} VmaDeviceInfo;
3256 +
3257 +VmaWriter *vma_writer_create(const char *filename, uuid_t uuid, Error **errp);
3258 +int vma_writer_close(VmaWriter *vmaw, Error **errp);
3259 +void vma_writer_error_propagate(VmaWriter *vmaw, Error **errp);
3260 +void vma_writer_destroy(VmaWriter *vmaw);
3261 +int vma_writer_add_config(VmaWriter *vmaw, const char *name, gpointer data,
3262 + size_t len);
3263 +int vma_writer_register_stream(VmaWriter *vmaw, const char *devname,
3264 + size_t size);
3265 +
3266 +int64_t coroutine_fn vma_writer_write(VmaWriter *vmaw, uint8_t dev_id,
3267 + int64_t cluster_num,
3268 + const unsigned char *buf,
3269 + size_t *zero_bytes);
3270 +
3271 +int coroutine_fn vma_writer_close_stream(VmaWriter *vmaw, uint8_t dev_id);
3272 +int coroutine_fn vma_writer_flush_output(VmaWriter *vmaw);
3273 +
3274 +int vma_writer_get_status(VmaWriter *vmaw, VmaStatus *status);
3275 +void vma_writer_set_error(VmaWriter *vmaw, const char *fmt, ...);
3276 +
3277 +
3278 +VmaReader *vma_reader_create(const char *filename, Error **errp);
3279 +void vma_reader_destroy(VmaReader *vmar);
3280 +VmaHeader *vma_reader_get_header(VmaReader *vmar);
3281 +GList *vma_reader_get_config_data(VmaReader *vmar);
3282 +VmaDeviceInfo *vma_reader_get_device_info(VmaReader *vmar, guint8 dev_id);
3283 +int vma_reader_register_bs(VmaReader *vmar, guint8 dev_id,
3284 + BlockBackend *target, bool write_zeroes,
3285 + Error **errp);
3286 +int vma_reader_restore(VmaReader *vmar, int vmstate_fd, bool verbose,
3287 + Error **errp);
3288 +int vma_reader_verify(VmaReader *vmar, bool verbose, Error **errp);
3289 +
3290 +#endif /* BACKUP_VMA_H */
3291 --
3292 2.20.1
3293