1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Wolfgang Bumiller <w.bumiller@proxmox.com>
3 Date: Mon, 7 Aug 2017 08:51:16 +0200
4 Subject: [PATCH] PVE: [Deprecated] adding old vma files
6 TODO: Move to using a libvma block backend
7 Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
11 block/backup.c | 103 +++--
12 block/replication.c | 1 +
13 blockdev.c | 208 +++++----
14 include/block/block_int.h | 4 +
16 vma-reader.c | 857 ++++++++++++++++++++++++++++++++++++++
17 vma-writer.c | 771 ++++++++++++++++++++++++++++++++++
18 vma.c | 756 +++++++++++++++++++++++++++++++++
20 11 files changed, 2752 insertions(+), 105 deletions(-)
21 create mode 100644 vma-reader.c
22 create mode 100644 vma-writer.c
23 create mode 100644 vma.c
24 create mode 100644 vma.h
26 diff --git a/Makefile b/Makefile
27 index 04a0d45050..0b5a8353ea 100644
30 @@ -419,7 +419,7 @@ dummy := $(call unnest-vars,, \
32 include $(SRC_PATH)/tests/Makefile.include
34 -all: $(DOCS) $(if $(BUILD_DOCS),sphinxdocs) $(TOOLS) $(HELPERS-y) recurse-all modules
35 +all: $(DOCS) $(if $(BUILD_DOCS),sphinxdocs) $(TOOLS) vma$(EXESUF) $(HELPERS-y) recurse-all modules
38 $(call quiet-command, \
39 @@ -509,6 +509,7 @@ qemu-img.o: qemu-img-cmds.h
40 qemu-img$(EXESUF): qemu-img.o $(authz-obj-y) $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
41 qemu-nbd$(EXESUF): qemu-nbd.o $(authz-obj-y) $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
42 qemu-io$(EXESUF): qemu-io.o $(authz-obj-y) $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
43 +vma$(EXESUF): vma.o vma-reader.o $(authz-obj-y) $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
45 qemu-bridge-helper$(EXESUF): qemu-bridge-helper.o $(COMMON_LDADDS)
47 diff --git a/Makefile.objs b/Makefile.objs
48 index 559486973a..9477a23ca2 100644
51 @@ -18,6 +18,7 @@ block-obj-y += block.o blockjob.o job.o
52 block-obj-y += block/ scsi/
53 block-obj-y += qemu-io-cmds.o
54 block-obj-$(CONFIG_REPLICATION) += replication.o
55 +block-obj-y += vma-writer.o
59 diff --git a/block/backup.c b/block/backup.c
60 index 51c36d291b..18598fd491 100644
63 @@ -41,6 +41,7 @@ typedef struct BackupBlockJob {
64 /* bitmap for sync=incremental */
65 BdrvDirtyBitmap *sync_bitmap;
66 MirrorSyncMode sync_mode;
67 + BackupDumpFunc *dump_cb;
68 BlockdevOnError on_source_error;
69 BlockdevOnError on_target_error;
70 CoRwlock flush_rwlock;
71 @@ -130,12 +131,20 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
74 if (qemu_iovec_is_zero(&qiov)) {
75 - ret = blk_co_pwrite_zeroes(job->target, start,
76 - qiov.size, write_flags | BDRV_REQ_MAY_UNMAP);
78 + ret = job->dump_cb(job->common.job.opaque, job->target, start, qiov.size, NULL);
80 + ret = blk_co_pwrite_zeroes(job->target, start,
81 + qiov.size, write_flags | BDRV_REQ_MAY_UNMAP);
84 - ret = blk_co_pwritev(job->target, start,
85 - qiov.size, &qiov, write_flags |
86 - (job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0));
88 + ret = job->dump_cb(job->common.job.opaque, job->target, start, qiov.size, *bounce_buffer);
90 + ret = blk_co_pwritev(job->target, start,
91 + qiov.size, &qiov, write_flags |
92 + (job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0));
96 trace_backup_do_cow_write_fail(job, start, ret);
97 @@ -213,7 +222,11 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
98 trace_backup_do_cow_process(job, start);
100 if (job->use_copy_range) {
101 - ret = backup_cow_with_offload(job, start, end, is_write_notifier);
102 + if (job->dump_cb) {
105 + ret = backup_cow_with_offload(job, start, end, is_write_notifier);
108 job->use_copy_range = false;
110 @@ -297,7 +310,9 @@ static void backup_abort(Job *job)
111 static void backup_clean(Job *job)
113 BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
118 blk_unref(s->target);
121 @@ -306,7 +321,9 @@ static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
123 BackupBlockJob *s = container_of(job, BackupBlockJob, common);
125 - blk_set_aio_context(s->target, aio_context);
127 + blk_set_aio_context(s->target, aio_context);
131 void backup_do_checkpoint(BlockJob *job, Error **errp)
132 @@ -347,9 +364,11 @@ static BlockErrorAction backup_error_action(BackupBlockJob *job,
134 return block_job_error_action(&job->common, job->on_source_error,
137 + } else if (job->target) {
138 return block_job_error_action(&job->common, job->on_target_error,
141 + return BLOCK_ERROR_ACTION_REPORT;
145 @@ -571,6 +590,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
146 BlockdevOnError on_source_error,
147 BlockdevOnError on_target_error,
149 + BackupDumpFunc *dump_cb,
150 BlockCompletionFunc *cb, void *opaque,
152 JobTxn *txn, Error **errp)
153 @@ -581,7 +601,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
158 + assert(target || dump_cb);
161 error_setg(errp, "Source and target cannot be the same");
162 @@ -594,13 +614,13 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
166 - if (!bdrv_is_inserted(target)) {
167 + if (target && !bdrv_is_inserted(target)) {
168 error_setg(errp, "Device is not inserted: %s",
169 bdrv_get_device_name(target));
173 - if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
174 + if (target && compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
175 error_setg(errp, "Compression is not supported for this drive %s",
176 bdrv_get_device_name(target));
178 @@ -610,7 +630,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
182 - if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
183 + if (target && bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
187 @@ -650,15 +670,18 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
191 - /* The target must match the source in size, so no resize here either */
192 - job->target = blk_new(BLK_PERM_WRITE,
193 - BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
194 - BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
195 - ret = blk_insert_bs(job->target, target, errp);
199 + /* The target must match the source in size, so no resize here either */
200 + job->target = blk_new(BLK_PERM_WRITE,
201 + BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
202 + BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
203 + ret = blk_insert_bs(job->target, target, errp);
209 + job->dump_cb = dump_cb;
210 job->on_source_error = on_source_error;
211 job->on_target_error = on_target_error;
212 job->sync_mode = sync_mode;
213 @@ -669,6 +692,9 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
214 /* Detect image-fleecing (and similar) schemes */
215 job->serialize_target_writes = bdrv_chain_contains(target, bs);
218 + goto use_default_cluster_size;
220 /* If there is no backing file on the target, we cannot rely on COW if our
221 * backup cluster size is smaller than the target cluster size. Even for
222 * targets with a backing file, try to avoid COW if possible. */
223 @@ -693,18 +719,35 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
224 /* Not fatal; just trudge on ahead. */
225 job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
227 - job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
228 + use_default_cluster_size:
229 + ret = bdrv_get_info(bs, &bdi);
231 + job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
233 + /* round down to nearest BACKUP_CLUSTER_SIZE_DEFAULT */
234 + job->cluster_size = (bdi.cluster_size / BACKUP_CLUSTER_SIZE_DEFAULT) * BACKUP_CLUSTER_SIZE_DEFAULT;
235 + if (job->cluster_size == 0) {
236 + /* but we can't go below it */
237 + job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
242 + job->use_copy_range = true;
243 + job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
244 + blk_get_max_transfer(job->target));
245 + job->copy_range_size = MAX(job->cluster_size,
246 + QEMU_ALIGN_UP(job->copy_range_size,
247 + job->cluster_size));
249 + job->use_copy_range = false;
251 - job->use_copy_range = true;
252 - job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
253 - blk_get_max_transfer(job->target));
254 - job->copy_range_size = MAX(job->cluster_size,
255 - QEMU_ALIGN_UP(job->copy_range_size,
256 - job->cluster_size));
258 - /* Required permissions are already taken with target's blk_new() */
259 - block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
262 + /* Required permissions are already taken with target's blk_new() */
263 + block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
267 job->common.job.pause_count += pause_count;
269 diff --git a/block/replication.c b/block/replication.c
270 index 0a265db1b5..e85c62ba9c 100644
271 --- a/block/replication.c
272 +++ b/block/replication.c
273 @@ -543,6 +543,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
274 0, MIRROR_SYNC_MODE_NONE, NULL, false,
275 BLOCKDEV_ON_ERROR_REPORT,
276 BLOCKDEV_ON_ERROR_REPORT, JOB_INTERNAL,
278 backup_job_completed, bs, 0, NULL, &local_err);
280 error_propagate(errp, local_err);
281 diff --git a/blockdev.c b/blockdev.c
282 index 9210494b47..f8ce285caa 100644
288 #include "qemu/osdep.h"
289 -#include "qemu/uuid.h"
290 #include "sysemu/block-backend.h"
291 #include "sysemu/blockdev.h"
292 #include "hw/block/block.h"
294 #include "qemu/cutils.h"
295 #include "qemu/help_option.h"
296 #include "qemu/throttle-options.h"
299 static QTAILQ_HEAD(, BlockDriverState) monitor_bdrv_states =
300 QTAILQ_HEAD_INITIALIZER(monitor_bdrv_states);
301 @@ -3160,15 +3160,14 @@ out:
302 static struct PVEBackupState {
319 @@ -3187,6 +3186,71 @@ typedef struct PVEBackupDevInfo {
321 static void pvebackup_run_next_job(void);
323 +static int pvebackup_dump_cb(void *opaque, BlockBackend *target,
324 + uint64_t start, uint64_t bytes,
327 + const uint64_t size = bytes;
328 + const unsigned char *buf = pbuf;
329 + PVEBackupDevInfo *di = opaque;
331 + if (backup_state.cancel) {
332 + return size; // return success
335 + uint64_t cluster_num = start / VMA_CLUSTER_SIZE;
336 + if ((cluster_num * VMA_CLUSTER_SIZE) != start) {
337 + if (!backup_state.error) {
338 + error_setg(&backup_state.error,
339 + "got unaligned write inside backup dump "
340 + "callback (sector %ld)", start);
342 + return -1; // not aligned to cluster size
347 + if (backup_state.vmaw) {
348 + size_t zero_bytes = 0;
349 + uint64_t remaining = size;
350 + while (remaining > 0) {
351 + ret = vma_writer_write(backup_state.vmaw, di->dev_id, cluster_num,
355 + buf += VMA_CLUSTER_SIZE;
358 + if (!backup_state.error) {
359 + vma_writer_error_propagate(backup_state.vmaw, &backup_state.error);
361 + if (di->bs && di->bs->job) {
362 + job_cancel(&di->bs->job->job, true);
366 + backup_state.zero_bytes += zero_bytes;
367 + if (remaining >= VMA_CLUSTER_SIZE) {
368 + backup_state.transferred += VMA_CLUSTER_SIZE;
369 + remaining -= VMA_CLUSTER_SIZE;
371 + backup_state.transferred += remaining;
378 + backup_state.zero_bytes += size;
380 + backup_state.transferred += size;
383 + // Note: always return success, because we want writes to succeed anyway.
388 static void pvebackup_cleanup(void)
390 qemu_mutex_lock(&backup_state.backup_mutex);
391 @@ -3198,9 +3262,11 @@ static void pvebackup_cleanup(void)
393 backup_state.end_time = time(NULL);
395 - if (backup_state.vmaobj) {
396 - object_unparent(backup_state.vmaobj);
397 - backup_state.vmaobj = NULL;
398 + if (backup_state.vmaw) {
399 + Error *local_err = NULL;
400 + vma_writer_close(backup_state.vmaw, &local_err);
401 + error_propagate(&backup_state.error, local_err);
402 + backup_state.vmaw = NULL;
405 g_list_free(backup_state.di_list);
406 @@ -3208,6 +3274,13 @@ static void pvebackup_cleanup(void)
407 qemu_mutex_unlock(&backup_state.backup_mutex);
410 +static void coroutine_fn backup_close_vma_stream(void *opaque)
412 + PVEBackupDevInfo *di = opaque;
414 + vma_writer_close_stream(backup_state.vmaw, di->dev_id);
417 static void pvebackup_complete_cb(void *opaque, int ret)
419 // This always runs in the main loop
420 @@ -3224,9 +3297,9 @@ static void pvebackup_complete_cb(void *opaque, int ret)
424 - if (backup_state.vmaobj) {
425 - object_unparent(backup_state.vmaobj);
426 - backup_state.vmaobj = NULL;
427 + if (backup_state.vmaw) {
428 + Coroutine *co = qemu_coroutine_create(backup_close_vma_stream, di);
429 + qemu_coroutine_enter(co);
432 // remove self from job queue
433 @@ -3254,14 +3327,9 @@ static void pvebackup_cancel(void *opaque)
434 error_setg(&backup_state.error, "backup cancelled");
437 - if (backup_state.vmaobj) {
439 + if (backup_state.vmaw) {
440 /* make sure vma writer does not block anymore */
441 - if (!object_set_props(backup_state.vmaobj, &err, "blocked", "yes", NULL)) {
443 - error_report_err(err);
446 + vma_writer_set_error(backup_state.vmaw, "backup cancelled");
449 GList *l = backup_state.di_list;
450 @@ -3292,18 +3360,14 @@ void qmp_backup_cancel(Error **errp)
451 Coroutine *co = qemu_coroutine_create(pvebackup_cancel, NULL);
452 qemu_coroutine_enter(co);
454 - while (backup_state.vmaobj) {
455 - /* FIXME: Find something better for this */
456 + while (backup_state.vmaw) {
457 + /* vma writer uses the main aio context */
458 aio_poll(qemu_get_aio_context(), true);
462 -void vma_object_add_config_file(Object *obj, const char *name,
463 - const char *contents, size_t len,
465 static int config_to_vma(const char *file, BackupFormat format,
467 - const char *backup_dir,
468 + const char *backup_dir, VmaWriter *vmaw,
472 @@ -3317,7 +3381,12 @@ static int config_to_vma(const char *file, BackupFormat format,
473 char *basename = g_path_get_basename(file);
475 if (format == BACKUP_FORMAT_VMA) {
476 - vma_object_add_config_file(vmaobj, basename, cdata, clen, errp);
477 + if (vma_writer_add_config(vmaw, basename, cdata, clen) != 0) {
478 + error_setg(errp, "unable to add %s config data to vma archive", file);
483 } else if (format == BACKUP_FORMAT_DIR) {
484 char config_path[PATH_MAX];
485 snprintf(config_path, PATH_MAX, "%s/%s", backup_dir, basename);
486 @@ -3334,28 +3403,30 @@ static int config_to_vma(const char *file, BackupFormat format,
490 +bool job_should_pause(Job *job);
491 static void pvebackup_run_next_job(void)
493 qemu_mutex_lock(&backup_state.backup_mutex);
495 - GList *next = g_list_nth(backup_state.di_list, backup_state.next_job);
497 - PVEBackupDevInfo *di = (PVEBackupDevInfo *)next->data;
498 - backup_state.next_job++;
499 + GList *l = backup_state.di_list;
501 + PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
502 + l = g_list_next(l);
503 if (!di->completed && di->bs && di->bs->job) {
504 BlockJob *job = di->bs->job;
505 AioContext *aio_context = blk_get_aio_context(job->blk);
506 aio_context_acquire(aio_context);
507 qemu_mutex_unlock(&backup_state.backup_mutex);
508 - if (backup_state.error || backup_state.cancel) {
509 - job_cancel_sync(job);
512 + if (job_should_pause(&job->job)) {
513 + if (backup_state.error || backup_state.cancel) {
514 + job_cancel_sync(&job->job);
516 + job_resume(&job->job);
519 aio_context_release(aio_context);
522 - next = g_list_next(next);
524 qemu_mutex_unlock(&backup_state.backup_mutex);
526 @@ -3366,7 +3437,7 @@ static void pvebackup_run_next_job(void)
527 UuidInfo *qmp_backup(const char *backup_file, bool has_format,
529 bool has_config_file, const char *config_file,
530 - bool has_firewall_file, const char *firewall_file,
531 + bool has_firewall_file, const char *firewall_file,
532 bool has_devlist, const char *devlist,
533 bool has_speed, int64_t speed, Error **errp)
535 @@ -3374,7 +3445,8 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
536 BlockDriverState *bs = NULL;
537 const char *backup_dir = NULL;
538 Error *local_err = NULL;
541 + VmaWriter *vmaw = NULL;
543 GList *di_list = NULL;
545 @@ -3386,7 +3458,7 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
546 backup_state.backup_mutex_initialized = true;
549 - if (backup_state.di_list || backup_state.vmaobj) {
550 + if (backup_state.di_list) {
551 error_set(errp, ERROR_CLASS_GENERIC_ERROR,
552 "previous backup not finished");
554 @@ -3461,40 +3533,28 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
558 - qemu_uuid_generate(&uuid);
559 + uuid_generate(uuid);
561 if (format == BACKUP_FORMAT_VMA) {
562 - char uuidstr[UUID_FMT_LEN+1];
563 - qemu_uuid_unparse(&uuid, uuidstr);
564 - uuidstr[UUID_FMT_LEN] = 0;
565 - backup_state.vmaobj =
566 - object_new_with_props("vma", object_get_objects_root(),
567 - "vma-backup-obj", &local_err,
568 - "filename", backup_file,
571 - if (!backup_state.vmaobj) {
572 + vmaw = vma_writer_create(backup_file, uuid, &local_err);
575 error_propagate(errp, local_err);
580 + /* register all devices for vma writer */
583 - QDict *options = qdict_new();
585 PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
588 const char *devname = bdrv_get_device_name(di->bs);
589 - snprintf(di->targetfile, PATH_MAX, "vma-backup-obj/%s.raw", devname);
591 - qdict_put(options, "driver", qstring_from_str("vma-drive"));
592 - qdict_put(options, "size", qint_from_int(di->size));
593 - di->target = bdrv_open(di->targetfile, NULL, options, BDRV_O_RDWR, &local_err);
595 - error_propagate(errp, local_err);
596 + di->dev_id = vma_writer_register_stream(vmaw, devname, di->size);
597 + if (di->dev_id <= 0) {
598 + error_set(errp, ERROR_CLASS_GENERIC_ERROR,
599 + "register_stream failed");
603 @@ -3535,14 +3595,14 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
605 /* add configuration file to archive */
606 if (has_config_file) {
607 - if(config_to_vma(config_file, format, backup_state.vmaobj, backup_dir, errp) != 0) {
608 + if (config_to_vma(config_file, format, backup_dir, vmaw, errp) != 0) {
613 /* add firewall file to archive */
614 if (has_firewall_file) {
615 - if(config_to_vma(firewall_file, format, backup_state.vmaobj, backup_dir, errp) != 0) {
616 + if (config_to_vma(firewall_file, format, backup_dir, vmaw, errp) != 0) {
620 @@ -3565,12 +3625,13 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
622 backup_state.backup_file = g_strdup(backup_file);
624 - memcpy(&backup_state.uuid, &uuid, sizeof(uuid));
625 - qemu_uuid_unparse(&uuid, backup_state.uuid_str);
626 + backup_state.vmaw = vmaw;
628 + uuid_copy(backup_state.uuid, uuid);
629 + uuid_unparse_lower(uuid, backup_state.uuid_str);
631 qemu_mutex_lock(&backup_state.backup_mutex);
632 backup_state.di_list = di_list;
633 - backup_state.next_job = 0;
635 backup_state.total = total;
636 backup_state.transferred = 0;
637 @@ -3581,21 +3642,21 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
639 PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
642 job = backup_job_create(NULL, di->bs, di->target, speed, MIRROR_SYNC_MODE_FULL, NULL,
643 false, BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
645 - pvebackup_complete_cb, di, 2, NULL, &local_err);
647 - bdrv_unref(di->target);
650 + pvebackup_dump_cb, pvebackup_complete_cb, di,
651 + 1, NULL, &local_err);
652 if (!job || local_err != NULL) {
653 error_setg(&backup_state.error, "backup_job_create failed");
654 pvebackup_cancel(NULL);
656 job_start(&job->job);
659 + bdrv_unref(di->target);
664 qemu_mutex_unlock(&backup_state.backup_mutex);
665 @@ -3631,9 +3692,10 @@ err:
669 - if (backup_state.vmaobj) {
670 - object_unparent(backup_state.vmaobj);
671 - backup_state.vmaobj = NULL;
674 + vma_writer_close(vmaw, &err);
675 + unlink(backup_file);
679 @@ -4086,7 +4148,7 @@ static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn,
680 job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
681 backup->sync, bmap, backup->compress,
682 backup->on_source_error, backup->on_target_error,
683 - job_flags, NULL, NULL, 0, txn, &local_err);
684 + job_flags, NULL, NULL, NULL, 0, txn, &local_err);
685 bdrv_unref(target_bs);
686 if (local_err != NULL) {
687 error_propagate(errp, local_err);
688 @@ -4196,7 +4258,7 @@ BlockJob *do_blockdev_backup(BlockdevBackup *backup, JobTxn *txn,
689 job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
690 backup->sync, bmap, backup->compress,
691 backup->on_source_error, backup->on_target_error,
692 - job_flags, NULL, NULL, 0, txn, &local_err);
693 + job_flags, NULL, NULL, NULL, 0, txn, &local_err);
694 if (local_err != NULL) {
695 error_propagate(errp, local_err);
697 diff --git a/include/block/block_int.h b/include/block/block_int.h
698 index b409e02be8..fd1828cd70 100644
699 --- a/include/block/block_int.h
700 +++ b/include/block/block_int.h
703 #define BLOCK_PROBE_BUF_SIZE 512
705 +typedef int BackupDumpFunc(void *opaque, BlockBackend *be,
706 + uint64_t offset, uint64_t bytes, const void *buf);
708 enum BdrvTrackedRequestType {
711 @@ -1156,6 +1159,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
712 BlockdevOnError on_source_error,
713 BlockdevOnError on_target_error,
715 + BackupDumpFunc *dump_cb,
716 BlockCompletionFunc *cb, void *opaque,
718 JobTxn *txn, Error **errp);
719 diff --git a/job.c b/job.c
720 index 86161bd9f3..114640688a 100644
723 @@ -249,7 +249,8 @@ static bool job_started(Job *job)
727 -static bool job_should_pause(Job *job)
728 +bool job_should_pause(Job *job);
729 +bool job_should_pause(Job *job)
731 return job->pause_count > 0;
733 diff --git a/vma-reader.c b/vma-reader.c
735 index 0000000000..2b1d1cdab3
740 + * VMA: Virtual Machine Archive
742 + * Copyright (C) 2012 Proxmox Server Solutions
745 + * Dietmar Maurer (dietmar@proxmox.com)
747 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
748 + * See the COPYING file in the top-level directory.
752 +#include "qemu/osdep.h"
754 +#include <uuid/uuid.h>
756 +#include "qemu-common.h"
757 +#include "qemu/timer.h"
758 +#include "qemu/ratelimit.h"
760 +#include "block/block.h"
761 +#include "sysemu/block-backend.h"
763 +static unsigned char zero_vma_block[VMA_BLOCK_SIZE];
765 +typedef struct VmaRestoreState {
766 + BlockBackend *target;
768 + unsigned long *bitmap;
774 + GChecksum *md5csum;
775 + GHashTable *blob_hash;
776 + unsigned char *head_data;
777 + VmaDeviceInfo devinfo[256];
778 + VmaRestoreState rstate[256];
780 + guint8 vmstate_stream;
781 + uint32_t vmstate_clusters;
782 + /* to show restore percentage if run with -v */
784 + int64_t cluster_count;
785 + int64_t clusters_read;
786 + int64_t zero_cluster_data;
787 + int64_t partial_zero_cluster_data;
788 + int clusters_read_per;
792 +g_int32_hash(gconstpointer v)
794 + return *(const uint32_t *)v;
798 +g_int32_equal(gconstpointer v1, gconstpointer v2)
800 + return *((const uint32_t *)v1) == *((const uint32_t *)v2);
803 +static int vma_reader_get_bitmap(VmaRestoreState *rstate, int64_t cluster_num)
806 + assert(rstate->bitmap);
808 + unsigned long val, idx, bit;
810 + idx = cluster_num / BITS_PER_LONG;
812 + assert(rstate->bitmap_size > idx);
814 + bit = cluster_num % BITS_PER_LONG;
815 + val = rstate->bitmap[idx];
817 + return !!(val & (1UL << bit));
820 +static void vma_reader_set_bitmap(VmaRestoreState *rstate, int64_t cluster_num,
824 + assert(rstate->bitmap);
826 + unsigned long val, idx, bit;
828 + idx = cluster_num / BITS_PER_LONG;
830 + assert(rstate->bitmap_size > idx);
832 + bit = cluster_num % BITS_PER_LONG;
833 + val = rstate->bitmap[idx];
835 + if (!(val & (1UL << bit))) {
839 + if (val & (1UL << bit)) {
840 + val &= ~(1UL << bit);
843 + rstate->bitmap[idx] = val;
846 +typedef struct VmaBlob {
852 +static const VmaBlob *get_header_blob(VmaReader *vmar, uint32_t pos)
855 + assert(vmar->blob_hash);
857 + return g_hash_table_lookup(vmar->blob_hash, &pos);
860 +static const char *get_header_str(VmaReader *vmar, uint32_t pos)
862 + const VmaBlob *blob = get_header_blob(vmar, pos);
866 + const char *res = (char *)blob->data;
867 + if (res[blob->len-1] != '\0') {
874 +safe_read(int fd, unsigned char *buf, size_t count)
879 + n = read(fd, buf, count);
880 + } while (n < 0 && errno == EINTR);
886 +full_read(int fd, unsigned char *buf, size_t len)
894 + n = safe_read(fd, buf, len);
916 +void vma_reader_destroy(VmaReader *vmar)
920 + if (vmar->fd >= 0) {
924 + if (vmar->cdata_list) {
925 + g_list_free(vmar->cdata_list);
929 + for (i = 1; i < 256; i++) {
930 + if (vmar->rstate[i].bitmap) {
931 + g_free(vmar->rstate[i].bitmap);
935 + if (vmar->md5csum) {
936 + g_checksum_free(vmar->md5csum);
939 + if (vmar->blob_hash) {
940 + g_hash_table_destroy(vmar->blob_hash);
943 + if (vmar->head_data) {
944 + g_free(vmar->head_data);
951 +static int vma_reader_read_head(VmaReader *vmar, Error **errp)
955 + assert(*errp == NULL);
957 + unsigned char md5sum[16];
961 + vmar->head_data = g_malloc(sizeof(VmaHeader));
963 + if (full_read(vmar->fd, vmar->head_data, sizeof(VmaHeader)) !=
964 + sizeof(VmaHeader)) {
965 + error_setg(errp, "can't read vma header - %s",
966 + errno ? g_strerror(errno) : "got EOF");
970 + VmaHeader *h = (VmaHeader *)vmar->head_data;
972 + if (h->magic != VMA_MAGIC) {
973 + error_setg(errp, "not a vma file - wrong magic number");
977 + uint32_t header_size = GUINT32_FROM_BE(h->header_size);
978 + int need = header_size - sizeof(VmaHeader);
980 + error_setg(errp, "wrong vma header size %d", header_size);
984 + vmar->head_data = g_realloc(vmar->head_data, header_size);
985 + h = (VmaHeader *)vmar->head_data;
987 + if (full_read(vmar->fd, vmar->head_data + sizeof(VmaHeader), need) !=
989 + error_setg(errp, "can't read vma header data - %s",
990 + errno ? g_strerror(errno) : "got EOF");
994 + memcpy(md5sum, h->md5sum, 16);
995 + memset(h->md5sum, 0, 16);
997 + g_checksum_reset(vmar->md5csum);
998 + g_checksum_update(vmar->md5csum, vmar->head_data, header_size);
1000 + g_checksum_get_digest(vmar->md5csum, (guint8 *)(h->md5sum), &csize);
1002 + if (memcmp(md5sum, h->md5sum, 16) != 0) {
1003 + error_setg(errp, "wrong vma header checksum");
1007 + /* we can modify header data after checksum verify */
1008 + h->header_size = header_size;
1010 + h->version = GUINT32_FROM_BE(h->version);
1011 + if (h->version != 1) {
1012 + error_setg(errp, "wrong vma version %d", h->version);
1016 + h->ctime = GUINT64_FROM_BE(h->ctime);
1017 + h->blob_buffer_offset = GUINT32_FROM_BE(h->blob_buffer_offset);
1018 + h->blob_buffer_size = GUINT32_FROM_BE(h->blob_buffer_size);
1020 + uint32_t bstart = h->blob_buffer_offset + 1;
1021 + uint32_t bend = h->blob_buffer_offset + h->blob_buffer_size;
1023 + if (bstart <= sizeof(VmaHeader)) {
1024 + error_setg(errp, "wrong vma blob buffer offset %d",
1025 + h->blob_buffer_offset);
1029 + if (bend > header_size) {
1030 + error_setg(errp, "wrong vma blob buffer size %d/%d",
1031 + h->blob_buffer_offset, h->blob_buffer_size);
1035 + while ((bstart + 2) <= bend) {
1036 + uint32_t size = vmar->head_data[bstart] +
1037 + (vmar->head_data[bstart+1] << 8);
1038 + if ((bstart + size + 2) <= bend) {
1039 + VmaBlob *blob = g_new0(VmaBlob, 1);
1040 + blob->start = bstart - h->blob_buffer_offset;
1042 + blob->data = vmar->head_data + bstart + 2;
1043 + g_hash_table_insert(vmar->blob_hash, &blob->start, blob);
1045 + bstart += size + 2;
1050 + for (i = 1; i < 256; i++) {
1051 + VmaDeviceInfoHeader *dih = &h->dev_info[i];
1052 + uint32_t devname_ptr = GUINT32_FROM_BE(dih->devname_ptr);
1053 + uint64_t size = GUINT64_FROM_BE(dih->size);
1054 + const char *devname = get_header_str(vmar, devname_ptr);
1056 + if (size && devname) {
1058 + vmar->devinfo[i].size = size;
1059 + vmar->devinfo[i].devname = devname;
1061 + if (strcmp(devname, "vmstate") == 0) {
1062 + vmar->vmstate_stream = i;
1067 + for (i = 0; i < VMA_MAX_CONFIGS; i++) {
1068 + uint32_t name_ptr = GUINT32_FROM_BE(h->config_names[i]);
1069 + uint32_t data_ptr = GUINT32_FROM_BE(h->config_data[i]);
1071 + if (!(name_ptr && data_ptr)) {
1074 + const char *name = get_header_str(vmar, name_ptr);
1075 + const VmaBlob *blob = get_header_blob(vmar, data_ptr);
1077 + if (!(name && blob)) {
1078 + error_setg(errp, "vma contains invalid data pointers");
1082 + VmaConfigData *cdata = g_new0(VmaConfigData, 1);
1083 + cdata->name = name;
1084 + cdata->data = blob->data;
1085 + cdata->len = blob->len;
1087 + vmar->cdata_list = g_list_append(vmar->cdata_list, cdata);
1093 +VmaReader *vma_reader_create(const char *filename, Error **errp)
1098 + VmaReader *vmar = g_new0(VmaReader, 1);
1100 + if (strcmp(filename, "-") == 0) {
1101 + vmar->fd = dup(0);
1103 + vmar->fd = open(filename, O_RDONLY);
1106 + if (vmar->fd < 0) {
1107 + error_setg(errp, "can't open file %s - %s\n", filename,
1108 + g_strerror(errno));
1112 + vmar->md5csum = g_checksum_new(G_CHECKSUM_MD5);
1113 + if (!vmar->md5csum) {
1114 + error_setg(errp, "can't allocate checksum\n");
1118 + vmar->blob_hash = g_hash_table_new_full(g_int32_hash, g_int32_equal,
1121 + if (vma_reader_read_head(vmar, errp) < 0) {
1129 + vma_reader_destroy(vmar);
1135 +VmaHeader *vma_reader_get_header(VmaReader *vmar)
1138 + assert(vmar->head_data);
1140 + return (VmaHeader *)(vmar->head_data);
1143 +GList *vma_reader_get_config_data(VmaReader *vmar)
1146 + assert(vmar->head_data);
1148 + return vmar->cdata_list;
1151 +VmaDeviceInfo *vma_reader_get_device_info(VmaReader *vmar, guint8 dev_id)
1156 + if (vmar->devinfo[dev_id].size && vmar->devinfo[dev_id].devname) {
1157 + return &vmar->devinfo[dev_id];
1163 +static void allocate_rstate(VmaReader *vmar, guint8 dev_id,
1164 + BlockBackend *target, bool write_zeroes)
1169 + vmar->rstate[dev_id].target = target;
1170 + vmar->rstate[dev_id].write_zeroes = write_zeroes;
1172 + int64_t size = vmar->devinfo[dev_id].size;
1174 + int64_t bitmap_size = (size/BDRV_SECTOR_SIZE) +
1175 + (VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE) * BITS_PER_LONG - 1;
1176 + bitmap_size /= (VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE) * BITS_PER_LONG;
1178 + vmar->rstate[dev_id].bitmap_size = bitmap_size;
1179 + vmar->rstate[dev_id].bitmap = g_new0(unsigned long, bitmap_size);
1181 + vmar->cluster_count += size/VMA_CLUSTER_SIZE;
1184 +int vma_reader_register_bs(VmaReader *vmar, guint8 dev_id, BlockBackend *target,
1185 + bool write_zeroes, Error **errp)
1188 + assert(target != NULL);
1190 + assert(vmar->rstate[dev_id].target == NULL);
1192 + int64_t size = blk_getlength(target);
1193 + int64_t size_diff = size - vmar->devinfo[dev_id].size;
1195 + /* storage types can have different size restrictions, so it
1196 + * is not always possible to create an image with exact size.
1197 + * So we tolerate a size difference up to 4MB.
1199 + if ((size_diff < 0) || (size_diff > 4*1024*1024)) {
1200 + error_setg(errp, "vma_reader_register_bs for stream %s failed - "
1201 + "unexpected size %zd != %zd", vmar->devinfo[dev_id].devname,
1202 + size, vmar->devinfo[dev_id].size);
1206 + allocate_rstate(vmar, dev_id, target, write_zeroes);
1211 +static ssize_t safe_write(int fd, void *buf, size_t count)
1216 + n = write(fd, buf, count);
1217 + } while (n < 0 && errno == EINTR);
1222 +static size_t full_write(int fd, void *buf, size_t len)
1230 + n = safe_write(fd, buf, len);
1240 + /* incomplete write ? */
1247 +static int restore_write_data(VmaReader *vmar, guint8 dev_id,
1248 + BlockBackend *target, int vmstate_fd,
1249 + unsigned char *buf, int64_t sector_num,
1250 + int nb_sectors, Error **errp)
1254 + if (dev_id == vmar->vmstate_stream) {
1255 + if (vmstate_fd >= 0) {
1256 + int len = nb_sectors * BDRV_SECTOR_SIZE;
1257 + int res = full_write(vmstate_fd, buf, len);
1259 + error_setg(errp, "write vmstate failed %d", res);
1264 + int res = blk_pwrite(target, sector_num * BDRV_SECTOR_SIZE, buf, nb_sectors * BDRV_SECTOR_SIZE, 0);
1266 + error_setg(errp, "blk_pwrite to %s failed (%d)",
1267 + bdrv_get_device_name(blk_bs(target)), res);
1274 +static int restore_extent(VmaReader *vmar, unsigned char *buf,
1275 + int extent_size, int vmstate_fd,
1276 + bool verbose, bool verify, Error **errp)
1281 + VmaExtentHeader *ehead = (VmaExtentHeader *)buf;
1282 + int start = VMA_EXTENT_HEADER_SIZE;
1285 + for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
1286 + uint64_t block_info = GUINT64_FROM_BE(ehead->blockinfo[i]);
1287 + uint64_t cluster_num = block_info & 0xffffffff;
1288 + uint8_t dev_id = (block_info >> 32) & 0xff;
1289 + uint16_t mask = block_info >> (32+16);
1290 + int64_t max_sector;
1296 + VmaRestoreState *rstate = &vmar->rstate[dev_id];
1297 + BlockBackend *target = NULL;
1299 + if (dev_id != vmar->vmstate_stream) {
1300 + target = rstate->target;
1301 + if (!verify && !target) {
1302 + error_setg(errp, "got wrong dev id %d", dev_id);
1306 + if (vma_reader_get_bitmap(rstate, cluster_num)) {
1307 + error_setg(errp, "found duplicated cluster %zd for stream %s",
1308 + cluster_num, vmar->devinfo[dev_id].devname);
1311 + vma_reader_set_bitmap(rstate, cluster_num, 1);
1313 + max_sector = vmar->devinfo[dev_id].size/BDRV_SECTOR_SIZE;
1315 + max_sector = G_MAXINT64;
1316 + if (cluster_num != vmar->vmstate_clusters) {
1317 + error_setg(errp, "found out of order vmstate data");
1320 + vmar->vmstate_clusters++;
1323 + vmar->clusters_read++;
1326 + time_t duration = time(NULL) - vmar->start_time;
1327 + int percent = (vmar->clusters_read*100)/vmar->cluster_count;
1328 + if (percent != vmar->clusters_read_per) {
1329 + printf("progress %d%% (read %zd bytes, duration %zd sec)\n",
1330 + percent, vmar->clusters_read*VMA_CLUSTER_SIZE,
1333 + vmar->clusters_read_per = percent;
1337 + /* try to write whole clusters to speedup restore */
1338 + if (mask == 0xffff) {
1339 + if ((start + VMA_CLUSTER_SIZE) > extent_size) {
1340 + error_setg(errp, "short vma extent - too many blocks");
1343 + int64_t sector_num = (cluster_num * VMA_CLUSTER_SIZE) /
1345 + int64_t end_sector = sector_num +
1346 + VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE;
1348 + if (end_sector > max_sector) {
1349 + end_sector = max_sector;
1352 + if (end_sector <= sector_num) {
1353 + error_setg(errp, "got wrong block address - write beyond end");
1358 + int nb_sectors = end_sector - sector_num;
1359 + if (restore_write_data(vmar, dev_id, target, vmstate_fd,
1360 + buf + start, sector_num, nb_sectors,
1366 + start += VMA_CLUSTER_SIZE;
1371 + for (j = 0; j < 16; j++) {
1372 + int64_t sector_num = (cluster_num*VMA_CLUSTER_SIZE +
1373 + j*VMA_BLOCK_SIZE)/BDRV_SECTOR_SIZE;
1375 + int64_t end_sector = sector_num +
1376 + VMA_BLOCK_SIZE/BDRV_SECTOR_SIZE;
1377 + if (end_sector > max_sector) {
1378 + end_sector = max_sector;
1382 + if ((start + VMA_BLOCK_SIZE) > extent_size) {
1383 + error_setg(errp, "short vma extent - too many blocks");
1387 + if (end_sector <= sector_num) {
1388 + error_setg(errp, "got wrong block address - "
1389 + "write beyond end");
1394 + int nb_sectors = end_sector - sector_num;
1395 + if (restore_write_data(vmar, dev_id, target, vmstate_fd,
1396 + buf + start, sector_num,
1397 + nb_sectors, errp) < 0) {
1402 + start += VMA_BLOCK_SIZE;
1407 + if (end_sector > sector_num) {
1408 + /* Todo: use bdrv_co_write_zeroes (but that needs to
1409 + * be run inside coroutine?)
1411 + int nb_sectors = end_sector - sector_num;
1412 + int zero_size = BDRV_SECTOR_SIZE*nb_sectors;
1413 + vmar->zero_cluster_data += zero_size;
1415 + vmar->partial_zero_cluster_data += zero_size;
1418 + if (rstate->write_zeroes && !verify) {
1419 + if (restore_write_data(vmar, dev_id, target, vmstate_fd,
1420 + zero_vma_block, sector_num,
1421 + nb_sectors, errp) < 0) {
1433 + if (start != extent_size) {
1434 + error_setg(errp, "vma extent error - missing blocks");
1441 +static int vma_reader_restore_full(VmaReader *vmar, int vmstate_fd,
1442 + bool verbose, bool verify,
1446 + assert(vmar->head_data);
1449 + unsigned char buf[VMA_MAX_EXTENT_SIZE];
1451 + unsigned char md5sum[16];
1452 + VmaHeader *h = (VmaHeader *)vmar->head_data;
1454 + vmar->start_time = time(NULL);
1457 + int bytes = full_read(vmar->fd, buf + buf_pos, sizeof(buf) - buf_pos);
1459 + error_setg(errp, "read failed - %s", g_strerror(errno));
1469 + if (buf_pos < VMA_EXTENT_HEADER_SIZE) {
1470 + error_setg(errp, "read short extent (%d bytes)", buf_pos);
1474 + VmaExtentHeader *ehead = (VmaExtentHeader *)buf;
1476 + /* extract md5sum */
1477 + memcpy(md5sum, ehead->md5sum, sizeof(ehead->md5sum));
1478 + memset(ehead->md5sum, 0, sizeof(ehead->md5sum));
1480 + g_checksum_reset(vmar->md5csum);
1481 + g_checksum_update(vmar->md5csum, buf, VMA_EXTENT_HEADER_SIZE);
1483 + g_checksum_get_digest(vmar->md5csum, ehead->md5sum, &csize);
1485 + if (memcmp(md5sum, ehead->md5sum, 16) != 0) {
1486 + error_setg(errp, "wrong vma extent header chechsum");
1490 + if (memcmp(h->uuid, ehead->uuid, sizeof(ehead->uuid)) != 0) {
1491 + error_setg(errp, "wrong vma extent uuid");
1495 + if (ehead->magic != VMA_EXTENT_MAGIC || ehead->reserved1 != 0) {
1496 + error_setg(errp, "wrong vma extent header magic");
1500 + int block_count = GUINT16_FROM_BE(ehead->block_count);
1501 + int extent_size = VMA_EXTENT_HEADER_SIZE + block_count*VMA_BLOCK_SIZE;
1503 + if (buf_pos < extent_size) {
1504 + error_setg(errp, "short vma extent (%d < %d)", buf_pos,
1509 + if (restore_extent(vmar, buf, extent_size, vmstate_fd, verbose,
1510 + verify, errp) < 0) {
1514 + if (buf_pos > extent_size) {
1515 + memmove(buf, buf + extent_size, buf_pos - extent_size);
1516 + buf_pos = buf_pos - extent_size;
1525 + for (i = 1; i < 256; i++) {
1526 + VmaRestoreState *rstate = &vmar->rstate[i];
1527 + if (!rstate->target) {
1531 + if (blk_flush(rstate->target) < 0) {
1532 + error_setg(errp, "vma blk_flush %s failed",
1533 + vmar->devinfo[i].devname);
1537 + if (vmar->devinfo[i].size &&
1538 + (strcmp(vmar->devinfo[i].devname, "vmstate") != 0)) {
1539 + assert(rstate->bitmap);
1541 + int64_t cluster_num, end;
1543 + end = (vmar->devinfo[i].size + VMA_CLUSTER_SIZE - 1) /
1546 + for (cluster_num = 0; cluster_num < end; cluster_num++) {
1547 + if (!vma_reader_get_bitmap(rstate, cluster_num)) {
1548 + error_setg(errp, "detected missing cluster %zd "
1549 + "for stream %s", cluster_num,
1550 + vmar->devinfo[i].devname);
1558 + if (vmar->clusters_read) {
1559 + printf("total bytes read %zd, sparse bytes %zd (%.3g%%)\n",
1560 + vmar->clusters_read*VMA_CLUSTER_SIZE,
1561 + vmar->zero_cluster_data,
1562 + (double)(100.0*vmar->zero_cluster_data)/
1563 + (vmar->clusters_read*VMA_CLUSTER_SIZE));
1565 + int64_t datasize = vmar->clusters_read*VMA_CLUSTER_SIZE-vmar->zero_cluster_data;
1566 + if (datasize) { // this does not make sense for empty files
1567 + printf("space reduction due to 4K zero blocks %.3g%%\n",
1568 + (double)(100.0*vmar->partial_zero_cluster_data) / datasize);
1571 + printf("vma archive contains no image data\n");
1577 +int vma_reader_restore(VmaReader *vmar, int vmstate_fd, bool verbose,
1580 + return vma_reader_restore_full(vmar, vmstate_fd, verbose, false, errp);
1583 +int vma_reader_verify(VmaReader *vmar, bool verbose, Error **errp)
1587 + for (dev_id = 1; dev_id < 255; dev_id++) {
1588 + if (vma_reader_get_device_info(vmar, dev_id)) {
1589 + allocate_rstate(vmar, dev_id, NULL, false);
1593 + return vma_reader_restore_full(vmar, -1, verbose, true, errp);
1596 diff --git a/vma-writer.c b/vma-writer.c
1597 new file mode 100644
1598 index 0000000000..fd9567634d
1603 + * VMA: Virtual Machine Archive
1605 + * Copyright (C) 2012 Proxmox Server Solutions
1608 + * Dietmar Maurer (dietmar@proxmox.com)
1610 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
1611 + * See the COPYING file in the top-level directory.
1615 +#include "qemu/osdep.h"
1617 +#include <uuid/uuid.h>
1620 +#include "block/block.h"
1621 +#include "monitor/monitor.h"
1622 +#include "qemu/main-loop.h"
1623 +#include "qemu/coroutine.h"
1624 +#include "qemu/cutils.h"
1626 +#define DEBUG_VMA 0
1628 +#define DPRINTF(fmt, ...)\
1629 + do { if (DEBUG_VMA) { printf("vma: " fmt, ## __VA_ARGS__); } } while (0)
1631 +#define WRITE_BUFFERS 5
1632 +#define HEADER_CLUSTERS 8
1633 +#define HEADERBUF_SIZE (VMA_CLUSTER_SIZE*HEADER_CLUSTERS)
1639 + char errmsg[8192];
1641 + bool header_written;
1644 + /* we always write extents */
1645 + unsigned char *outbuf;
1646 + int outbuf_pos; /* in bytes */
1647 + int outbuf_count; /* in VMA_BLOCKS */
1648 + uint64_t outbuf_block_info[VMA_BLOCKS_PER_EXTENT];
1650 + unsigned char *headerbuf;
1652 + GChecksum *md5csum;
1653 + CoMutex flush_lock;
1654 + Coroutine *co_writer;
1656 + /* drive information */
1657 + VmaStreamInfo stream_info[256];
1658 + guint stream_count;
1660 + guint8 vmstate_stream;
1661 + uint32_t vmstate_clusters;
1663 + /* header blob table */
1664 + char *header_blob_table;
1665 + uint32_t header_blob_table_size;
1666 + uint32_t header_blob_table_pos;
1668 + /* store for config blobs */
1669 + uint32_t config_names[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
1670 + uint32_t config_data[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
1671 + uint32_t config_count;
1674 +void vma_writer_set_error(VmaWriter *vmaw, const char *fmt, ...)
1678 + if (vmaw->status < 0) {
1682 + vmaw->status = -1;
1684 + va_start(ap, fmt);
1685 + g_vsnprintf(vmaw->errmsg, sizeof(vmaw->errmsg), fmt, ap);
1688 + DPRINTF("vma_writer_set_error: %s\n", vmaw->errmsg);
1691 +static uint32_t allocate_header_blob(VmaWriter *vmaw, const char *data,
1694 + if (len > 65535) {
1698 + if (!vmaw->header_blob_table ||
1699 + (vmaw->header_blob_table_size <
1700 + (vmaw->header_blob_table_pos + len + 2))) {
1701 + int newsize = vmaw->header_blob_table_size + ((len + 2 + 511)/512)*512;
1703 + vmaw->header_blob_table = g_realloc(vmaw->header_blob_table, newsize);
1704 + memset(vmaw->header_blob_table + vmaw->header_blob_table_size,
1705 + 0, newsize - vmaw->header_blob_table_size);
1706 + vmaw->header_blob_table_size = newsize;
1709 + uint32_t cpos = vmaw->header_blob_table_pos;
1710 + vmaw->header_blob_table[cpos] = len & 255;
1711 + vmaw->header_blob_table[cpos+1] = (len >> 8) & 255;
1712 + memcpy(vmaw->header_blob_table + cpos + 2, data, len);
1713 + vmaw->header_blob_table_pos += len + 2;
1717 +static uint32_t allocate_header_string(VmaWriter *vmaw, const char *str)
1721 + size_t len = strlen(str) + 1;
1723 + return allocate_header_blob(vmaw, str, len);
1726 +int vma_writer_add_config(VmaWriter *vmaw, const char *name, gpointer data,
1730 + assert(!vmaw->header_written);
1731 + assert(vmaw->config_count < VMA_MAX_CONFIGS);
1735 + gchar *basename = g_path_get_basename(name);
1736 + uint32_t name_ptr = allocate_header_string(vmaw, basename);
1743 + uint32_t data_ptr = allocate_header_blob(vmaw, data, len);
1748 + vmaw->config_names[vmaw->config_count] = name_ptr;
1749 + vmaw->config_data[vmaw->config_count] = data_ptr;
1751 + vmaw->config_count++;
1756 +int vma_writer_register_stream(VmaWriter *vmaw, const char *devname,
1761 + assert(!vmaw->status);
1763 + if (vmaw->header_written) {
1764 + vma_writer_set_error(vmaw, "vma_writer_register_stream: header "
1765 + "already written");
1769 + guint n = vmaw->stream_count + 1;
1771 + /* we can have dev_ids from 1 to 255 (0 reserved)
1772 + * 255(-1) reserved for safety
1775 + vma_writer_set_error(vmaw, "vma_writer_register_stream: "
1776 + "too many drives");
1781 + vma_writer_set_error(vmaw, "vma_writer_register_stream: "
1782 + "got strange size %zd", size);
1786 + DPRINTF("vma_writer_register_stream %s %zu %d\n", devname, size, n);
1788 + vmaw->stream_info[n].devname = g_strdup(devname);
1789 + vmaw->stream_info[n].size = size;
1791 + vmaw->stream_info[n].cluster_count = (size + VMA_CLUSTER_SIZE - 1) /
1794 + vmaw->stream_count = n;
1796 + if (strcmp(devname, "vmstate") == 0) {
1797 + vmaw->vmstate_stream = n;
1803 +static void vma_co_continue_write(void *opaque)
1805 + VmaWriter *vmaw = opaque;
1807 + DPRINTF("vma_co_continue_write\n");
1808 + qemu_coroutine_enter(vmaw->co_writer);
1811 +static ssize_t coroutine_fn
1812 +vma_queue_write(VmaWriter *vmaw, const void *buf, size_t bytes)
1814 + DPRINTF("vma_queue_write enter %zd\n", bytes);
1818 + assert(bytes <= VMA_MAX_EXTENT_SIZE);
1823 + assert(vmaw->co_writer == NULL);
1825 + vmaw->co_writer = qemu_coroutine_self();
1827 + while (done < bytes) {
1828 + aio_set_fd_handler(qemu_get_aio_context(), vmaw->fd, false, NULL, vma_co_continue_write, NULL, vmaw);
1829 + qemu_coroutine_yield();
1830 + aio_set_fd_handler(qemu_get_aio_context(), vmaw->fd, false, NULL, NULL, NULL, NULL);
1831 + if (vmaw->status < 0) {
1832 + DPRINTF("vma_queue_write detected canceled backup\n");
1836 + ret = write(vmaw->fd, buf + done, bytes - done);
1839 + DPRINTF("vma_queue_write written %zd %zd\n", done, ret);
1840 + } else if (ret < 0) {
1841 + if (errno == EAGAIN || errno == EWOULDBLOCK) {
1844 + vma_writer_set_error(vmaw, "vma_queue_write: write error - %s",
1845 + g_strerror(errno));
1846 + done = -1; /* always return failure for partial writes */
1849 + } else if (ret == 0) {
1850 + /* should not happen - simply try again */
1854 + vmaw->co_writer = NULL;
1856 + return (done == bytes) ? bytes : -1;
1859 +VmaWriter *vma_writer_create(const char *filename, uuid_t uuid, Error **errp)
1863 + assert(sizeof(VmaHeader) == (4096 + 8192));
1864 + assert(G_STRUCT_OFFSET(VmaHeader, config_names) == 2044);
1865 + assert(G_STRUCT_OFFSET(VmaHeader, config_data) == 3068);
1866 + assert(G_STRUCT_OFFSET(VmaHeader, dev_info) == 4096);
1867 + assert(sizeof(VmaExtentHeader) == 512);
1869 + VmaWriter *vmaw = g_new0(VmaWriter, 1);
1872 + vmaw->md5csum = g_checksum_new(G_CHECKSUM_MD5);
1873 + if (!vmaw->md5csum) {
1874 + error_setg(errp, "can't allocate cmsum\n");
1878 + if (strstart(filename, "exec:", &p)) {
1879 + vmaw->cmd = popen(p, "w");
1880 + if (vmaw->cmd == NULL) {
1881 + error_setg(errp, "can't popen command '%s' - %s\n", p,
1882 + g_strerror(errno));
1885 + vmaw->fd = fileno(vmaw->cmd);
1887 + /* try to use O_NONBLOCK */
1888 + fcntl(vmaw->fd, F_SETFL, fcntl(vmaw->fd, F_GETFL)|O_NONBLOCK);
1893 + const char *tmp_id_str;
1895 + if ((stat(filename, &st) == 0) && S_ISFIFO(st.st_mode)) {
1896 + oflags = O_NONBLOCK|O_WRONLY;
1897 + vmaw->fd = qemu_open(filename, oflags, 0644);
1898 + } else if (strstart(filename, "/dev/fdset/", &tmp_id_str)) {
1899 + oflags = O_NONBLOCK|O_WRONLY;
1900 + vmaw->fd = qemu_open(filename, oflags, 0644);
1901 + } else if (strstart(filename, "/dev/fdname/", &tmp_id_str)) {
1902 + vmaw->fd = monitor_get_fd(cur_mon, tmp_id_str, errp);
1903 + if (vmaw->fd < 0) {
1906 + /* try to use O_NONBLOCK */
1907 + fcntl(vmaw->fd, F_SETFL, fcntl(vmaw->fd, F_GETFL)|O_NONBLOCK);
1909 + oflags = O_NONBLOCK|O_DIRECT|O_WRONLY|O_CREAT|O_EXCL;
1910 + vmaw->fd = qemu_open(filename, oflags, 0644);
1913 + if (vmaw->fd < 0) {
1914 + error_setg(errp, "can't open file %s - %s\n", filename,
1915 + g_strerror(errno));
1920 + /* we use O_DIRECT, so we need to align IO buffers */
1922 + vmaw->outbuf = qemu_memalign(512, VMA_MAX_EXTENT_SIZE);
1923 + vmaw->headerbuf = qemu_memalign(512, HEADERBUF_SIZE);
1925 + vmaw->outbuf_count = 0;
1926 + vmaw->outbuf_pos = VMA_EXTENT_HEADER_SIZE;
1928 + vmaw->header_blob_table_pos = 1; /* start at pos 1 */
1930 + qemu_co_mutex_init(&vmaw->flush_lock);
1932 + uuid_copy(vmaw->uuid, uuid);
1939 + pclose(vmaw->cmd);
1940 + } else if (vmaw->fd >= 0) {
1944 + if (vmaw->md5csum) {
1945 + g_checksum_free(vmaw->md5csum);
1954 +static int coroutine_fn vma_write_header(VmaWriter *vmaw)
1957 + unsigned char *buf = vmaw->headerbuf;
1958 + VmaHeader *head = (VmaHeader *)buf;
1962 + DPRINTF("VMA WRITE HEADER\n");
1964 + if (vmaw->status < 0) {
1965 + return vmaw->status;
1968 + memset(buf, 0, HEADERBUF_SIZE);
1970 + head->magic = VMA_MAGIC;
1971 + head->version = GUINT32_TO_BE(1); /* v1 */
1972 + memcpy(head->uuid, vmaw->uuid, 16);
1974 + time_t ctime = time(NULL);
1975 + head->ctime = GUINT64_TO_BE(ctime);
1977 + for (i = 0; i < VMA_MAX_CONFIGS; i++) {
1978 + head->config_names[i] = GUINT32_TO_BE(vmaw->config_names[i]);
1979 + head->config_data[i] = GUINT32_TO_BE(vmaw->config_data[i]);
1982 + /* 32 bytes per device (12 used currently) = 8192 bytes max */
1983 + for (i = 1; i <= 254; i++) {
1984 + VmaStreamInfo *si = &vmaw->stream_info[i];
1986 + assert(si->devname);
1987 + uint32_t devname_ptr = allocate_header_string(vmaw, si->devname);
1988 + if (!devname_ptr) {
1991 + head->dev_info[i].devname_ptr = GUINT32_TO_BE(devname_ptr);
1992 + head->dev_info[i].size = GUINT64_TO_BE(si->size);
1996 + uint32_t header_size = sizeof(VmaHeader) + vmaw->header_blob_table_size;
1997 + head->header_size = GUINT32_TO_BE(header_size);
1999 + if (header_size > HEADERBUF_SIZE) {
2000 + return -1; /* just to be sure */
2003 + uint32_t blob_buffer_offset = sizeof(VmaHeader);
2004 + memcpy(buf + blob_buffer_offset, vmaw->header_blob_table,
2005 + vmaw->header_blob_table_size);
2006 + head->blob_buffer_offset = GUINT32_TO_BE(blob_buffer_offset);
2007 + head->blob_buffer_size = GUINT32_TO_BE(vmaw->header_blob_table_pos);
2009 + g_checksum_reset(vmaw->md5csum);
2010 + g_checksum_update(vmaw->md5csum, (const guchar *)buf, header_size);
2012 + g_checksum_get_digest(vmaw->md5csum, (guint8 *)(head->md5sum), &csize);
2014 + return vma_queue_write(vmaw, buf, header_size);
2017 +static int coroutine_fn vma_writer_flush(VmaWriter *vmaw)
2024 + if (vmaw->status < 0) {
2025 + return vmaw->status;
2028 + if (!vmaw->header_written) {
2029 + vmaw->header_written = true;
2030 + ret = vma_write_header(vmaw);
2032 + vma_writer_set_error(vmaw, "vma_writer_flush: write header failed");
2037 + DPRINTF("VMA WRITE FLUSH %d %d\n", vmaw->outbuf_count, vmaw->outbuf_pos);
2040 + VmaExtentHeader *ehead = (VmaExtentHeader *)vmaw->outbuf;
2042 + ehead->magic = VMA_EXTENT_MAGIC;
2043 + ehead->reserved1 = 0;
2045 + for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
2046 + ehead->blockinfo[i] = GUINT64_TO_BE(vmaw->outbuf_block_info[i]);
2049 + guint16 block_count = (vmaw->outbuf_pos - VMA_EXTENT_HEADER_SIZE) /
2052 + ehead->block_count = GUINT16_TO_BE(block_count);
2054 + memcpy(ehead->uuid, vmaw->uuid, sizeof(ehead->uuid));
2055 + memset(ehead->md5sum, 0, sizeof(ehead->md5sum));
2057 + g_checksum_reset(vmaw->md5csum);
2058 + g_checksum_update(vmaw->md5csum, vmaw->outbuf, VMA_EXTENT_HEADER_SIZE);
2060 + g_checksum_get_digest(vmaw->md5csum, ehead->md5sum, &csize);
2062 + int bytes = vmaw->outbuf_pos;
2063 + ret = vma_queue_write(vmaw, vmaw->outbuf, bytes);
2064 + if (ret != bytes) {
2065 + vma_writer_set_error(vmaw, "vma_writer_flush: failed write");
2068 + vmaw->outbuf_count = 0;
2069 + vmaw->outbuf_pos = VMA_EXTENT_HEADER_SIZE;
2071 + for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
2072 + vmaw->outbuf_block_info[i] = 0;
2075 + return vmaw->status;
2078 +static int vma_count_open_streams(VmaWriter *vmaw)
2080 + g_assert(vmaw != NULL);
2083 + int open_drives = 0;
2084 + for (i = 0; i <= 255; i++) {
2085 + if (vmaw->stream_info[i].size && !vmaw->stream_info[i].finished) {
2090 + return open_drives;
2095 + * You need to call this if the vma archive does not contain
2096 + * any data stream.
2099 +vma_writer_flush_output(VmaWriter *vmaw)
2101 + qemu_co_mutex_lock(&vmaw->flush_lock);
2102 + int ret = vma_writer_flush(vmaw);
2103 + qemu_co_mutex_unlock(&vmaw->flush_lock);
2105 + vma_writer_set_error(vmaw, "vma_writer_flush_header failed");
2111 + * all jobs should call this when there is no more data
2112 + * Returns: number of remaining stream (0 ==> finished)
2115 +vma_writer_close_stream(VmaWriter *vmaw, uint8_t dev_id)
2117 + g_assert(vmaw != NULL);
2119 + DPRINTF("vma_writer_set_status %d\n", dev_id);
2120 + if (!vmaw->stream_info[dev_id].size) {
2121 + vma_writer_set_error(vmaw, "vma_writer_close_stream: "
2122 + "no such stream %d", dev_id);
2125 + if (vmaw->stream_info[dev_id].finished) {
2126 + vma_writer_set_error(vmaw, "vma_writer_close_stream: "
2127 + "stream already closed %d", dev_id);
2131 + vmaw->stream_info[dev_id].finished = true;
2133 + int open_drives = vma_count_open_streams(vmaw);
2135 + if (open_drives <= 0) {
2136 + DPRINTF("vma_writer_set_status all drives completed\n");
2137 + vma_writer_flush_output(vmaw);
2140 + return open_drives;
2143 +int vma_writer_get_status(VmaWriter *vmaw, VmaStatus *status)
2147 + g_assert(vmaw != NULL);
2150 + status->status = vmaw->status;
2151 + g_strlcpy(status->errmsg, vmaw->errmsg, sizeof(status->errmsg));
2152 + for (i = 0; i <= 255; i++) {
2153 + status->stream_info[i] = vmaw->stream_info[i];
2156 + uuid_unparse_lower(vmaw->uuid, status->uuid_str);
2159 + status->closed = vmaw->closed;
2161 + return vmaw->status;
2164 +static int vma_writer_get_buffer(VmaWriter *vmaw)
2168 + qemu_co_mutex_lock(&vmaw->flush_lock);
2170 + /* wait until buffer is available */
2171 + while (vmaw->outbuf_count >= (VMA_BLOCKS_PER_EXTENT - 1)) {
2172 + ret = vma_writer_flush(vmaw);
2174 + vma_writer_set_error(vmaw, "vma_writer_get_buffer: flush failed");
2179 + qemu_co_mutex_unlock(&vmaw->flush_lock);
2185 +int64_t coroutine_fn
2186 +vma_writer_write(VmaWriter *vmaw, uint8_t dev_id, int64_t cluster_num,
2187 + const unsigned char *buf, size_t *zero_bytes)
2189 + g_assert(vmaw != NULL);
2190 + g_assert(zero_bytes != NULL);
2194 + if (vmaw->status < 0) {
2195 + return vmaw->status;
2198 + if (!dev_id || !vmaw->stream_info[dev_id].size) {
2199 + vma_writer_set_error(vmaw, "vma_writer_write: "
2200 + "no such stream %d", dev_id);
2204 + if (vmaw->stream_info[dev_id].finished) {
2205 + vma_writer_set_error(vmaw, "vma_writer_write: "
2206 + "stream already closed %d", dev_id);
2211 + if (cluster_num >= (((uint64_t)1)<<32)) {
2212 + vma_writer_set_error(vmaw, "vma_writer_write: "
2213 + "cluster number out of range");
2217 + if (dev_id == vmaw->vmstate_stream) {
2218 + if (cluster_num != vmaw->vmstate_clusters) {
2219 + vma_writer_set_error(vmaw, "vma_writer_write: "
2220 + "non sequential vmstate write");
2222 + vmaw->vmstate_clusters++;
2223 + } else if (cluster_num >= vmaw->stream_info[dev_id].cluster_count) {
2224 + vma_writer_set_error(vmaw, "vma_writer_write: cluster number too big");
2228 + /* wait until buffer is available */
2229 + if (vma_writer_get_buffer(vmaw) < 0) {
2230 + vma_writer_set_error(vmaw, "vma_writer_write: "
2231 + "vma_writer_get_buffer failed");
2235 + DPRINTF("VMA WRITE %d %zd\n", dev_id, cluster_num);
2237 + uint16_t mask = 0;
2242 + for (i = 0; i < 16; i++) {
2243 + const unsigned char *vmablock = buf + (i*VMA_BLOCK_SIZE);
2244 + if (!buffer_is_zero(vmablock, VMA_BLOCK_SIZE)) {
2246 + memcpy(vmaw->outbuf + vmaw->outbuf_pos, vmablock,
2248 + vmaw->outbuf_pos += VMA_BLOCK_SIZE;
2250 + DPRINTF("VMA WRITE %zd ZERO BLOCK %d\n", cluster_num, i);
2251 + vmaw->stream_info[dev_id].zero_bytes += VMA_BLOCK_SIZE;
2252 + *zero_bytes += VMA_BLOCK_SIZE;
2258 + DPRINTF("VMA WRITE %zd ZERO CLUSTER\n", cluster_num);
2259 + vmaw->stream_info[dev_id].zero_bytes += VMA_CLUSTER_SIZE;
2260 + *zero_bytes += VMA_CLUSTER_SIZE;
2263 + uint64_t block_info = ((uint64_t)mask) << (32+16);
2264 + block_info |= ((uint64_t)dev_id) << 32;
2265 + block_info |= (cluster_num & 0xffffffff);
2266 + vmaw->outbuf_block_info[vmaw->outbuf_count] = block_info;
2268 + DPRINTF("VMA WRITE MASK %zd %zx\n", cluster_num, block_info);
2270 + vmaw->outbuf_count++;
2272 + /** NOTE: We always write whole clusters, but we correctly set
2273 + * transferred bytes. So transferred == size when everything
2276 + size_t transferred = VMA_CLUSTER_SIZE;
2278 + if (dev_id != vmaw->vmstate_stream) {
2279 + uint64_t last = (cluster_num + 1) * VMA_CLUSTER_SIZE;
2280 + if (last > vmaw->stream_info[dev_id].size) {
2281 + uint64_t diff = last - vmaw->stream_info[dev_id].size;
2282 + if (diff >= VMA_CLUSTER_SIZE) {
2283 + vma_writer_set_error(vmaw, "vma_writer_write: "
2284 + "read after last cluster");
2287 + transferred -= diff;
2291 + vmaw->stream_info[dev_id].transferred += transferred;
2293 + return transferred;
2296 +void vma_writer_error_propagate(VmaWriter *vmaw, Error **errp)
2298 + if (vmaw->status < 0 && *errp == NULL) {
2299 + error_setg(errp, "%s", vmaw->errmsg);
2303 +int vma_writer_close(VmaWriter *vmaw, Error **errp)
2305 + g_assert(vmaw != NULL);
2309 + while (vmaw->co_writer) {
2310 + aio_poll(qemu_get_aio_context(), true);
2313 + assert(vmaw->co_writer == NULL);
2316 + if (pclose(vmaw->cmd) < 0) {
2317 + vma_writer_set_error(vmaw, "vma_writer_close: "
2318 + "pclose failed - %s", g_strerror(errno));
2321 + if (close(vmaw->fd) < 0) {
2322 + vma_writer_set_error(vmaw, "vma_writer_close: "
2323 + "close failed - %s", g_strerror(errno));
2327 + for (i = 0; i <= 255; i++) {
2328 + VmaStreamInfo *si = &vmaw->stream_info[i];
2330 + if (!si->finished) {
2331 + vma_writer_set_error(vmaw, "vma_writer_close: "
2332 + "detected open stream '%s'", si->devname);
2333 + } else if ((si->transferred != si->size) &&
2334 + (i != vmaw->vmstate_stream)) {
2335 + vma_writer_set_error(vmaw, "vma_writer_close: "
2336 + "incomplete stream '%s' (%zd != %zd)",
2337 + si->devname, si->transferred, si->size);
2342 + for (i = 0; i <= 255; i++) {
2343 + vmaw->stream_info[i].finished = 1; /* mark as closed */
2348 + if (vmaw->status < 0 && *errp == NULL) {
2349 + error_setg(errp, "%s", vmaw->errmsg);
2352 + return vmaw->status;
2355 +void vma_writer_destroy(VmaWriter *vmaw)
2361 + for (i = 0; i <= 255; i++) {
2362 + if (vmaw->stream_info[i].devname) {
2363 + g_free(vmaw->stream_info[i].devname);
2367 + if (vmaw->md5csum) {
2368 + g_checksum_free(vmaw->md5csum);
2373 diff --git a/vma.c b/vma.c
2374 new file mode 100644
2375 index 0000000000..1b59fd1555
2380 + * VMA: Virtual Machine Archive
2382 + * Copyright (C) 2012-2013 Proxmox Server Solutions
2385 + * Dietmar Maurer (dietmar@proxmox.com)
2387 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
2388 + * See the COPYING file in the top-level directory.
2392 +#include "qemu/osdep.h"
2396 +#include "qemu-common.h"
2397 +#include "qemu/error-report.h"
2398 +#include "qemu/main-loop.h"
2399 +#include "qapi/qmp/qstring.h"
2400 +#include "sysemu/block-backend.h"
2402 +static void help(void)
2404 + const char *help_msg =
2405 + "usage: vma command [command options]\n"
2407 + "vma list <filename>\n"
2408 + "vma config <filename> [-c config]\n"
2409 + "vma create <filename> [-c config] pathname ...\n"
2410 + "vma extract <filename> [-r <fifo>] <targetdir>\n"
2411 + "vma verify <filename> [-v]\n"
2414 + printf("%s", help_msg);
2418 +static const char *extract_devname(const char *path, char **devname, int index)
2422 + const char *sep = strchr(path, '=');
2425 + *devname = g_strndup(path, sep - path);
2429 + *devname = g_strdup_printf("disk%d", index);
2438 +static void print_content(VmaReader *vmar)
2442 + VmaHeader *head = vma_reader_get_header(vmar);
2444 + GList *l = vma_reader_get_config_data(vmar);
2445 + while (l && l->data) {
2446 + VmaConfigData *cdata = (VmaConfigData *)l->data;
2447 + l = g_list_next(l);
2448 + printf("CFG: size: %d name: %s\n", cdata->len, cdata->name);
2452 + VmaDeviceInfo *di;
2453 + for (i = 1; i < 255; i++) {
2454 + di = vma_reader_get_device_info(vmar, i);
2456 + if (strcmp(di->devname, "vmstate") == 0) {
2457 + printf("VMSTATE: dev_id=%d memory: %zd\n", i, di->size);
2459 + printf("DEV: dev_id=%d size: %zd devname: %s\n",
2460 + i, di->size, di->devname);
2464 + /* ctime is the last entry we print */
2465 + printf("CTIME: %s", ctime(&head->ctime));
2469 +static int list_content(int argc, char **argv)
2472 + const char *filename;
2475 + c = getopt(argc, argv, "h");
2485 + g_assert_not_reached();
2489 + /* Get the filename */
2490 + if ((optind + 1) != argc) {
2493 + filename = argv[optind++];
2495 + Error *errp = NULL;
2496 + VmaReader *vmar = vma_reader_create(filename, &errp);
2499 + g_error("%s", error_get_pretty(errp));
2502 + print_content(vmar);
2504 + vma_reader_destroy(vmar);
2509 +typedef struct RestoreMap {
2516 +static int extract_content(int argc, char **argv)
2520 + const char *filename;
2521 + const char *dirname;
2522 + const char *readmap = NULL;
2525 + c = getopt(argc, argv, "hvr:");
2545 + /* Get the filename */
2546 + if ((optind + 2) != argc) {
2549 + filename = argv[optind++];
2550 + dirname = argv[optind++];
2552 + Error *errp = NULL;
2553 + VmaReader *vmar = vma_reader_create(filename, &errp);
2556 + g_error("%s", error_get_pretty(errp));
2559 + if (mkdir(dirname, 0777) < 0) {
2560 + g_error("unable to create target directory %s - %s",
2561 + dirname, g_strerror(errno));
2564 + GList *l = vma_reader_get_config_data(vmar);
2565 + while (l && l->data) {
2566 + VmaConfigData *cdata = (VmaConfigData *)l->data;
2567 + l = g_list_next(l);
2568 + char *cfgfn = g_strdup_printf("%s/%s", dirname, cdata->name);
2569 + GError *err = NULL;
2570 + if (!g_file_set_contents(cfgfn, (gchar *)cdata->data, cdata->len,
2572 + g_error("unable to write file: %s", err->message);
2576 + GHashTable *devmap = g_hash_table_new(g_str_hash, g_str_equal);
2579 + print_content(vmar);
2581 + FILE *map = fopen(readmap, "r");
2583 + g_error("unable to open fifo %s - %s", readmap, g_strerror(errno));
2588 + char *line = fgets(inbuf, sizeof(inbuf), map);
2589 + if (!line || line[0] == '\0' || !strcmp(line, "done\n")) {
2592 + int len = strlen(line);
2593 + if (line[len - 1] == '\n') {
2594 + line[len - 1] = '\0';
2600 + char *format = NULL;
2601 + if (strncmp(line, "format=", sizeof("format=")-1) == 0) {
2602 + format = line + sizeof("format=")-1;
2603 + char *colon = strchr(format, ':');
2605 + g_error("read map failed - found only a format ('%s')", inbuf);
2607 + format = g_strndup(format, colon - format);
2613 + if (line[0] == '0' && line[1] == ':') {
2615 + write_zero = false;
2616 + } else if (line[0] == '1' && line[1] == ':') {
2618 + write_zero = true;
2620 + g_error("read map failed - parse error ('%s')", inbuf);
2623 + char *devname = NULL;
2624 + path = extract_devname(path, &devname, -1);
2626 + g_error("read map failed - no dev name specified ('%s')",
2630 + RestoreMap *map = g_new0(RestoreMap, 1);
2631 + map->devname = g_strdup(devname);
2632 + map->path = g_strdup(path);
2633 + map->format = format;
2634 + map->write_zero = write_zero;
2636 + g_hash_table_insert(devmap, map->devname, map);
2642 + int vmstate_fd = -1;
2643 + guint8 vmstate_stream = 0;
2645 + BlockBackend *blk = NULL;
2647 + for (i = 1; i < 255; i++) {
2648 + VmaDeviceInfo *di = vma_reader_get_device_info(vmar, i);
2649 + if (di && (strcmp(di->devname, "vmstate") == 0)) {
2650 + vmstate_stream = i;
2651 + char *statefn = g_strdup_printf("%s/vmstate.bin", dirname);
2652 + vmstate_fd = open(statefn, O_WRONLY|O_CREAT|O_EXCL, 0644);
2653 + if (vmstate_fd < 0) {
2654 + g_error("create vmstate file '%s' failed - %s", statefn,
2655 + g_strerror(errno));
2659 + char *devfn = NULL;
2660 + const char *format = NULL;
2661 + int flags = BDRV_O_RDWR | BDRV_O_NO_FLUSH;
2662 + bool write_zero = true;
2666 + map = (RestoreMap *)g_hash_table_lookup(devmap, di->devname);
2667 + if (map == NULL) {
2668 + g_error("no device name mapping for %s", di->devname);
2670 + devfn = map->path;
2671 + format = map->format;
2672 + write_zero = map->write_zero;
2674 + devfn = g_strdup_printf("%s/tmp-disk-%s.raw",
2675 + dirname, di->devname);
2676 + printf("DEVINFO %s %zd\n", devfn, di->size);
2678 + bdrv_img_create(devfn, "raw", NULL, NULL, NULL, di->size,
2679 + flags, true, &errp);
2681 + g_error("can't create file %s: %s", devfn,
2682 + error_get_pretty(errp));
2685 + /* Note: we created an empty file above, so there is no
2686 + * need to write zeroes (so we generate a sparse file)
2688 + write_zero = false;
2691 + size_t devlen = strlen(devfn);
2692 + QDict *options = NULL;
2694 + /* explicit format from commandline */
2695 + options = qdict_new();
2696 + qdict_put(options, "driver", qstring_from_str(format));
2697 + } else if ((devlen > 4 && strcmp(devfn+devlen-4, ".raw") == 0) ||
2698 + strncmp(devfn, "/dev/", 5) == 0)
2700 + /* This part is now deprecated for PVE as well (just as qemu
2701 + * deprecated not specifying an explicit raw format, too.
2703 + /* explicit raw format */
2704 + options = qdict_new();
2705 + qdict_put(options, "driver", qstring_from_str("raw"));
2709 + if (errp || !(blk = blk_new_open(devfn, NULL, options, flags, &errp))) {
2710 + g_error("can't open file %s - %s", devfn,
2711 + error_get_pretty(errp));
2714 + if (vma_reader_register_bs(vmar, i, blk, write_zero, &errp) < 0) {
2715 + g_error("%s", error_get_pretty(errp));
2724 + if (vma_reader_restore(vmar, vmstate_fd, verbose, &errp) < 0) {
2725 + g_error("restore failed - %s", error_get_pretty(errp));
2729 + for (i = 1; i < 255; i++) {
2730 + VmaDeviceInfo *di = vma_reader_get_device_info(vmar, i);
2731 + if (di && (i != vmstate_stream)) {
2732 + char *tmpfn = g_strdup_printf("%s/tmp-disk-%s.raw",
2733 + dirname, di->devname);
2734 + char *fn = g_strdup_printf("%s/disk-%s.raw",
2735 + dirname, di->devname);
2736 + if (rename(tmpfn, fn) != 0) {
2737 + g_error("rename %s to %s failed - %s",
2738 + tmpfn, fn, g_strerror(errno));
2744 + vma_reader_destroy(vmar);
2753 +static int verify_content(int argc, char **argv)
2757 + const char *filename;
2760 + c = getopt(argc, argv, "hv");
2777 + /* Get the filename */
2778 + if ((optind + 1) != argc) {
2781 + filename = argv[optind++];
2783 + Error *errp = NULL;
2784 + VmaReader *vmar = vma_reader_create(filename, &errp);
2787 + g_error("%s", error_get_pretty(errp));
2791 + print_content(vmar);
2794 + if (vma_reader_verify(vmar, verbose, &errp) < 0) {
2795 + g_error("verify failed - %s", error_get_pretty(errp));
2798 + vma_reader_destroy(vmar);
2805 +typedef struct BackupJob {
2806 + BlockBackend *target;
2812 +#define BACKUP_SECTORS_PER_CLUSTER (VMA_CLUSTER_SIZE / BDRV_SECTOR_SIZE)
2814 +static void coroutine_fn backup_run_empty(void *opaque)
2816 + VmaWriter *vmaw = (VmaWriter *)opaque;
2818 + vma_writer_flush_output(vmaw);
2820 + Error *err = NULL;
2821 + if (vma_writer_close(vmaw, &err) != 0) {
2822 + g_warning("vma_writer_close failed %s", error_get_pretty(err));
2826 +static void coroutine_fn backup_run(void *opaque)
2828 + BackupJob *job = (BackupJob *)opaque;
2830 + QEMUIOVector qiov;
2832 + int64_t start, end;
2835 + unsigned char *buf = blk_blockalign(job->target, VMA_CLUSTER_SIZE);
2838 + end = DIV_ROUND_UP(job->len / BDRV_SECTOR_SIZE,
2839 + BACKUP_SECTORS_PER_CLUSTER);
2841 + for (; start < end; start++) {
2842 + iov.iov_base = buf;
2843 + iov.iov_len = VMA_CLUSTER_SIZE;
2844 + qemu_iovec_init_external(&qiov, &iov, 1);
2846 + ret = blk_co_preadv(job->target, start * VMA_CLUSTER_SIZE,
2847 + VMA_CLUSTER_SIZE, &qiov, 0);
2849 + vma_writer_set_error(job->vmaw, "read error", -1);
2854 + if (vma_writer_write(job->vmaw, job->dev_id, start, buf, &zb) < 0) {
2855 + vma_writer_set_error(job->vmaw, "backup_dump_cb vma_writer_write failed", -1);
2862 + if (vma_writer_close_stream(job->vmaw, job->dev_id) <= 0) {
2863 + Error *err = NULL;
2864 + if (vma_writer_close(job->vmaw, &err) != 0) {
2865 + g_warning("vma_writer_close failed %s", error_get_pretty(err));
2870 +static int create_archive(int argc, char **argv)
2874 + const char *archivename;
2875 + GList *config_files = NULL;
2878 + c = getopt(argc, argv, "hvc:");
2888 + config_files = g_list_append(config_files, optarg);
2894 + g_assert_not_reached();
2899 +    /* make sure we have an archive name */
2900 + if ((optind + 1) > argc) {
2904 + archivename = argv[optind++];
2907 + uuid_generate(uuid);
2909 + Error *local_err = NULL;
2910 + VmaWriter *vmaw = vma_writer_create(archivename, uuid, &local_err);
2912 + if (vmaw == NULL) {
2913 + g_error("%s", error_get_pretty(local_err));
2916 + GList *l = config_files;
2917 + while (l && l->data) {
2918 + char *name = l->data;
2919 + char *cdata = NULL;
2921 + GError *err = NULL;
2922 + if (!g_file_get_contents(name, &cdata, &clen, &err)) {
2923 + unlink(archivename);
2924 + g_error("Unable to read file: %s", err->message);
2927 + if (vma_writer_add_config(vmaw, name, cdata, clen) != 0) {
2928 + unlink(archivename);
2929 + g_error("Unable to append config data %s (len = %zd)",
2932 + l = g_list_next(l);
2936 + while (optind < argc) {
2937 + const char *path = argv[optind++];
2938 + char *devname = NULL;
2939 + path = extract_devname(path, &devname, devcount++);
2941 + Error *errp = NULL;
2942 + BlockBackend *target;
2944 + target = blk_new_open(path, NULL, NULL, 0, &errp);
2946 + unlink(archivename);
2947 + g_error("bdrv_open '%s' failed - %s", path, error_get_pretty(errp));
2949 + int64_t size = blk_getlength(target);
2950 + int dev_id = vma_writer_register_stream(vmaw, devname, size);
2951 + if (dev_id <= 0) {
2952 + unlink(archivename);
2953 + g_error("vma_writer_register_stream '%s' failed", devname);
2956 + BackupJob *job = g_new0(BackupJob, 1);
2958 + job->target = target;
2960 + job->dev_id = dev_id;
2962 + Coroutine *co = qemu_coroutine_create(backup_run, job);
2963 + qemu_coroutine_enter(co);
2966 + VmaStatus vmastat;
2968 + int last_percent = -1;
2972 + main_loop_wait(false);
2973 + vma_writer_get_status(vmaw, &vmastat);
2977 + uint64_t total = 0;
2978 + uint64_t transferred = 0;
2979 + uint64_t zero_bytes = 0;
2982 + for (i = 0; i < 256; i++) {
2983 + if (vmastat.stream_info[i].size) {
2984 + total += vmastat.stream_info[i].size;
2985 + transferred += vmastat.stream_info[i].transferred;
2986 + zero_bytes += vmastat.stream_info[i].zero_bytes;
2989 + percent = (transferred*100)/total;
2990 + if (percent != last_percent) {
2991 + fprintf(stderr, "progress %d%% %zd/%zd %zd\n", percent,
2992 + transferred, total, zero_bytes);
2995 + last_percent = percent;
2999 + if (vmastat.closed) {
3004 + Coroutine *co = qemu_coroutine_create(backup_run_empty, vmaw);
3005 + qemu_coroutine_enter(co);
3007 + main_loop_wait(false);
3008 + vma_writer_get_status(vmaw, &vmastat);
3009 + if (vmastat.closed) {
3017 + vma_writer_get_status(vmaw, &vmastat);
3020 + for (i = 0; i < 256; i++) {
3021 + VmaStreamInfo *si = &vmastat.stream_info[i];
3023 + fprintf(stderr, "image %s: size=%zd zeros=%zd saved=%zd\n",
3024 + si->devname, si->size, si->zero_bytes,
3025 + si->size - si->zero_bytes);
3030 + if (vmastat.status < 0) {
3031 + unlink(archivename);
3032 + g_error("creating vma archive failed");
3038 +static int dump_config(int argc, char **argv)
3041 + const char *filename;
3042 + const char *config_name = "qemu-server.conf";
3045 + c = getopt(argc, argv, "hc:");
3055 + config_name = optarg;
3062 + /* Get the filename */
3063 + if ((optind + 1) != argc) {
3066 + filename = argv[optind++];
3068 + Error *errp = NULL;
3069 + VmaReader *vmar = vma_reader_create(filename, &errp);
3072 + g_error("%s", error_get_pretty(errp));
3076 + GList *l = vma_reader_get_config_data(vmar);
3077 + while (l && l->data) {
3078 + VmaConfigData *cdata = (VmaConfigData *)l->data;
3079 + l = g_list_next(l);
3080 + if (strcmp(cdata->name, config_name) == 0) {
3082 + fwrite(cdata->data, cdata->len, 1, stdout);
3087 + vma_reader_destroy(vmar);
3092 + fprintf(stderr, "unable to find configuration data '%s'\n", config_name);
3099 +int main(int argc, char **argv)
3101 + const char *cmdname;
3102 + Error *main_loop_err = NULL;
3104 + error_set_progname(argv[0]);
3106 + if (qemu_init_main_loop(&main_loop_err)) {
3107 + g_error("%s", error_get_pretty(main_loop_err));
3116 + cmdname = argv[1];
3120 + if (!strcmp(cmdname, "list")) {
3121 + return list_content(argc, argv);
3122 + } else if (!strcmp(cmdname, "create")) {
3123 + return create_archive(argc, argv);
3124 + } else if (!strcmp(cmdname, "extract")) {
3125 + return extract_content(argc, argv);
3126 + } else if (!strcmp(cmdname, "verify")) {
3127 + return verify_content(argc, argv);
3128 + } else if (!strcmp(cmdname, "config")) {
3129 + return dump_config(argc, argv);
3135 diff --git a/vma.h b/vma.h
3136 new file mode 100644
3137 index 0000000000..c895c97f6d
3142 + * VMA: Virtual Machine Archive
3144 + * Copyright (C) Proxmox Server Solutions
3147 + * Dietmar Maurer (dietmar@proxmox.com)
3149 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
3150 + * See the COPYING file in the top-level directory.
3154 +#ifndef BACKUP_VMA_H
3155 +#define BACKUP_VMA_H
3157 +#include <uuid/uuid.h>
3158 +#include "qapi/error.h"
3159 +#include "block/block.h"
3161 +#define VMA_BLOCK_BITS 12
3162 +#define VMA_BLOCK_SIZE (1<<VMA_BLOCK_BITS)
3163 +#define VMA_CLUSTER_BITS (VMA_BLOCK_BITS+4)
3164 +#define VMA_CLUSTER_SIZE (1<<VMA_CLUSTER_BITS)
3166 +#if VMA_CLUSTER_SIZE != 65536
3167 +#error unexpected cluster size
3170 +#define VMA_EXTENT_HEADER_SIZE 512
3171 +#define VMA_BLOCKS_PER_EXTENT 59
3172 +#define VMA_MAX_CONFIGS 256
3174 +#define VMA_MAX_EXTENT_SIZE \
3175 + (VMA_EXTENT_HEADER_SIZE+VMA_CLUSTER_SIZE*VMA_BLOCKS_PER_EXTENT)
3176 +#if VMA_MAX_EXTENT_SIZE != 3867136
3177 +#error unexpected VMA_MAX_EXTENT_SIZE
3180 +/* File Format Definitions */
3182 +#define VMA_MAGIC (GUINT32_TO_BE(('V'<<24)|('M'<<16)|('A'<<8)|0x00))
3183 +#define VMA_EXTENT_MAGIC (GUINT32_TO_BE(('V'<<24)|('M'<<16)|('A'<<8)|'E'))
3185 +typedef struct VmaDeviceInfoHeader {
3186 + uint32_t devname_ptr; /* offset into blob_buffer table */
3187 + uint32_t reserved0;
3188 + uint64_t size; /* device size in bytes */
3189 + uint64_t reserved1;
3190 + uint64_t reserved2;
3191 +} VmaDeviceInfoHeader;
3193 +typedef struct VmaHeader {
3196 + unsigned char uuid[16];
3198 + unsigned char md5sum[16];
3200 + uint32_t blob_buffer_offset;
3201 + uint32_t blob_buffer_size;
3202 + uint32_t header_size;
3204 + unsigned char reserved[1984];
3206 + uint32_t config_names[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
3207 + uint32_t config_data[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
3209 + uint32_t reserved1;
3211 + VmaDeviceInfoHeader dev_info[256];
3214 +typedef struct VmaExtentHeader {
3216 + uint16_t reserved1;
3217 + uint16_t block_count;
3218 + unsigned char uuid[16];
3219 + unsigned char md5sum[16];
3220 + uint64_t blockinfo[VMA_BLOCKS_PER_EXTENT];
3223 +/* functions/definitions to read/write vma files */
3225 +typedef struct VmaReader VmaReader;
3227 +typedef struct VmaWriter VmaWriter;
3229 +typedef struct VmaConfigData {
3235 +typedef struct VmaStreamInfo {
3237 + uint64_t cluster_count;
3238 + uint64_t transferred;
3239 + uint64_t zero_bytes;
3244 +typedef struct VmaStatus {
3247 + char errmsg[8192];
3248 + char uuid_str[37];
3249 + VmaStreamInfo stream_info[256];
3252 +typedef struct VmaDeviceInfo {
3253 + uint64_t size; /* device size in bytes */
3254 + const char *devname;
3257 +VmaWriter *vma_writer_create(const char *filename, uuid_t uuid, Error **errp);
3258 +int vma_writer_close(VmaWriter *vmaw, Error **errp);
3259 +void vma_writer_error_propagate(VmaWriter *vmaw, Error **errp);
3260 +void vma_writer_destroy(VmaWriter *vmaw);
3261 +int vma_writer_add_config(VmaWriter *vmaw, const char *name, gpointer data,
3263 +int vma_writer_register_stream(VmaWriter *vmaw, const char *devname,
3266 +int64_t coroutine_fn vma_writer_write(VmaWriter *vmaw, uint8_t dev_id,
3267 + int64_t cluster_num,
3268 + const unsigned char *buf,
3269 + size_t *zero_bytes);
3271 +int coroutine_fn vma_writer_close_stream(VmaWriter *vmaw, uint8_t dev_id);
3272 +int coroutine_fn vma_writer_flush_output(VmaWriter *vmaw);
3274 +int vma_writer_get_status(VmaWriter *vmaw, VmaStatus *status);
3275 +void vma_writer_set_error(VmaWriter *vmaw, const char *fmt, ...);
3278 +VmaReader *vma_reader_create(const char *filename, Error **errp);
3279 +void vma_reader_destroy(VmaReader *vmar);
3280 +VmaHeader *vma_reader_get_header(VmaReader *vmar);
3281 +GList *vma_reader_get_config_data(VmaReader *vmar);
3282 +VmaDeviceInfo *vma_reader_get_device_info(VmaReader *vmar, guint8 dev_id);
3283 +int vma_reader_register_bs(VmaReader *vmar, guint8 dev_id,
3284 + BlockBackend *target, bool write_zeroes,
3286 +int vma_reader_restore(VmaReader *vmar, int vmstate_fd, bool verbose,
3288 +int vma_reader_verify(VmaReader *vmar, bool verbose, Error **errp);
3290 +#endif /* BACKUP_VMA_H */