1From 741b9f52069e043e93f0fc47e820ddbfa0bff7a3 Mon Sep 17 00:00:00 2001
2From: Wolfgang Bumiller <w.bumiller@proxmox.com>
3Date: Mon, 7 Aug 2017 08:51:16 +0200
4Subject: [PATCH 28/28] adding old vma files
5
6---
7 Makefile | 3 +-
8 Makefile.objs | 1 +
9 block/backup.c | 128 ++++---
10 block/replication.c | 1 +
11 blockdev.c | 250 +++++++++-----
12 blockjob.c | 11 +-
13 include/block/block_int.h | 4 +
14 vma-reader.c | 857 ++++++++++++++++++++++++++++++++++++++++++++++
15 vma-writer.c | 771 +++++++++++++++++++++++++++++++++++++++++
16 vma.c | 757 ++++++++++++++++++++++++++++++++++++++++
17 vma.h | 149 ++++++++
18 11 files changed, 2799 insertions(+), 133 deletions(-)
19 create mode 100644 vma-reader.c
20 create mode 100644 vma-writer.c
21 create mode 100644 vma.c
22 create mode 100644 vma.h
23
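The core of this series: backup_job_create() can now run without a target BlockBackend and instead hand every copied cluster to a BackupDumpFunc callback, which the PVE code in blockdev.c uses to stream clusters into a VMA archive. A minimal sketch of such a callback follows (not part of the patch; it assumes the usual QEMU block headers, and the NULL-buffer-means-zeroes and return-byte-count-on-success conventions are inferred from backup_do_cow() and pvebackup_dump_cb() below):

    /* sketch: a dump callback that only accounts for the data it is handed */
    static int counting_dump_cb(void *opaque, BlockBackend *target,
                                int64_t sector_num, int n_sectors,
                                unsigned char *buf)
    {
        size_t *bytes_seen = opaque;              /* 'opaque' from backup_job_create() */
        int size = n_sectors * BDRV_SECTOR_SIZE;  /* ranges are given in 512-byte sectors */

        if (buf == NULL) {
            /* a NULL buffer means the whole range reads as zeroes */
        }
        *bytes_seen += size;
        return size;                              /* >= 0 is treated as success */
    }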
24diff --git a/Makefile b/Makefile
25index 6c359b2f86..edbc8b50f0 100644
26--- a/Makefile
27+++ b/Makefile
28@@ -284,7 +284,7 @@ ifneq ($(wildcard config-host.mak),)
29 include $(SRC_PATH)/tests/Makefile.include
30 endif
31
32-all: $(DOCS) $(TOOLS) $(HELPERS-y) recurse-all modules
33+all: $(DOCS) $(TOOLS) vma$(EXESUF) $(HELPERS-y) recurse-all modules
34
35 qemu-version.h: FORCE
36 $(call quiet-command, \
37@@ -377,6 +377,7 @@ qemu-img.o: qemu-img-cmds.h
38 qemu-img$(EXESUF): qemu-img.o $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
39 qemu-nbd$(EXESUF): qemu-nbd.o $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
40 qemu-io$(EXESUF): qemu-io.o $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
41+vma$(EXESUF): vma.o vma-reader.o $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
42
43 qemu-bridge-helper$(EXESUF): qemu-bridge-helper.o $(COMMON_LDADDS)
44
45diff --git a/Makefile.objs b/Makefile.objs
46index fbfbbb7f70..f5f8dbab3b 100644
47--- a/Makefile.objs
48+++ b/Makefile.objs
49@@ -14,6 +14,7 @@ block-obj-y += block.o blockjob.o
50 block-obj-y += block/
51 block-obj-y += qemu-io-cmds.o
52 block-obj-$(CONFIG_REPLICATION) += replication.o
53+block-obj-y += vma-writer.o
54
55 block-obj-m = block/
56
57diff --git a/block/backup.c b/block/backup.c
58index 1ede70c061..51b5ba6eda 100644
59--- a/block/backup.c
60+++ b/block/backup.c
61@@ -36,6 +36,7 @@ typedef struct BackupBlockJob {
62 BdrvDirtyBitmap *sync_bitmap;
63 MirrorSyncMode sync_mode;
64 RateLimit limit;
65+ BackupDumpFunc *dump_cb;
66 BlockdevOnError on_source_error;
67 BlockdevOnError on_target_error;
68 CoRwlock flush_rwlock;
69@@ -145,13 +146,24 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
70 goto out;
71 }
72
73+ int64_t start_sec = start * sectors_per_cluster;
74 if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
75- ret = blk_co_pwrite_zeroes(job->target, start * job->cluster_size,
76- bounce_qiov.size, BDRV_REQ_MAY_UNMAP);
77+ if (job->dump_cb) {
78+ ret = job->dump_cb(job->common.opaque, job->target, start_sec, n, NULL);
79+ }
80+ if (job->target) {
81+ ret = blk_co_pwrite_zeroes(job->target, start * job->cluster_size,
82+ bounce_qiov.size, BDRV_REQ_MAY_UNMAP);
83+ }
84 } else {
85- ret = blk_co_pwritev(job->target, start * job->cluster_size,
86- bounce_qiov.size, &bounce_qiov,
87- job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
88+ if (job->dump_cb) {
89+ ret = job->dump_cb(job->common.opaque, job->target, start_sec, n, bounce_buffer);
90+ }
91+ if (job->target) {
92+ ret = blk_co_pwritev(job->target, start * job->cluster_size,
93+ bounce_qiov.size, &bounce_qiov,
94+ job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
95+ }
96 }
97 if (ret < 0) {
98 trace_backup_do_cow_write_fail(job, start, ret);
99@@ -246,6 +258,8 @@ static void backup_abort(BlockJob *job)
100 static void backup_clean(BlockJob *job)
101 {
102 BackupBlockJob *s = container_of(job, BackupBlockJob, common);
103+ if (!s->target)
104+ return;
105 assert(s->target);
106 blk_unref(s->target);
107 s->target = NULL;
108@@ -255,7 +269,8 @@ static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
109 {
110 BackupBlockJob *s = container_of(job, BackupBlockJob, common);
111
112- blk_set_aio_context(s->target, aio_context);
113+ if (s->target)
114+ blk_set_aio_context(s->target, aio_context);
115 }
116
117 void backup_do_checkpoint(BlockJob *job, Error **errp)
118@@ -330,9 +345,11 @@ static BlockErrorAction backup_error_action(BackupBlockJob *job,
119 if (read) {
120 return block_job_error_action(&job->common, job->on_source_error,
121 true, error);
122- } else {
123+ } else if (job->target) {
124 return block_job_error_action(&job->common, job->on_target_error,
125 false, error);
126+ } else {
127+ return BLOCK_ERROR_ACTION_REPORT;
128 }
129 }
130
131@@ -453,6 +469,7 @@ static void coroutine_fn backup_run(void *opaque)
132
133 job->done_bitmap = bitmap_new(end);
134
135+
136 job->before_write.notify = backup_before_write_notify;
137 bdrv_add_before_write_notifier(bs, &job->before_write);
138
139@@ -557,6 +574,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
140 BlockdevOnError on_source_error,
141 BlockdevOnError on_target_error,
142 int creation_flags,
143+ BackupDumpFunc *dump_cb,
144 BlockCompletionFunc *cb, void *opaque,
145 int pause_count,
146 BlockJobTxn *txn, Error **errp)
147@@ -567,7 +585,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
148 int ret;
149
150 assert(bs);
151- assert(target);
152+ assert(target || dump_cb);
153
154 if (bs == target) {
155 error_setg(errp, "Source and target cannot be the same");
156@@ -580,13 +598,13 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
157 return NULL;
158 }
159
160- if (!bdrv_is_inserted(target)) {
161+ if (target && !bdrv_is_inserted(target)) {
162 error_setg(errp, "Device is not inserted: %s",
163 bdrv_get_device_name(target));
164 return NULL;
165 }
166
167- if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
168+ if (target && compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
169 error_setg(errp, "Compression is not supported for this drive %s",
170 bdrv_get_device_name(target));
171 return NULL;
172@@ -596,7 +614,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
173 return NULL;
174 }
175
176- if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
177+ if (target && bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
178 return NULL;
179 }
180
181@@ -636,15 +654,18 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
182 goto error;
183 }
184
185- /* The target must match the source in size, so no resize here either */
186- job->target = blk_new(BLK_PERM_WRITE,
187- BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
188- BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
189- ret = blk_insert_bs(job->target, target, errp);
190- if (ret < 0) {
191- goto error;
192+ if (target) {
193+ /* The target must match the source in size, so no resize here either */
194+ job->target = blk_new(BLK_PERM_WRITE,
195+ BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
196+ BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
197+ ret = blk_insert_bs(job->target, target, errp);
198+ if (ret < 0) {
199+ goto error;
200+ }
201 }
202
203+ job->dump_cb = dump_cb;
204 job->on_source_error = on_source_error;
205 job->on_target_error = on_target_error;
206 job->sync_mode = sync_mode;
207@@ -652,38 +673,55 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
208 sync_bitmap : NULL;
209 job->compress = compress;
210
211- /* If there is no backing file on the target, we cannot rely on COW if our
212- * backup cluster size is smaller than the target cluster size. Even for
213- * targets with a backing file, try to avoid COW if possible. */
214- ret = bdrv_get_info(target, &bdi);
215- if (ret == -ENOTSUP && !target->backing) {
216- /* Cluster size is not defined */
217- error_report("WARNING: The target block device doesn't provide "
218- "information about the block size and it doesn't have a "
219- "backing file. The default block size of %u bytes is "
220- "used. If the actual block size of the target exceeds "
221- "this default, the backup may be unusable",
222- BACKUP_CLUSTER_SIZE_DEFAULT);
223- job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
224- } else if (ret < 0 && !target->backing) {
225- error_setg_errno(errp, -ret,
226- "Couldn't determine the cluster size of the target image, "
227- "which has no backing file");
228- error_append_hint(errp,
229- "Aborting, since this may create an unusable destination image\n");
230- goto error;
231- } else if (ret < 0 && target->backing) {
232- /* Not fatal; just trudge on ahead. */
233- job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
234+ if (target) {
235+ /* If there is no backing file on the target, we cannot rely on COW if our
236+ * backup cluster size is smaller than the target cluster size. Even for
237+ * targets with a backing file, try to avoid COW if possible. */
238+ ret = bdrv_get_info(target, &bdi);
239+ if (ret == -ENOTSUP && !target->backing) {
240+ /* Cluster size is not defined */
241+ error_report("WARNING: The target block device doesn't provide "
242+ "information about the block size and it doesn't have a "
243+ "backing file. The default block size of %u bytes is "
244+ "used. If the actual block size of the target exceeds "
245+ "this default, the backup may be unusable",
246+ BACKUP_CLUSTER_SIZE_DEFAULT);
247+ job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
248+ } else if (ret < 0 && !target->backing) {
249+ error_setg_errno(errp, -ret,
250+ "Couldn't determine the cluster size of the target image, "
251+ "which has no backing file");
252+ error_append_hint(errp,
253+ "Aborting, since this may create an unusable destination image\n");
254+ goto error;
255+ } else if (ret < 0 && target->backing) {
256+ /* Not fatal; just trudge on ahead. */
257+ job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
258+ } else {
259+ job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
260+ }
261 } else {
262- job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
263+ ret = bdrv_get_info(bs, &bdi);
264+ if (ret < 0) {
265+ job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
266+ } else {
267+ /* round down to nearest BACKUP_CLUSTER_SIZE_DEFAULT */
268+ job->cluster_size = (bdi.cluster_size / BACKUP_CLUSTER_SIZE_DEFAULT) * BACKUP_CLUSTER_SIZE_DEFAULT;
269+ if (job->cluster_size == 0) {
270+ /* but we can't go below it */
271+ job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
272+ }
273+ }
274 }
275
276- /* Required permissions are already taken with target's blk_new() */
277- block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
278- &error_abort);
279+ if (target) {
280+ /* Required permissions are already taken with target's blk_new() */
281+ block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
282+ &error_abort);
283+ } else {
284+ job->common.pause_count = pause_count;
285+ }
286 job->common.len = len;
287- job->common.pause_count = pause_count;
288 block_job_txn_add_job(txn, &job->common);
289
290 return &job->common;
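After these hunks, backup_job_create() accepts either a real target (dump_cb == NULL, as in block/replication.c and do_drive_backup() below) or a dump callback with an optional target. Condensed from the qmp_backup() call site later in this patch, with comments added here for readability, a dump-mode invocation looks like this:

    job = backup_job_create(NULL, di->bs,
                            di->target,            /* NULL when writing a VMA archive */
                            speed, MIRROR_SYNC_MODE_FULL,
                            NULL, false,           /* no dirty bitmap, no compression */
                            BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
                            BLOCK_JOB_DEFAULT,
                            pvebackup_dump_cb,     /* new: per-cluster dump callback */
                            pvebackup_complete_cb, di,
                            2,                     /* new: pause_count, job starts paused */
                            NULL, &local_err);

The non-zero pause count keeps each job paused after block_job_start(); pvebackup_run_next_job() then resumes the devices one at a time, so all writes are serialised into the single VMA stream.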
291diff --git a/block/replication.c b/block/replication.c
292index 1c41d9e6bf..60c6524417 100644
293--- a/block/replication.c
294+++ b/block/replication.c
295@@ -531,6 +531,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
296 0, MIRROR_SYNC_MODE_NONE, NULL, false,
297 BLOCKDEV_ON_ERROR_REPORT,
298 BLOCKDEV_ON_ERROR_REPORT, BLOCK_JOB_INTERNAL,
299+ NULL,
300 backup_job_completed, bs, 0, NULL, &local_err);
301 if (local_err) {
302 error_propagate(errp, local_err);
303diff --git a/blockdev.c b/blockdev.c
304index 981276692a..76a7103743 100644
305--- a/blockdev.c
306+++ b/blockdev.c
307@@ -31,7 +31,6 @@
308 */
309
310 #include "qemu/osdep.h"
311-#include "qemu/uuid.h"
312 #include "sysemu/block-backend.h"
313 #include "sysemu/blockdev.h"
314 #include "hw/block/block.h"
315@@ -55,6 +54,7 @@
316 #include "qemu/cutils.h"
317 #include "qemu/help_option.h"
318 #include "qemu/throttle-options.h"
319+#include "vma.h"
320
321 static QTAILQ_HEAD(, BlockDriverState) monitor_bdrv_states =
322 QTAILQ_HEAD_INITIALIZER(monitor_bdrv_states);
323@@ -2958,20 +2958,44 @@ out:
324 aio_context_release(aio_context);
325 }
326
327+void block_job_event_cancelled(BlockJob *job);
328+void block_job_event_completed(BlockJob *job, const char *msg);
329+static void block_job_cb(void *opaque, int ret)
330+{
331+ /* Note that this function may be executed from another AioContext besides
332+ * the QEMU main loop. If you need to access anything that assumes the
333+ * QEMU global mutex, use a BH or introduce a mutex.
334+ */
335+
336+ BlockDriverState *bs = opaque;
337+ const char *msg = NULL;
338+
339+ assert(bs->job);
340+
341+ if (ret < 0) {
342+ msg = strerror(-ret);
343+ }
344+
345+ if (block_job_is_cancelled(bs->job)) {
346+ block_job_event_cancelled(bs->job);
347+ } else {
348+ block_job_event_completed(bs->job, msg);
349+ }
350+}
351+
352 /* PVE backup related function */
353
354 static struct PVEBackupState {
355 Error *error;
356 bool cancel;
357- QemuUUID uuid;
358+ uuid_t uuid;
359 char uuid_str[37];
360 int64_t speed;
361 time_t start_time;
362 time_t end_time;
363 char *backup_file;
364- Object *vmaobj;
365+ VmaWriter *vmaw;
366 GList *di_list;
367- size_t next_job;
368 size_t total;
369 size_t transferred;
370 size_t zero_bytes;
371@@ -2981,6 +3005,7 @@ typedef struct PVEBackupDevInfo {
372 BlockDriverState *bs;
373 size_t size;
374 uint8_t dev_id;
375+ //bool started;
376 bool completed;
377 char targetfile[PATH_MAX];
378 BlockDriverState *target;
379@@ -2988,13 +3013,79 @@ typedef struct PVEBackupDevInfo {
380
381 static void pvebackup_run_next_job(void);
382
383+static int pvebackup_dump_cb(void *opaque, BlockBackend *target,
384+ int64_t sector_num, int n_sectors,
385+ unsigned char *buf)
386+{
387+ PVEBackupDevInfo *di = opaque;
388+
389+ int size = n_sectors * BDRV_SECTOR_SIZE;
390+ if (backup_state.cancel) {
391+ return size; // return success
392+ }
393+
394+ if (sector_num & 0x7f) {
395+ if (!backup_state.error) {
396+ error_setg(&backup_state.error,
397+ "got unaligned write inside backup dump "
398+ "callback (sector %ld)", sector_num);
399+ }
400+ return -1; // not aligned to cluster size
401+ }
402+
403+ int64_t cluster_num = sector_num >> 7;
404+
405+ int ret = -1;
406+
407+ if (backup_state.vmaw) {
408+ size_t zero_bytes = 0;
409+ int64_t remaining = n_sectors * BDRV_SECTOR_SIZE;
410+ while (remaining > 0) {
411+ ret = vma_writer_write(backup_state.vmaw, di->dev_id, cluster_num,
412+ buf, &zero_bytes);
413+ ++cluster_num;
414+ if (buf) {
415+ buf += VMA_CLUSTER_SIZE;
416+ }
417+ if (ret < 0) {
418+ if (!backup_state.error) {
419+ vma_writer_error_propagate(backup_state.vmaw, &backup_state.error);
420+ }
421+ if (di->bs && di->bs->job) {
422+ block_job_cancel(di->bs->job);
423+ }
424+ break;
425+ } else {
426+ backup_state.zero_bytes += zero_bytes;
427+ if (remaining >= VMA_CLUSTER_SIZE) {
428+ backup_state.transferred += VMA_CLUSTER_SIZE;
429+ } else {
430+ backup_state.transferred += remaining;
431+ }
432+ remaining -= VMA_CLUSTER_SIZE;
433+ }
434+ }
435+ } else {
436+ if (!buf) {
437+ backup_state.zero_bytes += size;
438+ }
439+ backup_state.transferred += size;
440+ }
441+
442+ // Note: always return success, because we want writes to succeed anyway.
443+
444+ return size;
445+}
446+
447 static void pvebackup_cleanup(void)
448 {
449 backup_state.end_time = time(NULL);
450
451- if (backup_state.vmaobj) {
452- object_unparent(backup_state.vmaobj);
453- backup_state.vmaobj = NULL;
454+ if (backup_state.vmaw) {
455+ Error *local_err = NULL;
456+ vma_writer_close(backup_state.vmaw, &local_err);
457+ error_propagate(&backup_state.error, local_err);
458+ backup_state.vmaw = NULL;
459 }
460
461 if (backup_state.di_list) {
462@@ -3009,6 +3100,13 @@ static void pvebackup_cleanup(void)
463 }
464 }
465
466+static void coroutine_fn backup_close_vma_stream(void *opaque)
467+{
468+ PVEBackupDevInfo *di = opaque;
469+
470+ vma_writer_close_stream(backup_state.vmaw, di->dev_id);
471+}
472+
473 static void pvebackup_complete_cb(void *opaque, int ret)
474 {
475 PVEBackupDevInfo *di = opaque;
476@@ -3020,14 +3118,18 @@ static void pvebackup_complete_cb(void *opaque, int ret)
477 ret, strerror(-ret));
478 }
479
480+ BlockDriverState *bs = di->bs;
481+
482 di->bs = NULL;
483 di->target = NULL;
484
485- if (backup_state.vmaobj) {
486- object_unparent(backup_state.vmaobj);
487- backup_state.vmaobj = NULL;
488+ if (backup_state.vmaw) {
489+ Coroutine *co = qemu_coroutine_create(backup_close_vma_stream, di);
490+ qemu_coroutine_enter(co);
491 }
492
493+ block_job_cb(bs, ret);
494+
495 if (!backup_state.cancel) {
496 pvebackup_run_next_job();
497 }
498@@ -3041,14 +3143,9 @@ static void pvebackup_cancel(void *opaque)
499 error_setg(&backup_state.error, "backup cancelled");
500 }
501
502- if (backup_state.vmaobj) {
503- Error *err;
504+ if (backup_state.vmaw) {
505 /* make sure vma writer does not block anymore */
506- if (!object_set_props(backup_state.vmaobj, &err, "blocked", "yes", NULL)) {
507- if (err) {
508- error_report_err(err);
509- }
510- }
511+ vma_writer_set_error(backup_state.vmaw, "backup cancelled");
512 }
513
514 GList *l = backup_state.di_list;
515@@ -3073,19 +3170,15 @@ void qmp_backup_cancel(Error **errp)
516 Coroutine *co = qemu_coroutine_create(pvebackup_cancel, NULL);
517 qemu_coroutine_enter(co);
518
519- while (backup_state.vmaobj) {
520- /* FIXME: Find something better for this */
521+ while (backup_state.vmaw) {
522+ /* the vma writer uses the main aio context */
523 aio_poll(qemu_get_aio_context(), true);
524 }
525 }
526
527-void vma_object_add_config_file(Object *obj, const char *name,
528- const char *contents, size_t len,
529- Error **errp);
530 static int config_to_vma(const char *file, BackupFormat format,
531- Object *vmaobj,
532- const char *backup_dir,
533- Error **errp)
534+ const char *backup_dir, VmaWriter *vmaw,
535+ Error **errp)
536 {
537 char *cdata = NULL;
538 gsize clen = 0;
539@@ -3098,12 +3191,17 @@ static int config_to_vma(const char *file, BackupFormat format,
540 char *basename = g_path_get_basename(file);
541
542 if (format == BACKUP_FORMAT_VMA) {
543- vma_object_add_config_file(vmaobj, basename, cdata, clen, errp);
544+ if (vma_writer_add_config(vmaw, basename, cdata, clen) != 0) {
545+ error_setg(errp, "unable to add %s config data to vma archive", file);
546+ g_free(cdata);
547+ g_free(basename);
548+ return 1;
549+ }
550 } else if (format == BACKUP_FORMAT_DIR) {
551 char config_path[PATH_MAX];
552 snprintf(config_path, PATH_MAX, "%s/%s", backup_dir, basename);
553 if (!g_file_set_contents(config_path, cdata, clen, &err)) {
554- error_setg(errp, "unable to write config file '%s'", config_path);
555+ error_setg(errp, "unable to write config file '%s'", config_path);
556 g_free(cdata);
557 g_free(basename);
558 return 1;
559@@ -3113,35 +3211,37 @@ static int config_to_vma(const char *file, BackupFormat format,
560 g_free(basename);
561 g_free(cdata);
562
563- return 0;
564+ return 0;
565 }
566
567+bool block_job_should_pause(BlockJob *job);
568 static void pvebackup_run_next_job(void)
569 {
570- bool cancel = backup_state.error || backup_state.cancel;
571-fprintf(stderr, "run next job: %zu\n", backup_state.next_job);
572- GList *next = g_list_nth(backup_state.di_list, backup_state.next_job);
573- while (next) {
574- PVEBackupDevInfo *di = (PVEBackupDevInfo *)next->data;
575- backup_state.next_job++;
576+ GList *l = backup_state.di_list;
577+ while (l) {
578+ PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
579+ l = g_list_next(l);
580 if (!di->completed && di->bs && di->bs->job) {
581 BlockJob *job = di->bs->job;
582- if (cancel) {
583- block_job_cancel(job);
584- } else {
585- block_job_resume(job);
586+ if (block_job_should_pause(job)) {
587+ bool cancel = backup_state.error || backup_state.cancel;
588+ if (cancel) {
589+ block_job_cancel(job);
590+ } else {
591+ block_job_resume(job);
592+ }
593 }
594 return;
595 }
596- next = g_list_next(next);
597 }
598+
599 pvebackup_cleanup();
600 }
601
602 UuidInfo *qmp_backup(const char *backup_file, bool has_format,
603 BackupFormat format,
604 bool has_config_file, const char *config_file,
605- bool has_firewall_file, const char *firewall_file,
606+ bool has_firewall_file, const char *firewall_file,
607 bool has_devlist, const char *devlist,
608 bool has_speed, int64_t speed, Error **errp)
609 {
610@@ -3149,14 +3249,15 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
611 BlockDriverState *bs = NULL;
612 const char *backup_dir = NULL;
613 Error *local_err = NULL;
614- QemuUUID uuid;
615+ uuid_t uuid;
616+ VmaWriter *vmaw = NULL;
617 gchar **devs = NULL;
618 GList *di_list = NULL;
619 GList *l;
620 UuidInfo *uuid_info;
621 BlockJob *job;
622
623- if (backup_state.di_list || backup_state.vmaobj) {
624+ if (backup_state.di_list) {
625 error_set(errp, ERROR_CLASS_GENERIC_ERROR,
626 "previous backup not finished");
627 return NULL;
628@@ -3231,40 +3332,28 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
629 total += size;
630 }
631
632- qemu_uuid_generate(&uuid);
633+ uuid_generate(uuid);
634
635 if (format == BACKUP_FORMAT_VMA) {
636- char uuidstr[UUID_FMT_LEN+1];
637- qemu_uuid_unparse(&uuid, uuidstr);
638- uuidstr[UUID_FMT_LEN] = 0;
639- backup_state.vmaobj =
640- object_new_with_props("vma", object_get_objects_root(),
641- "vma-backup-obj", &local_err,
642- "filename", backup_file,
643- "uuid", uuidstr,
644- NULL);
645- if (!backup_state.vmaobj) {
646+ vmaw = vma_writer_create(backup_file, uuid, &local_err);
647+ if (!vmaw) {
648 if (local_err) {
649 error_propagate(errp, local_err);
650 }
651 goto err;
652 }
653
654+ /* register all devices for vma writer */
655 l = di_list;
656 while (l) {
657- QDict *options = qdict_new();
658-
659 PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
660 l = g_list_next(l);
661
662 const char *devname = bdrv_get_device_name(di->bs);
663- snprintf(di->targetfile, PATH_MAX, "vma-backup-obj/%s.raw", devname);
664-
665- qdict_put(options, "driver", qstring_from_str("vma-drive"));
666- qdict_put(options, "size", qint_from_int(di->size));
667- di->target = bdrv_open(di->targetfile, NULL, options, BDRV_O_RDWR, &local_err);
668- if (!di->target) {
669- error_propagate(errp, local_err);
670+ di->dev_id = vma_writer_register_stream(vmaw, devname, di->size);
671+ if (di->dev_id <= 0) {
672+ error_set(errp, ERROR_CLASS_GENERIC_ERROR,
673+ "register_stream failed");
674 goto err;
675 }
676 }
677@@ -3305,15 +3394,15 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
678
679 /* add configuration file to archive */
680 if (has_config_file) {
681- if(config_to_vma(config_file, format, backup_state.vmaobj, backup_dir, errp) != 0) {
682- goto err;
683+ if(config_to_vma(config_file, format, backup_dir, vmaw, errp) != 0) {
684+ goto err;
685 }
686 }
687
688 /* add firewall file to archive */
689 if (has_firewall_file) {
690- if(config_to_vma(firewall_file, format, backup_state.vmaobj, backup_dir, errp) != 0) {
691- goto err;
692+ if(config_to_vma(firewall_file, format, backup_dir, vmaw, errp) != 0) {
693+ goto err;
694 }
695 }
696 /* initialize global backup_state now */
697@@ -3335,11 +3424,12 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
698 }
699 backup_state.backup_file = g_strdup(backup_file);
700
701- memcpy(&backup_state.uuid, &uuid, sizeof(uuid));
702- qemu_uuid_unparse(&uuid, backup_state.uuid_str);
703+ backup_state.vmaw = vmaw;
704+
705+ uuid_copy(backup_state.uuid, uuid);
706+ uuid_unparse_lower(uuid, backup_state.uuid_str);
707
708 backup_state.di_list = di_list;
709- backup_state.next_job = 0;
710
711 backup_state.total = total;
712 backup_state.transferred = 0;
713@@ -3350,21 +3440,16 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
714 while (l) {
715 PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
716 l = g_list_next(l);
717-
718 job = backup_job_create(NULL, di->bs, di->target, speed, MIRROR_SYNC_MODE_FULL, NULL,
719 false, BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
720 BLOCK_JOB_DEFAULT,
721- pvebackup_complete_cb, di, 2, NULL, &local_err);
722- if (di->target) {
723- bdrv_unref(di->target);
724- di->target = NULL;
725- }
726+ pvebackup_dump_cb, pvebackup_complete_cb, di,
727+ 2, NULL, &local_err);
728 if (!job || local_err != NULL) {
729 error_setg(&backup_state.error, "backup_job_create failed");
730 pvebackup_cancel(NULL);
731- } else {
732- block_job_start(job);
733 }
734+ block_job_start(job);
735 }
736
737 if (!backup_state.error) {
738@@ -3398,9 +3483,10 @@ err:
739 g_strfreev(devs);
740 }
741
742- if (backup_state.vmaobj) {
743- object_unparent(backup_state.vmaobj);
744- backup_state.vmaobj = NULL;
745+ if (vmaw) {
746+ Error *err = NULL;
747+ vma_writer_close(vmaw, &err);
748+ unlink(backup_file);
749 }
750
751 if (backup_dir) {
752@@ -3772,7 +3858,7 @@ static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn,
753 job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
754 backup->sync, bmap, backup->compress,
755 backup->on_source_error, backup->on_target_error,
756- BLOCK_JOB_DEFAULT, NULL, NULL, 0, txn, &local_err);
757+ BLOCK_JOB_DEFAULT, NULL, NULL, NULL, 0, txn, &local_err);
758 bdrv_unref(target_bs);
759 if (local_err != NULL) {
760 error_propagate(errp, local_err);
761@@ -3851,7 +3937,7 @@ BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn,
762 job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
763 backup->sync, NULL, backup->compress,
764 backup->on_source_error, backup->on_target_error,
765- BLOCK_JOB_DEFAULT, NULL, NULL, 0, txn, &local_err);
766+ BLOCK_JOB_DEFAULT, NULL, NULL, NULL, 0, txn, &local_err);
767 if (local_err != NULL) {
768 error_propagate(errp, local_err);
769 }
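A note on the magic numbers in pvebackup_dump_cb() above: assuming VMA_CLUSTER_SIZE is 64 KiB and BDRV_SECTOR_SIZE is 512 bytes (values taken from vma.h and the QEMU block layer, neither of which is shown in this hunk), one VMA cluster spans 128 sectors, which is where the 0x7f alignment mask and the right shift by 7 for the cluster number come from. A small sketch of the same arithmetic:

    /* sketch only, under the 64 KiB cluster / 512-byte sector assumption */
    enum { SECTORS_PER_VMA_CLUSTER = 65536 / 512 };      /* 128 == 1 << 7 */

    static inline bool vma_cluster_aligned(int64_t sector_num)
    {
        return (sector_num & (SECTORS_PER_VMA_CLUSTER - 1)) == 0;   /* the 0x7f mask */
    }

    static inline int64_t vma_cluster_of(int64_t sector_num)
    {
        return sector_num >> 7;                           /* sector_num / 128 */
    }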
770diff --git a/blockjob.c b/blockjob.c
771index 764d41863e..cb3741f6dd 100644
772--- a/blockjob.c
773+++ b/blockjob.c
774@@ -37,8 +37,8 @@
775 #include "qemu/timer.h"
776 #include "qapi-event.h"
777
778-static void block_job_event_cancelled(BlockJob *job);
779-static void block_job_event_completed(BlockJob *job, const char *msg);
780+void block_job_event_cancelled(BlockJob *job);
781+void block_job_event_completed(BlockJob *job, const char *msg);
782
783 /* Transactional group of block jobs */
784 struct BlockJobTxn {
785@@ -473,7 +473,8 @@ void block_job_user_pause(BlockJob *job)
786 block_job_pause(job);
787 }
788
789-static bool block_job_should_pause(BlockJob *job)
790+bool block_job_should_pause(BlockJob *job);
791+bool block_job_should_pause(BlockJob *job)
792 {
793 return job->pause_count > 0;
794 }
795@@ -687,7 +688,7 @@ static void block_job_iostatus_set_err(BlockJob *job, int error)
796 }
797 }
798
799-static void block_job_event_cancelled(BlockJob *job)
800+void block_job_event_cancelled(BlockJob *job)
801 {
802 if (block_job_is_internal(job)) {
803 return;
804@@ -701,7 +702,7 @@ static void block_job_event_cancelled(BlockJob *job)
805 &error_abort);
806 }
807
808-static void block_job_event_completed(BlockJob *job, const char *msg)
809+void block_job_event_completed(BlockJob *job, const char *msg)
810 {
811 if (block_job_is_internal(job)) {
812 return;
813diff --git a/include/block/block_int.h b/include/block/block_int.h
814index 2b3ecd0575..278da161fb 100644
815--- a/include/block/block_int.h
816+++ b/include/block/block_int.h
817@@ -59,6 +59,9 @@
818
819 #define BLOCK_PROBE_BUF_SIZE 512
820
821+typedef int BackupDumpFunc(void *opaque, BlockBackend *be,
822+ int64_t sector_num, int n_sectors, unsigned char *buf);
823+
824 enum BdrvTrackedRequestType {
825 BDRV_TRACKED_READ,
826 BDRV_TRACKED_WRITE,
827@@ -877,6 +880,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
828 BlockdevOnError on_source_error,
829 BlockdevOnError on_target_error,
830 int creation_flags,
831+ BackupDumpFunc *dump_cb,
832 BlockCompletionFunc *cb, void *opaque,
833 int pause_count,
834 BlockJobTxn *txn, Error **errp);
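The BackupDumpFunc typedef above is the entire interface between the generic backup job and the PVE dump path, and it carries no comment of its own. The semantics below are inferred from backup_do_cow() and pvebackup_dump_cb() in this patch, so treat them as an annotated reading rather than authoritative API documentation:

    /*
     * BackupDumpFunc (inferred contract):
     *   opaque     - the opaque pointer handed to backup_job_create()
     *   be         - the target BlockBackend, NULL in pure dump mode
     *   sector_num - start of the copied range in 512-byte sectors,
     *                aligned to the job's cluster size
     *   n_sectors  - length of the range in sectors
     *   buf        - cluster data, or NULL if the whole range reads as zeroes
     *
     * Returns the number of bytes handled (>= 0) on success; any negative
     * value makes backup_do_cow() treat the cluster as a write failure.
     */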
835diff --git a/vma-reader.c b/vma-reader.c
836new file mode 100644
837index 0000000000..2000889bd3
838--- /dev/null
839+++ b/vma-reader.c
840@@ -0,0 +1,857 @@
841+/*
842+ * VMA: Virtual Machine Archive
843+ *
844+ * Copyright (C) 2012 Proxmox Server Solutions
845+ *
846+ * Authors:
847+ * Dietmar Maurer (dietmar@proxmox.com)
848+ *
849+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
850+ * See the COPYING file in the top-level directory.
851+ *
852+ */
853+
854+#include "qemu/osdep.h"
855+#include <glib.h>
856+#include <uuid/uuid.h>
857+
858+#include "qemu-common.h"
859+#include "qemu/timer.h"
860+#include "qemu/ratelimit.h"
861+#include "vma.h"
862+#include "block/block.h"
863+#include "sysemu/block-backend.h"
864+
865+static unsigned char zero_vma_block[VMA_BLOCK_SIZE];
866+
867+typedef struct VmaRestoreState {
868+ BlockBackend *target;
869+ bool write_zeroes;
870+ unsigned long *bitmap;
871+ int bitmap_size;
872+} VmaRestoreState;
873+
874+struct VmaReader {
875+ int fd;
876+ GChecksum *md5csum;
877+ GHashTable *blob_hash;
878+ unsigned char *head_data;
879+ VmaDeviceInfo devinfo[256];
880+ VmaRestoreState rstate[256];
881+ GList *cdata_list;
882+ guint8 vmstate_stream;
883+ uint32_t vmstate_clusters;
884+ /* to show restore percentage if run with -v */
885+ time_t start_time;
886+ int64_t cluster_count;
887+ int64_t clusters_read;
888+ int64_t zero_cluster_data;
889+ int64_t partial_zero_cluster_data;
890+ int clusters_read_per;
891+};
892+
893+static guint
894+g_int32_hash(gconstpointer v)
895+{
896+ return *(const uint32_t *)v;
897+}
898+
899+static gboolean
900+g_int32_equal(gconstpointer v1, gconstpointer v2)
901+{
902+ return *((const uint32_t *)v1) == *((const uint32_t *)v2);
903+}
904+
905+static int vma_reader_get_bitmap(VmaRestoreState *rstate, int64_t cluster_num)
906+{
907+ assert(rstate);
908+ assert(rstate->bitmap);
909+
910+ unsigned long val, idx, bit;
911+
912+ idx = cluster_num / BITS_PER_LONG;
913+
914+ assert(rstate->bitmap_size > idx);
915+
916+ bit = cluster_num % BITS_PER_LONG;
917+ val = rstate->bitmap[idx];
918+
919+ return !!(val & (1UL << bit));
920+}
921+
922+static void vma_reader_set_bitmap(VmaRestoreState *rstate, int64_t cluster_num,
923+ int dirty)
924+{
925+ assert(rstate);
926+ assert(rstate->bitmap);
927+
928+ unsigned long val, idx, bit;
929+
930+ idx = cluster_num / BITS_PER_LONG;
931+
932+ assert(rstate->bitmap_size > idx);
933+
934+ bit = cluster_num % BITS_PER_LONG;
935+ val = rstate->bitmap[idx];
936+ if (dirty) {
937+ if (!(val & (1UL << bit))) {
938+ val |= 1UL << bit;
939+ }
940+ } else {
941+ if (val & (1UL << bit)) {
942+ val &= ~(1UL << bit);
943+ }
944+ }
945+ rstate->bitmap[idx] = val;
946+}
947+
948+typedef struct VmaBlob {
949+ uint32_t start;
950+ uint32_t len;
951+ void *data;
952+} VmaBlob;
953+
954+static const VmaBlob *get_header_blob(VmaReader *vmar, uint32_t pos)
955+{
956+ assert(vmar);
957+ assert(vmar->blob_hash);
958+
959+ return g_hash_table_lookup(vmar->blob_hash, &pos);
960+}
961+
962+static const char *get_header_str(VmaReader *vmar, uint32_t pos)
963+{
964+ const VmaBlob *blob = get_header_blob(vmar, pos);
965+ if (!blob) {
966+ return NULL;
967+ }
968+ const char *res = (char *)blob->data;
969+ if (res[blob->len-1] != '\0') {
970+ return NULL;
971+ }
972+ return res;
973+}
974+
975+static ssize_t
976+safe_read(int fd, unsigned char *buf, size_t count)
977+{
978+ ssize_t n;
979+
980+ do {
981+ n = read(fd, buf, count);
982+ } while (n < 0 && errno == EINTR);
983+
984+ return n;
985+}
986+
987+static ssize_t
988+full_read(int fd, unsigned char *buf, size_t len)
989+{
990+ ssize_t n;
991+ size_t total;
992+
993+ total = 0;
994+
995+ while (len > 0) {
996+ n = safe_read(fd, buf, len);
997+
998+ if (n == 0) {
999+ return total;
1000+ }
1001+
1002+ if (n <= 0) {
1003+ break;
1004+ }
1005+
1006+ buf += n;
1007+ total += n;
1008+ len -= n;
1009+ }
1010+
1011+ if (len) {
1012+ return -1;
1013+ }
1014+
1015+ return total;
1016+}
1017+
1018+void vma_reader_destroy(VmaReader *vmar)
1019+{
1020+ assert(vmar);
1021+
1022+ if (vmar->fd >= 0) {
1023+ close(vmar->fd);
1024+ }
1025+
1026+ if (vmar->cdata_list) {
1027+ g_list_free(vmar->cdata_list);
1028+ }
1029+
1030+ int i;
1031+ for (i = 1; i < 256; i++) {
1032+ if (vmar->rstate[i].bitmap) {
1033+ g_free(vmar->rstate[i].bitmap);
1034+ }
1035+ }
1036+
1037+ if (vmar->md5csum) {
1038+ g_checksum_free(vmar->md5csum);
1039+ }
1040+
1041+ if (vmar->blob_hash) {
1042+ g_hash_table_destroy(vmar->blob_hash);
1043+ }
1044+
1045+ if (vmar->head_data) {
1046+ g_free(vmar->head_data);
1047+ }
1048+
1049+ g_free(vmar);
1050+
1051+};
1052+
1053+static int vma_reader_read_head(VmaReader *vmar, Error **errp)
1054+{
1055+ assert(vmar);
1056+ assert(errp);
1057+ assert(*errp == NULL);
1058+
1059+ unsigned char md5sum[16];
1060+ int i;
1061+ int ret = 0;
1062+
1063+ vmar->head_data = g_malloc(sizeof(VmaHeader));
1064+
1065+ if (full_read(vmar->fd, vmar->head_data, sizeof(VmaHeader)) !=
1066+ sizeof(VmaHeader)) {
1067+ error_setg(errp, "can't read vma header - %s",
1068+ errno ? g_strerror(errno) : "got EOF");
1069+ return -1;
1070+ }
1071+
1072+ VmaHeader *h = (VmaHeader *)vmar->head_data;
1073+
1074+ if (h->magic != VMA_MAGIC) {
1075+ error_setg(errp, "not a vma file - wrong magic number");
1076+ return -1;
1077+ }
1078+
1079+ uint32_t header_size = GUINT32_FROM_BE(h->header_size);
1080+ int need = header_size - sizeof(VmaHeader);
1081+ if (need <= 0) {
1082+ error_setg(errp, "wrong vma header size %d", header_size);
1083+ return -1;
1084+ }
1085+
1086+ vmar->head_data = g_realloc(vmar->head_data, header_size);
1087+ h = (VmaHeader *)vmar->head_data;
1088+
1089+ if (full_read(vmar->fd, vmar->head_data + sizeof(VmaHeader), need) !=
1090+ need) {
1091+ error_setg(errp, "can't read vma header data - %s",
1092+ errno ? g_strerror(errno) : "got EOF");
1093+ return -1;
1094+ }
1095+
1096+ memcpy(md5sum, h->md5sum, 16);
1097+ memset(h->md5sum, 0, 16);
1098+
1099+ g_checksum_reset(vmar->md5csum);
1100+ g_checksum_update(vmar->md5csum, vmar->head_data, header_size);
1101+ gsize csize = 16;
1102+ g_checksum_get_digest(vmar->md5csum, (guint8 *)(h->md5sum), &csize);
1103+
1104+ if (memcmp(md5sum, h->md5sum, 16) != 0) {
1105+ error_setg(errp, "wrong vma header chechsum");
1106+ return -1;
1107+ }
1108+
1109+ /* we can modify header data after checksum verify */
1110+ h->header_size = header_size;
1111+
1112+ h->version = GUINT32_FROM_BE(h->version);
1113+ if (h->version != 1) {
1114+ error_setg(errp, "wrong vma version %d", h->version);
1115+ return -1;
1116+ }
1117+
1118+ h->ctime = GUINT64_FROM_BE(h->ctime);
1119+ h->blob_buffer_offset = GUINT32_FROM_BE(h->blob_buffer_offset);
1120+ h->blob_buffer_size = GUINT32_FROM_BE(h->blob_buffer_size);
1121+
1122+ uint32_t bstart = h->blob_buffer_offset + 1;
1123+ uint32_t bend = h->blob_buffer_offset + h->blob_buffer_size;
1124+
1125+ if (bstart <= sizeof(VmaHeader)) {
1126+ error_setg(errp, "wrong vma blob buffer offset %d",
1127+ h->blob_buffer_offset);
1128+ return -1;
1129+ }
1130+
1131+ if (bend > header_size) {
1132+ error_setg(errp, "wrong vma blob buffer size %d/%d",
1133+ h->blob_buffer_offset, h->blob_buffer_size);
1134+ return -1;
1135+ }
1136+
1137+ while ((bstart + 2) <= bend) {
1138+ uint32_t size = vmar->head_data[bstart] +
1139+ (vmar->head_data[bstart+1] << 8);
1140+ if ((bstart + size + 2) <= bend) {
1141+ VmaBlob *blob = g_new0(VmaBlob, 1);
1142+ blob->start = bstart - h->blob_buffer_offset;
1143+ blob->len = size;
1144+ blob->data = vmar->head_data + bstart + 2;
1145+ g_hash_table_insert(vmar->blob_hash, &blob->start, blob);
1146+ }
1147+ bstart += size + 2;
1148+ }
1149+
1150+
1151+ int count = 0;
1152+ for (i = 1; i < 256; i++) {
1153+ VmaDeviceInfoHeader *dih = &h->dev_info[i];
1154+ uint32_t devname_ptr = GUINT32_FROM_BE(dih->devname_ptr);
1155+ uint64_t size = GUINT64_FROM_BE(dih->size);
1156+ const char *devname = get_header_str(vmar, devname_ptr);
1157+
1158+ if (size && devname) {
1159+ count++;
1160+ vmar->devinfo[i].size = size;
1161+ vmar->devinfo[i].devname = devname;
1162+
1163+ if (strcmp(devname, "vmstate") == 0) {
1164+ vmar->vmstate_stream = i;
1165+ }
1166+ }
1167+ }
1168+
1169+ for (i = 0; i < VMA_MAX_CONFIGS; i++) {
1170+ uint32_t name_ptr = GUINT32_FROM_BE(h->config_names[i]);
1171+ uint32_t data_ptr = GUINT32_FROM_BE(h->config_data[i]);
1172+
1173+ if (!(name_ptr && data_ptr)) {
1174+ continue;
1175+ }
1176+ const char *name = get_header_str(vmar, name_ptr);
1177+ const VmaBlob *blob = get_header_blob(vmar, data_ptr);
1178+
1179+ if (!(name && blob)) {
1180+ error_setg(errp, "vma contains invalid data pointers");
1181+ return -1;
1182+ }
1183+
1184+ VmaConfigData *cdata = g_new0(VmaConfigData, 1);
1185+ cdata->name = name;
1186+ cdata->data = blob->data;
1187+ cdata->len = blob->len;
1188+
1189+ vmar->cdata_list = g_list_append(vmar->cdata_list, cdata);
1190+ }
1191+
1192+ return ret;
1193+};
1194+
1195+VmaReader *vma_reader_create(const char *filename, Error **errp)
1196+{
1197+ assert(filename);
1198+ assert(errp);
1199+
1200+ VmaReader *vmar = g_new0(VmaReader, 1);
1201+
1202+ if (strcmp(filename, "-") == 0) {
1203+ vmar->fd = dup(0);
1204+ } else {
1205+ vmar->fd = open(filename, O_RDONLY);
1206+ }
1207+
1208+ if (vmar->fd < 0) {
1209+ error_setg(errp, "can't open file %s - %s\n", filename,
1210+ g_strerror(errno));
1211+ goto err;
1212+ }
1213+
1214+ vmar->md5csum = g_checksum_new(G_CHECKSUM_MD5);
1215+ if (!vmar->md5csum) {
1216+ error_setg(errp, "can't allocate checksum\n");
1217+ goto err;
1218+ }
1219+
1220+ vmar->blob_hash = g_hash_table_new_full(g_int32_hash, g_int32_equal,
1221+ NULL, g_free);
1222+
1223+ if (vma_reader_read_head(vmar, errp) < 0) {
1224+ goto err;
1225+ }
1226+
1227+ return vmar;
1228+
1229+err:
1230+ if (vmar) {
1231+ vma_reader_destroy(vmar);
1232+ }
1233+
1234+ return NULL;
1235+}
1236+
1237+VmaHeader *vma_reader_get_header(VmaReader *vmar)
1238+{
1239+ assert(vmar);
1240+ assert(vmar->head_data);
1241+
1242+ return (VmaHeader *)(vmar->head_data);
1243+}
1244+
1245+GList *vma_reader_get_config_data(VmaReader *vmar)
1246+{
1247+ assert(vmar);
1248+ assert(vmar->head_data);
1249+
1250+ return vmar->cdata_list;
1251+}
1252+
1253+VmaDeviceInfo *vma_reader_get_device_info(VmaReader *vmar, guint8 dev_id)
1254+{
1255+ assert(vmar);
1256+ assert(dev_id);
1257+
1258+ if (vmar->devinfo[dev_id].size && vmar->devinfo[dev_id].devname) {
1259+ return &vmar->devinfo[dev_id];
1260+ }
1261+
1262+ return NULL;
1263+}
1264+
1265+static void allocate_rstate(VmaReader *vmar, guint8 dev_id,
1266+ BlockBackend *target, bool write_zeroes)
1267+{
1268+ assert(vmar);
1269+ assert(dev_id);
1270+
1271+ vmar->rstate[dev_id].target = target;
1272+ vmar->rstate[dev_id].write_zeroes = write_zeroes;
1273+
1274+ int64_t size = vmar->devinfo[dev_id].size;
1275+
1276+ int64_t bitmap_size = (size/BDRV_SECTOR_SIZE) +
1277+ (VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE) * BITS_PER_LONG - 1;
1278+ bitmap_size /= (VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE) * BITS_PER_LONG;
1279+
1280+ vmar->rstate[dev_id].bitmap_size = bitmap_size;
1281+ vmar->rstate[dev_id].bitmap = g_new0(unsigned long, bitmap_size);
1282+
1283+ vmar->cluster_count += size/VMA_CLUSTER_SIZE;
1284+}
1285+
1286+int vma_reader_register_bs(VmaReader *vmar, guint8 dev_id, BlockBackend *target,
1287+ bool write_zeroes, Error **errp)
1288+{
1289+ assert(vmar);
1290+ assert(target != NULL);
1291+ assert(dev_id);
1292+ assert(vmar->rstate[dev_id].target == NULL);
1293+
1294+ int64_t size = blk_getlength(target);
1295+ int64_t size_diff = size - vmar->devinfo[dev_id].size;
1296+
1297+ /* storage types can have different size restrictions, so it
1298+ * is not always possible to create an image with the exact size.
1299+ * So we tolerate a size difference of up to 4MB.
1300+ */
1301+ if ((size_diff < 0) || (size_diff > 4*1024*1024)) {
1302+ error_setg(errp, "vma_reader_register_bs for stream %s failed - "
1303+ "unexpected size %zd != %zd", vmar->devinfo[dev_id].devname,
1304+ size, vmar->devinfo[dev_id].size);
1305+ return -1;
1306+ }
1307+
1308+ allocate_rstate(vmar, dev_id, target, write_zeroes);
1309+
1310+ return 0;
1311+}
1312+
1313+static ssize_t safe_write(int fd, void *buf, size_t count)
1314+{
1315+ ssize_t n;
1316+
1317+ do {
1318+ n = write(fd, buf, count);
1319+ } while (n < 0 && errno == EINTR);
1320+
1321+ return n;
1322+}
1323+
1324+static size_t full_write(int fd, void *buf, size_t len)
1325+{
1326+ ssize_t n;
1327+ size_t total;
1328+
1329+ total = 0;
1330+
1331+ while (len > 0) {
1332+ n = safe_write(fd, buf, len);
1333+ if (n < 0) {
1334+ return n;
1335+ }
1336+ buf += n;
1337+ total += n;
1338+ len -= n;
1339+ }
1340+
1341+ if (len) {
1342+ /* incomplete write ? */
1343+ return -1;
1344+ }
1345+
1346+ return total;
1347+}
1348+
1349+static int restore_write_data(VmaReader *vmar, guint8 dev_id,
1350+ BlockBackend *target, int vmstate_fd,
1351+ unsigned char *buf, int64_t sector_num,
1352+ int nb_sectors, Error **errp)
1353+{
1354+ assert(vmar);
1355+
1356+ if (dev_id == vmar->vmstate_stream) {
1357+ if (vmstate_fd >= 0) {
1358+ int len = nb_sectors * BDRV_SECTOR_SIZE;
1359+ int res = full_write(vmstate_fd, buf, len);
1360+ if (res < 0) {
1361+ error_setg(errp, "write vmstate failed %d", res);
1362+ return -1;
1363+ }
1364+ }
1365+ } else {
1366+ int res = blk_pwrite(target, sector_num * BDRV_SECTOR_SIZE, buf, nb_sectors * BDRV_SECTOR_SIZE, 0);
1367+ if (res < 0) {
1368+ error_setg(errp, "blk_pwrite to %s failed (%d)",
1369+ bdrv_get_device_name(blk_bs(target)), res);
1370+ return -1;
1371+ }
1372+ }
1373+ return 0;
1374+}
1375+
1376+static int restore_extent(VmaReader *vmar, unsigned char *buf,
1377+ int extent_size, int vmstate_fd,
1378+ bool verbose, bool verify, Error **errp)
1379+{
1380+ assert(vmar);
1381+ assert(buf);
1382+
1383+ VmaExtentHeader *ehead = (VmaExtentHeader *)buf;
1384+ int start = VMA_EXTENT_HEADER_SIZE;
1385+ int i;
1386+
1387+ for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
1388+ uint64_t block_info = GUINT64_FROM_BE(ehead->blockinfo[i]);
1389+ uint64_t cluster_num = block_info & 0xffffffff;
1390+ uint8_t dev_id = (block_info >> 32) & 0xff;
1391+ uint16_t mask = block_info >> (32+16);
1392+ int64_t max_sector;
1393+
1394+ if (!dev_id) {
1395+ continue;
1396+ }
1397+
1398+ VmaRestoreState *rstate = &vmar->rstate[dev_id];
1399+ BlockBackend *target = NULL;
1400+
1401+ if (dev_id != vmar->vmstate_stream) {
1402+ target = rstate->target;
1403+ if (!verify && !target) {
1404+ error_setg(errp, "got wrong dev id %d", dev_id);
1405+ return -1;
1406+ }
1407+
1408+ if (vma_reader_get_bitmap(rstate, cluster_num)) {
1409+ error_setg(errp, "found duplicated cluster %zd for stream %s",
1410+ cluster_num, vmar->devinfo[dev_id].devname);
1411+ return -1;
1412+ }
1413+ vma_reader_set_bitmap(rstate, cluster_num, 1);
1414+
1415+ max_sector = vmar->devinfo[dev_id].size/BDRV_SECTOR_SIZE;
1416+ } else {
1417+ max_sector = G_MAXINT64;
1418+ if (cluster_num != vmar->vmstate_clusters) {
1419+ error_setg(errp, "found out of order vmstate data");
1420+ return -1;
1421+ }
1422+ vmar->vmstate_clusters++;
1423+ }
1424+
1425+ vmar->clusters_read++;
1426+
1427+ if (verbose) {
1428+ time_t duration = time(NULL) - vmar->start_time;
1429+ int percent = (vmar->clusters_read*100)/vmar->cluster_count;
1430+ if (percent != vmar->clusters_read_per) {
1431+ printf("progress %d%% (read %zd bytes, duration %zd sec)\n",
1432+ percent, vmar->clusters_read*VMA_CLUSTER_SIZE,
1433+ duration);
1434+ fflush(stdout);
1435+ vmar->clusters_read_per = percent;
1436+ }
1437+ }
1438+
1439+ /* try to write whole clusters to speed up restore */
1440+ if (mask == 0xffff) {
1441+ if ((start + VMA_CLUSTER_SIZE) > extent_size) {
1442+ error_setg(errp, "short vma extent - too many blocks");
1443+ return -1;
1444+ }
1445+ int64_t sector_num = (cluster_num * VMA_CLUSTER_SIZE) /
1446+ BDRV_SECTOR_SIZE;
1447+ int64_t end_sector = sector_num +
1448+ VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE;
1449+
1450+ if (end_sector > max_sector) {
1451+ end_sector = max_sector;
1452+ }
1453+
1454+ if (end_sector <= sector_num) {
1455+ error_setg(errp, "got wrong block address - write beyond end");
1456+ return -1;
1457+ }
1458+
1459+ if (!verify) {
1460+ int nb_sectors = end_sector - sector_num;
1461+ if (restore_write_data(vmar, dev_id, target, vmstate_fd,
1462+ buf + start, sector_num, nb_sectors,
1463+ errp) < 0) {
1464+ return -1;
1465+ }
1466+ }
1467+
1468+ start += VMA_CLUSTER_SIZE;
1469+ } else {
1470+ int j;
1471+ int bit = 1;
1472+
1473+ for (j = 0; j < 16; j++) {
1474+ int64_t sector_num = (cluster_num*VMA_CLUSTER_SIZE +
1475+ j*VMA_BLOCK_SIZE)/BDRV_SECTOR_SIZE;
1476+
1477+ int64_t end_sector = sector_num +
1478+ VMA_BLOCK_SIZE/BDRV_SECTOR_SIZE;
1479+ if (end_sector > max_sector) {
1480+ end_sector = max_sector;
1481+ }
1482+
1483+ if (mask & bit) {
1484+ if ((start + VMA_BLOCK_SIZE) > extent_size) {
1485+ error_setg(errp, "short vma extent - too many blocks");
1486+ return -1;
1487+ }
1488+
1489+ if (end_sector <= sector_num) {
1490+ error_setg(errp, "got wrong block address - "
1491+ "write bejond end");
1492+ return -1;
1493+ }
1494+
1495+ if (!verify) {
1496+ int nb_sectors = end_sector - sector_num;
1497+ if (restore_write_data(vmar, dev_id, target, vmstate_fd,
1498+ buf + start, sector_num,
1499+ nb_sectors, errp) < 0) {
1500+ return -1;
1501+ }
1502+ }
1503+
1504+ start += VMA_BLOCK_SIZE;
1505+
1506+ } else {
1507+
1508+
1509+ if (end_sector > sector_num) {
1510+ /* Todo: use bdrv_co_write_zeroes (but that needs to
1511+ * be run inside a coroutine?)
1512+ */
1513+ int nb_sectors = end_sector - sector_num;
1514+ int zero_size = BDRV_SECTOR_SIZE*nb_sectors;
1515+ vmar->zero_cluster_data += zero_size;
1516+ if (mask != 0) {
1517+ vmar->partial_zero_cluster_data += zero_size;
1518+ }
1519+
1520+ if (rstate->write_zeroes && !verify) {
1521+ if (restore_write_data(vmar, dev_id, target, vmstate_fd,
1522+ zero_vma_block, sector_num,
1523+ nb_sectors, errp) < 0) {
1524+ return -1;
1525+ }
1526+ }
1527+ }
1528+ }
1529+
1530+ bit = bit << 1;
1531+ }
1532+ }
1533+ }
1534+
1535+ if (start != extent_size) {
1536+ error_setg(errp, "vma extent error - missing blocks");
1537+ return -1;
1538+ }
1539+
1540+ return 0;
1541+}
1542+
1543+static int vma_reader_restore_full(VmaReader *vmar, int vmstate_fd,
1544+ bool verbose, bool verify,
1545+ Error **errp)
1546+{
1547+ assert(vmar);
1548+ assert(vmar->head_data);
1549+
1550+ int ret = 0;
1551+ unsigned char buf[VMA_MAX_EXTENT_SIZE];
1552+ int buf_pos = 0;
1553+ unsigned char md5sum[16];
1554+ VmaHeader *h = (VmaHeader *)vmar->head_data;
1555+
1556+ vmar->start_time = time(NULL);
1557+
1558+ while (1) {
1559+ int bytes = full_read(vmar->fd, buf + buf_pos, sizeof(buf) - buf_pos);
1560+ if (bytes < 0) {
1561+ error_setg(errp, "read failed - %s", g_strerror(errno));
1562+ return -1;
1563+ }
1564+
1565+ buf_pos += bytes;
1566+
1567+ if (!buf_pos) {
1568+ break; /* EOF */
1569+ }
1570+
1571+ if (buf_pos < VMA_EXTENT_HEADER_SIZE) {
1572+ error_setg(errp, "read short extent (%d bytes)", buf_pos);
1573+ return -1;
1574+ }
1575+
1576+ VmaExtentHeader *ehead = (VmaExtentHeader *)buf;
1577+
1578+ /* extract md5sum */
1579+ memcpy(md5sum, ehead->md5sum, sizeof(ehead->md5sum));
1580+ memset(ehead->md5sum, 0, sizeof(ehead->md5sum));
1581+
1582+ g_checksum_reset(vmar->md5csum);
1583+ g_checksum_update(vmar->md5csum, buf, VMA_EXTENT_HEADER_SIZE);
1584+ gsize csize = 16;
1585+ g_checksum_get_digest(vmar->md5csum, ehead->md5sum, &csize);
1586+
1587+ if (memcmp(md5sum, ehead->md5sum, 16) != 0) {
1588+ error_setg(errp, "wrong vma extent header checksum");
1589+ return -1;
1590+ }
1591+
1592+ if (memcmp(h->uuid, ehead->uuid, sizeof(ehead->uuid)) != 0) {
1593+ error_setg(errp, "wrong vma extent uuid");
1594+ return -1;
1595+ }
1596+
1597+ if (ehead->magic != VMA_EXTENT_MAGIC || ehead->reserved1 != 0) {
1598+ error_setg(errp, "wrong vma extent header magic");
1599+ return -1;
1600+ }
1601+
1602+ int block_count = GUINT16_FROM_BE(ehead->block_count);
1603+ int extent_size = VMA_EXTENT_HEADER_SIZE + block_count*VMA_BLOCK_SIZE;
1604+
1605+ if (buf_pos < extent_size) {
1606+ error_setg(errp, "short vma extent (%d < %d)", buf_pos,
1607+ extent_size);
1608+ return -1;
1609+ }
1610+
1611+ if (restore_extent(vmar, buf, extent_size, vmstate_fd, verbose,
1612+ verify, errp) < 0) {
1613+ return -1;
1614+ }
1615+
1616+ if (buf_pos > extent_size) {
1617+ memmove(buf, buf + extent_size, buf_pos - extent_size);
1618+ buf_pos = buf_pos - extent_size;
1619+ } else {
1620+ buf_pos = 0;
1621+ }
1622+ }
1623+
1624+ bdrv_drain_all();
1625+
1626+ int i;
1627+ for (i = 1; i < 256; i++) {
1628+ VmaRestoreState *rstate = &vmar->rstate[i];
1629+ if (!rstate->target) {
1630+ continue;
1631+ }
1632+
1633+ if (blk_flush(rstate->target) < 0) {
1634+ error_setg(errp, "vma blk_flush %s failed",
1635+ vmar->devinfo[i].devname);
1636+ return -1;
1637+ }
1638+
1639+ if (vmar->devinfo[i].size &&
1640+ (strcmp(vmar->devinfo[i].devname, "vmstate") != 0)) {
1641+ assert(rstate->bitmap);
1642+
1643+ int64_t cluster_num, end;
1644+
1645+ end = (vmar->devinfo[i].size + VMA_CLUSTER_SIZE - 1) /
1646+ VMA_CLUSTER_SIZE;
1647+
1648+ for (cluster_num = 0; cluster_num < end; cluster_num++) {
1649+ if (!vma_reader_get_bitmap(rstate, cluster_num)) {
1650+ error_setg(errp, "detected missing cluster %zd "
1651+ "for stream %s", cluster_num,
1652+ vmar->devinfo[i].devname);
1653+ return -1;
1654+ }
1655+ }
1656+ }
1657+ }
1658+
1659+ if (verbose) {
1660+ if (vmar->clusters_read) {
1661+ printf("total bytes read %zd, sparse bytes %zd (%.3g%%)\n",
1662+ vmar->clusters_read*VMA_CLUSTER_SIZE,
1663+ vmar->zero_cluster_data,
1664+ (double)(100.0*vmar->zero_cluster_data)/
1665+ (vmar->clusters_read*VMA_CLUSTER_SIZE));
1666+
1667+ int64_t datasize = vmar->clusters_read*VMA_CLUSTER_SIZE-vmar->zero_cluster_data;
1668+ if (datasize) { // this does not make sense for empty files
1669+ printf("space reduction due to 4K zero blocks %.3g%%\n",
1670+ (double)(100.0*vmar->partial_zero_cluster_data) / datasize);
1671+ }
1672+ } else {
1673+ printf("vma archive contains no image data\n");
1674+ }
1675+ }
1676+ return ret;
1677+}
1678+
1679+int vma_reader_restore(VmaReader *vmar, int vmstate_fd, bool verbose,
1680+ Error **errp)
1681+{
1682+ return vma_reader_restore_full(vmar, vmstate_fd, verbose, false, errp);
1683+}
1684+
1685+int vma_reader_verify(VmaReader *vmar, bool verbose, Error **errp)
1686+{
1687+ guint8 dev_id;
1688+
1689+ for (dev_id = 1; dev_id < 255; dev_id++) {
1690+ if (vma_reader_get_device_info(vmar, dev_id)) {
1691+ allocate_rstate(vmar, dev_id, NULL, false);
1692+ }
1693+ }
1694+
1695+ return vma_reader_restore_full(vmar, -1, verbose, true, errp);
1696+}
1697+
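For orientation: restore_extent() above decodes one 64-bit blockinfo word per cluster from the extent header. The low 32 bits hold the cluster number, the next 8 bits the device/stream id (0 marks an unused slot), and the top 16 bits a mask telling which of the 16 VMA_BLOCK_SIZE blocks inside the cluster actually carry data (mask == 0xffff means the whole cluster is present, anything else is written block by block). A small sketch mirroring the shifts used above:

    /* sketch only - mirrors the bit layout handled by restore_extent() */
    static inline void vma_decode_blockinfo(uint64_t block_info,
                                            uint64_t *cluster_num,
                                            uint8_t *dev_id, uint16_t *mask)
    {
        *cluster_num = block_info & 0xffffffff;    /* low 32 bits */
        *dev_id      = (block_info >> 32) & 0xff;  /* 0 means unused entry */
        *mask        = block_info >> (32 + 16);    /* one bit per block in the cluster */
    }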
1698diff --git a/vma-writer.c b/vma-writer.c
1699new file mode 100644
1700index 0000000000..9001cbdd2b
1701--- /dev/null
1702+++ b/vma-writer.c
1703@@ -0,0 +1,771 @@
1704+/*
1705+ * VMA: Virtual Machine Archive
1706+ *
1707+ * Copyright (C) 2012 Proxmox Server Solutions
1708+ *
1709+ * Authors:
1710+ * Dietmar Maurer (dietmar@proxmox.com)
1711+ *
1712+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
1713+ * See the COPYING file in the top-level directory.
1714+ *
1715+ */
1716+
1717+#include "qemu/osdep.h"
1718+#include <glib.h>
1719+#include <uuid/uuid.h>
1720+
1721+#include "vma.h"
1722+#include "block/block.h"
1723+#include "monitor/monitor.h"
1724+#include "qemu/main-loop.h"
1725+#include "qemu/coroutine.h"
1726+#include "qemu/cutils.h"
1727+
1728+#define DEBUG_VMA 0
1729+
1730+#define DPRINTF(fmt, ...)\
1731+ do { if (DEBUG_VMA) { printf("vma: " fmt, ## __VA_ARGS__); } } while (0)
1732+
1733+#define WRITE_BUFFERS 5
1734+#define HEADER_CLUSTERS 8
1735+#define HEADERBUF_SIZE (VMA_CLUSTER_SIZE*HEADER_CLUSTERS)
1736+
1737+struct VmaWriter {
1738+ int fd;
1739+ FILE *cmd;
1740+ int status;
1741+ char errmsg[8192];
1742+ uuid_t uuid;
1743+ bool header_written;
1744+ bool closed;
1745+
1746+ /* we always write extents */
1747+ unsigned char *outbuf;
1748+ int outbuf_pos; /* in bytes */
1749+ int outbuf_count; /* in VMA_BLOCKS */
1750+ uint64_t outbuf_block_info[VMA_BLOCKS_PER_EXTENT];
1751+
1752+ unsigned char *headerbuf;
1753+
1754+ GChecksum *md5csum;
1755+ CoMutex flush_lock;
1756+ Coroutine *co_writer;
1757+
1758+ /* drive information */
1759+ VmaStreamInfo stream_info[256];
1760+ guint stream_count;
1761+
1762+ guint8 vmstate_stream;
1763+ uint32_t vmstate_clusters;
1764+
1765+ /* header blob table */
1766+ char *header_blob_table;
1767+ uint32_t header_blob_table_size;
1768+ uint32_t header_blob_table_pos;
1769+
1770+ /* store for config blobs */
1771+ uint32_t config_names[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
1772+ uint32_t config_data[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
1773+ uint32_t config_count;
1774+};
1775+
1776+void vma_writer_set_error(VmaWriter *vmaw, const char *fmt, ...)
1777+{
1778+ va_list ap;
1779+
1780+ if (vmaw->status < 0) {
1781+ return;
1782+ }
1783+
1784+ vmaw->status = -1;
1785+
1786+ va_start(ap, fmt);
1787+ g_vsnprintf(vmaw->errmsg, sizeof(vmaw->errmsg), fmt, ap);
1788+ va_end(ap);
1789+
1790+ DPRINTF("vma_writer_set_error: %s\n", vmaw->errmsg);
1791+}
1792+
1793+static uint32_t allocate_header_blob(VmaWriter *vmaw, const char *data,
1794+ size_t len)
1795+{
1796+ if (len > 65535) {
1797+ return 0;
1798+ }
1799+
1800+ if (!vmaw->header_blob_table ||
1801+ (vmaw->header_blob_table_size <
1802+ (vmaw->header_blob_table_pos + len + 2))) {
1803+ int newsize = vmaw->header_blob_table_size + ((len + 2 + 511)/512)*512;
1804+
1805+ vmaw->header_blob_table = g_realloc(vmaw->header_blob_table, newsize);
1806+ memset(vmaw->header_blob_table + vmaw->header_blob_table_size,
1807+ 0, newsize - vmaw->header_blob_table_size);
1808+ vmaw->header_blob_table_size = newsize;
1809+ }
1810+
1811+ uint32_t cpos = vmaw->header_blob_table_pos;
1812+ vmaw->header_blob_table[cpos] = len & 255;
1813+ vmaw->header_blob_table[cpos+1] = (len >> 8) & 255;
1814+ memcpy(vmaw->header_blob_table + cpos + 2, data, len);
1815+ vmaw->header_blob_table_pos += len + 2;
1816+ return cpos;
1817+}
1818+
1819+static uint32_t allocate_header_string(VmaWriter *vmaw, const char *str)
1820+{
1821+ assert(vmaw);
1822+
1823+ size_t len = strlen(str) + 1;
1824+
1825+ return allocate_header_blob(vmaw, str, len);
1826+}
1827+
1828+int vma_writer_add_config(VmaWriter *vmaw, const char *name, gpointer data,
1829+ gsize len)
1830+{
1831+ assert(vmaw);
1832+ assert(!vmaw->header_written);
1833+ assert(vmaw->config_count < VMA_MAX_CONFIGS);
1834+ assert(name);
1835+ assert(data);
95259824
WB
1836+
1837+ gchar *basename = g_path_get_basename(name);
1838+ uint32_t name_ptr = allocate_header_string(vmaw, basename);
1839+ g_free(basename);
1840+
1841+ if (!name_ptr) {
1842+ return -1;
1843+ }
1844+
1845+ uint32_t data_ptr = allocate_header_blob(vmaw, data, len);
1846+ if (!data_ptr) {
1847+ return -1;
1848+ }
1849+
1850+ vmaw->config_names[vmaw->config_count] = name_ptr;
1851+ vmaw->config_data[vmaw->config_count] = data_ptr;
1852+
1853+ vmaw->config_count++;
1854+
1855+ return 0;
1856+}
1857+
1858+int vma_writer_register_stream(VmaWriter *vmaw, const char *devname,
1859+ size_t size)
1860+{
1861+ assert(vmaw);
1862+ assert(devname);
1863+ assert(!vmaw->status);
1864+
1865+ if (vmaw->header_written) {
1866+ vma_writer_set_error(vmaw, "vma_writer_register_stream: header "
1867+ "already written");
1868+ return -1;
1869+ }
1870+
1871+ guint n = vmaw->stream_count + 1;
1872+
1873+ /* we can have dev_ids from 1 to 255 (0 reserved)
1874+ * 255 (-1) reserved for safety
1875+ */
1876+ if (n > 254) {
1877+ vma_writer_set_error(vmaw, "vma_writer_register_stream: "
1878+ "too many drives");
1879+ return -1;
1880+ }
1881+
1882+ if (size <= 0) {
1883+ vma_writer_set_error(vmaw, "vma_writer_register_stream: "
1884+ "got strange size %zd", size);
1885+ return -1;
1886+ }
1887+
1888+ DPRINTF("vma_writer_register_stream %s %zu %d\n", devname, size, n);
1889+
1890+ vmaw->stream_info[n].devname = g_strdup(devname);
1891+ vmaw->stream_info[n].size = size;
1892+
1893+ vmaw->stream_info[n].cluster_count = (size + VMA_CLUSTER_SIZE - 1) /
1894+ VMA_CLUSTER_SIZE;
1895+
1896+ vmaw->stream_count = n;
1897+
1898+ if (strcmp(devname, "vmstate") == 0) {
1899+ vmaw->vmstate_stream = n;
1900+ }
1901+
1902+ return n;
1903+}
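
As a worked example, a 32 GiB drive registered here gets cluster_count = (34359738368 + 65535) / 65536 = 524288 clusters of 64 KiB, which is the unit vma_writer_write() below operates on; a stream named "vmstate" is special-cased and its clusters are only checked for being written sequentially.
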
1904+
1905+static void vma_co_continue_write(void *opaque)
1906+{
1907+ VmaWriter *vmaw = opaque;
1908+
1909+ DPRINTF("vma_co_continue_write\n");
1910+ qemu_coroutine_enter(vmaw->co_writer);
1911+}
1912+
1913+static ssize_t coroutine_fn
1914+vma_queue_write(VmaWriter *vmaw, const void *buf, size_t bytes)
1915+{
1916+ DPRINTF("vma_queue_write enter %zd\n", bytes);
1917+
1918+ assert(vmaw);
1919+ assert(buf);
1920+ assert(bytes <= VMA_MAX_EXTENT_SIZE);
1921+
1922+ size_t done = 0;
1923+ ssize_t ret;
1924+
1925+ assert(vmaw->co_writer == NULL);
1926+
1927+ vmaw->co_writer = qemu_coroutine_self();
1928+
1929+ while (done < bytes) {
1930+ aio_set_fd_handler(qemu_get_aio_context(), vmaw->fd, false, NULL, vma_co_continue_write, NULL, vmaw);
1931+ qemu_coroutine_yield();
1932+ aio_set_fd_handler(qemu_get_aio_context(), vmaw->fd, false, NULL, NULL, NULL, NULL);
1933+ if (vmaw->status < 0) {
1934+ DPRINTF("vma_queue_write detected canceled backup\n");
1935+ done = -1;
1936+ break;
1937+ }
1938+ ret = write(vmaw->fd, buf + done, bytes - done);
1939+ if (ret > 0) {
1940+ done += ret;
1941+ DPRINTF("vma_queue_write written %zd %zd\n", done, ret);
1942+ } else if (ret < 0) {
1943+ if (errno == EAGAIN || errno == EWOULDBLOCK) {
1944+ /* try again */
1945+ } else {
1946+ vma_writer_set_error(vmaw, "vma_queue_write: write error - %s",
1947+ g_strerror(errno));
1948+ done = -1; /* always return failure for partial writes */
1949+ break;
1950+ }
1951+ } else if (ret == 0) {
1952+ /* should not happen - simply try again */
1953+ }
1954+ }
1955+
1956+ vmaw->co_writer = NULL;
1957+
1958+ return (done == bytes) ? bytes : -1;
1959+}
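
The writer keeps the output fd in O_NONBLOCK mode and, instead of spinning on EAGAIN, registers a write-ready handler, yields the coroutine and retries write() once the handler re-enters it. The same retry pattern, expressed as a standalone POSIX sketch with poll() instead of the QEMU event loop (illustrative only, not part of the patch):

    #include <poll.h>
    #include <errno.h>
    #include <unistd.h>

    static ssize_t write_all_nonblock(int fd, const void *buf, size_t bytes)
    {
        size_t done = 0;
        while (done < bytes) {
            struct pollfd pfd = { .fd = fd, .events = POLLOUT };
            poll(&pfd, 1, -1);                 /* wait until fd is writable */
            ssize_t ret = write(fd, (const char *)buf + done, bytes - done);
            if (ret > 0) {
                done += ret;
            } else if (ret < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
                return -1;                     /* hard error: fail the whole write */
            }
            /* ret == 0 or EAGAIN: simply try again */
        }
        return done;
    }
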
1960+
1961+VmaWriter *vma_writer_create(const char *filename, uuid_t uuid, Error **errp)
1962+{
1963+ const char *p;
1964+
1965+ assert(sizeof(VmaHeader) == (4096 + 8192));
1966+ assert(G_STRUCT_OFFSET(VmaHeader, config_names) == 2044);
1967+ assert(G_STRUCT_OFFSET(VmaHeader, config_data) == 3068);
1968+ assert(G_STRUCT_OFFSET(VmaHeader, dev_info) == 4096);
1969+ assert(sizeof(VmaExtentHeader) == 512);
1970+
1971+ VmaWriter *vmaw = g_new0(VmaWriter, 1);
1972+ vmaw->fd = -1;
1973+
1974+ vmaw->md5csum = g_checksum_new(G_CHECKSUM_MD5);
1975+ if (!vmaw->md5csum) {
1976+ error_setg(errp, "can't allocate checksum\n");
1977+ goto err;
1978+ }
1979+
1980+ if (strstart(filename, "exec:", &p)) {
1981+ vmaw->cmd = popen(p, "w");
1982+ if (vmaw->cmd == NULL) {
1983+ error_setg(errp, "can't popen command '%s' - %s\n", p,
1984+ g_strerror(errno));
1985+ goto err;
1986+ }
1987+ vmaw->fd = fileno(vmaw->cmd);
1988+
1989+ /* try to use O_NONBLOCK */
1990+ fcntl(vmaw->fd, F_SETFL, fcntl(vmaw->fd, F_GETFL)|O_NONBLOCK);
1991+
1992+ } else {
1993+ struct stat st;
1994+ int oflags;
1995+ const char *tmp_id_str;
1996+
1997+ if ((stat(filename, &st) == 0) && S_ISFIFO(st.st_mode)) {
1998+ oflags = O_NONBLOCK|O_WRONLY;
1999+ vmaw->fd = qemu_open(filename, oflags, 0644);
2000+ } else if (strstart(filename, "/dev/fdset/", &tmp_id_str)) {
2001+ oflags = O_NONBLOCK|O_WRONLY;
2002+ vmaw->fd = qemu_open(filename, oflags, 0644);
2003+ } else if (strstart(filename, "/dev/fdname/", &tmp_id_str)) {
2004+ vmaw->fd = monitor_get_fd(cur_mon, tmp_id_str, errp);
2005+ if (vmaw->fd < 0) {
2006+ goto err;
2007+ }
2008+ /* try to use O_NONBLOCK */
2009+ fcntl(vmaw->fd, F_SETFL, fcntl(vmaw->fd, F_GETFL)|O_NONBLOCK);
2010+ } else {
2011+ oflags = O_NONBLOCK|O_DIRECT|O_WRONLY|O_CREAT|O_EXCL;
2012+ vmaw->fd = qemu_open(filename, oflags, 0644);
2013+ }
2014+
2015+ if (vmaw->fd < 0) {
2016+ error_setg(errp, "can't open file %s - %s\n", filename,
2017+ g_strerror(errno));
2018+ goto err;
2019+ }
2020+ }
2021+
2022+ /* we use O_DIRECT, so we need to align IO buffers */
2023+
2024+ vmaw->outbuf = qemu_memalign(512, VMA_MAX_EXTENT_SIZE);
2025+ vmaw->headerbuf = qemu_memalign(512, HEADERBUF_SIZE);
2026+
2027+ vmaw->outbuf_count = 0;
2028+ vmaw->outbuf_pos = VMA_EXTENT_HEADER_SIZE;
2029+
2030+ vmaw->header_blob_table_pos = 1; /* start at pos 1 */
2031+
2032+ qemu_co_mutex_init(&vmaw->flush_lock);
2033+
2034+ uuid_copy(vmaw->uuid, uuid);
2035+
2036+ return vmaw;
2037+
2038+err:
2039+ if (vmaw) {
2040+ if (vmaw->cmd) {
2041+ pclose(vmaw->cmd);
2042+ } else if (vmaw->fd >= 0) {
2043+ close(vmaw->fd);
2044+ }
2045+
2046+ if (vmaw->md5csum) {
2047+ g_checksum_free(vmaw->md5csum);
2048+ }
2049+
2050+ g_free(vmaw);
2051+ }
2052+
2053+ return NULL;
2054+}
2055+
2056+static int coroutine_fn vma_write_header(VmaWriter *vmaw)
2057+{
2058+ assert(vmaw);
2059+ unsigned char *buf = vmaw->headerbuf;
2060+ VmaHeader *head = (VmaHeader *)buf;
2061+
2062+ int i;
2063+
2064+ DPRINTF("VMA WRITE HEADER\n");
2065+
2066+ if (vmaw->status < 0) {
2067+ return vmaw->status;
2068+ }
2069+
2070+ memset(buf, 0, HEADERBUF_SIZE);
2071+
2072+ head->magic = VMA_MAGIC;
2073+ head->version = GUINT32_TO_BE(1); /* v1 */
2074+ memcpy(head->uuid, vmaw->uuid, 16);
2075+
2076+ time_t ctime = time(NULL);
2077+ head->ctime = GUINT64_TO_BE(ctime);
2078+
2079+ for (i = 0; i < VMA_MAX_CONFIGS; i++) {
2080+ head->config_names[i] = GUINT32_TO_BE(vmaw->config_names[i]);
2081+ head->config_data[i] = GUINT32_TO_BE(vmaw->config_data[i]);
2082+ }
2083+
2084+ /* 32 bytes per device (12 used currently) = 8192 bytes max */
2085+ for (i = 1; i <= 254; i++) {
2086+ VmaStreamInfo *si = &vmaw->stream_info[i];
2087+ if (si->size) {
2088+ assert(si->devname);
2089+ uint32_t devname_ptr = allocate_header_string(vmaw, si->devname);
2090+ if (!devname_ptr) {
2091+ return -1;
2092+ }
2093+ head->dev_info[i].devname_ptr = GUINT32_TO_BE(devname_ptr);
2094+ head->dev_info[i].size = GUINT64_TO_BE(si->size);
2095+ }
2096+ }
2097+
2098+ uint32_t header_size = sizeof(VmaHeader) + vmaw->header_blob_table_size;
2099+ head->header_size = GUINT32_TO_BE(header_size);
2100+
2101+ if (header_size > HEADERBUF_SIZE) {
2102+ return -1; /* just to be sure */
2103+ }
2104+
2105+ uint32_t blob_buffer_offset = sizeof(VmaHeader);
2106+ memcpy(buf + blob_buffer_offset, vmaw->header_blob_table,
2107+ vmaw->header_blob_table_size);
2108+ head->blob_buffer_offset = GUINT32_TO_BE(blob_buffer_offset);
2109+ head->blob_buffer_size = GUINT32_TO_BE(vmaw->header_blob_table_pos);
2110+
2111+ g_checksum_reset(vmaw->md5csum);
2112+ g_checksum_update(vmaw->md5csum, (const guchar *)buf, header_size);
2113+ gsize csize = 16;
2114+ g_checksum_get_digest(vmaw->md5csum, (guint8 *)(head->md5sum), &csize);
2115+
2116+ return vma_queue_write(vmaw, buf, header_size);
2117+}
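
Note that the MD5 digest is taken while head->md5sum is still all zero (the buffer was cleared above and the digest is only copied in afterwards), so a verifier has to blank that field again before recomputing. A sketch of such a check (not part of the patch; assumes <glib.h>, <string.h> and the VmaHeader definition from vma.h; the helper name is illustrative):

    static int verify_vma_header_md5(unsigned char *buf, uint32_t header_size)
    {
        VmaHeader *head = (VmaHeader *)buf;
        unsigned char saved[16];
        memcpy(saved, head->md5sum, 16);
        memset(head->md5sum, 0, 16);          /* digest was computed over zeroes here */

        GChecksum *sum = g_checksum_new(G_CHECKSUM_MD5);
        g_checksum_update(sum, buf, header_size);
        guint8 digest[16];
        gsize dlen = 16;
        g_checksum_get_digest(sum, digest, &dlen);
        g_checksum_free(sum);

        memcpy(head->md5sum, saved, 16);       /* restore the stored digest */
        return memcmp(saved, digest, 16) == 0 ? 0 : -1;
    }
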
2118+
2119+static int coroutine_fn vma_writer_flush(VmaWriter *vmaw)
2120+{
2121+ assert(vmaw);
2122+
2123+ int ret;
2124+ int i;
2125+
2126+ if (vmaw->status < 0) {
2127+ return vmaw->status;
2128+ }
2129+
2130+ if (!vmaw->header_written) {
2131+ vmaw->header_written = true;
2132+ ret = vma_write_header(vmaw);
2133+ if (ret < 0) {
2134+ vma_writer_set_error(vmaw, "vma_writer_flush: write header failed");
2135+ return ret;
2136+ }
2137+ }
2138+
2139+ DPRINTF("VMA WRITE FLUSH %d %d\n", vmaw->outbuf_count, vmaw->outbuf_pos);
2140+
2141+
2142+ VmaExtentHeader *ehead = (VmaExtentHeader *)vmaw->outbuf;
2143+
2144+ ehead->magic = VMA_EXTENT_MAGIC;
2145+ ehead->reserved1 = 0;
2146+
2147+ for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
2148+ ehead->blockinfo[i] = GUINT64_TO_BE(vmaw->outbuf_block_info[i]);
2149+ }
2150+
2151+ guint16 block_count = (vmaw->outbuf_pos - VMA_EXTENT_HEADER_SIZE) /
2152+ VMA_BLOCK_SIZE;
2153+
2154+ ehead->block_count = GUINT16_TO_BE(block_count);
2155+
2156+ memcpy(ehead->uuid, vmaw->uuid, sizeof(ehead->uuid));
2157+ memset(ehead->md5sum, 0, sizeof(ehead->md5sum));
2158+
2159+ g_checksum_reset(vmaw->md5csum);
2160+ g_checksum_update(vmaw->md5csum, vmaw->outbuf, VMA_EXTENT_HEADER_SIZE);
2161+ gsize csize = 16;
2162+ g_checksum_get_digest(vmaw->md5csum, ehead->md5sum, &csize);
2163+
2164+ int bytes = vmaw->outbuf_pos;
2165+ ret = vma_queue_write(vmaw, vmaw->outbuf, bytes);
2166+ if (ret != bytes) {
2167+ vma_writer_set_error(vmaw, "vma_writer_flush: failed write");
2168+ }
2169+
2170+ vmaw->outbuf_count = 0;
2171+ vmaw->outbuf_pos = VMA_EXTENT_HEADER_SIZE;
2172+
2173+ for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
2174+ vmaw->outbuf_block_info[i] = 0;
2175+ }
2176+
2177+ return vmaw->status;
2178+}
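
On disk an extent is therefore the 512-byte VmaExtentHeader followed by block_count blocks of 4 KiB; only those 512 header bytes (including the blockinfo table) are covered by the MD5 computed here. With all 59 clusters of an extent fully populated, block_count is 59 x 16 = 944 and the extent occupies 512 + 944 x 4096 = 3867136 bytes, which is exactly VMA_MAX_EXTENT_SIZE from vma.h.
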
2179+
2180+static int vma_count_open_streams(VmaWriter *vmaw)
2181+{
2182+ g_assert(vmaw != NULL);
2183+
2184+ int i;
2185+ int open_drives = 0;
2186+ for (i = 0; i <= 255; i++) {
2187+ if (vmaw->stream_info[i].size && !vmaw->stream_info[i].finished) {
2188+ open_drives++;
2189+ }
2190+ }
2191+
2192+ return open_drives;
2193+}
2194+
2195+
2196+/**
2197+ * You need to call this if the vma archive does not contain
2198+ * any data stream.
2199+ */
2200+int coroutine_fn
2201+vma_writer_flush_output(VmaWriter *vmaw)
2202+{
2203+ qemu_co_mutex_lock(&vmaw->flush_lock);
2204+ int ret = vma_writer_flush(vmaw);
2205+ qemu_co_mutex_unlock(&vmaw->flush_lock);
2206+ if (ret < 0) {
2207+ vma_writer_set_error(vmaw, "vma_writer_flush_header failed");
2208+ }
2209+ return ret;
2210+}
2211+
2212+/**
2213+ * All jobs should call this when there is no more data.
2214+ * Returns: number of remaining streams (0 ==> finished)
2215+ */
2216+int coroutine_fn
2217+vma_writer_close_stream(VmaWriter *vmaw, uint8_t dev_id)
2218+{
2219+ g_assert(vmaw != NULL);
2220+
2221+ DPRINTF("vma_writer_close_stream %d\n", dev_id);
2222+ if (!vmaw->stream_info[dev_id].size) {
2223+ vma_writer_set_error(vmaw, "vma_writer_close_stream: "
2224+ "no such stream %d", dev_id);
2225+ return -1;
2226+ }
2227+ if (vmaw->stream_info[dev_id].finished) {
2228+ vma_writer_set_error(vmaw, "vma_writer_close_stream: "
2229+ "stream already closed %d", dev_id);
2230+ return -1;
2231+ }
2232+
2233+ vmaw->stream_info[dev_id].finished = true;
2234+
2235+ int open_drives = vma_count_open_streams(vmaw);
2236+
2237+ if (open_drives <= 0) {
2238+ DPRINTF("vma_writer_close_stream: all drives completed\n");
2239+ vma_writer_flush_output(vmaw);
2240+ }
2241+
2242+ return open_drives;
2243+}
2244+
2245+int vma_writer_get_status(VmaWriter *vmaw, VmaStatus *status)
2246+{
2247+ int i;
2248+
2249+ g_assert(vmaw != NULL);
2250+
2251+ if (status) {
2252+ status->status = vmaw->status;
2253+ g_strlcpy(status->errmsg, vmaw->errmsg, sizeof(status->errmsg));
2254+ for (i = 0; i <= 255; i++) {
2255+ status->stream_info[i] = vmaw->stream_info[i];
2256+ }
2257+
2258+ uuid_unparse_lower(vmaw->uuid, status->uuid_str);
2259+ status->closed = vmaw->closed;
2260+ }
2261+
2262+
2263+ return vmaw->status;
2264+}
2265+
2266+static int vma_writer_get_buffer(VmaWriter *vmaw)
2267+{
2268+ int ret = 0;
2269+
2270+ qemu_co_mutex_lock(&vmaw->flush_lock);
2271+
2272+ /* wait until buffer is available */
2273+ while (vmaw->outbuf_count >= (VMA_BLOCKS_PER_EXTENT - 1)) {
2274+ ret = vma_writer_flush(vmaw);
2275+ if (ret < 0) {
2276+ vma_writer_set_error(vmaw, "vma_writer_get_buffer: flush failed");
2277+ break;
2278+ }
2279+ }
2280+
2281+ qemu_co_mutex_unlock(&vmaw->flush_lock);
2282+
2283+ return ret;
2284+}
2285+
2286+
2287+int64_t coroutine_fn
2288+vma_writer_write(VmaWriter *vmaw, uint8_t dev_id, int64_t cluster_num,
2289+ unsigned char *buf, size_t *zero_bytes)
2290+{
2291+ g_assert(vmaw != NULL);
2292+ g_assert(zero_bytes != NULL);
2293+
2294+ *zero_bytes = 0;
2295+
2296+ if (vmaw->status < 0) {
2297+ return vmaw->status;
2298+ }
2299+
2300+ if (!dev_id || !vmaw->stream_info[dev_id].size) {
2301+ vma_writer_set_error(vmaw, "vma_writer_write: "
2302+ "no such stream %d", dev_id);
2303+ return -1;
2304+ }
2305+
2306+ if (vmaw->stream_info[dev_id].finished) {
2307+ vma_writer_set_error(vmaw, "vma_writer_write: "
2308+ "stream already closed %d", dev_id);
2309+ return -1;
2310+ }
2311+
2312+
2313+ if (cluster_num >= (((uint64_t)1)<<32)) {
2314+ vma_writer_set_error(vmaw, "vma_writer_write: "
2315+ "cluster number out of range");
2316+ return -1;
2317+ }
2318+
2319+ if (dev_id == vmaw->vmstate_stream) {
2320+ if (cluster_num != vmaw->vmstate_clusters) {
2321+ vma_writer_set_error(vmaw, "vma_writer_write: "
2322+ "non sequential vmstate write");
2323+ }
2324+ vmaw->vmstate_clusters++;
2325+ } else if (cluster_num >= vmaw->stream_info[dev_id].cluster_count) {
2326+ vma_writer_set_error(vmaw, "vma_writer_write: cluster number too big");
2327+ return -1;
2328+ }
2329+
2330+ /* wait until buffer is available */
2331+ if (vma_writer_get_buffer(vmaw) < 0) {
2332+ vma_writer_set_error(vmaw, "vma_writer_write: "
2333+ "vma_writer_get_buffer failed");
2334+ return -1;
2335+ }
2336+
2337+ DPRINTF("VMA WRITE %d %zd\n", dev_id, cluster_num);
2338+
2339+ uint16_t mask = 0;
2340+
2341+ if (buf) {
2342+ int i;
2343+ int bit = 1;
2344+ for (i = 0; i < 16; i++) {
2345+ unsigned char *vmablock = buf + (i*VMA_BLOCK_SIZE);
2346+ if (!buffer_is_zero(vmablock, VMA_BLOCK_SIZE)) {
2347+ mask |= bit;
2348+ memcpy(vmaw->outbuf + vmaw->outbuf_pos, vmablock,
2349+ VMA_BLOCK_SIZE);
2350+ vmaw->outbuf_pos += VMA_BLOCK_SIZE;
2351+ } else {
2352+ DPRINTF("VMA WRITE %zd ZERO BLOCK %d\n", cluster_num, i);
2353+ vmaw->stream_info[dev_id].zero_bytes += VMA_BLOCK_SIZE;
2354+ *zero_bytes += VMA_BLOCK_SIZE;
2355+ }
2356+
2357+ bit = bit << 1;
2358+ }
2359+ } else {
2360+ DPRINTF("VMA WRITE %zd ZERO CLUSTER\n", cluster_num);
2361+ vmaw->stream_info[dev_id].zero_bytes += VMA_CLUSTER_SIZE;
2362+ *zero_bytes += VMA_CLUSTER_SIZE;
2363+ }
2364+
2365+ uint64_t block_info = ((uint64_t)mask) << (32+16);
2366+ block_info |= ((uint64_t)dev_id) << 32;
2367+ block_info |= (cluster_num & 0xffffffff);
2368+ vmaw->outbuf_block_info[vmaw->outbuf_count] = block_info;
2369+
2370+ DPRINTF("VMA WRITE MASK %zd %zx\n", cluster_num, block_info);
2371+
2372+ vmaw->outbuf_count++;
2373+
2374+ /** NOTE: We always write whole clusters, but we correctly set
2375+ * transferred bytes. So transferred == size when everything
2376+ * went OK.
2377+ */
2378+ size_t transferred = VMA_CLUSTER_SIZE;
2379+
2380+ if (dev_id != vmaw->vmstate_stream) {
2381+ uint64_t last = (cluster_num + 1) * VMA_CLUSTER_SIZE;
2382+ if (last > vmaw->stream_info[dev_id].size) {
2383+ uint64_t diff = last - vmaw->stream_info[dev_id].size;
2384+ if (diff >= VMA_CLUSTER_SIZE) {
2385+ vma_writer_set_error(vmaw, "vma_writer_write: "
2386+ "read after last cluster");
2387+ return -1;
2388+ }
2389+ transferred -= diff;
2390+ }
2391+ }
2392+
2393+ vmaw->stream_info[dev_id].transferred += transferred;
2394+
2395+ return transferred;
2396+}
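
Each blockinfo entry packed above carries the 16-bit "non-zero block" mask in the top 16 bits, the 8-bit dev_id at bit 32 and the cluster number in the low 32 bits. An illustrative decoder (not part of the patch):

    #include <stdint.h>

    static void decode_blockinfo(uint64_t info, uint16_t *mask,
                                 uint8_t *dev_id, uint32_t *cluster_num)
    {
        *mask        = info >> (32 + 16);    /* bit i set => 4 KiB block i is stored */
        *dev_id      = (info >> 32) & 0xff;  /* stream id 1..254 */
        *cluster_num = info & 0xffffffff;    /* cluster index within that stream */
    }
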
2397+
2398+void vma_writer_error_propagate(VmaWriter *vmaw, Error **errp)
2399+{
2400+ if (vmaw->status < 0 && *errp == NULL) {
2401+ error_setg(errp, "%s", vmaw->errmsg);
2402+ }
2403+}
2404+
2405+int vma_writer_close(VmaWriter *vmaw, Error **errp)
2406+{
2407+ g_assert(vmaw != NULL);
2408+
2409+ int i;
2410+
2411+ while (vmaw->co_writer) {
2412+ aio_poll(qemu_get_aio_context(), true);
2413+ }
2414+
2415+ assert(vmaw->co_writer == NULL);
2416+
2417+ if (vmaw->cmd) {
2418+ if (pclose(vmaw->cmd) < 0) {
2419+ vma_writer_set_error(vmaw, "vma_writer_close: "
2420+ "pclose failed - %s", g_strerror(errno));
2421+ }
2422+ } else {
2423+ if (close(vmaw->fd) < 0) {
2424+ vma_writer_set_error(vmaw, "vma_writer_close: "
2425+ "close failed - %s", g_strerror(errno));
2426+ }
2427+ }
2428+
2429+ for (i = 0; i <= 255; i++) {
2430+ VmaStreamInfo *si = &vmaw->stream_info[i];
2431+ if (si->size) {
2432+ if (!si->finished) {
2433+ vma_writer_set_error(vmaw, "vma_writer_close: "
2434+ "detected open stream '%s'", si->devname);
2435+ } else if ((si->transferred != si->size) &&
2436+ (i != vmaw->vmstate_stream)) {
2437+ vma_writer_set_error(vmaw, "vma_writer_close: "
2438+ "incomplete stream '%s' (%zd != %zd)",
2439+ si->devname, si->transferred, si->size);
2440+ }
2441+ }
2442+ }
2443+
2444+ for (i = 0; i <= 255; i++) {
2445+ vmaw->stream_info[i].finished = 1; /* mark as closed */
2446+ }
2447+
2448+ vmaw->closed = 1;
2449+
2450+ if (vmaw->status < 0 && *errp == NULL) {
2451+ error_setg(errp, "%s", vmaw->errmsg);
2452+ }
2453+
2454+ return vmaw->status;
2455+}
2456+
2457+void vma_writer_destroy(VmaWriter *vmaw)
2458+{
2459+ assert(vmaw);
2460+
2461+ int i;
2462+
2463+ for (i = 0; i <= 255; i++) {
2464+ if (vmaw->stream_info[i].devname) {
2465+ g_free(vmaw->stream_info[i].devname);
2466+ }
2467+ }
2468+
2469+ if (vmaw->md5csum) {
2470+ g_checksum_free(vmaw->md5csum);
2471+ }
2472+
2473+ g_free(vmaw);
2474+}
2475diff --git a/vma.c b/vma.c
2476new file mode 100644
2477index 0000000000..04915427c8
2478--- /dev/null
2479+++ b/vma.c
2480@@ -0,0 +1,757 @@
2481+/*
2482+ * VMA: Virtual Machine Archive
2483+ *
2484+ * Copyright (C) 2012-2013 Proxmox Server Solutions
2485+ *
2486+ * Authors:
2487+ * Dietmar Maurer (dietmar@proxmox.com)
2488+ *
2489+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
2490+ * See the COPYING file in the top-level directory.
2491+ *
2492+ */
2493+
2494+#include "qemu/osdep.h"
2495+#include <glib.h>
2496+
2497+#include "vma.h"
2498+#include "qemu-common.h"
2499+#include "qemu/error-report.h"
2500+#include "qemu/main-loop.h"
2501+#include "qapi/qmp/qstring.h"
2502+#include "sysemu/char.h" /* qstring_from_str */
2503+#include "sysemu/block-backend.h"
2504+
2505+static void help(void)
2506+{
2507+ const char *help_msg =
2508+ "usage: vma command [command options]\n"
2509+ "\n"
2510+ "vma list <filename>\n"
2511+ "vma config <filename> [-c config]\n"
2512+ "vma create <filename> [-c config] pathname ...\n"
2513+ "vma extract <filename> [-r <fifo>] <targetdir>\n"
2514+ "vma verify <filename> [-v]\n"
2515+ ;
2516+
2517+ printf("%s", help_msg);
2518+ exit(1);
2519+}
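
Illustrative invocations matching the usage text above (file, config and device names are placeholders, not taken from the patch):

    vma create vzdump-qemu-100.vma -c qemu-server.conf drive-scsi0=disk0.raw
    vma list vzdump-qemu-100.vma
    vma config vzdump-qemu-100.vma -c qemu-server.conf
    vma extract vzdump-qemu-100.vma -r /tmp/restore.fifo /var/tmp/extract-100
    vma verify vzdump-qemu-100.vma -v
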
2520+
2521+static const char *extract_devname(const char *path, char **devname, int index)
2522+{
2523+ assert(path);
2524+
2525+ const char *sep = strchr(path, '=');
2526+
2527+ if (sep) {
2528+ *devname = g_strndup(path, sep - path);
2529+ path = sep + 1;
2530+ } else {
2531+ if (index >= 0) {
2532+ *devname = g_strdup_printf("disk%d", index);
2533+ } else {
2534+ *devname = NULL;
2535+ }
2536+ }
2537+
2538+ return path;
2539+}
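
For instance (values illustrative), extract_devname("drive-scsi0=disk0.raw", &devname, 0) sets devname to "drive-scsi0" and returns a pointer to "disk0.raw", while extract_devname("disk0.raw", &devname, 2) has no '=' to split on and falls back to the generated name "disk2"; the caller owns the returned devname and must g_free() it.
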
2540+
2541+static void print_content(VmaReader *vmar)
2542+{
2543+ assert(vmar);
2544+
2545+ VmaHeader *head = vma_reader_get_header(vmar);
2546+
2547+ GList *l = vma_reader_get_config_data(vmar);
2548+ while (l && l->data) {
2549+ VmaConfigData *cdata = (VmaConfigData *)l->data;
2550+ l = g_list_next(l);
2551+ printf("CFG: size: %d name: %s\n", cdata->len, cdata->name);
2552+ }
2553+
2554+ int i;
2555+ VmaDeviceInfo *di;
2556+ for (i = 1; i < 255; i++) {
2557+ di = vma_reader_get_device_info(vmar, i);
2558+ if (di) {
2559+ if (strcmp(di->devname, "vmstate") == 0) {
2560+ printf("VMSTATE: dev_id=%d memory: %zd\n", i, di->size);
2561+ } else {
2562+ printf("DEV: dev_id=%d size: %zd devname: %s\n",
2563+ i, di->size, di->devname);
2564+ }
2565+ }
2566+ }
2567+ /* ctime is the last entry we print */
2568+ printf("CTIME: %s", ctime(&head->ctime));
2569+ fflush(stdout);
2570+}
2571+
2572+static int list_content(int argc, char **argv)
2573+{
2574+ int c, ret = 0;
2575+ const char *filename;
2576+
2577+ for (;;) {
2578+ c = getopt(argc, argv, "h");
2579+ if (c == -1) {
2580+ break;
2581+ }
2582+ switch (c) {
2583+ case '?':
2584+ case 'h':
2585+ help();
2586+ break;
2587+ default:
2588+ g_assert_not_reached();
2589+ }
2590+ }
2591+
2592+ /* Get the filename */
2593+ if ((optind + 1) != argc) {
2594+ help();
2595+ }
2596+ filename = argv[optind++];
2597+
2598+ Error *errp = NULL;
2599+ VmaReader *vmar = vma_reader_create(filename, &errp);
2600+
2601+ if (!vmar) {
2602+ g_error("%s", error_get_pretty(errp));
2603+ }
2604+
2605+ print_content(vmar);
2606+
2607+ vma_reader_destroy(vmar);
2608+
2609+ return ret;
2610+}
2611+
2612+typedef struct RestoreMap {
2613+ char *devname;
2614+ char *path;
2615+ char *format;
2616+ bool write_zero;
2617+} RestoreMap;
2618+
2619+static int extract_content(int argc, char **argv)
2620+{
2621+ int c, ret = 0;
2622+ int verbose = 0;
2623+ const char *filename;
2624+ const char *dirname;
2625+ const char *readmap = NULL;
2626+
2627+ for (;;) {
2628+ c = getopt(argc, argv, "hvr:");
2629+ if (c == -1) {
2630+ break;
2631+ }
2632+ switch (c) {
2633+ case '?':
2634+ case 'h':
2635+ help();
2636+ break;
2637+ case 'r':
2638+ readmap = optarg;
2639+ break;
2640+ case 'v':
2641+ verbose = 1;
2642+ break;
2643+ default:
2644+ help();
2645+ }
2646+ }
2647+
2648+ /* Get the filename */
2649+ if ((optind + 2) != argc) {
2650+ help();
2651+ }
2652+ filename = argv[optind++];
2653+ dirname = argv[optind++];
2654+
2655+ Error *errp = NULL;
2656+ VmaReader *vmar = vma_reader_create(filename, &errp);
2657+
2658+ if (!vmar) {
2659+ g_error("%s", error_get_pretty(errp));
2660+ }
2661+
2662+ if (mkdir(dirname, 0777) < 0) {
2663+ g_error("unable to create target directory %s - %s",
2664+ dirname, g_strerror(errno));
2665+ }
2666+
2667+ GList *l = vma_reader_get_config_data(vmar);
2668+ while (l && l->data) {
2669+ VmaConfigData *cdata = (VmaConfigData *)l->data;
2670+ l = g_list_next(l);
2671+ char *cfgfn = g_strdup_printf("%s/%s", dirname, cdata->name);
2672+ GError *err = NULL;
2673+ if (!g_file_set_contents(cfgfn, (gchar *)cdata->data, cdata->len,
2674+ &err)) {
2675+ g_error("unable to write file: %s", err->message);
2676+ }
2677+ }
2678+
2679+ GHashTable *devmap = g_hash_table_new(g_str_hash, g_str_equal);
2680+
2681+ if (readmap) {
2682+ print_content(vmar);
2683+
2684+ FILE *map = fopen(readmap, "r");
2685+ if (!map) {
2686+ g_error("unable to open fifo %s - %s", readmap, g_strerror(errno));
2687+ }
2688+
2689+ while (1) {
2690+ char inbuf[8192];
2691+ char *line = fgets(inbuf, sizeof(inbuf), map);
2692+ if (!line || line[0] == '\0' || !strcmp(line, "done\n")) {
2693+ break;
2694+ }
2695+ int len = strlen(line);
2696+ if (line[len - 1] == '\n') {
2697+ line[len - 1] = '\0';
2698+ if (len == 1) {
2699+ break;
2700+ }
2701+ }
2702+
2703+ char *format = NULL;
2704+ if (strncmp(line, "format=", sizeof("format=")-1) == 0) {
2705+ format = line + sizeof("format=")-1;
2706+ char *colon = strchr(format, ':');
2707+ if (!colon) {
2708+ g_error("read map failed - found only a format ('%s')", inbuf);
2709+ }
2710+ format = g_strndup(format, colon - format);
2711+ line = colon+1;
2712+ }
2713+
2714+ const char *path;
2715+ bool write_zero;
2716+ if (line[0] == '0' && line[1] == ':') {
2717+ path = line + 2;
2718+ write_zero = false;
2719+ } else if (line[0] == '1' && line[1] == ':') {
2720+ path = line + 2;
2721+ write_zero = true;
2722+ } else {
2723+ g_error("read map failed - parse error ('%s')", inbuf);
2724+ }
2725+
2726+ char *devname = NULL;
2727+ path = extract_devname(path, &devname, -1);
2728+ if (!devname) {
2729+ g_error("read map failed - no dev name specified ('%s')",
2730+ inbuf);
2731+ }
2732+
2733+ RestoreMap *map = g_new0(RestoreMap, 1);
2734+ map->devname = g_strdup(devname);
2735+ map->path = g_strdup(path);
2736+ map->format = format;
2737+ map->write_zero = write_zero;
2738+
2739+ g_hash_table_insert(devmap, map->devname, map);
2740+
2741+ };
2742+ }
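
Each line read from the fifo maps one archive device to a restore target: an optional "format=<driver>:" prefix, then '0' or '1' to select whether zero clusters are written to the target, then "<devname>=<path>"; a line consisting of "done" (or an empty line) ends the list. Illustrative lines (paths and driver are placeholders):

    format=qcow2:1:drive-scsi0=/var/lib/vz/images/100/vm-100-disk-0.qcow2
    0:drive-ide1=/dev/zvol/tank/vm-100-disk-1
    done
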
2743+
2744+ int i;
2745+ int vmstate_fd = -1;
2746+ guint8 vmstate_stream = 0;
2747+
2748+ BlockBackend *blk = NULL;
2749+
2750+ for (i = 1; i < 255; i++) {
2751+ VmaDeviceInfo *di = vma_reader_get_device_info(vmar, i);
2752+ if (di && (strcmp(di->devname, "vmstate") == 0)) {
2753+ vmstate_stream = i;
2754+ char *statefn = g_strdup_printf("%s/vmstate.bin", dirname);
2755+ vmstate_fd = open(statefn, O_WRONLY|O_CREAT|O_EXCL, 0644);
2756+ if (vmstate_fd < 0) {
2757+ g_error("create vmstate file '%s' failed - %s", statefn,
2758+ g_strerror(errno));
2759+ }
2760+ g_free(statefn);
2761+ } else if (di) {
2762+ char *devfn = NULL;
2763+ const char *format = NULL;
2764+ int flags = BDRV_O_RDWR | BDRV_O_NO_FLUSH;
2765+ bool write_zero = true;
2766+
2767+ if (readmap) {
2768+ RestoreMap *map;
2769+ map = (RestoreMap *)g_hash_table_lookup(devmap, di->devname);
2770+ if (map == NULL) {
2771+ g_error("no device name mapping for %s", di->devname);
2772+ }
2773+ devfn = map->path;
67af0fa4 2774+ format = map->format;
2775+ write_zero = map->write_zero;
2776+ } else {
2777+ devfn = g_strdup_printf("%s/tmp-disk-%s.raw",
2778+ dirname, di->devname);
2779+ printf("DEVINFO %s %zd\n", devfn, di->size);
2780+
2781+ bdrv_img_create(devfn, "raw", NULL, NULL, NULL, di->size,
2782+ flags, &errp, 0);
2783+ if (errp) {
2784+ g_error("can't create file %s: %s", devfn,
2785+ error_get_pretty(errp));
2786+ }
2787+
2788+ /* Note: we created an empty file above, so there is no
2789+ * need to write zeroes (so we generate a sparse file)
2790+ */
2791+ write_zero = false;
2792+ }
2793+
2794+ size_t devlen = strlen(devfn);
2795+ QDict *options = NULL;
2796+ if (format) {
2797+ /* explicit format from commandline */
2798+ options = qdict_new();
2799+ qdict_put(options, "driver", qstring_from_str(format));
2800+ } else if ((devlen > 4 && strcmp(devfn+devlen-4, ".raw") == 0) ||
2801+ strncmp(devfn, "/dev/", 5) == 0)
2802+ {
2803+ /* This part is now deprecated for PVE as well (just as qemu
2804+ * deprecated not specifying an explicit raw format, too).
2805+ */
2806+ /* explicit raw format */
2807+ options = qdict_new();
2808+ qdict_put(options, "driver", qstring_from_str("raw"));
2809+ }
2810+
2811+
2812+ if (errp || !(blk = blk_new_open(devfn, NULL, options, flags, &errp))) {
2813+ g_error("can't open file %s - %s", devfn,
2814+ error_get_pretty(errp));
2815+ }
2816+
2817+ if (vma_reader_register_bs(vmar, i, blk, write_zero, &errp) < 0) {
2818+ g_error("%s", error_get_pretty(errp));
2819+ }
2820+
2821+ if (!readmap) {
2822+ g_free(devfn);
2823+ }
2824+ }
2825+ }
2826+
2827+ if (vma_reader_restore(vmar, vmstate_fd, verbose, &errp) < 0) {
2828+ g_error("restore failed - %s", error_get_pretty(errp));
2829+ }
2830+
2831+ if (!readmap) {
2832+ for (i = 1; i < 255; i++) {
2833+ VmaDeviceInfo *di = vma_reader_get_device_info(vmar, i);
2834+ if (di && (i != vmstate_stream)) {
2835+ char *tmpfn = g_strdup_printf("%s/tmp-disk-%s.raw",
2836+ dirname, di->devname);
2837+ char *fn = g_strdup_printf("%s/disk-%s.raw",
2838+ dirname, di->devname);
2839+ if (rename(tmpfn, fn) != 0) {
2840+ g_error("rename %s to %s failed - %s",
2841+ tmpfn, fn, g_strerror(errno));
2842+ }
2843+ }
2844+ }
2845+ }
2846+
2847+ vma_reader_destroy(vmar);
2848+
2849+ blk_unref(blk);
2850+
2851+ bdrv_close_all();
2852+
2853+ return ret;
2854+}
2855+
2856+static int verify_content(int argc, char **argv)
2857+{
2858+ int c, ret = 0;
2859+ int verbose = 0;
2860+ const char *filename;
2861+
2862+ for (;;) {
2863+ c = getopt(argc, argv, "hv");
2864+ if (c == -1) {
2865+ break;
2866+ }
2867+ switch (c) {
2868+ case '?':
2869+ case 'h':
2870+ help();
2871+ break;
2872+ case 'v':
2873+ verbose = 1;
2874+ break;
2875+ default:
2876+ help();
2877+ }
2878+ }
2879+
2880+ /* Get the filename */
2881+ if ((optind + 1) != argc) {
2882+ help();
2883+ }
2884+ filename = argv[optind++];
2885+
2886+ Error *errp = NULL;
2887+ VmaReader *vmar = vma_reader_create(filename, &errp);
2888+
2889+ if (!vmar) {
2890+ g_error("%s", error_get_pretty(errp));
2891+ }
2892+
2893+ if (verbose) {
2894+ print_content(vmar);
2895+ }
2896+
2897+ if (vma_reader_verify(vmar, verbose, &errp) < 0) {
2898+ g_error("verify failed - %s", error_get_pretty(errp));
2899+ }
2900+
2901+ vma_reader_destroy(vmar);
2902+
2903+ bdrv_close_all();
2904+
2905+ return ret;
2906+}
2907+
2908+typedef struct BackupJob {
2909+ BlockBackend *target;
2910+ int64_t len;
2911+ VmaWriter *vmaw;
2912+ uint8_t dev_id;
2913+} BackupJob;
2914+
2915+#define BACKUP_SECTORS_PER_CLUSTER (VMA_CLUSTER_SIZE / BDRV_SECTOR_SIZE)
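
With the 64 KiB VMA cluster and 512-byte QEMU sectors this evaluates to 65536 / 512 = 128 sectors per cluster, so backup_run() below walks the source device one cluster at a time and end = DIV_ROUND_UP(len / 512, 128) counts the final, possibly partial, cluster as well.
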
2916+
2917+static void coroutine_fn backup_run_empty(void *opaque)
2918+{
2919+ VmaWriter *vmaw = (VmaWriter *)opaque;
2920+
2921+ vma_writer_flush_output(vmaw);
2922+
2923+ Error *err = NULL;
2924+ if (vma_writer_close(vmaw, &err) != 0) {
2925+ g_warning("vma_writer_close failed %s", error_get_pretty(err));
2926+ }
2927+}
2928+
2929+static void coroutine_fn backup_run(void *opaque)
2930+{
2931+ BackupJob *job = (BackupJob *)opaque;
2932+ struct iovec iov;
2933+ QEMUIOVector qiov;
2934+
2935+ int64_t start, end;
2936+ int ret = 0;
2937+
2938+ unsigned char *buf = blk_blockalign(job->target, VMA_CLUSTER_SIZE);
2939+
2940+ start = 0;
2941+ end = DIV_ROUND_UP(job->len / BDRV_SECTOR_SIZE,
2942+ BACKUP_SECTORS_PER_CLUSTER);
2943+
2944+ for (; start < end; start++) {
2945+ iov.iov_base = buf;
2946+ iov.iov_len = VMA_CLUSTER_SIZE;
2947+ qemu_iovec_init_external(&qiov, &iov, 1);
2948+
2949+ ret = blk_co_preadv(job->target, start * VMA_CLUSTER_SIZE,
2950+ VMA_CLUSTER_SIZE, &qiov, 0);
2951+ if (ret < 0) {
2952+ vma_writer_set_error(job->vmaw, "read error", -1);
2953+ goto out;
2954+ }
2955+
2956+ size_t zb = 0;
2957+ if (vma_writer_write(job->vmaw, job->dev_id, start, buf, &zb) < 0) {
2958+ vma_writer_set_error(job->vmaw, "backup_dump_cb vma_writer_write failed", -1);
2959+ goto out;
2960+ }
2961+ }
2962+
2963+
2964+out:
2965+ if (vma_writer_close_stream(job->vmaw, job->dev_id) <= 0) {
2966+ Error *err = NULL;
2967+ if (vma_writer_close(job->vmaw, &err) != 0) {
2968+ g_warning("vma_writer_close failed %s", error_get_pretty(err));
2969+ }
2970+ }
2971+}
2972+
2973+static int create_archive(int argc, char **argv)
2974+{
2975+ int i, c;
2976+ int verbose = 0;
2977+ const char *archivename;
2978+ GList *config_files = NULL;
2979+
2980+ for (;;) {
2981+ c = getopt(argc, argv, "hvc:");
2982+ if (c == -1) {
2983+ break;
2984+ }
2985+ switch (c) {
2986+ case '?':
2987+ case 'h':
2988+ help();
2989+ break;
2990+ case 'c':
2991+ config_files = g_list_append(config_files, optarg);
2992+ break;
2993+ case 'v':
2994+ verbose = 1;
2995+ break;
2996+ default:
2997+ g_assert_not_reached();
2998+ }
2999+ }
3000+
3001+
3002+ /* make sure we have an archive name */
3003+ if ((optind + 1) > argc) {
3004+ help();
3005+ }
3006+
3007+ archivename = argv[optind++];
3008+
3009+ uuid_t uuid;
3010+ uuid_generate(uuid);
3011+
3012+ Error *local_err = NULL;
3013+ VmaWriter *vmaw = vma_writer_create(archivename, uuid, &local_err);
3014+
3015+ if (vmaw == NULL) {
3016+ g_error("%s", error_get_pretty(local_err));
3017+ }
3018+
3019+ GList *l = config_files;
3020+ while (l && l->data) {
3021+ char *name = l->data;
3022+ char *cdata = NULL;
3023+ gsize clen = 0;
3024+ GError *err = NULL;
3025+ if (!g_file_get_contents(name, &cdata, &clen, &err)) {
3026+ unlink(archivename);
3027+ g_error("Unable to read file: %s", err->message);
3028+ }
3029+
3030+ if (vma_writer_add_config(vmaw, name, cdata, clen) != 0) {
3031+ unlink(archivename);
3032+ g_error("Unable to append config data %s (len = %zd)",
3033+ name, clen);
3034+ }
3035+ l = g_list_next(l);
3036+ }
3037+
3038+ int devcount = 0;
3039+ while (optind < argc) {
3040+ const char *path = argv[optind++];
3041+ char *devname = NULL;
3042+ path = extract_devname(path, &devname, devcount++);
3043+
3044+ Error *errp = NULL;
3045+ BlockBackend *target;
3046+
3047+ target = blk_new_open(path, NULL, NULL, 0, &errp);
3048+ if (!target) {
3049+ unlink(archivename);
3050+ g_error("bdrv_open '%s' failed - %s", path, error_get_pretty(errp));
3051+ }
3052+ int64_t size = blk_getlength(target);
3053+ int dev_id = vma_writer_register_stream(vmaw, devname, size);
3054+ if (dev_id <= 0) {
3055+ unlink(archivename);
3056+ g_error("vma_writer_register_stream '%s' failed", devname);
3057+ }
3058+
3059+ BackupJob *job = g_new0(BackupJob, 1);
3060+ job->len = size;
3061+ job->target = target;
3062+ job->vmaw = vmaw;
3063+ job->dev_id = dev_id;
3064+
3065+ Coroutine *co = qemu_coroutine_create(backup_run, job);
3066+ qemu_coroutine_enter(co);
3067+ }
3068+
3069+ VmaStatus vmastat;
3070+ int percent = 0;
3071+ int last_percent = -1;
3072+
3073+ if (devcount) {
3074+ while (1) {
3075+ main_loop_wait(false);
3076+ vma_writer_get_status(vmaw, &vmastat);
3077+
3078+ if (verbose) {
3079+
3080+ uint64_t total = 0;
3081+ uint64_t transferred = 0;
3082+ uint64_t zero_bytes = 0;
3083+
3084+ int i;
3085+ for (i = 0; i < 256; i++) {
3086+ if (vmastat.stream_info[i].size) {
3087+ total += vmastat.stream_info[i].size;
3088+ transferred += vmastat.stream_info[i].transferred;
3089+ zero_bytes += vmastat.stream_info[i].zero_bytes;
3090+ }
3091+ }
3092+ percent = (transferred*100)/total;
3093+ if (percent != last_percent) {
3094+ fprintf(stderr, "progress %d%% %zd/%zd %zd\n", percent,
3095+ transferred, total, zero_bytes);
3096+ fflush(stderr);
3097+
3098+ last_percent = percent;
3099+ }
3100+ }
3101+
3102+ if (vmastat.closed) {
3103+ break;
3104+ }
3105+ }
3106+ } else {
3107+ Coroutine *co = qemu_coroutine_create(backup_run_empty, vmaw);
3108+ qemu_coroutine_enter(co);
3109+ while (1) {
3110+ main_loop_wait(false);
3111+ vma_writer_get_status(vmaw, &vmastat);
3112+ if (vmastat.closed) {
3113+ break;
3114+ }
3115+ }
3116+ }
3117+
3118+ bdrv_drain_all();
3119+
3120+ vma_writer_get_status(vmaw, &vmastat);
3121+
3122+ if (verbose) {
3123+ for (i = 0; i < 256; i++) {
3124+ VmaStreamInfo *si = &vmastat.stream_info[i];
3125+ if (si->size) {
3126+ fprintf(stderr, "image %s: size=%zd zeros=%zd saved=%zd\n",
3127+ si->devname, si->size, si->zero_bytes,
3128+ si->size - si->zero_bytes);
3129+ }
3130+ }
3131+ }
3132+
3133+ if (vmastat.status < 0) {
3134+ unlink(archivename);
3135+ g_error("creating vma archive failed");
3136+ }
3137+
3138+ return 0;
3139+}
3140+
3141+static int dump_config(int argc, char **argv)
3142+{
3143+ int c, ret = 0;
3144+ const char *filename;
3145+ const char *config_name = "qemu-server.conf";
3146+
3147+ for (;;) {
3148+ c = getopt(argc, argv, "hc:");
3149+ if (c == -1) {
3150+ break;
3151+ }
3152+ switch (c) {
3153+ case '?':
3154+ case 'h':
3155+ help();
3156+ break;
3157+ case 'c':
3158+ config_name = optarg;
3159+ break;
3160+ default:
3161+ help();
3162+ }
3163+ }
3164+
3165+ /* Get the filename */
3166+ if ((optind + 1) != argc) {
3167+ help();
3168+ }
3169+ filename = argv[optind++];
3170+
3171+ Error *errp = NULL;
3172+ VmaReader *vmar = vma_reader_create(filename, &errp);
3173+
3174+ if (!vmar) {
3175+ g_error("%s", error_get_pretty(errp));
3176+ }
3177+
3178+ int found = 0;
3179+ GList *l = vma_reader_get_config_data(vmar);
3180+ while (l && l->data) {
3181+ VmaConfigData *cdata = (VmaConfigData *)l->data;
3182+ l = g_list_next(l);
3183+ if (strcmp(cdata->name, config_name) == 0) {
3184+ found = 1;
3185+ fwrite(cdata->data, cdata->len, 1, stdout);
3186+ break;
3187+ }
3188+ }
3189+
3190+ vma_reader_destroy(vmar);
3191+
3192+ bdrv_close_all();
3193+
3194+ if (!found) {
3195+ fprintf(stderr, "unable to find configuration data '%s'\n", config_name);
3196+ return -1;
3197+ }
3198+
3199+ return ret;
3200+}
3201+
3202+int main(int argc, char **argv)
3203+{
3204+ const char *cmdname;
3205+ Error *main_loop_err = NULL;
3206+
3207+ error_set_progname(argv[0]);
3208+
3209+ if (qemu_init_main_loop(&main_loop_err)) {
3210+ g_error("%s", error_get_pretty(main_loop_err));
3211+ }
3212+
3213+ bdrv_init();
3214+
3215+ if (argc < 2) {
3216+ help();
3217+ }
3218+
3219+ cmdname = argv[1];
3220+ argc--; argv++;
3221+
3222+
3223+ if (!strcmp(cmdname, "list")) {
3224+ return list_content(argc, argv);
3225+ } else if (!strcmp(cmdname, "create")) {
3226+ return create_archive(argc, argv);
3227+ } else if (!strcmp(cmdname, "extract")) {
3228+ return extract_content(argc, argv);
3229+ } else if (!strcmp(cmdname, "verify")) {
3230+ return verify_content(argc, argv);
3231+ } else if (!strcmp(cmdname, "config")) {
3232+ return dump_config(argc, argv);
3233+ }
3234+
3235+ help();
3236+ return 0;
3237+}
3238diff --git a/vma.h b/vma.h
3239new file mode 100644
3240index 0000000000..fa6f4df7e7
3241--- /dev/null
3242+++ b/vma.h
3243@@ -0,0 +1,149 @@
3244+/*
3245+ * VMA: Virtual Machine Archive
3246+ *
3247+ * Copyright (C) Proxmox Server Solutions
3248+ *
3249+ * Authors:
3250+ * Dietmar Maurer (dietmar@proxmox.com)
3251+ *
3252+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
3253+ * See the COPYING file in the top-level directory.
3254+ *
3255+ */
3256+
3257+#ifndef BACKUP_VMA_H
3258+#define BACKUP_VMA_H
3259+
3260+#include <uuid/uuid.h>
3261+#include "qapi/error.h"
3262+#include "block/block.h"
3263+
3264+#define VMA_BLOCK_BITS 12
3265+#define VMA_BLOCK_SIZE (1<<VMA_BLOCK_BITS)
3266+#define VMA_CLUSTER_BITS (VMA_BLOCK_BITS+4)
3267+#define VMA_CLUSTER_SIZE (1<<VMA_CLUSTER_BITS)
3268+
3269+#if VMA_CLUSTER_SIZE != 65536
3270+#error unexpected cluster size
3271+#endif
3272+
3273+#define VMA_EXTENT_HEADER_SIZE 512
3274+#define VMA_BLOCKS_PER_EXTENT 59
3275+#define VMA_MAX_CONFIGS 256
3276+
3277+#define VMA_MAX_EXTENT_SIZE \
3278+ (VMA_EXTENT_HEADER_SIZE+VMA_CLUSTER_SIZE*VMA_BLOCKS_PER_EXTENT)
3279+#if VMA_MAX_EXTENT_SIZE != 3867136
3280+#error unexpected VMA_EXTENT_SIZE
3281+#endif
3282+
3283+/* File Format Definitions */
3284+
3285+#define VMA_MAGIC (GUINT32_TO_BE(('V'<<24)|('M'<<16)|('A'<<8)|0x00))
3286+#define VMA_EXTENT_MAGIC (GUINT32_TO_BE(('V'<<24)|('M'<<16)|('A'<<8)|'E'))
3287+
3288+typedef struct VmaDeviceInfoHeader {
3289+ uint32_t devname_ptr; /* offset into blob_buffer table */
3290+ uint32_t reserved0;
3291+ uint64_t size; /* device size in bytes */
3292+ uint64_t reserved1;
3293+ uint64_t reserved2;
3294+} VmaDeviceInfoHeader;
3295+
3296+typedef struct VmaHeader {
3297+ uint32_t magic;
3298+ uint32_t version;
3299+ unsigned char uuid[16];
3300+ int64_t ctime;
3301+ unsigned char md5sum[16];
3302+
3303+ uint32_t blob_buffer_offset;
3304+ uint32_t blob_buffer_size;
3305+ uint32_t header_size;
3306+
3307+ unsigned char reserved[1984];
3308+
3309+ uint32_t config_names[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
3310+ uint32_t config_data[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
3311+
3312+ uint32_t reserved1;
3313+
3314+ VmaDeviceInfoHeader dev_info[256];
3315+} VmaHeader;
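
For reference, the fixed-size part of this struct adds up to the offsets asserted in vma_writer_create(): magic/version/uuid/ctime/md5sum take 48 bytes, the three uint32_t fields and the 1984 reserved bytes bring config_names to offset 2044, the two 256-entry uint32_t arrays plus reserved1 bring dev_info to offset 4096, and 256 x sizeof(VmaDeviceInfoHeader) = 256 x 32 = 8192 bytes of device info make sizeof(VmaHeader) = 4096 + 8192 = 12288.
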
3316+
3317+typedef struct VmaExtentHeader {
3318+ uint32_t magic;
3319+ uint16_t reserved1;
3320+ uint16_t block_count;
3321+ unsigned char uuid[16];
3322+ unsigned char md5sum[16];
3323+ uint64_t blockinfo[VMA_BLOCKS_PER_EXTENT];
3324+} VmaExtentHeader;
3325+
3326+/* functions/definitions to read/write vma files */
3327+
3328+typedef struct VmaReader VmaReader;
3329+
3330+typedef struct VmaWriter VmaWriter;
3331+
3332+typedef struct VmaConfigData {
3333+ const char *name;
3334+ const void *data;
3335+ uint32_t len;
3336+} VmaConfigData;
3337+
3338+typedef struct VmaStreamInfo {
3339+ uint64_t size;
3340+ uint64_t cluster_count;
3341+ uint64_t transferred;
3342+ uint64_t zero_bytes;
3343+ int finished;
3344+ char *devname;
3345+} VmaStreamInfo;
3346+
3347+typedef struct VmaStatus {
3348+ int status;
3349+ bool closed;
3350+ char errmsg[8192];
3351+ char uuid_str[37];
3352+ VmaStreamInfo stream_info[256];
3353+} VmaStatus;
3354+
3355+typedef struct VmaDeviceInfo {
3356+ uint64_t size; /* device size in bytes */
3357+ const char *devname;
3358+} VmaDeviceInfo;
3359+
3360+VmaWriter *vma_writer_create(const char *filename, uuid_t uuid, Error **errp);
3361+int vma_writer_close(VmaWriter *vmaw, Error **errp);
3362+void vma_writer_error_propagate(VmaWriter *vmaw, Error **errp);
3363+void vma_writer_destroy(VmaWriter *vmaw);
3364+int vma_writer_add_config(VmaWriter *vmaw, const char *name, gpointer data,
3365+ size_t len);
3366+int vma_writer_register_stream(VmaWriter *vmaw, const char *devname,
3367+ size_t size);
3368+
3369+int64_t coroutine_fn vma_writer_write(VmaWriter *vmaw, uint8_t dev_id,
3370+ int64_t cluster_num, unsigned char *buf,
3371+ size_t *zero_bytes);
3372+
3373+int coroutine_fn vma_writer_close_stream(VmaWriter *vmaw, uint8_t dev_id);
3374+int coroutine_fn vma_writer_flush_output(VmaWriter *vmaw);
3375+
3376+int vma_writer_get_status(VmaWriter *vmaw, VmaStatus *status);
3377+void vma_writer_set_error(VmaWriter *vmaw, const char *fmt, ...);
3378+
3379+
3380+VmaReader *vma_reader_create(const char *filename, Error **errp);
3381+void vma_reader_destroy(VmaReader *vmar);
3382+VmaHeader *vma_reader_get_header(VmaReader *vmar);
3383+GList *vma_reader_get_config_data(VmaReader *vmar);
3384+VmaDeviceInfo *vma_reader_get_device_info(VmaReader *vmar, guint8 dev_id);
3385+int vma_reader_register_bs(VmaReader *vmar, guint8 dev_id,
3386+ BlockBackend *target, bool write_zeroes,
3387+ Error **errp);
3388+int vma_reader_restore(VmaReader *vmar, int vmstate_fd, bool verbose,
3389+ Error **errp);
3390+int vma_reader_verify(VmaReader *vmar, bool verbose, Error **errp);
3391+
3392+#endif /* BACKUP_VMA_H */
3393--
33942.11.0
3395