From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Wolfgang Bumiller <w.bumiller@proxmox.com>
Date: Mon, 7 Aug 2017 08:51:16 +0200
Subject: [PATCH] adding old vma files

---
 Makefile                  |   3 +-
 Makefile.objs             |   1 +
 block/backup.c            | 132 ++++---
 block/replication.c       |   1 +
 blockdev.c                | 249 +++++++++-----
 blockjob.c                |  11 +-
 include/block/block_int.h |   4 +
 vma-reader.c              | 857 ++++++++++++++++++++++++++++++++++++++++++++++
 vma-writer.c              | 771 +++++++++++++++++++++++++++++++++++++++++
 vma.c                     | 757 ++++++++++++++++++++++++++++++++++++++++
 vma.h                     | 149 ++++++++
 11 files changed, 2802 insertions(+), 133 deletions(-)
 create mode 100644 vma-reader.c
 create mode 100644 vma-writer.c
 create mode 100644 vma.c
 create mode 100644 vma.h

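The key interface change in this patch is that backup_job_create() gains an optional BackupDumpFunc callback, letting a backup job hand finished clusters to the VMA writer instead of writing them through a block target. A minimal sketch of such a callback, modelled on the typedef added in the include/block/block_int.h hunk and on how block/backup.c calls it (the function name below is illustrative, not part of the patch):

    #include "qemu/osdep.h"
    #include "sysemu/block-backend.h"
    #include "block/block_int.h"

    /* Sketch of a dump callback: buf == NULL means the cluster is all zeroes.
     * Return the number of bytes handled, or a negative value so that the
     * backup job takes its target-error path. */
    static int example_dump_cb(void *opaque, BlockBackend *be,
                               int64_t sector_num, int n_sectors,
                               unsigned char *buf)
    {
        int size = n_sectors * BDRV_SECTOR_SIZE;
        if (buf == NULL) {
            /* sparse cluster: nothing to copy, only account for it */
        }
        return size;
    }

The real implementation is pvebackup_dump_cb() in the blockdev.c hunk, which forwards each cluster to vma_writer_write() and tracks the transferred/zero byte counters.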
diff --git a/Makefile b/Makefile
index 6c359b2f86..edbc8b50f0 100644
--- a/Makefile
+++ b/Makefile
@@ -284,7 +284,7 @@ ifneq ($(wildcard config-host.mak),)
29 include $(SRC_PATH)/tests/Makefile.include
30 endif
31
32-all: $(DOCS) $(TOOLS) $(HELPERS-y) recurse-all modules
33+all: $(DOCS) $(TOOLS) vma$(EXESUF) $(HELPERS-y) recurse-all modules
34
35 qemu-version.h: FORCE
36 $(call quiet-command, \
@@ -377,6 +377,7 @@ qemu-img.o: qemu-img-cmds.h
38 qemu-img$(EXESUF): qemu-img.o $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
39 qemu-nbd$(EXESUF): qemu-nbd.o $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
40 qemu-io$(EXESUF): qemu-io.o $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
41+vma$(EXESUF): vma.o vma-reader.o $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)

 qemu-bridge-helper$(EXESUF): qemu-bridge-helper.o $(COMMON_LDADDS)
44
diff --git a/Makefile.objs b/Makefile.objs
index fbfbbb7f70..f5f8dbab3b 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -14,6 +14,7 @@ block-obj-y += block.o blockjob.o
50 block-obj-y += block/
51 block-obj-y += qemu-io-cmds.o
 block-obj-$(CONFIG_REPLICATION) += replication.o
53+block-obj-y += vma-writer.o
54
55 block-obj-m = block/
56
diff --git a/block/backup.c b/block/backup.c
index 1ede70c061..7c5febc434 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -36,6 +36,7 @@ typedef struct BackupBlockJob {
62 BdrvDirtyBitmap *sync_bitmap;
63 MirrorSyncMode sync_mode;
64 RateLimit limit;
65+ BackupDumpFunc *dump_cb;
66 BlockdevOnError on_source_error;
67 BlockdevOnError on_target_error;
68 CoRwlock flush_rwlock;
69@@ -145,13 +146,24 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
70 goto out;
71 }
72
73+ int64_t start_sec = start * sectors_per_cluster;
74 if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
75- ret = blk_co_pwrite_zeroes(job->target, start * job->cluster_size,
76- bounce_qiov.size, BDRV_REQ_MAY_UNMAP);
77+ if (job->dump_cb) {
78+ ret = job->dump_cb(job->common.opaque, job->target, start_sec, n, NULL);
79+ }
80+ if (job->target) {
81+ ret = blk_co_pwrite_zeroes(job->target, start * job->cluster_size,
82+ bounce_qiov.size, BDRV_REQ_MAY_UNMAP);
83+ }
84 } else {
85- ret = blk_co_pwritev(job->target, start * job->cluster_size,
86- bounce_qiov.size, &bounce_qiov,
87- job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
88+ if (job->dump_cb) {
89+ ret = job->dump_cb(job->common.opaque, job->target, start_sec, n, bounce_buffer);
90+ }
91+ if (job->target) {
92+ ret = blk_co_pwritev(job->target, start * job->cluster_size,
93+ bounce_qiov.size, &bounce_qiov,
94+ job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
95+ }
96 }
97 if (ret < 0) {
98 trace_backup_do_cow_write_fail(job, start, ret);
@@ -246,6 +258,9 @@ static void backup_abort(BlockJob *job)
100 static void backup_clean(BlockJob *job)
101 {
102 BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+ if (!s->target) {
+ return;
+ }
106 assert(s->target);
107 blk_unref(s->target);
108 s->target = NULL;
@@ -255,7 +270,9 @@ static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
110 {
111 BackupBlockJob *s = container_of(job, BackupBlockJob, common);
112
113- blk_set_aio_context(s->target, aio_context);
+ if (s->target) {
+ blk_set_aio_context(s->target, aio_context);
+ }
117 }
118
119 void backup_do_checkpoint(BlockJob *job, Error **errp)
@@ -330,9 +347,11 @@ static BlockErrorAction backup_error_action(BackupBlockJob *job,
121 if (read) {
122 return block_job_error_action(&job->common, job->on_source_error,
123 true, error);
124- } else {
125+ } else if (job->target) {
126 return block_job_error_action(&job->common, job->on_target_error,
127 false, error);
128+ } else {
129+ return BLOCK_ERROR_ACTION_REPORT;
130 }
131 }
132
@@ -557,6 +576,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
134 BlockdevOnError on_source_error,
135 BlockdevOnError on_target_error,
136 int creation_flags,
137+ BackupDumpFunc *dump_cb,
138 BlockCompletionFunc *cb, void *opaque,
139 int pause_count,
140 BlockJobTxn *txn, Error **errp)
@@ -567,7 +587,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
142 int ret;
143
144 assert(bs);
145- assert(target);
146+ assert(target || dump_cb);
147
148 if (bs == target) {
149 error_setg(errp, "Source and target cannot be the same");
@@ -580,13 +600,13 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
151 return NULL;
152 }
153
154- if (!bdrv_is_inserted(target)) {
155+ if (target && !bdrv_is_inserted(target)) {
156 error_setg(errp, "Device is not inserted: %s",
157 bdrv_get_device_name(target));
158 return NULL;
159 }
160
161- if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
162+ if (target && compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
163 error_setg(errp, "Compression is not supported for this drive %s",
164 bdrv_get_device_name(target));
165 return NULL;
@@ -596,7 +616,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
167 return NULL;
168 }
169
170- if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
171+ if (target && bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
172 return NULL;
173 }
174
@@ -636,15 +656,18 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
176 goto error;
177 }
178
179- /* The target must match the source in size, so no resize here either */
180- job->target = blk_new(BLK_PERM_WRITE,
181- BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
182- BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
183- ret = blk_insert_bs(job->target, target, errp);
184- if (ret < 0) {
185- goto error;
186+ if (target) {
187+ /* The target must match the source in size, so no resize here either */
188+ job->target = blk_new(BLK_PERM_WRITE,
189+ BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
190+ BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
191+ ret = blk_insert_bs(job->target, target, errp);
192+ if (ret < 0) {
193+ goto error;
194+ }
195 }
196
197+ job->dump_cb = dump_cb;
198 job->on_source_error = on_source_error;
199 job->on_target_error = on_target_error;
200 job->sync_mode = sync_mode;
@@ -652,38 +675,55 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
202 sync_bitmap : NULL;
203 job->compress = compress;
204
205- /* If there is no backing file on the target, we cannot rely on COW if our
206- * backup cluster size is smaller than the target cluster size. Even for
207- * targets with a backing file, try to avoid COW if possible. */
208- ret = bdrv_get_info(target, &bdi);
209- if (ret == -ENOTSUP && !target->backing) {
210- /* Cluster size is not defined */
211- error_report("WARNING: The target block device doesn't provide "
212- "information about the block size and it doesn't have a "
213- "backing file. The default block size of %u bytes is "
214- "used. If the actual block size of the target exceeds "
215- "this default, the backup may be unusable",
216- BACKUP_CLUSTER_SIZE_DEFAULT);
217- job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
218- } else if (ret < 0 && !target->backing) {
219- error_setg_errno(errp, -ret,
220- "Couldn't determine the cluster size of the target image, "
221- "which has no backing file");
222- error_append_hint(errp,
223- "Aborting, since this may create an unusable destination image\n");
224- goto error;
225- } else if (ret < 0 && target->backing) {
226- /* Not fatal; just trudge on ahead. */
227- job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
228+ if (target) {
229+ /* If there is no backing file on the target, we cannot rely on COW if our
230+ * backup cluster size is smaller than the target cluster size. Even for
231+ * targets with a backing file, try to avoid COW if possible. */
232+ ret = bdrv_get_info(target, &bdi);
233+ if (ret == -ENOTSUP && !target->backing) {
234+ /* Cluster size is not defined */
235+ error_report("WARNING: The target block device doesn't provide "
236+ "information about the block size and it doesn't have a "
237+ "backing file. The default block size of %u bytes is "
238+ "used. If the actual block size of the target exceeds "
239+ "this default, the backup may be unusable",
240+ BACKUP_CLUSTER_SIZE_DEFAULT);
241+ job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
242+ } else if (ret < 0 && !target->backing) {
243+ error_setg_errno(errp, -ret,
244+ "Couldn't determine the cluster size of the target image, "
245+ "which has no backing file");
246+ error_append_hint(errp,
247+ "Aborting, since this may create an unusable destination image\n");
248+ goto error;
249+ } else if (ret < 0 && target->backing) {
250+ /* Not fatal; just trudge on ahead. */
251+ job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
252+ } else {
253+ job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
254+ }
255 } else {
256- job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
257+ ret = bdrv_get_info(bs, &bdi);
258+ if (ret < 0) {
259+ job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
260+ } else {
261+ /* round down to nearest BACKUP_CLUSTER_SIZE_DEFAULT */
262+ job->cluster_size = (bdi.cluster_size / BACKUP_CLUSTER_SIZE_DEFAULT) * BACKUP_CLUSTER_SIZE_DEFAULT;
263+ if (job->cluster_size == 0) {
264+ /* but we can't go below it */
265+ job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
266+ }
267+ }
268 }
269
270- /* Required permissions are already taken with target's blk_new() */
271- block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
272- &error_abort);
273+ if (target) {
274+ /* Required permissions are already taken with target's blk_new() */
275+ block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
276+ &error_abort);
277+ } else {
278+ job->common.pause_count = pause_count;
279+ }
280 job->common.len = len;
281- job->common.pause_count = pause_count;
282 block_job_txn_add_job(txn, &job->common);
283
284 return &job->common;
285diff --git a/block/replication.c b/block/replication.c
286index 1c41d9e6bf..60c6524417 100644
287--- a/block/replication.c
288+++ b/block/replication.c
289@@ -531,6 +531,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
290 0, MIRROR_SYNC_MODE_NONE, NULL, false,
291 BLOCKDEV_ON_ERROR_REPORT,
292 BLOCKDEV_ON_ERROR_REPORT, BLOCK_JOB_INTERNAL,
293+ NULL,
294 backup_job_completed, bs, 0, NULL, &local_err);
295 if (local_err) {
296 error_propagate(errp, local_err);
297diff --git a/blockdev.c b/blockdev.c
index 534c00f5da..19a82e8774 100644
299--- a/blockdev.c
300+++ b/blockdev.c
301@@ -31,7 +31,6 @@
302 */
303
304 #include "qemu/osdep.h"
305-#include "qemu/uuid.h"
306 #include "sysemu/block-backend.h"
307 #include "sysemu/blockdev.h"
308 #include "hw/block/block.h"
309@@ -55,6 +54,7 @@
310 #include "qemu/cutils.h"
311 #include "qemu/help_option.h"
312 #include "qemu/throttle-options.h"
313+#include "vma.h"
314
315 static QTAILQ_HEAD(, BlockDriverState) monitor_bdrv_states =
316 QTAILQ_HEAD_INITIALIZER(monitor_bdrv_states);
@@ -2934,20 +2934,44 @@ out:
318 aio_context_release(aio_context);
319 }
320
321+void block_job_event_cancelled(BlockJob *job);
322+void block_job_event_completed(BlockJob *job, const char *msg);
323+static void block_job_cb(void *opaque, int ret)
324+{
325+ /* Note that this function may be executed from another AioContext besides
326+ * the QEMU main loop. If you need to access anything that assumes the
327+ * QEMU global mutex, use a BH or introduce a mutex.
328+ */
329+
330+ BlockDriverState *bs = opaque;
331+ const char *msg = NULL;
332+
333+ assert(bs->job);
334+
335+ if (ret < 0) {
336+ msg = strerror(-ret);
337+ }
338+
339+ if (block_job_is_cancelled(bs->job)) {
340+ block_job_event_cancelled(bs->job);
341+ } else {
342+ block_job_event_completed(bs->job, msg);
343+ }
344+}
345+
346 /* PVE backup related function */
347
348 static struct PVEBackupState {
349 Error *error;
350 bool cancel;
351- QemuUUID uuid;
352+ uuid_t uuid;
353 char uuid_str[37];
354 int64_t speed;
355 time_t start_time;
356 time_t end_time;
357 char *backup_file;
358- Object *vmaobj;
359+ VmaWriter *vmaw;
360 GList *di_list;
361- size_t next_job;
362 size_t total;
363 size_t transferred;
364 size_t zero_bytes;
@@ -2957,6 +2981,7 @@ typedef struct PVEBackupDevInfo {
366 BlockDriverState *bs;
367 size_t size;
368 uint8_t dev_id;
369+ //bool started;
370 bool completed;
371 char targetfile[PATH_MAX];
372 BlockDriverState *target;
@@ -2964,13 +2989,79 @@ typedef struct PVEBackupDevInfo {
374
375 static void pvebackup_run_next_job(void);
376
377+static int pvebackup_dump_cb(void *opaque, BlockBackend *target,
378+ int64_t sector_num, int n_sectors,
379+ unsigned char *buf)
380+{
381+ PVEBackupDevInfo *di = opaque;
382+
383+ int size = n_sectors * BDRV_SECTOR_SIZE;
384+ if (backup_state.cancel) {
385+ return size; // return success
386+ }
387+
388+ if (sector_num & 0x7f) {
389+ if (!backup_state.error) {
390+ error_setg(&backup_state.error,
391+ "got unaligned write inside backup dump "
392+ "callback (sector %ld)", sector_num);
393+ }
394+ return -1; // not aligned to cluster size
395+ }
396+
397+ int64_t cluster_num = sector_num >> 7;
398+
399+ int ret = -1;
400+
401+ if (backup_state.vmaw) {
402+ size_t zero_bytes = 0;
403+ int64_t remaining = n_sectors * BDRV_SECTOR_SIZE;
404+ while (remaining > 0) {
405+ ret = vma_writer_write(backup_state.vmaw, di->dev_id, cluster_num,
406+ buf, &zero_bytes);
407+ ++cluster_num;
408+ if (buf) {
409+ buf += VMA_CLUSTER_SIZE;
410+ }
411+ if (ret < 0) {
412+ if (!backup_state.error) {
413+ vma_writer_error_propagate(backup_state.vmaw, &backup_state.error);
414+ }
415+ if (di->bs && di->bs->job) {
416+ block_job_cancel(di->bs->job);
417+ }
+ break;
419+ } else {
420+ backup_state.zero_bytes += zero_bytes;
421+ if (remaining >= VMA_CLUSTER_SIZE) {
422+ backup_state.transferred += VMA_CLUSTER_SIZE;
423+ } else {
424+ backup_state.transferred += remaining;
425+ }
426+ remaining -= VMA_CLUSTER_SIZE;
427+ }
428+ }
429+ } else {
430+ if (!buf) {
431+ backup_state.zero_bytes += size;
432+ }
433+ backup_state.transferred += size;
434+ }
435+
436+ // Note: always return success, because we want that writes succeed anyways.
437+
438+ return size;
439+}
440+
441 static void pvebackup_cleanup(void)
442 {
443 backup_state.end_time = time(NULL);
444
445- if (backup_state.vmaobj) {
446- object_unparent(backup_state.vmaobj);
447- backup_state.vmaobj = NULL;
448+ if (backup_state.vmaw) {
449+ Error *local_err = NULL;
450+ vma_writer_close(backup_state.vmaw, &local_err);
451+ error_propagate(&backup_state.error, local_err);
452+ backup_state.vmaw = NULL;
453 }
454
455 if (backup_state.di_list) {
@@ -2985,6 +3076,13 @@ static void pvebackup_cleanup(void)
457 }
458 }
459
460+static void coroutine_fn backup_close_vma_stream(void *opaque)
461+{
462+ PVEBackupDevInfo *di = opaque;
463+
464+ vma_writer_close_stream(backup_state.vmaw, di->dev_id);
465+}
466+
467 static void pvebackup_complete_cb(void *opaque, int ret)
468 {
469 PVEBackupDevInfo *di = opaque;
@@ -2996,14 +3094,18 @@ static void pvebackup_complete_cb(void *opaque, int ret)
471 ret, strerror(-ret));
472 }
473
474+ BlockDriverState *bs = di->bs;
475+
476 di->bs = NULL;
477 di->target = NULL;
478
479- if (backup_state.vmaobj) {
480- object_unparent(backup_state.vmaobj);
481- backup_state.vmaobj = NULL;
482+ if (backup_state.vmaw) {
483+ Coroutine *co = qemu_coroutine_create(backup_close_vma_stream, di);
484+ qemu_coroutine_enter(co);
485 }
486
487+ block_job_cb(bs, ret);
488+
489 if (!backup_state.cancel) {
490 pvebackup_run_next_job();
491 }
@@ -3017,14 +3119,9 @@ static void pvebackup_cancel(void *opaque)
493 error_setg(&backup_state.error, "backup cancelled");
494 }
495
496- if (backup_state.vmaobj) {
497- Error *err;
498+ if (backup_state.vmaw) {
499 /* make sure vma writer does not block anymore */
500- if (!object_set_props(backup_state.vmaobj, &err, "blocked", "yes", NULL)) {
501- if (err) {
502- error_report_err(err);
503- }
504- }
505+ vma_writer_set_error(backup_state.vmaw, "backup cancelled");
506 }
507
508 GList *l = backup_state.di_list;
@@ -3049,19 +3146,15 @@ void qmp_backup_cancel(Error **errp)
510 Coroutine *co = qemu_coroutine_create(pvebackup_cancel, NULL);
511 qemu_coroutine_enter(co);
512
513- while (backup_state.vmaobj) {
514- /* FIXME: Find something better for this */
515+ while (backup_state.vmaw) {
516+ /* vma writer use main aio context */
517 aio_poll(qemu_get_aio_context(), true);
518 }
519 }
520
521-void vma_object_add_config_file(Object *obj, const char *name,
522- const char *contents, size_t len,
523- Error **errp);
524 static int config_to_vma(const char *file, BackupFormat format,
525- Object *vmaobj,
526- const char *backup_dir,
527- Error **errp)
528+ const char *backup_dir, VmaWriter *vmaw,
529+ Error **errp)
530 {
531 char *cdata = NULL;
532 gsize clen = 0;
@@ -3074,12 +3167,17 @@ static int config_to_vma(const char *file, BackupFormat format,
534 char *basename = g_path_get_basename(file);
535
536 if (format == BACKUP_FORMAT_VMA) {
537- vma_object_add_config_file(vmaobj, basename, cdata, clen, errp);
538+ if (vma_writer_add_config(vmaw, basename, cdata, clen) != 0) {
539+ error_setg(errp, "unable to add %s config data to vma archive", file);
540+ g_free(cdata);
541+ g_free(basename);
542+ return 1;
543+ }
544 } else if (format == BACKUP_FORMAT_DIR) {
545 char config_path[PATH_MAX];
546 snprintf(config_path, PATH_MAX, "%s/%s", backup_dir, basename);
547 if (!g_file_set_contents(config_path, cdata, clen, &err)) {
548- error_setg(errp, "unable to write config file '%s'", config_path);
549+ error_setg(errp, "unable to write config file '%s'", config_path);
550 g_free(cdata);
551 g_free(basename);
552 return 1;
@@ -3089,34 +3187,37 @@ static int config_to_vma(const char *file, BackupFormat format,
554 g_free(basename);
555 g_free(cdata);
556
557- return 0;
558+ return 0;
559 }
560
561+bool block_job_should_pause(BlockJob *job);
562 static void pvebackup_run_next_job(void)
563 {
564- bool cancel = backup_state.error || backup_state.cancel;
565- GList *next = g_list_nth(backup_state.di_list, backup_state.next_job);
566- while (next) {
567- PVEBackupDevInfo *di = (PVEBackupDevInfo *)next->data;
568- backup_state.next_job++;
569+ GList *l = backup_state.di_list;
570+ while (l) {
571+ PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
572+ l = g_list_next(l);
573 if (!di->completed && di->bs && di->bs->job) {
574 BlockJob *job = di->bs->job;
575- if (cancel) {
576- block_job_cancel(job);
577- } else {
578- block_job_resume(job);
579+ if (block_job_should_pause(job)) {
580+ bool cancel = backup_state.error || backup_state.cancel;
581+ if (cancel) {
582+ block_job_cancel(job);
583+ } else {
584+ block_job_resume(job);
585+ }
586 }
587 return;
588 }
589- next = g_list_next(next);
590 }
591+
592 pvebackup_cleanup();
593 }
594
595 UuidInfo *qmp_backup(const char *backup_file, bool has_format,
596 BackupFormat format,
597 bool has_config_file, const char *config_file,
598- bool has_firewall_file, const char *firewall_file,
599+ bool has_firewall_file, const char *firewall_file,
600 bool has_devlist, const char *devlist,
601 bool has_speed, int64_t speed, Error **errp)
602 {
@@ -3124,14 +3225,15 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
604 BlockDriverState *bs = NULL;
605 const char *backup_dir = NULL;
606 Error *local_err = NULL;
607- QemuUUID uuid;
608+ uuid_t uuid;
609+ VmaWriter *vmaw = NULL;
610 gchar **devs = NULL;
611 GList *di_list = NULL;
612 GList *l;
613 UuidInfo *uuid_info;
614 BlockJob *job;
615
616- if (backup_state.di_list || backup_state.vmaobj) {
617+ if (backup_state.di_list) {
618 error_set(errp, ERROR_CLASS_GENERIC_ERROR,
619 "previous backup not finished");
620 return NULL;
@@ -3206,40 +3308,28 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
622 total += size;
623 }
624
625- qemu_uuid_generate(&uuid);
626+ uuid_generate(uuid);
627
628 if (format == BACKUP_FORMAT_VMA) {
629- char uuidstr[UUID_FMT_LEN+1];
630- qemu_uuid_unparse(&uuid, uuidstr);
631- uuidstr[UUID_FMT_LEN] = 0;
632- backup_state.vmaobj =
633- object_new_with_props("vma", object_get_objects_root(),
634- "vma-backup-obj", &local_err,
635- "filename", backup_file,
636- "uuid", uuidstr,
637- NULL);
638- if (!backup_state.vmaobj) {
639+ vmaw = vma_writer_create(backup_file, uuid, &local_err);
640+ if (!vmaw) {
641 if (local_err) {
642 error_propagate(errp, local_err);
643 }
644 goto err;
645 }
646
647+ /* register all devices for vma writer */
648 l = di_list;
649 while (l) {
650- QDict *options = qdict_new();
651-
652 PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
653 l = g_list_next(l);
654
655 const char *devname = bdrv_get_device_name(di->bs);
656- snprintf(di->targetfile, PATH_MAX, "vma-backup-obj/%s.raw", devname);
657-
658- qdict_put(options, "driver", qstring_from_str("vma-drive"));
659- qdict_put(options, "size", qint_from_int(di->size));
660- di->target = bdrv_open(di->targetfile, NULL, options, BDRV_O_RDWR, &local_err);
661- if (!di->target) {
662- error_propagate(errp, local_err);
663+ di->dev_id = vma_writer_register_stream(vmaw, devname, di->size);
664+ if (di->dev_id <= 0) {
665+ error_set(errp, ERROR_CLASS_GENERIC_ERROR,
666+ "register_stream failed");
667 goto err;
668 }
669 }
@@ -3280,15 +3370,15 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
671
672 /* add configuration file to archive */
673 if (has_config_file) {
674- if(config_to_vma(config_file, format, backup_state.vmaobj, backup_dir, errp) != 0) {
675- goto err;
676+ if(config_to_vma(config_file, format, backup_dir, vmaw, errp) != 0) {
677+ goto err;
678 }
679 }
680
681 /* add firewall file to archive */
682 if (has_firewall_file) {
683- if(config_to_vma(firewall_file, format, backup_state.vmaobj, backup_dir, errp) != 0) {
684- goto err;
685+ if(config_to_vma(firewall_file, format, backup_dir, vmaw, errp) != 0) {
686+ goto err;
687 }
688 }
689 /* initialize global backup_state now */
@@ -3310,11 +3400,12 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
691 }
692 backup_state.backup_file = g_strdup(backup_file);
693
694- memcpy(&backup_state.uuid, &uuid, sizeof(uuid));
695- qemu_uuid_unparse(&uuid, backup_state.uuid_str);
696+ backup_state.vmaw = vmaw;
697+
698+ uuid_copy(backup_state.uuid, uuid);
699+ uuid_unparse_lower(uuid, backup_state.uuid_str);
700
701 backup_state.di_list = di_list;
702- backup_state.next_job = 0;
703
704 backup_state.total = total;
705 backup_state.transferred = 0;
@@ -3325,21 +3416,16 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
707 while (l) {
708 PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
709 l = g_list_next(l);
710-
711 job = backup_job_create(NULL, di->bs, di->target, speed, MIRROR_SYNC_MODE_FULL, NULL,
712 false, BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
713 BLOCK_JOB_DEFAULT,
714- pvebackup_complete_cb, di, 2, NULL, &local_err);
715- if (di->target) {
716- bdrv_unref(di->target);
717- di->target = NULL;
718- }
719+ pvebackup_dump_cb, pvebackup_complete_cb, di,
720+ 2, NULL, &local_err);
721 if (!job || local_err != NULL) {
722 error_setg(&backup_state.error, "backup_job_create failed");
723 pvebackup_cancel(NULL);
724- } else {
725- block_job_start(job);
726 }
727+ block_job_start(job);
728 }
729
730 if (!backup_state.error) {
@@ -3373,9 +3459,10 @@ err:
732 g_strfreev(devs);
733 }
734
735- if (backup_state.vmaobj) {
736- object_unparent(backup_state.vmaobj);
737- backup_state.vmaobj = NULL;
738+ if (vmaw) {
739+ Error *err = NULL;
740+ vma_writer_close(vmaw, &err);
741+ unlink(backup_file);
742 }
743
744 if (backup_dir) {
@@ -3760,7 +3847,7 @@ static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn,
746 job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
747 backup->sync, bmap, backup->compress,
748 backup->on_source_error, backup->on_target_error,
749- BLOCK_JOB_DEFAULT, NULL, NULL, 0, txn, &local_err);
750+ BLOCK_JOB_DEFAULT, NULL, NULL, NULL, 0, txn, &local_err);
751 bdrv_unref(target_bs);
752 if (local_err != NULL) {
753 error_propagate(errp, local_err);
@@ -3839,7 +3926,7 @@ BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn,
755 job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
756 backup->sync, NULL, backup->compress,
757 backup->on_source_error, backup->on_target_error,
758- BLOCK_JOB_DEFAULT, NULL, NULL, 0, txn, &local_err);
759+ BLOCK_JOB_DEFAULT, NULL, NULL, NULL, 0, txn, &local_err);
760 if (local_err != NULL) {
761 error_propagate(errp, local_err);
762 }
763diff --git a/blockjob.c b/blockjob.c
764index 764d41863e..cb3741f6dd 100644
765--- a/blockjob.c
766+++ b/blockjob.c
767@@ -37,8 +37,8 @@
768 #include "qemu/timer.h"
769 #include "qapi-event.h"
770
771-static void block_job_event_cancelled(BlockJob *job);
772-static void block_job_event_completed(BlockJob *job, const char *msg);
773+void block_job_event_cancelled(BlockJob *job);
774+void block_job_event_completed(BlockJob *job, const char *msg);
775
776 /* Transactional group of block jobs */
777 struct BlockJobTxn {
778@@ -473,7 +473,8 @@ void block_job_user_pause(BlockJob *job)
779 block_job_pause(job);
780 }
781
782-static bool block_job_should_pause(BlockJob *job)
783+bool block_job_should_pause(BlockJob *job);
784+bool block_job_should_pause(BlockJob *job)
785 {
786 return job->pause_count > 0;
787 }
788@@ -687,7 +688,7 @@ static void block_job_iostatus_set_err(BlockJob *job, int error)
789 }
790 }
791
792-static void block_job_event_cancelled(BlockJob *job)
793+void block_job_event_cancelled(BlockJob *job)
794 {
795 if (block_job_is_internal(job)) {
796 return;
797@@ -701,7 +702,7 @@ static void block_job_event_cancelled(BlockJob *job)
798 &error_abort);
799 }
800
801-static void block_job_event_completed(BlockJob *job, const char *msg)
802+void block_job_event_completed(BlockJob *job, const char *msg)
803 {
804 if (block_job_is_internal(job)) {
805 return;
806diff --git a/include/block/block_int.h b/include/block/block_int.h
index 19b84b027f..fc1c53a059 100644
808--- a/include/block/block_int.h
809+++ b/include/block/block_int.h
810@@ -59,6 +59,9 @@
811
812 #define BLOCK_PROBE_BUF_SIZE 512
813
814+typedef int BackupDumpFunc(void *opaque, BlockBackend *be,
815+ int64_t sector_num, int n_sectors, unsigned char *buf);
816+
817 enum BdrvTrackedRequestType {
818 BDRV_TRACKED_READ,
819 BDRV_TRACKED_WRITE,
@@ -878,6 +881,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
821 BlockdevOnError on_source_error,
822 BlockdevOnError on_target_error,
823 int creation_flags,
824+ BackupDumpFunc *dump_cb,
825 BlockCompletionFunc *cb, void *opaque,
826 int pause_count,
827 BlockJobTxn *txn, Error **errp);
828diff --git a/vma-reader.c b/vma-reader.c
829new file mode 100644
index 0000000000..2000889bd3
--- /dev/null
+++ b/vma-reader.c
@@ -0,0 +1,857 @@
834+/*
835+ * VMA: Virtual Machine Archive
836+ *
837+ * Copyright (C) 2012 Proxmox Server Solutions
838+ *
839+ * Authors:
840+ * Dietmar Maurer (dietmar@proxmox.com)
841+ *
842+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
843+ * See the COPYING file in the top-level directory.
844+ *
845+ */
846+
847+#include "qemu/osdep.h"
848+#include <glib.h>
849+#include <uuid/uuid.h>
850+
851+#include "qemu-common.h"
852+#include "qemu/timer.h"
853+#include "qemu/ratelimit.h"
854+#include "vma.h"
855+#include "block/block.h"
856+#include "sysemu/block-backend.h"
857+
858+static unsigned char zero_vma_block[VMA_BLOCK_SIZE];
859+
860+typedef struct VmaRestoreState {
+ BlockBackend *target;
862+ bool write_zeroes;
863+ unsigned long *bitmap;
864+ int bitmap_size;
865+} VmaRestoreState;
866+
867+struct VmaReader {
868+ int fd;
869+ GChecksum *md5csum;
870+ GHashTable *blob_hash;
871+ unsigned char *head_data;
872+ VmaDeviceInfo devinfo[256];
873+ VmaRestoreState rstate[256];
874+ GList *cdata_list;
875+ guint8 vmstate_stream;
876+ uint32_t vmstate_clusters;
877+ /* to show restore percentage if run with -v */
878+ time_t start_time;
879+ int64_t cluster_count;
880+ int64_t clusters_read;
881+ int64_t zero_cluster_data;
882+ int64_t partial_zero_cluster_data;
883+ int clusters_read_per;
884+};
885+
886+static guint
887+g_int32_hash(gconstpointer v)
888+{
889+ return *(const uint32_t *)v;
890+}
891+
892+static gboolean
893+g_int32_equal(gconstpointer v1, gconstpointer v2)
894+{
895+ return *((const uint32_t *)v1) == *((const uint32_t *)v2);
896+}
897+
898+static int vma_reader_get_bitmap(VmaRestoreState *rstate, int64_t cluster_num)
899+{
900+ assert(rstate);
901+ assert(rstate->bitmap);
902+
903+ unsigned long val, idx, bit;
904+
905+ idx = cluster_num / BITS_PER_LONG;
906+
907+ assert(rstate->bitmap_size > idx);
908+
909+ bit = cluster_num % BITS_PER_LONG;
910+ val = rstate->bitmap[idx];
911+
912+ return !!(val & (1UL << bit));
913+}
914+
915+static void vma_reader_set_bitmap(VmaRestoreState *rstate, int64_t cluster_num,
916+ int dirty)
917+{
918+ assert(rstate);
919+ assert(rstate->bitmap);
920+
921+ unsigned long val, idx, bit;
922+
923+ idx = cluster_num / BITS_PER_LONG;
924+
925+ assert(rstate->bitmap_size > idx);
926+
927+ bit = cluster_num % BITS_PER_LONG;
928+ val = rstate->bitmap[idx];
929+ if (dirty) {
930+ if (!(val & (1UL << bit))) {
931+ val |= 1UL << bit;
932+ }
933+ } else {
934+ if (val & (1UL << bit)) {
935+ val &= ~(1UL << bit);
936+ }
937+ }
938+ rstate->bitmap[idx] = val;
939+}
940+
941+typedef struct VmaBlob {
942+ uint32_t start;
943+ uint32_t len;
944+ void *data;
945+} VmaBlob;
946+
947+static const VmaBlob *get_header_blob(VmaReader *vmar, uint32_t pos)
948+{
949+ assert(vmar);
950+ assert(vmar->blob_hash);
951+
952+ return g_hash_table_lookup(vmar->blob_hash, &pos);
953+}
954+
955+static const char *get_header_str(VmaReader *vmar, uint32_t pos)
956+{
957+ const VmaBlob *blob = get_header_blob(vmar, pos);
958+ if (!blob) {
959+ return NULL;
960+ }
961+ const char *res = (char *)blob->data;
962+ if (res[blob->len-1] != '\0') {
963+ return NULL;
964+ }
965+ return res;
966+}
967+
968+static ssize_t
969+safe_read(int fd, unsigned char *buf, size_t count)
970+{
971+ ssize_t n;
972+
973+ do {
974+ n = read(fd, buf, count);
975+ } while (n < 0 && errno == EINTR);
976+
977+ return n;
978+}
979+
980+static ssize_t
981+full_read(int fd, unsigned char *buf, size_t len)
982+{
983+ ssize_t n;
984+ size_t total;
985+
986+ total = 0;
987+
988+ while (len > 0) {
989+ n = safe_read(fd, buf, len);
990+
991+ if (n == 0) {
992+ return total;
993+ }
994+
995+ if (n <= 0) {
996+ break;
997+ }
998+
999+ buf += n;
1000+ total += n;
1001+ len -= n;
1002+ }
1003+
1004+ if (len) {
1005+ return -1;
1006+ }
1007+
1008+ return total;
1009+}
1010+
1011+void vma_reader_destroy(VmaReader *vmar)
1012+{
1013+ assert(vmar);
1014+
1015+ if (vmar->fd >= 0) {
1016+ close(vmar->fd);
1017+ }
1018+
1019+ if (vmar->cdata_list) {
1020+ g_list_free(vmar->cdata_list);
1021+ }
1022+
1023+ int i;
1024+ for (i = 1; i < 256; i++) {
1025+ if (vmar->rstate[i].bitmap) {
1026+ g_free(vmar->rstate[i].bitmap);
1027+ }
1028+ }
1029+
1030+ if (vmar->md5csum) {
1031+ g_checksum_free(vmar->md5csum);
1032+ }
1033+
1034+ if (vmar->blob_hash) {
1035+ g_hash_table_destroy(vmar->blob_hash);
1036+ }
1037+
1038+ if (vmar->head_data) {
1039+ g_free(vmar->head_data);
1040+ }
1041+
1042+ g_free(vmar);
1043+
1044+};
1045+
1046+static int vma_reader_read_head(VmaReader *vmar, Error **errp)
1047+{
1048+ assert(vmar);
1049+ assert(errp);
1050+ assert(*errp == NULL);
1051+
1052+ unsigned char md5sum[16];
1053+ int i;
1054+ int ret = 0;
1055+
1056+ vmar->head_data = g_malloc(sizeof(VmaHeader));
1057+
1058+ if (full_read(vmar->fd, vmar->head_data, sizeof(VmaHeader)) !=
1059+ sizeof(VmaHeader)) {
1060+ error_setg(errp, "can't read vma header - %s",
1061+ errno ? g_strerror(errno) : "got EOF");
1062+ return -1;
1063+ }
1064+
1065+ VmaHeader *h = (VmaHeader *)vmar->head_data;
1066+
1067+ if (h->magic != VMA_MAGIC) {
1068+ error_setg(errp, "not a vma file - wrong magic number");
1069+ return -1;
1070+ }
1071+
1072+ uint32_t header_size = GUINT32_FROM_BE(h->header_size);
1073+ int need = header_size - sizeof(VmaHeader);
1074+ if (need <= 0) {
1075+ error_setg(errp, "wrong vma header size %d", header_size);
1076+ return -1;
1077+ }
1078+
1079+ vmar->head_data = g_realloc(vmar->head_data, header_size);
1080+ h = (VmaHeader *)vmar->head_data;
1081+
1082+ if (full_read(vmar->fd, vmar->head_data + sizeof(VmaHeader), need) !=
1083+ need) {
1084+ error_setg(errp, "can't read vma header data - %s",
1085+ errno ? g_strerror(errno) : "got EOF");
1086+ return -1;
1087+ }
1088+
1089+ memcpy(md5sum, h->md5sum, 16);
1090+ memset(h->md5sum, 0, 16);
1091+
1092+ g_checksum_reset(vmar->md5csum);
1093+ g_checksum_update(vmar->md5csum, vmar->head_data, header_size);
1094+ gsize csize = 16;
1095+ g_checksum_get_digest(vmar->md5csum, (guint8 *)(h->md5sum), &csize);
1096+
1097+ if (memcmp(md5sum, h->md5sum, 16) != 0) {
1098+ error_setg(errp, "wrong vma header chechsum");
1099+ return -1;
1100+ }
1101+
1102+ /* we can modify header data after checksum verify */
1103+ h->header_size = header_size;
1104+
1105+ h->version = GUINT32_FROM_BE(h->version);
1106+ if (h->version != 1) {
1107+ error_setg(errp, "wrong vma version %d", h->version);
1108+ return -1;
1109+ }
1110+
1111+ h->ctime = GUINT64_FROM_BE(h->ctime);
1112+ h->blob_buffer_offset = GUINT32_FROM_BE(h->blob_buffer_offset);
1113+ h->blob_buffer_size = GUINT32_FROM_BE(h->blob_buffer_size);
1114+
1115+ uint32_t bstart = h->blob_buffer_offset + 1;
1116+ uint32_t bend = h->blob_buffer_offset + h->blob_buffer_size;
1117+
1118+ if (bstart <= sizeof(VmaHeader)) {
1119+ error_setg(errp, "wrong vma blob buffer offset %d",
1120+ h->blob_buffer_offset);
1121+ return -1;
1122+ }
1123+
1124+ if (bend > header_size) {
1125+ error_setg(errp, "wrong vma blob buffer size %d/%d",
1126+ h->blob_buffer_offset, h->blob_buffer_size);
1127+ return -1;
1128+ }
1129+
1130+ while ((bstart + 2) <= bend) {
1131+ uint32_t size = vmar->head_data[bstart] +
1132+ (vmar->head_data[bstart+1] << 8);
1133+ if ((bstart + size + 2) <= bend) {
1134+ VmaBlob *blob = g_new0(VmaBlob, 1);
1135+ blob->start = bstart - h->blob_buffer_offset;
1136+ blob->len = size;
1137+ blob->data = vmar->head_data + bstart + 2;
1138+ g_hash_table_insert(vmar->blob_hash, &blob->start, blob);
1139+ }
1140+ bstart += size + 2;
1141+ }
1142+
1143+
1144+ int count = 0;
1145+ for (i = 1; i < 256; i++) {
1146+ VmaDeviceInfoHeader *dih = &h->dev_info[i];
1147+ uint32_t devname_ptr = GUINT32_FROM_BE(dih->devname_ptr);
1148+ uint64_t size = GUINT64_FROM_BE(dih->size);
1149+ const char *devname = get_header_str(vmar, devname_ptr);
1150+
1151+ if (size && devname) {
1152+ count++;
1153+ vmar->devinfo[i].size = size;
1154+ vmar->devinfo[i].devname = devname;
1155+
1156+ if (strcmp(devname, "vmstate") == 0) {
1157+ vmar->vmstate_stream = i;
1158+ }
1159+ }
1160+ }
1161+
1162+ for (i = 0; i < VMA_MAX_CONFIGS; i++) {
1163+ uint32_t name_ptr = GUINT32_FROM_BE(h->config_names[i]);
1164+ uint32_t data_ptr = GUINT32_FROM_BE(h->config_data[i]);
1165+
1166+ if (!(name_ptr && data_ptr)) {
1167+ continue;
1168+ }
1169+ const char *name = get_header_str(vmar, name_ptr);
1170+ const VmaBlob *blob = get_header_blob(vmar, data_ptr);
1171+
1172+ if (!(name && blob)) {
1173+ error_setg(errp, "vma contains invalid data pointers");
1174+ return -1;
1175+ }
1176+
1177+ VmaConfigData *cdata = g_new0(VmaConfigData, 1);
1178+ cdata->name = name;
1179+ cdata->data = blob->data;
1180+ cdata->len = blob->len;
1181+
1182+ vmar->cdata_list = g_list_append(vmar->cdata_list, cdata);
1183+ }
1184+
1185+ return ret;
1186+};
1187+
1188+VmaReader *vma_reader_create(const char *filename, Error **errp)
1189+{
1190+ assert(filename);
1191+ assert(errp);
1192+
1193+ VmaReader *vmar = g_new0(VmaReader, 1);
1194+
1195+ if (strcmp(filename, "-") == 0) {
1196+ vmar->fd = dup(0);
1197+ } else {
1198+ vmar->fd = open(filename, O_RDONLY);
1199+ }
1200+
1201+ if (vmar->fd < 0) {
1202+ error_setg(errp, "can't open file %s - %s\n", filename,
1203+ g_strerror(errno));
1204+ goto err;
1205+ }
1206+
1207+ vmar->md5csum = g_checksum_new(G_CHECKSUM_MD5);
1208+ if (!vmar->md5csum) {
1209+ error_setg(errp, "can't allocate cmsum\n");
1210+ goto err;
1211+ }
1212+
1213+ vmar->blob_hash = g_hash_table_new_full(g_int32_hash, g_int32_equal,
1214+ NULL, g_free);
1215+
1216+ if (vma_reader_read_head(vmar, errp) < 0) {
1217+ goto err;
1218+ }
1219+
1220+ return vmar;
1221+
1222+err:
1223+ if (vmar) {
1224+ vma_reader_destroy(vmar);
1225+ }
1226+
1227+ return NULL;
1228+}
1229+
1230+VmaHeader *vma_reader_get_header(VmaReader *vmar)
1231+{
1232+ assert(vmar);
1233+ assert(vmar->head_data);
1234+
1235+ return (VmaHeader *)(vmar->head_data);
1236+}
1237+
1238+GList *vma_reader_get_config_data(VmaReader *vmar)
1239+{
1240+ assert(vmar);
1241+ assert(vmar->head_data);
1242+
1243+ return vmar->cdata_list;
1244+}
1245+
1246+VmaDeviceInfo *vma_reader_get_device_info(VmaReader *vmar, guint8 dev_id)
1247+{
1248+ assert(vmar);
1249+ assert(dev_id);
1250+
1251+ if (vmar->devinfo[dev_id].size && vmar->devinfo[dev_id].devname) {
1252+ return &vmar->devinfo[dev_id];
1253+ }
1254+
1255+ return NULL;
1256+}
1257+
1258+static void allocate_rstate(VmaReader *vmar, guint8 dev_id,
1259+ BlockBackend *target, bool write_zeroes)
1260+{
1261+ assert(vmar);
1262+ assert(dev_id);
1263+
1264+ vmar->rstate[dev_id].target = target;
1265+ vmar->rstate[dev_id].write_zeroes = write_zeroes;
1266+
1267+ int64_t size = vmar->devinfo[dev_id].size;
1268+
1269+ int64_t bitmap_size = (size/BDRV_SECTOR_SIZE) +
1270+ (VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE) * BITS_PER_LONG - 1;
1271+ bitmap_size /= (VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE) * BITS_PER_LONG;
1272+
1273+ vmar->rstate[dev_id].bitmap_size = bitmap_size;
1274+ vmar->rstate[dev_id].bitmap = g_new0(unsigned long, bitmap_size);
1275+
1276+ vmar->cluster_count += size/VMA_CLUSTER_SIZE;
1277+}
1278+
1279+int vma_reader_register_bs(VmaReader *vmar, guint8 dev_id, BlockBackend *target,
1280+ bool write_zeroes, Error **errp)
1281+{
1282+ assert(vmar);
+ assert(target != NULL);
+ assert(dev_id);
+ assert(vmar->rstate[dev_id].target == NULL);
+
+ int64_t size = blk_getlength(target);
1288+ int64_t size_diff = size - vmar->devinfo[dev_id].size;
1289+
1290+ /* storage types can have different size restrictions, so it
1291+ * is not always possible to create an image with exact size.
1292+ * So we tolerate a size difference up to 4MB.
1293+ */
1294+ if ((size_diff < 0) || (size_diff > 4*1024*1024)) {
1295+ error_setg(errp, "vma_reader_register_bs for stream %s failed - "
1296+ "unexpected size %zd != %zd", vmar->devinfo[dev_id].devname,
1297+ size, vmar->devinfo[dev_id].size);
1298+ return -1;
1299+ }
1300+
+ allocate_rstate(vmar, dev_id, target, write_zeroes);
1302+
1303+ return 0;
1304+}
1305+
1306+static ssize_t safe_write(int fd, void *buf, size_t count)
1307+{
1308+ ssize_t n;
1309+
1310+ do {
1311+ n = write(fd, buf, count);
1312+ } while (n < 0 && errno == EINTR);
1313+
1314+ return n;
1315+}
1316+
1317+static size_t full_write(int fd, void *buf, size_t len)
1318+{
1319+ ssize_t n;
1320+ size_t total;
1321+
1322+ total = 0;
1323+
1324+ while (len > 0) {
1325+ n = safe_write(fd, buf, len);
1326+ if (n < 0) {
1327+ return n;
1328+ }
1329+ buf += n;
1330+ total += n;
1331+ len -= n;
1332+ }
1333+
1334+ if (len) {
1335+ /* incomplete write ? */
1336+ return -1;
1337+ }
1338+
1339+ return total;
1340+}
1341+
1342+static int restore_write_data(VmaReader *vmar, guint8 dev_id,
+ BlockBackend *target, int vmstate_fd,
1344+ unsigned char *buf, int64_t sector_num,
1345+ int nb_sectors, Error **errp)
1346+{
1347+ assert(vmar);
1348+
1349+ if (dev_id == vmar->vmstate_stream) {
1350+ if (vmstate_fd >= 0) {
1351+ int len = nb_sectors * BDRV_SECTOR_SIZE;
1352+ int res = full_write(vmstate_fd, buf, len);
1353+ if (res < 0) {
1354+ error_setg(errp, "write vmstate failed %d", res);
1355+ return -1;
1356+ }
1357+ }
1358+ } else {
+ int res = blk_pwrite(target, sector_num * BDRV_SECTOR_SIZE, buf, nb_sectors * BDRV_SECTOR_SIZE, 0);
+ if (res < 0) {
1361+ error_setg(errp, "blk_pwrite to %s failed (%d)",
1362+ bdrv_get_device_name(blk_bs(target)), res);
95259824
WB
1363+ return -1;
1364+ }
1365+ }
1366+ return 0;
1367+}
+
1369+static int restore_extent(VmaReader *vmar, unsigned char *buf,
1370+ int extent_size, int vmstate_fd,
+ bool verbose, bool verify, Error **errp)
1372+{
1373+ assert(vmar);
1374+ assert(buf);
1375+
1376+ VmaExtentHeader *ehead = (VmaExtentHeader *)buf;
1377+ int start = VMA_EXTENT_HEADER_SIZE;
1378+ int i;
1379+
1380+ for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
1381+ uint64_t block_info = GUINT64_FROM_BE(ehead->blockinfo[i]);
1382+ uint64_t cluster_num = block_info & 0xffffffff;
1383+ uint8_t dev_id = (block_info >> 32) & 0xff;
1384+ uint16_t mask = block_info >> (32+16);
1385+ int64_t max_sector;
1386+
1387+ if (!dev_id) {
1388+ continue;
1389+ }
1390+
1391+ VmaRestoreState *rstate = &vmar->rstate[dev_id];
+ BlockBackend *target = NULL;
1393+
1394+ if (dev_id != vmar->vmstate_stream) {
1395+ target = rstate->target;
1396+ if (!verify && !target) {
95259824
WB
1397+ error_setg(errp, "got wrong dev id %d", dev_id);
1398+ return -1;
1399+ }
1400+
1401+ if (vma_reader_get_bitmap(rstate, cluster_num)) {
1402+ error_setg(errp, "found duplicated cluster %zd for stream %s",
1403+ cluster_num, vmar->devinfo[dev_id].devname);
1404+ return -1;
1405+ }
1406+ vma_reader_set_bitmap(rstate, cluster_num, 1);
1407+
1408+ max_sector = vmar->devinfo[dev_id].size/BDRV_SECTOR_SIZE;
1409+ } else {
1410+ max_sector = G_MAXINT64;
1411+ if (cluster_num != vmar->vmstate_clusters) {
1412+ error_setg(errp, "found out of order vmstate data");
1413+ return -1;
1414+ }
1415+ vmar->vmstate_clusters++;
1416+ }
1417+
1418+ vmar->clusters_read++;
1419+
1420+ if (verbose) {
1421+ time_t duration = time(NULL) - vmar->start_time;
1422+ int percent = (vmar->clusters_read*100)/vmar->cluster_count;
1423+ if (percent != vmar->clusters_read_per) {
1424+ printf("progress %d%% (read %zd bytes, duration %zd sec)\n",
1425+ percent, vmar->clusters_read*VMA_CLUSTER_SIZE,
1426+ duration);
1427+ fflush(stdout);
1428+ vmar->clusters_read_per = percent;
1429+ }
1430+ }
1431+
1432+ /* try to write whole clusters to speedup restore */
1433+ if (mask == 0xffff) {
1434+ if ((start + VMA_CLUSTER_SIZE) > extent_size) {
1435+ error_setg(errp, "short vma extent - too many blocks");
1436+ return -1;
1437+ }
1438+ int64_t sector_num = (cluster_num * VMA_CLUSTER_SIZE) /
1439+ BDRV_SECTOR_SIZE;
1440+ int64_t end_sector = sector_num +
1441+ VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE;
1442+
1443+ if (end_sector > max_sector) {
1444+ end_sector = max_sector;
1445+ }
1446+
1447+ if (end_sector <= sector_num) {
1448+ error_setg(errp, "got wrong block address - write bejond end");
1449+ return -1;
1450+ }
1451+
67af0fa4
WB
1452+ if (!verify) {
1453+ int nb_sectors = end_sector - sector_num;
1454+ if (restore_write_data(vmar, dev_id, target, vmstate_fd,
1455+ buf + start, sector_num, nb_sectors,
1456+ errp) < 0) {
1457+ return -1;
1458+ }
95259824
WB
1459+ }
1460+
1461+ start += VMA_CLUSTER_SIZE;
1462+ } else {
1463+ int j;
1464+ int bit = 1;
1465+
1466+ for (j = 0; j < 16; j++) {
1467+ int64_t sector_num = (cluster_num*VMA_CLUSTER_SIZE +
1468+ j*VMA_BLOCK_SIZE)/BDRV_SECTOR_SIZE;
1469+
1470+ int64_t end_sector = sector_num +
1471+ VMA_BLOCK_SIZE/BDRV_SECTOR_SIZE;
1472+ if (end_sector > max_sector) {
1473+ end_sector = max_sector;
1474+ }
1475+
1476+ if (mask & bit) {
1477+ if ((start + VMA_BLOCK_SIZE) > extent_size) {
1478+ error_setg(errp, "short vma extent - too many blocks");
1479+ return -1;
1480+ }
1481+
1482+ if (end_sector <= sector_num) {
1483+ error_setg(errp, "got wrong block address - "
1484+ "write bejond end");
1485+ return -1;
1486+ }
1487+
67af0fa4
WB
1488+ if (!verify) {
1489+ int nb_sectors = end_sector - sector_num;
1490+ if (restore_write_data(vmar, dev_id, target, vmstate_fd,
1491+ buf + start, sector_num,
1492+ nb_sectors, errp) < 0) {
1493+ return -1;
1494+ }
95259824
WB
1495+ }
1496+
1497+ start += VMA_BLOCK_SIZE;
1498+
1499+ } else {
1500+
67af0fa4
WB
1501+
1502+ if (end_sector > sector_num) {
95259824
WB
1503+ /* Todo: use bdrv_co_write_zeroes (but that need to
1504+ * be run inside coroutine?)
1505+ */
1506+ int nb_sectors = end_sector - sector_num;
67af0fa4
WB
1507+ int zero_size = BDRV_SECTOR_SIZE*nb_sectors;
1508+ vmar->zero_cluster_data += zero_size;
1509+ if (mask != 0) {
1510+ vmar->partial_zero_cluster_data += zero_size;
1511+ }
1512+
1513+ if (rstate->write_zeroes && !verify) {
1514+ if (restore_write_data(vmar, dev_id, target, vmstate_fd,
1515+ zero_vma_block, sector_num,
1516+ nb_sectors, errp) < 0) {
1517+ return -1;
1518+ }
95259824
WB
1519+ }
1520+ }
1521+ }
1522+
1523+ bit = bit << 1;
1524+ }
1525+ }
1526+ }
1527+
1528+ if (start != extent_size) {
1529+ error_setg(errp, "vma extent error - missing blocks");
1530+ return -1;
1531+ }
1532+
1533+ return 0;
1534+}
1535+
67af0fa4
WB
1536+static int vma_reader_restore_full(VmaReader *vmar, int vmstate_fd,
1537+ bool verbose, bool verify,
1538+ Error **errp)
95259824
WB
1539+{
1540+ assert(vmar);
1541+ assert(vmar->head_data);
1542+
1543+ int ret = 0;
1544+ unsigned char buf[VMA_MAX_EXTENT_SIZE];
1545+ int buf_pos = 0;
1546+ unsigned char md5sum[16];
1547+ VmaHeader *h = (VmaHeader *)vmar->head_data;
1548+
1549+ vmar->start_time = time(NULL);
1550+
1551+ while (1) {
1552+ int bytes = full_read(vmar->fd, buf + buf_pos, sizeof(buf) - buf_pos);
1553+ if (bytes < 0) {
1554+ error_setg(errp, "read failed - %s", g_strerror(errno));
1555+ return -1;
1556+ }
1557+
1558+ buf_pos += bytes;
1559+
1560+ if (!buf_pos) {
1561+ break; /* EOF */
1562+ }
1563+
1564+ if (buf_pos < VMA_EXTENT_HEADER_SIZE) {
1565+ error_setg(errp, "read short extent (%d bytes)", buf_pos);
1566+ return -1;
1567+ }
1568+
1569+ VmaExtentHeader *ehead = (VmaExtentHeader *)buf;
1570+
1571+ /* extract md5sum */
1572+ memcpy(md5sum, ehead->md5sum, sizeof(ehead->md5sum));
1573+ memset(ehead->md5sum, 0, sizeof(ehead->md5sum));
1574+
1575+ g_checksum_reset(vmar->md5csum);
1576+ g_checksum_update(vmar->md5csum, buf, VMA_EXTENT_HEADER_SIZE);
1577+ gsize csize = 16;
1578+ g_checksum_get_digest(vmar->md5csum, ehead->md5sum, &csize);
1579+
1580+ if (memcmp(md5sum, ehead->md5sum, 16) != 0) {
1581+ error_setg(errp, "wrong vma extent header chechsum");
1582+ return -1;
1583+ }
1584+
1585+ if (memcmp(h->uuid, ehead->uuid, sizeof(ehead->uuid)) != 0) {
1586+ error_setg(errp, "wrong vma extent uuid");
1587+ return -1;
1588+ }
1589+
1590+ if (ehead->magic != VMA_EXTENT_MAGIC || ehead->reserved1 != 0) {
1591+ error_setg(errp, "wrong vma extent header magic");
1592+ return -1;
1593+ }
1594+
1595+ int block_count = GUINT16_FROM_BE(ehead->block_count);
1596+ int extent_size = VMA_EXTENT_HEADER_SIZE + block_count*VMA_BLOCK_SIZE;
1597+
1598+ if (buf_pos < extent_size) {
1599+ error_setg(errp, "short vma extent (%d < %d)", buf_pos,
1600+ extent_size);
1601+ return -1;
1602+ }
1603+
1604+ if (restore_extent(vmar, buf, extent_size, vmstate_fd, verbose,
+ verify, errp) < 0) {
95259824
WB
1606+ return -1;
1607+ }
1608+
1609+ if (buf_pos > extent_size) {
1610+ memmove(buf, buf + extent_size, buf_pos - extent_size);
1611+ buf_pos = buf_pos - extent_size;
1612+ } else {
1613+ buf_pos = 0;
1614+ }
1615+ }
1616+
1617+ bdrv_drain_all();
1618+
1619+ int i;
1620+ for (i = 1; i < 256; i++) {
1621+ VmaRestoreState *rstate = &vmar->rstate[i];
+ if (!rstate->target) {
95259824
WB
1623+ continue;
1624+ }
1625+
67af0fa4
WB
1626+ if (blk_flush(rstate->target) < 0) {
1627+ error_setg(errp, "vma blk_flush %s failed",
95259824
WB
1628+ vmar->devinfo[i].devname);
1629+ return -1;
1630+ }
1631+
1632+ if (vmar->devinfo[i].size &&
1633+ (strcmp(vmar->devinfo[i].devname, "vmstate") != 0)) {
1634+ assert(rstate->bitmap);
1635+
1636+ int64_t cluster_num, end;
1637+
1638+ end = (vmar->devinfo[i].size + VMA_CLUSTER_SIZE - 1) /
1639+ VMA_CLUSTER_SIZE;
1640+
1641+ for (cluster_num = 0; cluster_num < end; cluster_num++) {
1642+ if (!vma_reader_get_bitmap(rstate, cluster_num)) {
1643+ error_setg(errp, "detected missing cluster %zd "
1644+ "for stream %s", cluster_num,
1645+ vmar->devinfo[i].devname);
1646+ return -1;
1647+ }
1648+ }
1649+ }
1650+ }
1651+
67af0fa4
WB
1652+ if (verbose) {
1653+ if (vmar->clusters_read) {
1654+ printf("total bytes read %zd, sparse bytes %zd (%.3g%%)\n",
1655+ vmar->clusters_read*VMA_CLUSTER_SIZE,
1656+ vmar->zero_cluster_data,
1657+ (double)(100.0*vmar->zero_cluster_data)/
1658+ (vmar->clusters_read*VMA_CLUSTER_SIZE));
1659+
1660+ int64_t datasize = vmar->clusters_read*VMA_CLUSTER_SIZE-vmar->zero_cluster_data;
1661+ if (datasize) { // this does not make sense for empty files
1662+ printf("space reduction due to 4K zero blocks %.3g%%\n",
1663+ (double)(100.0*vmar->partial_zero_cluster_data) / datasize);
1664+ }
1665+ } else {
1666+ printf("vma archive contains no image data\n");
1667+ }
1668+ }
95259824
WB
1669+ return ret;
1670+}
1671+
67af0fa4
WB
1672+int vma_reader_restore(VmaReader *vmar, int vmstate_fd, bool verbose,
1673+ Error **errp)
1674+{
1675+ return vma_reader_restore_full(vmar, vmstate_fd, verbose, false, errp);
1676+}
1677+
1678+int vma_reader_verify(VmaReader *vmar, bool verbose, Error **errp)
1679+{
1680+ guint8 dev_id;
1681+
1682+ for (dev_id = 1; dev_id < 255; dev_id++) {
1683+ if (vma_reader_get_device_info(vmar, dev_id)) {
1684+ allocate_rstate(vmar, dev_id, NULL, false);
1685+ }
1686+ }
1687+
1688+ return vma_reader_restore_full(vmar, -1, verbose, true, errp);
1689+}
1690+
95259824
WB
1691diff --git a/vma-writer.c b/vma-writer.c
1692new file mode 100644
index 0000000000..9001cbdd2b
95259824
WB
1694--- /dev/null
1695+++ b/vma-writer.c
@@ -0,0 +1,771 @@
95259824
WB
1697+/*
1698+ * VMA: Virtual Machine Archive
1699+ *
1700+ * Copyright (C) 2012 Proxmox Server Solutions
1701+ *
1702+ * Authors:
1703+ * Dietmar Maurer (dietmar@proxmox.com)
1704+ *
1705+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
1706+ * See the COPYING file in the top-level directory.
1707+ *
1708+ */
1709+
1710+#include "qemu/osdep.h"
1711+#include <glib.h>
1712+#include <uuid/uuid.h>
1713+
1714+#include "vma.h"
1715+#include "block/block.h"
1716+#include "monitor/monitor.h"
1717+#include "qemu/main-loop.h"
1718+#include "qemu/coroutine.h"
1719+#include "qemu/cutils.h"
1720+
1721+#define DEBUG_VMA 0
1722+
1723+#define DPRINTF(fmt, ...)\
1724+ do { if (DEBUG_VMA) { printf("vma: " fmt, ## __VA_ARGS__); } } while (0)
1725+
1726+#define WRITE_BUFFERS 5
67af0fa4
WB
1727+#define HEADER_CLUSTERS 8
1728+#define HEADERBUF_SIZE (VMA_CLUSTER_SIZE*HEADER_CLUSTERS)
95259824
WB
1729+
1730+struct VmaWriter {
1731+ int fd;
1732+ FILE *cmd;
1733+ int status;
1734+ char errmsg[8192];
1735+ uuid_t uuid;
1736+ bool header_written;
1737+ bool closed;
1738+
1739+ /* we always write extents */
67af0fa4 1740+ unsigned char *outbuf;
95259824
WB
1741+ int outbuf_pos; /* in bytes */
1742+ int outbuf_count; /* in VMA_BLOCKS */
1743+ uint64_t outbuf_block_info[VMA_BLOCKS_PER_EXTENT];
1744+
67af0fa4 1745+ unsigned char *headerbuf;
95259824
WB
1746+
1747+ GChecksum *md5csum;
95259824
WB
1748+ CoMutex flush_lock;
1749+ Coroutine *co_writer;
1750+
1751+ /* drive informations */
1752+ VmaStreamInfo stream_info[256];
1753+ guint stream_count;
1754+
1755+ guint8 vmstate_stream;
1756+ uint32_t vmstate_clusters;
1757+
1758+ /* header blob table */
1759+ char *header_blob_table;
1760+ uint32_t header_blob_table_size;
1761+ uint32_t header_blob_table_pos;
1762+
1763+ /* store for config blobs */
1764+ uint32_t config_names[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
1765+ uint32_t config_data[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
1766+ uint32_t config_count;
1767+};
1768+
1769+void vma_writer_set_error(VmaWriter *vmaw, const char *fmt, ...)
1770+{
1771+ va_list ap;
1772+
1773+ if (vmaw->status < 0) {
1774+ return;
1775+ }
1776+
1777+ vmaw->status = -1;
1778+
1779+ va_start(ap, fmt);
1780+ g_vsnprintf(vmaw->errmsg, sizeof(vmaw->errmsg), fmt, ap);
1781+ va_end(ap);
1782+
1783+ DPRINTF("vma_writer_set_error: %s\n", vmaw->errmsg);
1784+}
1785+
1786+static uint32_t allocate_header_blob(VmaWriter *vmaw, const char *data,
1787+ size_t len)
1788+{
1789+ if (len > 65535) {
1790+ return 0;
1791+ }
1792+
1793+ if (!vmaw->header_blob_table ||
1794+ (vmaw->header_blob_table_size <
1795+ (vmaw->header_blob_table_pos + len + 2))) {
1796+ int newsize = vmaw->header_blob_table_size + ((len + 2 + 511)/512)*512;
1797+
1798+ vmaw->header_blob_table = g_realloc(vmaw->header_blob_table, newsize);
1799+ memset(vmaw->header_blob_table + vmaw->header_blob_table_size,
1800+ 0, newsize - vmaw->header_blob_table_size);
1801+ vmaw->header_blob_table_size = newsize;
1802+ }
1803+
1804+ uint32_t cpos = vmaw->header_blob_table_pos;
1805+ vmaw->header_blob_table[cpos] = len & 255;
1806+ vmaw->header_blob_table[cpos+1] = (len >> 8) & 255;
1807+ memcpy(vmaw->header_blob_table + cpos + 2, data, len);
1808+ vmaw->header_blob_table_pos += len + 2;
1809+ return cpos;
1810+}
1811+
1812+static uint32_t allocate_header_string(VmaWriter *vmaw, const char *str)
1813+{
1814+ assert(vmaw);
1815+
1816+ size_t len = strlen(str) + 1;
1817+
1818+ return allocate_header_blob(vmaw, str, len);
1819+}
1820+
1821+int vma_writer_add_config(VmaWriter *vmaw, const char *name, gpointer data,
1822+ gsize len)
1823+{
1824+ assert(vmaw);
1825+ assert(!vmaw->header_written);
1826+ assert(vmaw->config_count < VMA_MAX_CONFIGS);
1827+ assert(name);
1828+ assert(data);
95259824
WB
1829+
1830+ gchar *basename = g_path_get_basename(name);
1831+ uint32_t name_ptr = allocate_header_string(vmaw, basename);
1832+ g_free(basename);
1833+
1834+ if (!name_ptr) {
1835+ return -1;
1836+ }
1837+
1838+ uint32_t data_ptr = allocate_header_blob(vmaw, data, len);
1839+ if (!data_ptr) {
1840+ return -1;
1841+ }
1842+
1843+ vmaw->config_names[vmaw->config_count] = name_ptr;
1844+ vmaw->config_data[vmaw->config_count] = data_ptr;
1845+
1846+ vmaw->config_count++;
1847+
1848+ return 0;
1849+}
1850+
1851+int vma_writer_register_stream(VmaWriter *vmaw, const char *devname,
1852+ size_t size)
1853+{
1854+ assert(vmaw);
1855+ assert(devname);
1856+ assert(!vmaw->status);
1857+
1858+ if (vmaw->header_written) {
1859+ vma_writer_set_error(vmaw, "vma_writer_register_stream: header "
1860+ "already written");
1861+ return -1;
1862+ }
1863+
1864+ guint n = vmaw->stream_count + 1;
1865+
1866+ /* we can have dev_ids from 1 to 255 (0 reserved)
1867+ * 255 (-1) reserved for safety
1868+ */
1869+ if (n > 254) {
1870+ vma_writer_set_error(vmaw, "vma_writer_register_stream: "
1871+ "too many drives");
1872+ return -1;
1873+ }
1874+
1875+ if (size <= 0) {
1876+ vma_writer_set_error(vmaw, "vma_writer_register_stream: "
1877+ "got strange size %zd", size);
1878+ return -1;
1879+ }
1880+
1881+ DPRINTF("vma_writer_register_stream %s %zu %d\n", devname, size, n);
1882+
1883+ vmaw->stream_info[n].devname = g_strdup(devname);
1884+ vmaw->stream_info[n].size = size;
1885+
1886+ vmaw->stream_info[n].cluster_count = (size + VMA_CLUSTER_SIZE - 1) /
1887+ VMA_CLUSTER_SIZE;
1888+
1889+ vmaw->stream_count = n;
1890+
1891+ if (strcmp(devname, "vmstate") == 0) {
1892+ vmaw->vmstate_stream = n;
1893+ }
1894+
1895+ return n;
1896+}
1897+
1898+static void vma_co_continue_write(void *opaque)
1899+{
1900+ VmaWriter *vmaw = opaque;
1901+
1902+ DPRINTF("vma_co_continue_write\n");
1903+ qemu_coroutine_enter(vmaw->co_writer);
1904+}
1905+
1906+static ssize_t coroutine_fn
67af0fa4 1907+vma_queue_write(VmaWriter *vmaw, const void *buf, size_t bytes)
95259824 1908+{
67af0fa4 1909+ DPRINTF("vma_queue_write enter %zd\n", bytes);
95259824 1910+
67af0fa4
WB
1911+ assert(vmaw);
1912+ assert(buf);
1913+ assert(bytes <= VMA_MAX_EXTENT_SIZE);
95259824 1914+
67af0fa4
WB
1915+ size_t done = 0;
1916+ ssize_t ret;
95259824
WB
1917+
1918+ assert(vmaw->co_writer == NULL);
1919+
1920+ vmaw->co_writer = qemu_coroutine_self();
1921+
95259824 1922+ while (done < bytes) {
67af0fa4
WB
1923+ aio_set_fd_handler(qemu_get_aio_context(), vmaw->fd, false, NULL, vma_co_continue_write, NULL, vmaw);
1924+ qemu_coroutine_yield();
1925+ aio_set_fd_handler(qemu_get_aio_context(), vmaw->fd, false, NULL, NULL, NULL, NULL);
1926+ if (vmaw->status < 0) {
1927+ DPRINTF("vma_queue_write detected canceled backup\n");
1928+ done = -1;
1929+ break;
1930+ }
95259824
WB
1931+ ret = write(vmaw->fd, buf + done, bytes - done);
1932+ if (ret > 0) {
1933+ done += ret;
67af0fa4 1934+ DPRINTF("vma_queue_write written %zd %zd\n", done, ret);
95259824
WB
1935+ } else if (ret < 0) {
1936+ if (errno == EAGAIN || errno == EWOULDBLOCK) {
67af0fa4
WB
1937+ /* try again */
1938+ } else {
1939+ vma_writer_set_error(vmaw, "vma_queue_write: write error - %s",
95259824
WB
1940+ g_strerror(errno));
1941+ done = -1; /* always return failure for partial writes */
1942+ break;
1943+ }
1944+ } else if (ret == 0) {
1945+ /* should not happen - simply try again */
1946+ }
1947+ }
1948+
95259824
WB
1949+ vmaw->co_writer = NULL;
1950+
67af0fa4 1951+ return (done == bytes) ? bytes : -1;
95259824
WB
1952+}
1953+
1954+VmaWriter *vma_writer_create(const char *filename, uuid_t uuid, Error **errp)
1955+{
1956+ const char *p;
1957+
1958+ assert(sizeof(VmaHeader) == (4096 + 8192));
1959+ assert(G_STRUCT_OFFSET(VmaHeader, config_names) == 2044);
1960+ assert(G_STRUCT_OFFSET(VmaHeader, config_data) == 3068);
1961+ assert(G_STRUCT_OFFSET(VmaHeader, dev_info) == 4096);
1962+ assert(sizeof(VmaExtentHeader) == 512);
1963+
1964+ VmaWriter *vmaw = g_new0(VmaWriter, 1);
1965+ vmaw->fd = -1;
1966+
1967+ vmaw->md5csum = g_checksum_new(G_CHECKSUM_MD5);
1968+ if (!vmaw->md5csum) {
1969+ error_setg(errp, "can't allocate checksum\n");
1970+ goto err;
1971+ }
1972+
1973+ if (strstart(filename, "exec:", &p)) {
1974+ vmaw->cmd = popen(p, "w");
1975+ if (vmaw->cmd == NULL) {
1976+ error_setg(errp, "can't popen command '%s' - %s\n", p,
1977+ g_strerror(errno));
1978+ goto err;
1979+ }
1980+ vmaw->fd = fileno(vmaw->cmd);
1981+
67af0fa4 1982+ /* try to use O_NONBLOCK */
95259824 1983+ fcntl(vmaw->fd, F_SETFL, fcntl(vmaw->fd, F_GETFL)|O_NONBLOCK);
95259824
WB
1984+
1985+ } else {
1986+ struct stat st;
1987+ int oflags;
1988+ const char *tmp_id_str;
1989+
1990+ if ((stat(filename, &st) == 0) && S_ISFIFO(st.st_mode)) {
67af0fa4 1991+ oflags = O_NONBLOCK|O_WRONLY;
95259824
WB
1992+ vmaw->fd = qemu_open(filename, oflags, 0644);
1993+ } else if (strstart(filename, "/dev/fdset/", &tmp_id_str)) {
67af0fa4 1994+ oflags = O_NONBLOCK|O_WRONLY;
95259824
WB
1995+ vmaw->fd = qemu_open(filename, oflags, 0644);
1996+ } else if (strstart(filename, "/dev/fdname/", &tmp_id_str)) {
1997+ vmaw->fd = monitor_get_fd(cur_mon, tmp_id_str, errp);
1998+ if (vmaw->fd < 0) {
1999+ goto err;
2000+ }
67af0fa4 2001+ /* try to use O_NONBLOCK */
95259824 2002+ fcntl(vmaw->fd, F_SETFL, fcntl(vmaw->fd, F_GETFL)|O_NONBLOCK);
95259824
WB
2003+ } else {
2004+ oflags = O_NONBLOCK|O_DIRECT|O_WRONLY|O_CREAT|O_EXCL;
2005+ vmaw->fd = qemu_open(filename, oflags, 0644);
2006+ }
2007+
2008+ if (vmaw->fd < 0) {
2009+ error_setg(errp, "can't open file %s - %s\n", filename,
2010+ g_strerror(errno));
2011+ goto err;
2012+ }
2013+ }
2014+
2015+ /* we use O_DIRECT, so we need to align IO buffers */
67af0fa4
WB
2016+
2017+ vmaw->outbuf = qemu_memalign(512, VMA_MAX_EXTENT_SIZE);
2018+ vmaw->headerbuf = qemu_memalign(512, HEADERBUF_SIZE);
95259824
WB
2019+
2020+ vmaw->outbuf_count = 0;
2021+ vmaw->outbuf_pos = VMA_EXTENT_HEADER_SIZE;
2022+
2023+ vmaw->header_blob_table_pos = 1; /* start at pos 1 */
2024+
95259824 2025+ qemu_co_mutex_init(&vmaw->flush_lock);
95259824
WB
2026+
2027+ uuid_copy(vmaw->uuid, uuid);
2028+
2029+ return vmaw;
2030+
2031+err:
2032+ if (vmaw) {
2033+ if (vmaw->cmd) {
2034+ pclose(vmaw->cmd);
2035+ } else if (vmaw->fd >= 0) {
2036+ close(vmaw->fd);
2037+ }
2038+
2039+ if (vmaw->md5csum) {
2040+ g_checksum_free(vmaw->md5csum);
2041+ }
2042+
2043+ g_free(vmaw);
2044+ }
2045+
2046+ return NULL;
2047+}
2048+
2049+static int coroutine_fn vma_write_header(VmaWriter *vmaw)
2050+{
2051+ assert(vmaw);
67af0fa4 2052+ unsigned char *buf = vmaw->headerbuf;
95259824
WB
2053+ VmaHeader *head = (VmaHeader *)buf;
2054+
2055+ int i;
2056+
2057+ DPRINTF("VMA WRITE HEADER\n");
2058+
2059+ if (vmaw->status < 0) {
2060+ return vmaw->status;
2061+ }
2062+
67af0fa4 2063+ memset(buf, 0, HEADERBUF_SIZE);
95259824
WB
2064+
2065+ head->magic = VMA_MAGIC;
2066+ head->version = GUINT32_TO_BE(1); /* v1 */
2067+ memcpy(head->uuid, vmaw->uuid, 16);
2068+
2069+ time_t ctime = time(NULL);
2070+ head->ctime = GUINT64_TO_BE(ctime);
2071+
95259824
WB
2072+ for (i = 0; i < VMA_MAX_CONFIGS; i++) {
2073+ head->config_names[i] = GUINT32_TO_BE(vmaw->config_names[i]);
2074+ head->config_data[i] = GUINT32_TO_BE(vmaw->config_data[i]);
2075+ }
2076+
2077+ /* 32 bytes per device (12 used currently) = 8192 bytes max */
2078+ for (i = 1; i <= 254; i++) {
2079+ VmaStreamInfo *si = &vmaw->stream_info[i];
2080+ if (si->size) {
2081+ assert(si->devname);
2082+ uint32_t devname_ptr = allocate_header_string(vmaw, si->devname);
2083+ if (!devname_ptr) {
2084+ return -1;
2085+ }
2086+ head->dev_info[i].devname_ptr = GUINT32_TO_BE(devname_ptr);
2087+ head->dev_info[i].size = GUINT64_TO_BE(si->size);
2088+ }
2089+ }
2090+
2091+ uint32_t header_size = sizeof(VmaHeader) + vmaw->header_blob_table_size;
2092+ head->header_size = GUINT32_TO_BE(header_size);
2093+
67af0fa4 2094+ if (header_size > HEADERBUF_SIZE) {
95259824
WB
2095+ return -1; /* just to be sure */
2096+ }
2097+
2098+ uint32_t blob_buffer_offset = sizeof(VmaHeader);
2099+ memcpy(buf + blob_buffer_offset, vmaw->header_blob_table,
2100+ vmaw->header_blob_table_size);
2101+ head->blob_buffer_offset = GUINT32_TO_BE(blob_buffer_offset);
2102+ head->blob_buffer_size = GUINT32_TO_BE(vmaw->header_blob_table_pos);
2103+
2104+ g_checksum_reset(vmaw->md5csum);
2105+ g_checksum_update(vmaw->md5csum, (const guchar *)buf, header_size);
2106+ gsize csize = 16;
2107+ g_checksum_get_digest(vmaw->md5csum, (guint8 *)(head->md5sum), &csize);
2108+
2109+ return vma_queue_write(vmaw, buf, header_size);
2110+}
2111+
2112+static int coroutine_fn vma_writer_flush(VmaWriter *vmaw)
2113+{
2114+ assert(vmaw);
2115+
2116+ int ret;
2117+ int i;
2118+
2119+ if (vmaw->status < 0) {
2120+ return vmaw->status;
2121+ }
2122+
2123+ if (!vmaw->header_written) {
2124+ vmaw->header_written = true;
2125+ ret = vma_write_header(vmaw);
2126+ if (ret < 0) {
2127+ vma_writer_set_error(vmaw, "vma_writer_flush: write header failed");
2128+ return ret;
2129+ }
2130+ }
2131+
2132+ DPRINTF("VMA WRITE FLUSH %d %d\n", vmaw->outbuf_count, vmaw->outbuf_pos);
2133+
2134+
2135+ VmaExtentHeader *ehead = (VmaExtentHeader *)vmaw->outbuf;
2136+
2137+ ehead->magic = VMA_EXTENT_MAGIC;
2138+ ehead->reserved1 = 0;
2139+
2140+ for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
2141+ ehead->blockinfo[i] = GUINT64_TO_BE(vmaw->outbuf_block_info[i]);
2142+ }
2143+
2144+ guint16 block_count = (vmaw->outbuf_pos - VMA_EXTENT_HEADER_SIZE) /
2145+ VMA_BLOCK_SIZE;
2146+
2147+ ehead->block_count = GUINT16_TO_BE(block_count);
2148+
2149+ memcpy(ehead->uuid, vmaw->uuid, sizeof(ehead->uuid));
2150+ memset(ehead->md5sum, 0, sizeof(ehead->md5sum));
2151+
2152+ g_checksum_reset(vmaw->md5csum);
2153+ g_checksum_update(vmaw->md5csum, vmaw->outbuf, VMA_EXTENT_HEADER_SIZE);
2154+ gsize csize = 16;
2155+ g_checksum_get_digest(vmaw->md5csum, ehead->md5sum, &csize);
2156+
2157+ int bytes = vmaw->outbuf_pos;
2158+ ret = vma_queue_write(vmaw, vmaw->outbuf, bytes);
2159+ if (ret != bytes) {
2160+ vma_writer_set_error(vmaw, "vma_writer_flush: failed write");
2161+ }
2162+
2163+ vmaw->outbuf_count = 0;
2164+ vmaw->outbuf_pos = VMA_EXTENT_HEADER_SIZE;
2165+
2166+ for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
2167+ vmaw->outbuf_block_info[i] = 0;
2168+ }
2169+
2170+ return vmaw->status;
2171+}
2172+
2173+static int vma_count_open_streams(VmaWriter *vmaw)
2174+{
2175+ g_assert(vmaw != NULL);
2176+
2177+ int i;
2178+ int open_drives = 0;
2179+ for (i = 0; i <= 255; i++) {
2180+ if (vmaw->stream_info[i].size && !vmaw->stream_info[i].finished) {
2181+ open_drives++;
2182+ }
2183+ }
2184+
2185+ return open_drives;
2186+}
2187+
67af0fa4
WB
2188+
2189+/**
2190+ * You need to call this if the vma archive does not contain
2191+ * any data stream.
2192+ */
2193+int coroutine_fn
2194+vma_writer_flush_output(VmaWriter *vmaw)
2195+{
2196+ qemu_co_mutex_lock(&vmaw->flush_lock);
2197+ int ret = vma_writer_flush(vmaw);
2198+ qemu_co_mutex_unlock(&vmaw->flush_lock);
2199+ if (ret < 0) {
2200+ vma_writer_set_error(vmaw, "vma_writer_flush_header failed");
2201+ }
2202+ return ret;
2203+}
2204+
95259824
WB
2205+/**
2206+ * All jobs should call this when there is no more data.
2207+ * Returns: number of remaining streams (0 ==> finished)
2208+ */
2209+int coroutine_fn
2210+vma_writer_close_stream(VmaWriter *vmaw, uint8_t dev_id)
2211+{
2212+ g_assert(vmaw != NULL);
2213+
2214+ DPRINTF("vma_writer_set_status %d\n", dev_id);
2215+ if (!vmaw->stream_info[dev_id].size) {
2216+ vma_writer_set_error(vmaw, "vma_writer_close_stream: "
2217+ "no such stream %d", dev_id);
2218+ return -1;
2219+ }
2220+ if (vmaw->stream_info[dev_id].finished) {
2221+ vma_writer_set_error(vmaw, "vma_writer_close_stream: "
2222+ "stream already closed %d", dev_id);
2223+ return -1;
2224+ }
2225+
2226+ vmaw->stream_info[dev_id].finished = true;
2227+
2228+ int open_drives = vma_count_open_streams(vmaw);
2229+
2230+ if (open_drives <= 0) {
2231+ DPRINTF("vma_writer_set_status all drives completed\n");
67af0fa4 2232+ vma_writer_flush_output(vmaw);
95259824
WB
2233+ }
2234+
2235+ return open_drives;
2236+}
2237+
2238+int vma_writer_get_status(VmaWriter *vmaw, VmaStatus *status)
2239+{
2240+ int i;
2241+
2242+ g_assert(vmaw != NULL);
2243+
2244+ if (status) {
2245+ status->status = vmaw->status;
2246+ g_strlcpy(status->errmsg, vmaw->errmsg, sizeof(status->errmsg));
2247+ for (i = 0; i <= 255; i++) {
2248+ status->stream_info[i] = vmaw->stream_info[i];
2249+ }
2250+
2251+ uuid_unparse_lower(vmaw->uuid, status->uuid_str);
2252+ }
2253+
2254+ status->closed = vmaw->closed;
2255+
2256+ return vmaw->status;
2257+}
2258+
2259+static int vma_writer_get_buffer(VmaWriter *vmaw)
2260+{
2261+ int ret = 0;
2262+
2263+ qemu_co_mutex_lock(&vmaw->flush_lock);
2264+
2265+ /* wait until buffer is available */
2266+ while (vmaw->outbuf_count >= (VMA_BLOCKS_PER_EXTENT - 1)) {
2267+ ret = vma_writer_flush(vmaw);
2268+ if (ret < 0) {
2269+ vma_writer_set_error(vmaw, "vma_writer_get_buffer: flush failed");
2270+ break;
2271+ }
2272+ }
2273+
2274+ qemu_co_mutex_unlock(&vmaw->flush_lock);
2275+
2276+ return ret;
2277+}
2278+
2279+
2280+int64_t coroutine_fn
2281+vma_writer_write(VmaWriter *vmaw, uint8_t dev_id, int64_t cluster_num,
2282+ unsigned char *buf, size_t *zero_bytes)
2283+{
2284+ g_assert(vmaw != NULL);
2285+ g_assert(zero_bytes != NULL);
2286+
2287+ *zero_bytes = 0;
2288+
2289+ if (vmaw->status < 0) {
2290+ return vmaw->status;
2291+ }
2292+
2293+ if (!dev_id || !vmaw->stream_info[dev_id].size) {
2294+ vma_writer_set_error(vmaw, "vma_writer_write: "
2295+ "no such stream %d", dev_id);
2296+ return -1;
2297+ }
2298+
2299+ if (vmaw->stream_info[dev_id].finished) {
2300+ vma_writer_set_error(vmaw, "vma_writer_write: "
2301+ "stream already closed %d", dev_id);
2302+ return -1;
2303+ }
2304+
2305+
2306+ if (cluster_num >= (((uint64_t)1)<<32)) {
2307+ vma_writer_set_error(vmaw, "vma_writer_write: "
2308+ "cluster number out of range");
2309+ return -1;
2310+ }
2311+
2312+ if (dev_id == vmaw->vmstate_stream) {
2313+ if (cluster_num != vmaw->vmstate_clusters) {
2314+ vma_writer_set_error(vmaw, "vma_writer_write: "
2315+ "non sequential vmstate write");
2316+ }
2317+ vmaw->vmstate_clusters++;
2318+ } else if (cluster_num >= vmaw->stream_info[dev_id].cluster_count) {
2319+ vma_writer_set_error(vmaw, "vma_writer_write: cluster number too big");
2320+ return -1;
2321+ }
2322+
2323+ /* wait until buffer is available */
2324+ if (vma_writer_get_buffer(vmaw) < 0) {
2325+ vma_writer_set_error(vmaw, "vma_writer_write: "
2326+ "vma_writer_get_buffer failed");
2327+ return -1;
2328+ }
2329+
2330+ DPRINTF("VMA WRITE %d %zd\n", dev_id, cluster_num);
2331+
2332+ uint16_t mask = 0;
2333+
2334+ if (buf) {
2335+ int i;
2336+ int bit = 1;
2337+ for (i = 0; i < 16; i++) {
2338+ unsigned char *vmablock = buf + (i*VMA_BLOCK_SIZE);
2339+ if (!buffer_is_zero(vmablock, VMA_BLOCK_SIZE)) {
2340+ mask |= bit;
2341+ memcpy(vmaw->outbuf + vmaw->outbuf_pos, vmablock,
2342+ VMA_BLOCK_SIZE);
2343+ vmaw->outbuf_pos += VMA_BLOCK_SIZE;
2344+ } else {
2345+ DPRINTF("VMA WRITE %zd ZERO BLOCK %d\n", cluster_num, i);
2346+ vmaw->stream_info[dev_id].zero_bytes += VMA_BLOCK_SIZE;
2347+ *zero_bytes += VMA_BLOCK_SIZE;
2348+ }
2349+
2350+ bit = bit << 1;
2351+ }
2352+ } else {
2353+ DPRINTF("VMA WRITE %zd ZERO CLUSTER\n", cluster_num);
2354+ vmaw->stream_info[dev_id].zero_bytes += VMA_CLUSTER_SIZE;
2355+ *zero_bytes += VMA_CLUSTER_SIZE;
2356+ }
2357+
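+ /* Pack the per-cluster descriptor: bits 48-63 hold the 16-bit mask of
+ * non-zero blocks, bits 32-39 the dev_id, bits 0-31 the cluster number. */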
2358+ uint64_t block_info = ((uint64_t)mask) << (32+16);
2359+ block_info |= ((uint64_t)dev_id) << 32;
2360+ block_info |= (cluster_num & 0xffffffff);
2361+ vmaw->outbuf_block_info[vmaw->outbuf_count] = block_info;
2362+
2363+ DPRINTF("VMA WRITE MASK %zd %zx\n", cluster_num, block_info);
2364+
2365+ vmaw->outbuf_count++;
2366+
2367+ /** NOTE: We always write whole clusters, but we correctly set
2368+ * transferred bytes. So transferred == size when everything
2369+ * went OK.
2370+ */
2371+ size_t transferred = VMA_CLUSTER_SIZE;
2372+
2373+ if (dev_id != vmaw->vmstate_stream) {
2374+ uint64_t last = (cluster_num + 1) * VMA_CLUSTER_SIZE;
2375+ if (last > vmaw->stream_info[dev_id].size) {
2376+ uint64_t diff = last - vmaw->stream_info[dev_id].size;
2377+ if (diff >= VMA_CLUSTER_SIZE) {
2378+ vma_writer_set_error(vmaw, "vma_writer_write: "
2379+ "read after last cluster");
2380+ return -1;
2381+ }
2382+ transferred -= diff;
2383+ }
2384+ }
2385+
2386+ vmaw->stream_info[dev_id].transferred += transferred;
2387+
2388+ return transferred;
2389+}
2390+
67af0fa4
WB
2391+void vma_writer_error_propagate(VmaWriter *vmaw, Error **errp)
2392+{
2393+ if (vmaw->status < 0 && *errp == NULL) {
2394+ error_setg(errp, "%s", vmaw->errmsg);
2395+ }
2396+}
2397+
95259824
WB
2398+int vma_writer_close(VmaWriter *vmaw, Error **errp)
2399+{
2400+ g_assert(vmaw != NULL);
2401+
2402+ int i;
2403+
67af0fa4
WB
2404+ while (vmaw->co_writer) {
2405+ aio_poll(qemu_get_aio_context(), true);
95259824
WB
2406+ }
2407+
67af0fa4
WB
2408+ assert(vmaw->co_writer == NULL);
2409+
95259824
WB
2410+ if (vmaw->cmd) {
2411+ if (pclose(vmaw->cmd) < 0) {
2412+ vma_writer_set_error(vmaw, "vma_writer_close: "
2413+ "pclose failed - %s", g_strerror(errno));
2414+ }
2415+ } else {
2416+ if (close(vmaw->fd) < 0) {
2417+ vma_writer_set_error(vmaw, "vma_writer_close: "
2418+ "close failed - %s", g_strerror(errno));
2419+ }
2420+ }
2421+
2422+ for (i = 0; i <= 255; i++) {
2423+ VmaStreamInfo *si = &vmaw->stream_info[i];
2424+ if (si->size) {
2425+ if (!si->finished) {
2426+ vma_writer_set_error(vmaw, "vma_writer_close: "
2427+ "detected open stream '%s'", si->devname);
2428+ } else if ((si->transferred != si->size) &&
2429+ (i != vmaw->vmstate_stream)) {
2430+ vma_writer_set_error(vmaw, "vma_writer_close: "
2431+ "incomplete stream '%s' (%zd != %zd)",
2432+ si->devname, si->transferred, si->size);
2433+ }
2434+ }
2435+ }
2436+
2437+ for (i = 0; i <= 255; i++) {
2438+ vmaw->stream_info[i].finished = 1; /* mark as closed */
2439+ }
2440+
2441+ vmaw->closed = 1;
2442+
2443+ if (vmaw->status < 0 && *errp == NULL) {
2444+ error_setg(errp, "%s", vmaw->errmsg);
2445+ }
2446+
2447+ return vmaw->status;
2448+}
2449+
2450+void vma_writer_destroy(VmaWriter *vmaw)
2451+{
2452+ assert(vmaw);
2453+
2454+ int i;
2455+
2456+ for (i = 0; i <= 255; i++) {
2457+ if (vmaw->stream_info[i].devname) {
2458+ g_free(vmaw->stream_info[i].devname);
2459+ }
2460+ }
2461+
2462+ if (vmaw->md5csum) {
2463+ g_checksum_free(vmaw->md5csum);
2464+ }
2465+
95259824
WB
2466+ g_free(vmaw);
2467+}
2468diff --git a/vma.c b/vma.c
2469new file mode 100644
67af0fa4 2470index 0000000000..04915427c8
95259824
WB
2471--- /dev/null
2472+++ b/vma.c
67af0fa4 2473@@ -0,0 +1,757 @@
95259824
WB
2474+/*
2475+ * VMA: Virtual Machine Archive
2476+ *
2477+ * Copyright (C) 2012-2013 Proxmox Server Solutions
2478+ *
2479+ * Authors:
2480+ * Dietmar Maurer (dietmar@proxmox.com)
2481+ *
2482+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
2483+ * See the COPYING file in the top-level directory.
2484+ *
2485+ */
2486+
2487+#include "qemu/osdep.h"
2488+#include <glib.h>
2489+
2490+#include "vma.h"
2491+#include "qemu-common.h"
2492+#include "qemu/error-report.h"
2493+#include "qemu/main-loop.h"
a544966d 2494+#include "qapi/qmp/qstring.h"
95259824 2495+#include "sysemu/char.h" /* qstring_from_str */
67af0fa4 2496+#include "sysemu/block-backend.h"
95259824
WB
2497+
2498+static void help(void)
2499+{
2500+ const char *help_msg =
2501+ "usage: vma command [command options]\n"
2502+ "\n"
2503+ "vma list <filename>\n"
67af0fa4
WB
2504+ "vma config <filename> [-c config]\n"
2505+ "vma create <filename> [-c config] pathname ...\n"
95259824 2506+ "vma extract <filename> [-r <fifo>] <targetdir>\n"
67af0fa4 2507+ "vma verify <filename> [-v]\n"
95259824
WB
2508+ ;
2509+
2510+ printf("%s", help_msg);
2511+ exit(1);
2512+}
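+
+/* Typical invocations, following the help text above (archive, config and
+ * device names are illustrative):
+ *   vma create backup.vma -c qemu-server.conf drive-scsi0=/path/to/disk.raw
+ *   vma list backup.vma
+ *   vma verify backup.vma -v
+ *   vma extract backup.vma -r /run/restore.fifo /tmp/restore
+ */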
2513+
2514+static const char *extract_devname(const char *path, char **devname, int index)
2515+{
2516+ assert(path);
2517+
2518+ const char *sep = strchr(path, '=');
2519+
2520+ if (sep) {
2521+ *devname = g_strndup(path, sep - path);
2522+ path = sep + 1;
2523+ } else {
2524+ if (index >= 0) {
2525+ *devname = g_strdup_printf("disk%d", index);
2526+ } else {
2527+ *devname = NULL;
2528+ }
2529+ }
2530+
2531+ return path;
2532+}
2533+
2534+static void print_content(VmaReader *vmar)
2535+{
2536+ assert(vmar);
2537+
2538+ VmaHeader *head = vma_reader_get_header(vmar);
2539+
2540+ GList *l = vma_reader_get_config_data(vmar);
2541+ while (l && l->data) {
2542+ VmaConfigData *cdata = (VmaConfigData *)l->data;
2543+ l = g_list_next(l);
2544+ printf("CFG: size: %d name: %s\n", cdata->len, cdata->name);
2545+ }
2546+
2547+ int i;
2548+ VmaDeviceInfo *di;
2549+ for (i = 1; i < 255; i++) {
2550+ di = vma_reader_get_device_info(vmar, i);
2551+ if (di) {
2552+ if (strcmp(di->devname, "vmstate") == 0) {
2553+ printf("VMSTATE: dev_id=%d memory: %zd\n", i, di->size);
2554+ } else {
2555+ printf("DEV: dev_id=%d size: %zd devname: %s\n",
2556+ i, di->size, di->devname);
2557+ }
2558+ }
2559+ }
2560+ /* ctime is the last entry we print */
2561+ printf("CTIME: %s", ctime(&head->ctime));
2562+ fflush(stdout);
2563+}
2564+
2565+static int list_content(int argc, char **argv)
2566+{
2567+ int c, ret = 0;
2568+ const char *filename;
2569+
2570+ for (;;) {
2571+ c = getopt(argc, argv, "h");
2572+ if (c == -1) {
2573+ break;
2574+ }
2575+ switch (c) {
2576+ case '?':
2577+ case 'h':
2578+ help();
2579+ break;
2580+ default:
2581+ g_assert_not_reached();
2582+ }
2583+ }
2584+
2585+ /* Get the filename */
2586+ if ((optind + 1) != argc) {
2587+ help();
2588+ }
2589+ filename = argv[optind++];
2590+
2591+ Error *errp = NULL;
2592+ VmaReader *vmar = vma_reader_create(filename, &errp);
2593+
2594+ if (!vmar) {
2595+ g_error("%s", error_get_pretty(errp));
2596+ }
2597+
2598+ print_content(vmar);
2599+
2600+ vma_reader_destroy(vmar);
2601+
2602+ return ret;
2603+}
2604+
2605+typedef struct RestoreMap {
2606+ char *devname;
2607+ char *path;
67af0fa4 2608+ char *format;
95259824
WB
2609+ bool write_zero;
2610+} RestoreMap;
2611+
2612+static int extract_content(int argc, char **argv)
2613+{
2614+ int c, ret = 0;
2615+ int verbose = 0;
2616+ const char *filename;
2617+ const char *dirname;
2618+ const char *readmap = NULL;
2619+
2620+ for (;;) {
2621+ c = getopt(argc, argv, "hvr:");
2622+ if (c == -1) {
2623+ break;
2624+ }
2625+ switch (c) {
2626+ case '?':
2627+ case 'h':
2628+ help();
2629+ break;
2630+ case 'r':
2631+ readmap = optarg;
2632+ break;
2633+ case 'v':
2634+ verbose = 1;
2635+ break;
2636+ default:
2637+ help();
2638+ }
2639+ }
2640+
2641+ /* Get the filename */
2642+ if ((optind + 2) != argc) {
2643+ help();
2644+ }
2645+ filename = argv[optind++];
2646+ dirname = argv[optind++];
2647+
2648+ Error *errp = NULL;
2649+ VmaReader *vmar = vma_reader_create(filename, &errp);
2650+
2651+ if (!vmar) {
2652+ g_error("%s", error_get_pretty(errp));
2653+ }
2654+
2655+ if (mkdir(dirname, 0777) < 0) {
2656+ g_error("unable to create target directory %s - %s",
2657+ dirname, g_strerror(errno));
2658+ }
2659+
2660+ GList *l = vma_reader_get_config_data(vmar);
2661+ while (l && l->data) {
2662+ VmaConfigData *cdata = (VmaConfigData *)l->data;
2663+ l = g_list_next(l);
2664+ char *cfgfn = g_strdup_printf("%s/%s", dirname, cdata->name);
2665+ GError *err = NULL;
2666+ if (!g_file_set_contents(cfgfn, (gchar *)cdata->data, cdata->len,
2667+ &err)) {
2668+ g_error("unable to write file: %s", err->message);
2669+ }
2670+ }
2671+
2672+ GHashTable *devmap = g_hash_table_new(g_str_hash, g_str_equal);
2673+
2674+ if (readmap) {
2675+ print_content(vmar);
2676+
2677+ FILE *map = fopen(readmap, "r");
2678+ if (!map) {
2679+ g_error("unable to open fifo %s - %s", readmap, g_strerror(errno));
2680+ }
2681+
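+ /* Each map line has the form "[format=<fmt>:]<0|1>:<devname>=<path>";
+ * the leading 0/1 selects whether zero clusters are written to the
+ * target. An empty line or a line reading "done" ends the map.
+ * (Illustrative example: "format=raw:1:drive-scsi0=/dev/sdb") */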
2682+ while (1) {
2683+ char inbuf[8192];
2684+ char *line = fgets(inbuf, sizeof(inbuf), map);
2685+ if (!line || line[0] == '\0' || !strcmp(line, "done\n")) {
2686+ break;
2687+ }
2688+ int len = strlen(line);
2689+ if (line[len - 1] == '\n') {
2690+ line[len - 1] = '\0';
2691+ if (len == 1) {
2692+ break;
2693+ }
2694+ }
2695+
67af0fa4
WB
2696+ char *format = NULL;
2697+ if (strncmp(line, "format=", sizeof("format=")-1) == 0) {
2698+ format = line + sizeof("format=")-1;
2699+ char *colon = strchr(format, ':');
2700+ if (!colon) {
2701+ g_error("read map failed - found only a format ('%s')", inbuf);
2702+ }
2703+ format = g_strndup(format, colon - format);
2704+ line = colon+1;
2705+ }
2706+
95259824
WB
2707+ const char *path;
2708+ bool write_zero;
2709+ if (line[0] == '0' && line[1] == ':') {
67af0fa4 2710+ path = line + 2;
95259824
WB
2711+ write_zero = false;
2712+ } else if (line[0] == '1' && line[1] == ':') {
67af0fa4 2713+ path = line + 2;
95259824
WB
2714+ write_zero = true;
2715+ } else {
2716+ g_error("read map failed - parse error ('%s')", inbuf);
2717+ }
2718+
2719+ char *devname = NULL;
2720+ path = extract_devname(path, &devname, -1);
2721+ if (!devname) {
2722+ g_error("read map failed - no dev name specified ('%s')",
2723+ inbuf);
2724+ }
2725+
2726+ RestoreMap *map = g_new0(RestoreMap, 1);
2727+ map->devname = g_strdup(devname);
2728+ map->path = g_strdup(path);
67af0fa4 2729+ map->format = format;
95259824
WB
2730+ map->write_zero = write_zero;
2731+
2732+ g_hash_table_insert(devmap, map->devname, map);
2733+
2734+ };
2735+ }
2736+
2737+ int i;
2738+ int vmstate_fd = -1;
2739+ guint8 vmstate_stream = 0;
2740+
67af0fa4
WB
2741+ BlockBackend *blk = NULL;
2742+
95259824
WB
2743+ for (i = 1; i < 255; i++) {
2744+ VmaDeviceInfo *di = vma_reader_get_device_info(vmar, i);
2745+ if (di && (strcmp(di->devname, "vmstate") == 0)) {
2746+ vmstate_stream = i;
2747+ char *statefn = g_strdup_printf("%s/vmstate.bin", dirname);
2748+ vmstate_fd = open(statefn, O_WRONLY|O_CREAT|O_EXCL, 0644);
2749+ if (vmstate_fd < 0) {
2750+ g_error("create vmstate file '%s' failed - %s", statefn,
2751+ g_strerror(errno));
2752+ }
2753+ g_free(statefn);
2754+ } else if (di) {
2755+ char *devfn = NULL;
67af0fa4
WB
2756+ const char *format = NULL;
2757+ int flags = BDRV_O_RDWR | BDRV_O_NO_FLUSH;
95259824
WB
2758+ bool write_zero = true;
2759+
2760+ if (readmap) {
2761+ RestoreMap *map;
2762+ map = (RestoreMap *)g_hash_table_lookup(devmap, di->devname);
2763+ if (map == NULL) {
2764+ g_error("no device name mapping for %s", di->devname);
2765+ }
2766+ devfn = map->path;
67af0fa4 2767+ format = map->format;
95259824
WB
2768+ write_zero = map->write_zero;
2769+ } else {
2770+ devfn = g_strdup_printf("%s/tmp-disk-%s.raw",
2771+ dirname, di->devname);
2772+ printf("DEVINFO %s %zd\n", devfn, di->size);
2773+
2774+ bdrv_img_create(devfn, "raw", NULL, NULL, NULL, di->size,
2775+ flags, &errp, 0);
2776+ if (errp) {
2777+ g_error("can't create file %s: %s", devfn,
2778+ error_get_pretty(errp));
2779+ }
2780+
2781+ /* Note: we created an empty file above, so there is no
2782+ * need to write zeroes (so we generate a sparse file)
2783+ */
2784+ write_zero = false;
2785+ }
2786+
67af0fa4
WB
2787+ size_t devlen = strlen(devfn);
2788+ QDict *options = NULL;
2789+ if (format) {
2790+ /* explicit format from commandline */
2791+ options = qdict_new();
2792+ qdict_put(options, "driver", qstring_from_str(format));
2793+ } else if ((devlen > 4 && strcmp(devfn+devlen-4, ".raw") == 0) ||
2794+ strncmp(devfn, "/dev/", 5) == 0)
2795+ {
2796+ /* This part is now deprecated for PVE as well (just as qemu
2797+ * deprecated not specifying an explicit raw format).
2798+ */
2799+ /* explicit raw format */
2800+ options = qdict_new();
2801+ qdict_put(options, "driver", qstring_from_str("raw"));
2802+ }
2803+
2804+
2805+ if (errp || !(blk = blk_new_open(devfn, NULL, options, flags, &errp))) {
95259824
WB
2806+ g_error("can't open file %s - %s", devfn,
2807+ error_get_pretty(errp));
2808+ }
67af0fa4
WB
2809+
2810+ if (vma_reader_register_bs(vmar, i, blk, write_zero, &errp) < 0) {
95259824
WB
2811+ g_error("%s", error_get_pretty(errp));
2812+ }
2813+
2814+ if (!readmap) {
2815+ g_free(devfn);
2816+ }
2817+ }
2818+ }
2819+
2820+ if (vma_reader_restore(vmar, vmstate_fd, verbose, &errp) < 0) {
2821+ g_error("restore failed - %s", error_get_pretty(errp));
2822+ }
2823+
2824+ if (!readmap) {
2825+ for (i = 1; i < 255; i++) {
2826+ VmaDeviceInfo *di = vma_reader_get_device_info(vmar, i);
2827+ if (di && (i != vmstate_stream)) {
2828+ char *tmpfn = g_strdup_printf("%s/tmp-disk-%s.raw",
2829+ dirname, di->devname);
2830+ char *fn = g_strdup_printf("%s/disk-%s.raw",
2831+ dirname, di->devname);
2832+ if (rename(tmpfn, fn) != 0) {
2833+ g_error("rename %s to %s failed - %s",
2834+ tmpfn, fn, g_strerror(errno));
2835+ }
2836+ }
2837+ }
2838+ }
2839+
2840+ vma_reader_destroy(vmar);
2841+
67af0fa4
WB
2842+ blk_unref(blk);
2843+
2844+ bdrv_close_all();
2845+
2846+ return ret;
2847+}
2848+
2849+static int verify_content(int argc, char **argv)
2850+{
2851+ int c, ret = 0;
2852+ int verbose = 0;
2853+ const char *filename;
2854+
2855+ for (;;) {
2856+ c = getopt(argc, argv, "hv");
2857+ if (c == -1) {
2858+ break;
2859+ }
2860+ switch (c) {
2861+ case '?':
2862+ case 'h':
2863+ help();
2864+ break;
2865+ case 'v':
2866+ verbose = 1;
2867+ break;
2868+ default:
2869+ help();
2870+ }
2871+ }
2872+
2873+ /* Get the filename */
2874+ if ((optind + 1) != argc) {
2875+ help();
2876+ }
2877+ filename = argv[optind++];
2878+
2879+ Error *errp = NULL;
2880+ VmaReader *vmar = vma_reader_create(filename, &errp);
2881+
2882+ if (!vmar) {
2883+ g_error("%s", error_get_pretty(errp));
2884+ }
2885+
2886+ if (verbose) {
2887+ print_content(vmar);
2888+ }
2889+
2890+ if (vma_reader_verify(vmar, verbose, &errp) < 0) {
2891+ g_error("verify failed - %s", error_get_pretty(errp));
2892+ }
2893+
2894+ vma_reader_destroy(vmar);
2895+
95259824
WB
2896+ bdrv_close_all();
2897+
2898+ return ret;
2899+}
2900+
2901+typedef struct BackupJob {
67af0fa4 2902+ BlockBackend *target;
95259824
WB
2903+ int64_t len;
2904+ VmaWriter *vmaw;
2905+ uint8_t dev_id;
2906+} BackupJob;
2907+
2908+#define BACKUP_SECTORS_PER_CLUSTER (VMA_CLUSTER_SIZE / BDRV_SECTOR_SIZE)
2909+
67af0fa4
WB
2910+static void coroutine_fn backup_run_empty(void *opaque)
2911+{
2912+ VmaWriter *vmaw = (VmaWriter *)opaque;
2913+
2914+ vma_writer_flush_output(vmaw);
2915+
2916+ Error *err = NULL;
2917+ if (vma_writer_close(vmaw, &err) != 0) {
2918+ g_warning("vma_writer_close failed %s", error_get_pretty(err));
2919+ }
2920+}
2921+
95259824
WB
2922+static void coroutine_fn backup_run(void *opaque)
2923+{
2924+ BackupJob *job = (BackupJob *)opaque;
2925+ struct iovec iov;
2926+ QEMUIOVector qiov;
2927+
2928+ int64_t start, end;
2929+ int ret = 0;
2930+
67af0fa4 2931+ unsigned char *buf = blk_blockalign(job->target, VMA_CLUSTER_SIZE);
95259824
WB
2932+
2933+ start = 0;
2934+ end = DIV_ROUND_UP(job->len / BDRV_SECTOR_SIZE,
2935+ BACKUP_SECTORS_PER_CLUSTER);
2936+
2937+ for (; start < end; start++) {
2938+ iov.iov_base = buf;
2939+ iov.iov_len = VMA_CLUSTER_SIZE;
2940+ qemu_iovec_init_external(&qiov, &iov, 1);
2941+
67af0fa4
WB
2942+ ret = blk_co_preadv(job->target, start * VMA_CLUSTER_SIZE,
2943+ VMA_CLUSTER_SIZE, &qiov, 0);
95259824
WB
2944+ if (ret < 0) {
2945+ vma_writer_set_error(job->vmaw, "read error", -1);
2946+ goto out;
2947+ }
2948+
2949+ size_t zb = 0;
2950+ if (vma_writer_write(job->vmaw, job->dev_id, start, buf, &zb) < 0) {
2951+ vma_writer_set_error(job->vmaw, "backup_dump_cb vma_writer_write failed", -1);
2952+ goto out;
2953+ }
2954+ }
2955+
2956+
2957+out:
2958+ if (vma_writer_close_stream(job->vmaw, job->dev_id) <= 0) {
2959+ Error *err = NULL;
2960+ if (vma_writer_close(job->vmaw, &err) != 0) {
2961+ g_warning("vma_writer_close failed %s", error_get_pretty(err));
2962+ }
2963+ }
2964+}
2965+
2966+static int create_archive(int argc, char **argv)
2967+{
2968+ int i, c;
2969+ int verbose = 0;
2970+ const char *archivename;
2971+ GList *config_files = NULL;
2972+
2973+ for (;;) {
2974+ c = getopt(argc, argv, "hvc:");
2975+ if (c == -1) {
2976+ break;
2977+ }
2978+ switch (c) {
2979+ case '?':
2980+ case 'h':
2981+ help();
2982+ break;
2983+ case 'c':
2984+ config_files = g_list_append(config_files, optarg);
2985+ break;
2986+ case 'v':
2987+ verbose = 1;
2988+ break;
2989+ default:
2990+ g_assert_not_reached();
2991+ }
2992+ }
2993+
2994+
67af0fa4
WB
2995+ /* make sure we have an archive name */
2996+ if ((optind + 1) > argc) {
95259824
WB
2997+ help();
2998+ }
2999+
3000+ archivename = argv[optind++];
3001+
3002+ uuid_t uuid;
3003+ uuid_generate(uuid);
3004+
3005+ Error *local_err = NULL;
3006+ VmaWriter *vmaw = vma_writer_create(archivename, uuid, &local_err);
3007+
3008+ if (vmaw == NULL) {
3009+ g_error("%s", error_get_pretty(local_err));
3010+ }
3011+
3012+ GList *l = config_files;
3013+ while (l && l->data) {
3014+ char *name = l->data;
3015+ char *cdata = NULL;
3016+ gsize clen = 0;
3017+ GError *err = NULL;
3018+ if (!g_file_get_contents(name, &cdata, &clen, &err)) {
3019+ unlink(archivename);
3020+ g_error("Unable to read file: %s", err->message);
3021+ }
3022+
3023+ if (vma_writer_add_config(vmaw, name, cdata, clen) != 0) {
3024+ unlink(archivename);
3025+ g_error("Unable to append config data %s (len = %zd)",
3026+ name, clen);
3027+ }
3028+ l = g_list_next(l);
3029+ }
3030+
67af0fa4 3031+ int devcount = 0;
95259824
WB
3032+ while (optind < argc) {
3033+ const char *path = argv[optind++];
3034+ char *devname = NULL;
67af0fa4 3035+ path = extract_devname(path, &devname, devcount++);
95259824
WB
3036+
3037+ Error *errp = NULL;
67af0fa4 3038+ BlockBackend *target;
95259824 3039+
67af0fa4
WB
3040+ target = blk_new_open(path, NULL, NULL, 0, &errp);
3041+ if (!target) {
95259824
WB
3042+ unlink(archivename);
3043+ g_error("bdrv_open '%s' failed - %s", path, error_get_pretty(errp));
3044+ }
67af0fa4 3045+ int64_t size = blk_getlength(target);
95259824
WB
3046+ int dev_id = vma_writer_register_stream(vmaw, devname, size);
3047+ if (dev_id <= 0) {
3048+ unlink(archivename);
3049+ g_error("vma_writer_register_stream '%s' failed", devname);
3050+ }
3051+
3052+ BackupJob *job = g_new0(BackupJob, 1);
3053+ job->len = size;
67af0fa4 3054+ job->target = target;
95259824
WB
3055+ job->vmaw = vmaw;
3056+ job->dev_id = dev_id;
3057+
3058+ Coroutine *co = qemu_coroutine_create(backup_run, job);
3059+ qemu_coroutine_enter(co);
3060+ }
3061+
3062+ VmaStatus vmastat;
3063+ int percent = 0;
3064+ int last_percent = -1;
3065+
67af0fa4
WB
3066+ if (devcount) {
3067+ while (1) {
3068+ main_loop_wait(false);
3069+ vma_writer_get_status(vmaw, &vmastat);
95259824 3070+
67af0fa4 3071+ if (verbose) {
95259824 3072+
67af0fa4
WB
3073+ uint64_t total = 0;
3074+ uint64_t transferred = 0;
3075+ uint64_t zero_bytes = 0;
95259824 3076+
67af0fa4
WB
3077+ int i;
3078+ for (i = 0; i < 256; i++) {
3079+ if (vmastat.stream_info[i].size) {
3080+ total += vmastat.stream_info[i].size;
3081+ transferred += vmastat.stream_info[i].transferred;
3082+ zero_bytes += vmastat.stream_info[i].zero_bytes;
3083+ }
95259824 3084+ }
67af0fa4
WB
3085+ percent = (transferred*100)/total;
3086+ if (percent != last_percent) {
3087+ fprintf(stderr, "progress %d%% %zd/%zd %zd\n", percent,
3088+ transferred, total, zero_bytes);
3089+ fflush(stderr);
95259824 3090+
67af0fa4
WB
3091+ last_percent = percent;
3092+ }
95259824 3093+ }
95259824 3094+
67af0fa4
WB
3095+ if (vmastat.closed) {
3096+ break;
3097+ }
95259824
WB
3098+ }
3099+ } else {
3100+ Coroutine *co = qemu_coroutine_create(backup_run_empty, vmaw);
3101+ qemu_coroutine_enter(co);
3102+ while (1) {
3103+ main_loop_wait(false);
3104+ vma_writer_get_status(vmaw, &vmastat);
3105+ if (vmastat.closed) {
3106+ break;
3107+ }
3108+ }
3109+ }
3110+
3111+ bdrv_drain_all();
3112+
3113+ vma_writer_get_status(vmaw, &vmastat);
3114+
3115+ if (verbose) {
3116+ for (i = 0; i < 256; i++) {
3117+ VmaStreamInfo *si = &vmastat.stream_info[i];
3118+ if (si->size) {
3119+ fprintf(stderr, "image %s: size=%zd zeros=%zd saved=%zd\n",
3120+ si->devname, si->size, si->zero_bytes,
3121+ si->size - si->zero_bytes);
3122+ }
3123+ }
3124+ }
3125+
3126+ if (vmastat.status < 0) {
3127+ unlink(archivename);
3128+ g_error("creating vma archive failed");
3129+ }
3130+
3131+ return 0;
3132+}
3133+
67af0fa4
WB
3134+static int dump_config(int argc, char **argv)
3135+{
3136+ int c, ret = 0;
3137+ const char *filename;
3138+ const char *config_name = "qemu-server.conf";
3139+
3140+ for (;;) {
3141+ c = getopt(argc, argv, "hc:");
3142+ if (c == -1) {
3143+ break;
3144+ }
3145+ switch (c) {
3146+ case '?':
3147+ case 'h':
3148+ help();
3149+ break;
3150+ case 'c':
3151+ config_name = optarg;
3152+ break;
3153+ default:
3154+ help();
3155+ }
3156+ }
3157+
3158+ /* Get the filename */
3159+ if ((optind + 1) != argc) {
3160+ help();
3161+ }
3162+ filename = argv[optind++];
3163+
3164+ Error *errp = NULL;
3165+ VmaReader *vmar = vma_reader_create(filename, &errp);
3166+
3167+ if (!vmar) {
3168+ g_error("%s", error_get_pretty(errp));
3169+ }
3170+
3171+ int found = 0;
3172+ GList *l = vma_reader_get_config_data(vmar);
3173+ while (l && l->data) {
3174+ VmaConfigData *cdata = (VmaConfigData *)l->data;
3175+ l = g_list_next(l);
3176+ if (strcmp(cdata->name, config_name) == 0) {
3177+ found = 1;
3178+ fwrite(cdata->data, cdata->len, 1, stdout);
3179+ break;
3180+ }
3181+ }
3182+
3183+ vma_reader_destroy(vmar);
3184+
3185+ bdrv_close_all();
3186+
3187+ if (!found) {
3188+ fprintf(stderr, "unable to find configuration data '%s'\n", config_name);
3189+ return -1;
3190+ }
3191+
3192+ return ret;
3193+}
3194+
95259824
WB
3195+int main(int argc, char **argv)
3196+{
3197+ const char *cmdname;
3198+ Error *main_loop_err = NULL;
3199+
3200+ error_set_progname(argv[0]);
3201+
3202+ if (qemu_init_main_loop(&main_loop_err)) {
3203+ g_error("%s", error_get_pretty(main_loop_err));
3204+ }
3205+
3206+ bdrv_init();
3207+
3208+ if (argc < 2) {
3209+ help();
3210+ }
3211+
3212+ cmdname = argv[1];
3213+ argc--; argv++;
3214+
3215+
3216+ if (!strcmp(cmdname, "list")) {
3217+ return list_content(argc, argv);
3218+ } else if (!strcmp(cmdname, "create")) {
3219+ return create_archive(argc, argv);
3220+ } else if (!strcmp(cmdname, "extract")) {
3221+ return extract_content(argc, argv);
67af0fa4
WB
3222+ } else if (!strcmp(cmdname, "verify")) {
3223+ return verify_content(argc, argv);
3224+ } else if (!strcmp(cmdname, "config")) {
3225+ return dump_config(argc, argv);
95259824
WB
3226+ }
3227+
3228+ help();
3229+ return 0;
3230+}
3231diff --git a/vma.h b/vma.h
3232new file mode 100644
67af0fa4 3233index 0000000000..fa6f4df7e7
95259824
WB
3234--- /dev/null
3235+++ b/vma.h
67af0fa4 3236@@ -0,0 +1,149 @@
95259824
WB
3237+/*
3238+ * VMA: Virtual Machine Archive
3239+ *
3240+ * Copyright (C) Proxmox Server Solutions
3241+ *
3242+ * Authors:
3243+ * Dietmar Maurer (dietmar@proxmox.com)
3244+ *
3245+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
3246+ * See the COPYING file in the top-level directory.
3247+ *
3248+ */
3249+
3250+#ifndef BACKUP_VMA_H
3251+#define BACKUP_VMA_H
3252+
3253+#include <uuid/uuid.h>
3254+#include "qapi/error.h"
3255+#include "block/block.h"
3256+
3257+#define VMA_BLOCK_BITS 12
3258+#define VMA_BLOCK_SIZE (1<<VMA_BLOCK_BITS)
3259+#define VMA_CLUSTER_BITS (VMA_BLOCK_BITS+4)
3260+#define VMA_CLUSTER_SIZE (1<<VMA_CLUSTER_BITS)
3261+
3262+#if VMA_CLUSTER_SIZE != 65536
3263+#error unexpected cluster size
3264+#endif
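+/* A cluster is 16 blocks of 4 KiB = 64 KiB; the 16-bit mask kept for each
+ * cluster in an extent's blockinfo entry marks which blocks are non-zero. */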
3265+
3266+#define VMA_EXTENT_HEADER_SIZE 512
3267+#define VMA_BLOCKS_PER_EXTENT 59
3268+#define VMA_MAX_CONFIGS 256
3269+
3270+#define VMA_MAX_EXTENT_SIZE \
3271+ (VMA_EXTENT_HEADER_SIZE+VMA_CLUSTER_SIZE*VMA_BLOCKS_PER_EXTENT)
3272+#if VMA_MAX_EXTENT_SIZE != 3867136
3273+#error unexpected VMA_EXTENT_SIZE
3274+#endif
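+/* 512-byte extent header + 59 clusters * 64 KiB = 3867136 bytes per extent */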
3275+
3276+/* File Format Definitions */
3277+
3278+#define VMA_MAGIC (GUINT32_TO_BE(('V'<<24)|('M'<<16)|('A'<<8)|0x00))
3279+#define VMA_EXTENT_MAGIC (GUINT32_TO_BE(('V'<<24)|('M'<<16)|('A'<<8)|'E'))
3280+
3281+typedef struct VmaDeviceInfoHeader {
3282+ uint32_t devname_ptr; /* offset into blob_buffer table */
3283+ uint32_t reserved0;
3284+ uint64_t size; /* device size in bytes */
3285+ uint64_t reserved1;
3286+ uint64_t reserved2;
3287+} VmaDeviceInfoHeader;
3288+
3289+typedef struct VmaHeader {
3290+ uint32_t magic;
3291+ uint32_t version;
3292+ unsigned char uuid[16];
3293+ int64_t ctime;
3294+ unsigned char md5sum[16];
3295+
3296+ uint32_t blob_buffer_offset;
3297+ uint32_t blob_buffer_size;
3298+ uint32_t header_size;
3299+
3300+ unsigned char reserved[1984];
3301+
3302+ uint32_t config_names[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
3303+ uint32_t config_data[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
3304+
3305+ uint32_t reserved1;
3306+
3307+ VmaDeviceInfoHeader dev_info[256];
3308+} VmaHeader;
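+/* On-disk header is 12288 bytes: config_names starts at offset 2044,
+ * config_data at 3068 and dev_info at 4096 (matching the asserts in
+ * vma_writer_create()); the blob buffer follows immediately after. */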
3309+
3310+typedef struct VmaExtentHeader {
3311+ uint32_t magic;
3312+ uint16_t reserved1;
3313+ uint16_t block_count;
3314+ unsigned char uuid[16];
3315+ unsigned char md5sum[16];
3316+ uint64_t blockinfo[VMA_BLOCKS_PER_EXTENT];
3317+} VmaExtentHeader;
3318+
3319+/* functions/definitions to read/write vma files */
3320+
3321+typedef struct VmaReader VmaReader;
3322+
3323+typedef struct VmaWriter VmaWriter;
3324+
3325+typedef struct VmaConfigData {
3326+ const char *name;
3327+ const void *data;
3328+ uint32_t len;
3329+} VmaConfigData;
3330+
3331+typedef struct VmaStreamInfo {
3332+ uint64_t size;
3333+ uint64_t cluster_count;
3334+ uint64_t transferred;
3335+ uint64_t zero_bytes;
3336+ int finished;
3337+ char *devname;
3338+} VmaStreamInfo;
3339+
3340+typedef struct VmaStatus {
3341+ int status;
3342+ bool closed;
3343+ char errmsg[8192];
3344+ char uuid_str[37];
3345+ VmaStreamInfo stream_info[256];
3346+} VmaStatus;
3347+
3348+typedef struct VmaDeviceInfo {
3349+ uint64_t size; /* device size in bytes */
3350+ const char *devname;
3351+} VmaDeviceInfo;
3352+
3353+VmaWriter *vma_writer_create(const char *filename, uuid_t uuid, Error **errp);
3354+int vma_writer_close(VmaWriter *vmaw, Error **errp);
67af0fa4 3355+void vma_writer_error_propagate(VmaWriter *vmaw, Error **errp);
95259824
WB
3356+void vma_writer_destroy(VmaWriter *vmaw);
3357+int vma_writer_add_config(VmaWriter *vmaw, const char *name, gpointer data,
3358+ size_t len);
3359+int vma_writer_register_stream(VmaWriter *vmaw, const char *devname,
3360+ size_t size);
3361+
3362+int64_t coroutine_fn vma_writer_write(VmaWriter *vmaw, uint8_t dev_id,
3363+ int64_t cluster_num, unsigned char *buf,
3364+ size_t *zero_bytes);
3365+
3366+int coroutine_fn vma_writer_close_stream(VmaWriter *vmaw, uint8_t dev_id);
67af0fa4 3367+int coroutine_fn vma_writer_flush_output(VmaWriter *vmaw);
95259824
WB
3368+
3369+int vma_writer_get_status(VmaWriter *vmaw, VmaStatus *status);
3370+void vma_writer_set_error(VmaWriter *vmaw, const char *fmt, ...);
3371+
3372+
3373+VmaReader *vma_reader_create(const char *filename, Error **errp);
3374+void vma_reader_destroy(VmaReader *vmar);
3375+VmaHeader *vma_reader_get_header(VmaReader *vmar);
3376+GList *vma_reader_get_config_data(VmaReader *vmar);
3377+VmaDeviceInfo *vma_reader_get_device_info(VmaReader *vmar, guint8 dev_id);
3378+int vma_reader_register_bs(VmaReader *vmar, guint8 dev_id,
67af0fa4 3379+ BlockBackend *target, bool write_zeroes,
95259824
WB
3380+ Error **errp);
3381+int vma_reader_restore(VmaReader *vmar, int vmstate_fd, bool verbose,
3382+ Error **errp);
67af0fa4 3383+int vma_reader_verify(VmaReader *vmar, bool verbose, Error **errp);
95259824
WB
3384+
3385+#endif /* BACKUP_VMA_H */
3386--
45169293 33872.11.0
95259824 3388