debian/patches/pve/0028-adding-old-vma-files.patch
1From 19761ee4aa12da9a3e4028cc611a18d972559de0 Mon Sep 17 00:00:00 2001
2From: Wolfgang Bumiller <w.bumiller@proxmox.com>
3Date: Mon, 7 Aug 2017 08:51:16 +0200
4Subject: [PATCH 28/28] adding old vma files
5
6---
7 Makefile | 3 +-
8 Makefile.objs | 1 +
9 block/backup.c | 132 ++++---
10 block/replication.c | 1 +
11 blockdev.c | 250 +++++++++-----
12 blockjob.c | 11 +-
13 include/block/block_int.h | 4 +
14 vma-reader.c | 857 ++++++++++++++++++++++++++++++++++++++++++++++
15 vma-writer.c | 771 +++++++++++++++++++++++++++++++++++++++++
16 vma.c | 757 ++++++++++++++++++++++++++++++++++++++++
17 vma.h | 149 ++++++++
18 11 files changed, 2802 insertions(+), 134 deletions(-)
19 create mode 100644 vma-reader.c
20 create mode 100644 vma-writer.c
21 create mode 100644 vma.c
22 create mode 100644 vma.h
23
24diff --git a/Makefile b/Makefile
25index 6c359b2f86..edbc8b50f0 100644
26--- a/Makefile
27+++ b/Makefile
28@@ -284,7 +284,7 @@ ifneq ($(wildcard config-host.mak),)
29 include $(SRC_PATH)/tests/Makefile.include
30 endif
31
32-all: $(DOCS) $(TOOLS) $(HELPERS-y) recurse-all modules
33+all: $(DOCS) $(TOOLS) vma$(EXESUF) $(HELPERS-y) recurse-all modules
34
35 qemu-version.h: FORCE
36 $(call quiet-command, \
37@@ -377,6 +377,7 @@ qemu-img.o: qemu-img-cmds.h
38 qemu-img$(EXESUF): qemu-img.o $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
39 qemu-nbd$(EXESUF): qemu-nbd.o $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
40 qemu-io$(EXESUF): qemu-io.o $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
41+vma$(EXESUF): vma.o vma-reader.o $(block-obj-y) $(crypto-obj-y) $(io-obj-y) $(qom-obj-y) $(COMMON_LDADDS)
42
43 qemu-bridge-helper$(EXESUF): qemu-bridge-helper.o $(COMMON_LDADDS)
44
45diff --git a/Makefile.objs b/Makefile.objs
46index fbfbbb7f70..f5f8dbab3b 100644
47--- a/Makefile.objs
48+++ b/Makefile.objs
49@@ -14,6 +14,7 @@ block-obj-y += block.o blockjob.o
50 block-obj-y += block/
51 block-obj-y += qemu-io-cmds.o
52 block-obj-$(CONFIG_REPLICATION) += replication.o
53+block-obj-y += vma-writer.o
54
55 block-obj-m = block/
56
57diff --git a/block/backup.c b/block/backup.c
58index 1ede70c061..7c5febc434 100644
59--- a/block/backup.c
60+++ b/block/backup.c
61@@ -36,6 +36,7 @@ typedef struct BackupBlockJob {
62 BdrvDirtyBitmap *sync_bitmap;
63 MirrorSyncMode sync_mode;
64 RateLimit limit;
65+ BackupDumpFunc *dump_cb;
66 BlockdevOnError on_source_error;
67 BlockdevOnError on_target_error;
68 CoRwlock flush_rwlock;
69@@ -145,13 +146,24 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
70 goto out;
71 }
72
73+ int64_t start_sec = start * sectors_per_cluster;
74 if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
75- ret = blk_co_pwrite_zeroes(job->target, start * job->cluster_size,
76- bounce_qiov.size, BDRV_REQ_MAY_UNMAP);
77+ if (job->dump_cb) {
78+ ret = job->dump_cb(job->common.opaque, job->target, start_sec, n, NULL);
79+ }
80+ if (job->target) {
81+ ret = blk_co_pwrite_zeroes(job->target, start * job->cluster_size,
82+ bounce_qiov.size, BDRV_REQ_MAY_UNMAP);
83+ }
84 } else {
85- ret = blk_co_pwritev(job->target, start * job->cluster_size,
86- bounce_qiov.size, &bounce_qiov,
87- job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
88+ if (job->dump_cb) {
89+ ret = job->dump_cb(job->common.opaque, job->target, start_sec, n, bounce_buffer);
90+ }
91+ if (job->target) {
92+ ret = blk_co_pwritev(job->target, start * job->cluster_size,
93+ bounce_qiov.size, &bounce_qiov,
94+ job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
95+ }
96 }
97 if (ret < 0) {
98 trace_backup_do_cow_write_fail(job, start, ret);
99@@ -246,6 +258,9 @@ static void backup_abort(BlockJob *job)
100 static void backup_clean(BlockJob *job)
101 {
102 BackupBlockJob *s = container_of(job, BackupBlockJob, common);
103+ if (!s->target) {
104+ return;
105+ }
106 assert(s->target);
107 blk_unref(s->target);
108 s->target = NULL;
109@@ -255,7 +270,9 @@ static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
110 {
111 BackupBlockJob *s = container_of(job, BackupBlockJob, common);
112
113- blk_set_aio_context(s->target, aio_context);
114+ if (s->target) {
115+ blk_set_aio_context(s->target, aio_context);
116+ }
117 }
118
119 void backup_do_checkpoint(BlockJob *job, Error **errp)
120@@ -330,9 +347,11 @@ static BlockErrorAction backup_error_action(BackupBlockJob *job,
121 if (read) {
122 return block_job_error_action(&job->common, job->on_source_error,
123 true, error);
124- } else {
125+ } else if (job->target) {
126 return block_job_error_action(&job->common, job->on_target_error,
127 false, error);
128+ } else {
129+ return BLOCK_ERROR_ACTION_REPORT;
130 }
131 }
132
133@@ -557,6 +576,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
134 BlockdevOnError on_source_error,
135 BlockdevOnError on_target_error,
136 int creation_flags,
137+ BackupDumpFunc *dump_cb,
138 BlockCompletionFunc *cb, void *opaque,
139 int pause_count,
140 BlockJobTxn *txn, Error **errp)
141@@ -567,7 +587,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
142 int ret;
143
144 assert(bs);
145- assert(target);
146+ assert(target || dump_cb);
147
148 if (bs == target) {
149 error_setg(errp, "Source and target cannot be the same");
150@@ -580,13 +600,13 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
151 return NULL;
152 }
153
154- if (!bdrv_is_inserted(target)) {
155+ if (target && !bdrv_is_inserted(target)) {
156 error_setg(errp, "Device is not inserted: %s",
157 bdrv_get_device_name(target));
158 return NULL;
159 }
160
161- if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
162+ if (target && compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
163 error_setg(errp, "Compression is not supported for this drive %s",
164 bdrv_get_device_name(target));
165 return NULL;
166@@ -596,7 +616,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
167 return NULL;
168 }
169
170- if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
171+ if (target && bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
172 return NULL;
173 }
174
175@@ -636,15 +656,18 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
176 goto error;
177 }
178
179- /* The target must match the source in size, so no resize here either */
180- job->target = blk_new(BLK_PERM_WRITE,
181- BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
182- BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
183- ret = blk_insert_bs(job->target, target, errp);
184- if (ret < 0) {
185- goto error;
186+ if (target) {
187+ /* The target must match the source in size, so no resize here either */
188+ job->target = blk_new(BLK_PERM_WRITE,
189+ BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
190+ BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
191+ ret = blk_insert_bs(job->target, target, errp);
192+ if (ret < 0) {
193+ goto error;
194+ }
195 }
196
197+ job->dump_cb = dump_cb;
198 job->on_source_error = on_source_error;
199 job->on_target_error = on_target_error;
200 job->sync_mode = sync_mode;
201@@ -652,38 +675,55 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
202 sync_bitmap : NULL;
203 job->compress = compress;
204
205- /* If there is no backing file on the target, we cannot rely on COW if our
206- * backup cluster size is smaller than the target cluster size. Even for
207- * targets with a backing file, try to avoid COW if possible. */
208- ret = bdrv_get_info(target, &bdi);
209- if (ret == -ENOTSUP && !target->backing) {
210- /* Cluster size is not defined */
211- error_report("WARNING: The target block device doesn't provide "
212- "information about the block size and it doesn't have a "
213- "backing file. The default block size of %u bytes is "
214- "used. If the actual block size of the target exceeds "
215- "this default, the backup may be unusable",
216- BACKUP_CLUSTER_SIZE_DEFAULT);
217- job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
218- } else if (ret < 0 && !target->backing) {
219- error_setg_errno(errp, -ret,
220- "Couldn't determine the cluster size of the target image, "
221- "which has no backing file");
222- error_append_hint(errp,
223- "Aborting, since this may create an unusable destination image\n");
224- goto error;
225- } else if (ret < 0 && target->backing) {
226- /* Not fatal; just trudge on ahead. */
227- job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
228+ if (target) {
229+ /* If there is no backing file on the target, we cannot rely on COW if our
230+ * backup cluster size is smaller than the target cluster size. Even for
231+ * targets with a backing file, try to avoid COW if possible. */
232+ ret = bdrv_get_info(target, &bdi);
233+ if (ret == -ENOTSUP && !target->backing) {
234+ /* Cluster size is not defined */
235+ error_report("WARNING: The target block device doesn't provide "
236+ "information about the block size and it doesn't have a "
237+ "backing file. The default block size of %u bytes is "
238+ "used. If the actual block size of the target exceeds "
239+ "this default, the backup may be unusable",
240+ BACKUP_CLUSTER_SIZE_DEFAULT);
241+ job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
242+ } else if (ret < 0 && !target->backing) {
243+ error_setg_errno(errp, -ret,
244+ "Couldn't determine the cluster size of the target image, "
245+ "which has no backing file");
246+ error_append_hint(errp,
247+ "Aborting, since this may create an unusable destination image\n");
248+ goto error;
249+ } else if (ret < 0 && target->backing) {
250+ /* Not fatal; just trudge on ahead. */
251+ job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
252+ } else {
253+ job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
254+ }
255 } else {
256- job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
257+ ret = bdrv_get_info(bs, &bdi);
258+ if (ret < 0) {
259+ job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
260+ } else {
261+ /* round down to nearest BACKUP_CLUSTER_SIZE_DEFAULT */
262+ job->cluster_size = (bdi.cluster_size / BACKUP_CLUSTER_SIZE_DEFAULT) * BACKUP_CLUSTER_SIZE_DEFAULT;
263+ if (job->cluster_size == 0) {
264+ /* but we can't go below it */
265+ job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
266+ }
267+ }
268 }
269
270- /* Required permissions are already taken with target's blk_new() */
271- block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
272- &error_abort);
273+ if (target) {
274+ /* Required permissions are already taken with target's blk_new() */
275+ block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
276+ &error_abort);
277+ } else {
278+ job->common.pause_count = pause_count;
279+ }
280 job->common.len = len;
281- job->common.pause_count = pause_count;
282 block_job_txn_add_job(txn, &job->common);
283
284 return &job->common;
285diff --git a/block/replication.c b/block/replication.c
286index 1c41d9e6bf..60c6524417 100644
287--- a/block/replication.c
288+++ b/block/replication.c
289@@ -531,6 +531,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
290 0, MIRROR_SYNC_MODE_NONE, NULL, false,
291 BLOCKDEV_ON_ERROR_REPORT,
292 BLOCKDEV_ON_ERROR_REPORT, BLOCK_JOB_INTERNAL,
293+ NULL,
294 backup_job_completed, bs, 0, NULL, &local_err);
295 if (local_err) {
296 error_propagate(errp, local_err);
297diff --git a/blockdev.c b/blockdev.c
298index 981276692a..76a7103743 100644
299--- a/blockdev.c
300+++ b/blockdev.c
301@@ -31,7 +31,6 @@
302 */
303
304 #include "qemu/osdep.h"
305-#include "qemu/uuid.h"
306 #include "sysemu/block-backend.h"
307 #include "sysemu/blockdev.h"
308 #include "hw/block/block.h"
309@@ -55,6 +54,7 @@
310 #include "qemu/cutils.h"
311 #include "qemu/help_option.h"
312 #include "qemu/throttle-options.h"
313+#include "vma.h"
314
315 static QTAILQ_HEAD(, BlockDriverState) monitor_bdrv_states =
316 QTAILQ_HEAD_INITIALIZER(monitor_bdrv_states);
317@@ -2958,20 +2958,44 @@ out:
318 aio_context_release(aio_context);
319 }
320
321+void block_job_event_cancelled(BlockJob *job);
322+void block_job_event_completed(BlockJob *job, const char *msg);
323+static void block_job_cb(void *opaque, int ret)
324+{
325+ /* Note that this function may be executed from another AioContext besides
326+ * the QEMU main loop. If you need to access anything that assumes the
327+ * QEMU global mutex, use a BH or introduce a mutex.
328+ */
329+
330+ BlockDriverState *bs = opaque;
331+ const char *msg = NULL;
332+
333+ assert(bs->job);
334+
335+ if (ret < 0) {
336+ msg = strerror(-ret);
337+ }
338+
339+ if (block_job_is_cancelled(bs->job)) {
340+ block_job_event_cancelled(bs->job);
341+ } else {
342+ block_job_event_completed(bs->job, msg);
343+ }
344+}
345+
346 /* PVE backup related function */
347
348 static struct PVEBackupState {
349 Error *error;
350 bool cancel;
351- QemuUUID uuid;
352+ uuid_t uuid;
353 char uuid_str[37];
354 int64_t speed;
355 time_t start_time;
356 time_t end_time;
357 char *backup_file;
358- Object *vmaobj;
359+ VmaWriter *vmaw;
360 GList *di_list;
361- size_t next_job;
362 size_t total;
363 size_t transferred;
364 size_t zero_bytes;
365@@ -2981,6 +3005,7 @@ typedef struct PVEBackupDevInfo {
366 BlockDriverState *bs;
367 size_t size;
368 uint8_t dev_id;
369+ //bool started;
370 bool completed;
371 char targetfile[PATH_MAX];
372 BlockDriverState *target;
373@@ -2988,13 +3013,79 @@ typedef struct PVEBackupDevInfo {
374
375 static void pvebackup_run_next_job(void);
376
377+static int pvebackup_dump_cb(void *opaque, BlockBackend *target,
378+ int64_t sector_num, int n_sectors,
379+ unsigned char *buf)
380+{
381+ PVEBackupDevInfo *di = opaque;
382+
383+ int size = n_sectors * BDRV_SECTOR_SIZE;
384+ if (backup_state.cancel) {
385+ return size; // return success
386+ }
387+
388+ if (sector_num & 0x7f) {
389+ if (!backup_state.error) {
390+ error_setg(&backup_state.error,
391+ "got unaligned write inside backup dump "
392+ "callback (sector %ld)", sector_num);
393+ }
394+ return -1; // not aligned to cluster size
395+ }
396+
397+ int64_t cluster_num = sector_num >> 7;
398+
399+ int ret = -1;
400+
401+ if (backup_state.vmaw) {
402+ size_t zero_bytes = 0;
403+ int64_t remaining = n_sectors * BDRV_SECTOR_SIZE;
404+ while (remaining > 0) {
405+ ret = vma_writer_write(backup_state.vmaw, di->dev_id, cluster_num,
406+ buf, &zero_bytes);
407+ ++cluster_num;
408+ if (buf) {
409+ buf += VMA_CLUSTER_SIZE;
410+ }
411+ if (ret < 0) {
412+ if (!backup_state.error) {
413+ vma_writer_error_propagate(backup_state.vmaw, &backup_state.error);
414+ }
415+ if (di->bs && di->bs->job) {
416+ block_job_cancel(di->bs->job);
417+ }
418+ break;
419+ } else {
420+ backup_state.zero_bytes += zero_bytes;
421+ if (remaining >= VMA_CLUSTER_SIZE) {
422+ backup_state.transferred += VMA_CLUSTER_SIZE;
423+ } else {
424+ backup_state.transferred += remaining;
425+ }
426+ remaining -= VMA_CLUSTER_SIZE;
427+ }
428+ }
429+ } else {
430+ if (!buf) {
431+ backup_state.zero_bytes += size;
432+ }
433+ backup_state.transferred += size;
434+ }
435+
436+ // Note: always return success, because we want writes to succeed anyway.
437+
438+ return size;
439+}
440+
441 static void pvebackup_cleanup(void)
442 {
443 backup_state.end_time = time(NULL);
444
445- if (backup_state.vmaobj) {
446- object_unparent(backup_state.vmaobj);
447- backup_state.vmaobj = NULL;
448+ if (backup_state.vmaw) {
449+ Error *local_err = NULL;
450+ vma_writer_close(backup_state.vmaw, &local_err);
451+ error_propagate(&backup_state.error, local_err);
452+ backup_state.vmaw = NULL;
453 }
454
455 if (backup_state.di_list) {
456@@ -3009,6 +3100,13 @@ static void pvebackup_cleanup(void)
457 }
458 }
459
460+static void coroutine_fn backup_close_vma_stream(void *opaque)
461+{
462+ PVEBackupDevInfo *di = opaque;
463+
464+ vma_writer_close_stream(backup_state.vmaw, di->dev_id);
465+}
466+
467 static void pvebackup_complete_cb(void *opaque, int ret)
468 {
469 PVEBackupDevInfo *di = opaque;
470@@ -3020,14 +3118,18 @@ static void pvebackup_complete_cb(void *opaque, int ret)
471 ret, strerror(-ret));
472 }
473
474+ BlockDriverState *bs = di->bs;
475+
476 di->bs = NULL;
477 di->target = NULL;
478
479- if (backup_state.vmaobj) {
480- object_unparent(backup_state.vmaobj);
481- backup_state.vmaobj = NULL;
482+ if (backup_state.vmaw) {
483+ Coroutine *co = qemu_coroutine_create(backup_close_vma_stream, di);
484+ qemu_coroutine_enter(co);
485 }
486
487+ block_job_cb(bs, ret);
488+
489 if (!backup_state.cancel) {
490 pvebackup_run_next_job();
491 }
492@@ -3041,14 +3143,9 @@ static void pvebackup_cancel(void *opaque)
493 error_setg(&backup_state.error, "backup cancelled");
494 }
495
496- if (backup_state.vmaobj) {
497- Error *err;
498+ if (backup_state.vmaw) {
499 /* make sure vma writer does not block anymore */
500- if (!object_set_props(backup_state.vmaobj, &err, "blocked", "yes", NULL)) {
501- if (err) {
502- error_report_err(err);
503- }
504- }
505+ vma_writer_set_error(backup_state.vmaw, "backup cancelled");
506 }
507
508 GList *l = backup_state.di_list;
509@@ -3073,19 +3170,15 @@ void qmp_backup_cancel(Error **errp)
510 Coroutine *co = qemu_coroutine_create(pvebackup_cancel, NULL);
511 qemu_coroutine_enter(co);
512
513- while (backup_state.vmaobj) {
514- /* FIXME: Find something better for this */
515+ while (backup_state.vmaw) {
516+ /* vma writer uses the main aio context */
517 aio_poll(qemu_get_aio_context(), true);
518 }
519 }
520
521-void vma_object_add_config_file(Object *obj, const char *name,
522- const char *contents, size_t len,
523- Error **errp);
524 static int config_to_vma(const char *file, BackupFormat format,
525- Object *vmaobj,
526- const char *backup_dir,
527- Error **errp)
528+ const char *backup_dir, VmaWriter *vmaw,
529+ Error **errp)
530 {
531 char *cdata = NULL;
532 gsize clen = 0;
533@@ -3098,12 +3191,17 @@ static int config_to_vma(const char *file, BackupFormat format,
534 char *basename = g_path_get_basename(file);
535
536 if (format == BACKUP_FORMAT_VMA) {
537- vma_object_add_config_file(vmaobj, basename, cdata, clen, errp);
538+ if (vma_writer_add_config(vmaw, basename, cdata, clen) != 0) {
539+ error_setg(errp, "unable to add %s config data to vma archive", file);
540+ g_free(cdata);
541+ g_free(basename);
542+ return 1;
543+ }
544 } else if (format == BACKUP_FORMAT_DIR) {
545 char config_path[PATH_MAX];
546 snprintf(config_path, PATH_MAX, "%s/%s", backup_dir, basename);
547 if (!g_file_set_contents(config_path, cdata, clen, &err)) {
548- error_setg(errp, "unable to write config file '%s'", config_path);
549+ error_setg(errp, "unable to write config file '%s'", config_path);
550 g_free(cdata);
551 g_free(basename);
552 return 1;
553@@ -3113,35 +3211,37 @@ static int config_to_vma(const char *file, BackupFormat format,
554 g_free(basename);
555 g_free(cdata);
556
557- return 0;
558+ return 0;
559 }
560
561+bool block_job_should_pause(BlockJob *job);
562 static void pvebackup_run_next_job(void)
563 {
564- bool cancel = backup_state.error || backup_state.cancel;
565-fprintf(stderr, "run next job: %zu\n", backup_state.next_job);
566- GList *next = g_list_nth(backup_state.di_list, backup_state.next_job);
567- while (next) {
568- PVEBackupDevInfo *di = (PVEBackupDevInfo *)next->data;
569- backup_state.next_job++;
570+ GList *l = backup_state.di_list;
571+ while (l) {
572+ PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
573+ l = g_list_next(l);
574 if (!di->completed && di->bs && di->bs->job) {
575 BlockJob *job = di->bs->job;
576- if (cancel) {
577- block_job_cancel(job);
578- } else {
579- block_job_resume(job);
580+ if (block_job_should_pause(job)) {
581+ bool cancel = backup_state.error || backup_state.cancel;
582+ if (cancel) {
583+ block_job_cancel(job);
584+ } else {
585+ block_job_resume(job);
586+ }
587 }
588 return;
589 }
590- next = g_list_next(next);
591 }
592+
593 pvebackup_cleanup();
594 }
595
596 UuidInfo *qmp_backup(const char *backup_file, bool has_format,
597 BackupFormat format,
598 bool has_config_file, const char *config_file,
599- bool has_firewall_file, const char *firewall_file,
600+ bool has_firewall_file, const char *firewall_file,
601 bool has_devlist, const char *devlist,
602 bool has_speed, int64_t speed, Error **errp)
603 {
604@@ -3149,14 +3249,15 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
605 BlockDriverState *bs = NULL;
606 const char *backup_dir = NULL;
607 Error *local_err = NULL;
608- QemuUUID uuid;
609+ uuid_t uuid;
610+ VmaWriter *vmaw = NULL;
611 gchar **devs = NULL;
612 GList *di_list = NULL;
613 GList *l;
614 UuidInfo *uuid_info;
615 BlockJob *job;
616
617- if (backup_state.di_list || backup_state.vmaobj) {
618+ if (backup_state.di_list) {
619 error_set(errp, ERROR_CLASS_GENERIC_ERROR,
620 "previous backup not finished");
621 return NULL;
622@@ -3231,40 +3332,28 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
623 total += size;
624 }
625
626- qemu_uuid_generate(&uuid);
627+ uuid_generate(uuid);
628
629 if (format == BACKUP_FORMAT_VMA) {
630- char uuidstr[UUID_FMT_LEN+1];
631- qemu_uuid_unparse(&uuid, uuidstr);
632- uuidstr[UUID_FMT_LEN] = 0;
633- backup_state.vmaobj =
634- object_new_with_props("vma", object_get_objects_root(),
635- "vma-backup-obj", &local_err,
636- "filename", backup_file,
637- "uuid", uuidstr,
638- NULL);
639- if (!backup_state.vmaobj) {
640+ vmaw = vma_writer_create(backup_file, uuid, &local_err);
641+ if (!vmaw) {
642 if (local_err) {
643 error_propagate(errp, local_err);
644 }
645 goto err;
646 }
647
648+ /* register all devices for vma writer */
649 l = di_list;
650 while (l) {
651- QDict *options = qdict_new();
652-
653 PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
654 l = g_list_next(l);
655
656 const char *devname = bdrv_get_device_name(di->bs);
657- snprintf(di->targetfile, PATH_MAX, "vma-backup-obj/%s.raw", devname);
658-
659- qdict_put(options, "driver", qstring_from_str("vma-drive"));
660- qdict_put(options, "size", qint_from_int(di->size));
661- di->target = bdrv_open(di->targetfile, NULL, options, BDRV_O_RDWR, &local_err);
662- if (!di->target) {
663- error_propagate(errp, local_err);
664+ di->dev_id = vma_writer_register_stream(vmaw, devname, di->size);
665+ if (di->dev_id <= 0) {
666+ error_set(errp, ERROR_CLASS_GENERIC_ERROR,
667+ "register_stream failed");
668 goto err;
669 }
670 }
671@@ -3305,15 +3394,15 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
672
673 /* add configuration file to archive */
674 if (has_config_file) {
675- if(config_to_vma(config_file, format, backup_state.vmaobj, backup_dir, errp) != 0) {
676- goto err;
677+ if(config_to_vma(config_file, format, backup_dir, vmaw, errp) != 0) {
678+ goto err;
679 }
680 }
681
682 /* add firewall file to archive */
683 if (has_firewall_file) {
684- if(config_to_vma(firewall_file, format, backup_state.vmaobj, backup_dir, errp) != 0) {
685- goto err;
686+ if(config_to_vma(firewall_file, format, backup_dir, vmaw, errp) != 0) {
687+ goto err;
688 }
689 }
690 /* initialize global backup_state now */
691@@ -3335,11 +3424,12 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
692 }
693 backup_state.backup_file = g_strdup(backup_file);
694
695- memcpy(&backup_state.uuid, &uuid, sizeof(uuid));
696- qemu_uuid_unparse(&uuid, backup_state.uuid_str);
697+ backup_state.vmaw = vmaw;
698+
699+ uuid_copy(backup_state.uuid, uuid);
700+ uuid_unparse_lower(uuid, backup_state.uuid_str);
701
702 backup_state.di_list = di_list;
703- backup_state.next_job = 0;
704
705 backup_state.total = total;
706 backup_state.transferred = 0;
707@@ -3350,21 +3440,16 @@ UuidInfo *qmp_backup(const char *backup_file, bool has_format,
708 while (l) {
709 PVEBackupDevInfo *di = (PVEBackupDevInfo *)l->data;
710 l = g_list_next(l);
711-
712 job = backup_job_create(NULL, di->bs, di->target, speed, MIRROR_SYNC_MODE_FULL, NULL,
713 false, BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
714 BLOCK_JOB_DEFAULT,
715- pvebackup_complete_cb, di, 2, NULL, &local_err);
716- if (di->target) {
717- bdrv_unref(di->target);
718- di->target = NULL;
719- }
720+ pvebackup_dump_cb, pvebackup_complete_cb, di,
721+ 2, NULL, &local_err);
722 if (!job || local_err != NULL) {
723 error_setg(&backup_state.error, "backup_job_create failed");
724 pvebackup_cancel(NULL);
725- } else {
726- block_job_start(job);
727 }
728+ block_job_start(job);
729 }
730
731 if (!backup_state.error) {
732@@ -3398,9 +3483,10 @@ err:
733 g_strfreev(devs);
734 }
735
736- if (backup_state.vmaobj) {
737- object_unparent(backup_state.vmaobj);
738- backup_state.vmaobj = NULL;
739+ if (vmaw) {
740+ Error *err = NULL;
741+ vma_writer_close(vmaw, &err);
742+ unlink(backup_file);
743 }
744
745 if (backup_dir) {
746@@ -3772,7 +3858,7 @@ static BlockJob *do_drive_backup(DriveBackup *backup, BlockJobTxn *txn,
747 job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
748 backup->sync, bmap, backup->compress,
749 backup->on_source_error, backup->on_target_error,
750- BLOCK_JOB_DEFAULT, NULL, NULL, 0, txn, &local_err);
751+ BLOCK_JOB_DEFAULT, NULL, NULL, NULL, 0, txn, &local_err);
752 bdrv_unref(target_bs);
753 if (local_err != NULL) {
754 error_propagate(errp, local_err);
755@@ -3851,7 +3937,7 @@ BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn,
756 job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
757 backup->sync, NULL, backup->compress,
758 backup->on_source_error, backup->on_target_error,
759- BLOCK_JOB_DEFAULT, NULL, NULL, 0, txn, &local_err);
760+ BLOCK_JOB_DEFAULT, NULL, NULL, NULL, 0, txn, &local_err);
761 if (local_err != NULL) {
762 error_propagate(errp, local_err);
763 }
764diff --git a/blockjob.c b/blockjob.c
765index 764d41863e..cb3741f6dd 100644
766--- a/blockjob.c
767+++ b/blockjob.c
768@@ -37,8 +37,8 @@
769 #include "qemu/timer.h"
770 #include "qapi-event.h"
771
772-static void block_job_event_cancelled(BlockJob *job);
773-static void block_job_event_completed(BlockJob *job, const char *msg);
774+void block_job_event_cancelled(BlockJob *job);
775+void block_job_event_completed(BlockJob *job, const char *msg);
776
777 /* Transactional group of block jobs */
778 struct BlockJobTxn {
779@@ -473,7 +473,8 @@ void block_job_user_pause(BlockJob *job)
780 block_job_pause(job);
781 }
782
783-static bool block_job_should_pause(BlockJob *job)
784+bool block_job_should_pause(BlockJob *job);
785+bool block_job_should_pause(BlockJob *job)
786 {
787 return job->pause_count > 0;
788 }
789@@ -687,7 +688,7 @@ static void block_job_iostatus_set_err(BlockJob *job, int error)
790 }
791 }
792
793-static void block_job_event_cancelled(BlockJob *job)
794+void block_job_event_cancelled(BlockJob *job)
795 {
796 if (block_job_is_internal(job)) {
797 return;
798@@ -701,7 +702,7 @@ static void block_job_event_cancelled(BlockJob *job)
799 &error_abort);
800 }
801
802-static void block_job_event_completed(BlockJob *job, const char *msg)
803+void block_job_event_completed(BlockJob *job, const char *msg)
804 {
805 if (block_job_is_internal(job)) {
806 return;
807diff --git a/include/block/block_int.h b/include/block/block_int.h
808index 2b3ecd0575..278da161fb 100644
809--- a/include/block/block_int.h
810+++ b/include/block/block_int.h
811@@ -59,6 +59,9 @@
812
813 #define BLOCK_PROBE_BUF_SIZE 512
814
815+typedef int BackupDumpFunc(void *opaque, BlockBackend *be,
816+ int64_t sector_num, int n_sectors, unsigned char *buf);
817+
818 enum BdrvTrackedRequestType {
819 BDRV_TRACKED_READ,
820 BDRV_TRACKED_WRITE,
821@@ -877,6 +880,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
822 BlockdevOnError on_source_error,
823 BlockdevOnError on_target_error,
824 int creation_flags,
825+ BackupDumpFunc *dump_cb,
826 BlockCompletionFunc *cb, void *opaque,
827 int pause_count,
828 BlockJobTxn *txn, Error **errp);
829diff --git a/vma-reader.c b/vma-reader.c
830new file mode 100644
831index 0000000000..2000889bd3
832--- /dev/null
833+++ b/vma-reader.c
834@@ -0,0 +1,857 @@
835+/*
836+ * VMA: Virtual Machine Archive
837+ *
838+ * Copyright (C) 2012 Proxmox Server Solutions
839+ *
840+ * Authors:
841+ * Dietmar Maurer (dietmar@proxmox.com)
842+ *
843+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
844+ * See the COPYING file in the top-level directory.
845+ *
846+ */
847+
848+#include "qemu/osdep.h"
849+#include <glib.h>
850+#include <uuid/uuid.h>
851+
852+#include "qemu-common.h"
853+#include "qemu/timer.h"
854+#include "qemu/ratelimit.h"
855+#include "vma.h"
856+#include "block/block.h"
857+#include "sysemu/block-backend.h"
858+
859+static unsigned char zero_vma_block[VMA_BLOCK_SIZE];
860+
861+typedef struct VmaRestoreState {
862+ BlockBackend *target;
863+ bool write_zeroes;
864+ unsigned long *bitmap;
865+ int bitmap_size;
866+} VmaRestoreState;
867+
868+struct VmaReader {
869+ int fd;
870+ GChecksum *md5csum;
871+ GHashTable *blob_hash;
872+ unsigned char *head_data;
873+ VmaDeviceInfo devinfo[256];
874+ VmaRestoreState rstate[256];
875+ GList *cdata_list;
876+ guint8 vmstate_stream;
877+ uint32_t vmstate_clusters;
878+ /* to show restore percentage if run with -v */
879+ time_t start_time;
880+ int64_t cluster_count;
881+ int64_t clusters_read;
882+ int64_t zero_cluster_data;
883+ int64_t partial_zero_cluster_data;
95259824
WB
884+ int clusters_read_per;
885+};
886+
887+static guint
888+g_int32_hash(gconstpointer v)
889+{
890+ return *(const uint32_t *)v;
891+}
892+
893+static gboolean
894+g_int32_equal(gconstpointer v1, gconstpointer v2)
895+{
896+ return *((const uint32_t *)v1) == *((const uint32_t *)v2);
897+}
898+
899+static int vma_reader_get_bitmap(VmaRestoreState *rstate, int64_t cluster_num)
900+{
901+ assert(rstate);
902+ assert(rstate->bitmap);
903+
904+ unsigned long val, idx, bit;
905+
906+ idx = cluster_num / BITS_PER_LONG;
907+
908+ assert(rstate->bitmap_size > idx);
909+
910+ bit = cluster_num % BITS_PER_LONG;
911+ val = rstate->bitmap[idx];
912+
913+ return !!(val & (1UL << bit));
914+}
915+
916+static void vma_reader_set_bitmap(VmaRestoreState *rstate, int64_t cluster_num,
917+ int dirty)
918+{
919+ assert(rstate);
920+ assert(rstate->bitmap);
921+
922+ unsigned long val, idx, bit;
923+
924+ idx = cluster_num / BITS_PER_LONG;
925+
926+ assert(rstate->bitmap_size > idx);
927+
928+ bit = cluster_num % BITS_PER_LONG;
929+ val = rstate->bitmap[idx];
930+ if (dirty) {
931+ if (!(val & (1UL << bit))) {
932+ val |= 1UL << bit;
933+ }
934+ } else {
935+ if (val & (1UL << bit)) {
936+ val &= ~(1UL << bit);
937+ }
938+ }
939+ rstate->bitmap[idx] = val;
940+}
941+
942+typedef struct VmaBlob {
943+ uint32_t start;
944+ uint32_t len;
945+ void *data;
946+} VmaBlob;
947+
948+static const VmaBlob *get_header_blob(VmaReader *vmar, uint32_t pos)
949+{
950+ assert(vmar);
951+ assert(vmar->blob_hash);
952+
953+ return g_hash_table_lookup(vmar->blob_hash, &pos);
954+}
955+
956+static const char *get_header_str(VmaReader *vmar, uint32_t pos)
957+{
958+ const VmaBlob *blob = get_header_blob(vmar, pos);
959+ if (!blob) {
960+ return NULL;
961+ }
962+ const char *res = (char *)blob->data;
963+ if (res[blob->len-1] != '\0') {
964+ return NULL;
965+ }
966+ return res;
967+}
968+
969+static ssize_t
970+safe_read(int fd, unsigned char *buf, size_t count)
971+{
972+ ssize_t n;
973+
974+ do {
975+ n = read(fd, buf, count);
976+ } while (n < 0 && errno == EINTR);
977+
978+ return n;
979+}
980+
981+static ssize_t
982+full_read(int fd, unsigned char *buf, size_t len)
983+{
984+ ssize_t n;
985+ size_t total;
986+
987+ total = 0;
988+
989+ while (len > 0) {
990+ n = safe_read(fd, buf, len);
991+
992+ if (n == 0) {
993+ return total;
994+ }
995+
996+ if (n <= 0) {
997+ break;
998+ }
999+
1000+ buf += n;
1001+ total += n;
1002+ len -= n;
1003+ }
1004+
1005+ if (len) {
1006+ return -1;
1007+ }
1008+
1009+ return total;
1010+}
1011+
1012+void vma_reader_destroy(VmaReader *vmar)
1013+{
1014+ assert(vmar);
1015+
1016+ if (vmar->fd >= 0) {
1017+ close(vmar->fd);
1018+ }
1019+
1020+ if (vmar->cdata_list) {
1021+ g_list_free(vmar->cdata_list);
1022+ }
1023+
1024+ int i;
1025+ for (i = 1; i < 256; i++) {
1026+ if (vmar->rstate[i].bitmap) {
1027+ g_free(vmar->rstate[i].bitmap);
1028+ }
1029+ }
1030+
1031+ if (vmar->md5csum) {
1032+ g_checksum_free(vmar->md5csum);
1033+ }
1034+
1035+ if (vmar->blob_hash) {
1036+ g_hash_table_destroy(vmar->blob_hash);
1037+ }
1038+
1039+ if (vmar->head_data) {
1040+ g_free(vmar->head_data);
1041+ }
1042+
1043+ g_free(vmar);
1044+
1045+};
1046+
1047+static int vma_reader_read_head(VmaReader *vmar, Error **errp)
1048+{
1049+ assert(vmar);
1050+ assert(errp);
1051+ assert(*errp == NULL);
1052+
1053+ unsigned char md5sum[16];
1054+ int i;
1055+ int ret = 0;
1056+
1057+ vmar->head_data = g_malloc(sizeof(VmaHeader));
1058+
1059+ if (full_read(vmar->fd, vmar->head_data, sizeof(VmaHeader)) !=
1060+ sizeof(VmaHeader)) {
1061+ error_setg(errp, "can't read vma header - %s",
1062+ errno ? g_strerror(errno) : "got EOF");
1063+ return -1;
1064+ }
1065+
1066+ VmaHeader *h = (VmaHeader *)vmar->head_data;
1067+
1068+ if (h->magic != VMA_MAGIC) {
1069+ error_setg(errp, "not a vma file - wrong magic number");
1070+ return -1;
1071+ }
1072+
1073+ uint32_t header_size = GUINT32_FROM_BE(h->header_size);
1074+ int need = header_size - sizeof(VmaHeader);
1075+ if (need <= 0) {
1076+ error_setg(errp, "wrong vma header size %d", header_size);
1077+ return -1;
1078+ }
1079+
1080+ vmar->head_data = g_realloc(vmar->head_data, header_size);
1081+ h = (VmaHeader *)vmar->head_data;
1082+
1083+ if (full_read(vmar->fd, vmar->head_data + sizeof(VmaHeader), need) !=
1084+ need) {
1085+ error_setg(errp, "can't read vma header data - %s",
1086+ errno ? g_strerror(errno) : "got EOF");
1087+ return -1;
1088+ }
1089+
1090+ memcpy(md5sum, h->md5sum, 16);
1091+ memset(h->md5sum, 0, 16);
1092+
1093+ g_checksum_reset(vmar->md5csum);
1094+ g_checksum_update(vmar->md5csum, vmar->head_data, header_size);
1095+ gsize csize = 16;
1096+ g_checksum_get_digest(vmar->md5csum, (guint8 *)(h->md5sum), &csize);
1097+
1098+ if (memcmp(md5sum, h->md5sum, 16) != 0) {
1099+ error_setg(errp, "wrong vma header checksum");
1100+ return -1;
1101+ }
1102+
1103+ /* we can modify header data after checksum verify */
1104+ h->header_size = header_size;
1105+
1106+ h->version = GUINT32_FROM_BE(h->version);
1107+ if (h->version != 1) {
1108+ error_setg(errp, "wrong vma version %d", h->version);
1109+ return -1;
1110+ }
1111+
1112+ h->ctime = GUINT64_FROM_BE(h->ctime);
1113+ h->blob_buffer_offset = GUINT32_FROM_BE(h->blob_buffer_offset);
1114+ h->blob_buffer_size = GUINT32_FROM_BE(h->blob_buffer_size);
1115+
1116+ uint32_t bstart = h->blob_buffer_offset + 1;
1117+ uint32_t bend = h->blob_buffer_offset + h->blob_buffer_size;
1118+
1119+ if (bstart <= sizeof(VmaHeader)) {
1120+ error_setg(errp, "wrong vma blob buffer offset %d",
1121+ h->blob_buffer_offset);
1122+ return -1;
1123+ }
1124+
1125+ if (bend > header_size) {
1126+ error_setg(errp, "wrong vma blob buffer size %d/%d",
1127+ h->blob_buffer_offset, h->blob_buffer_size);
1128+ return -1;
1129+ }
1130+
1131+ while ((bstart + 2) <= bend) {
1132+ uint32_t size = vmar->head_data[bstart] +
1133+ (vmar->head_data[bstart+1] << 8);
1134+ if ((bstart + size + 2) <= bend) {
1135+ VmaBlob *blob = g_new0(VmaBlob, 1);
1136+ blob->start = bstart - h->blob_buffer_offset;
1137+ blob->len = size;
1138+ blob->data = vmar->head_data + bstart + 2;
1139+ g_hash_table_insert(vmar->blob_hash, &blob->start, blob);
1140+ }
1141+ bstart += size + 2;
1142+ }
1143+
1144+
1145+ int count = 0;
1146+ for (i = 1; i < 256; i++) {
1147+ VmaDeviceInfoHeader *dih = &h->dev_info[i];
1148+ uint32_t devname_ptr = GUINT32_FROM_BE(dih->devname_ptr);
1149+ uint64_t size = GUINT64_FROM_BE(dih->size);
1150+ const char *devname = get_header_str(vmar, devname_ptr);
1151+
1152+ if (size && devname) {
1153+ count++;
1154+ vmar->devinfo[i].size = size;
1155+ vmar->devinfo[i].devname = devname;
1156+
1157+ if (strcmp(devname, "vmstate") == 0) {
1158+ vmar->vmstate_stream = i;
1159+ }
1160+ }
1161+ }
1162+
1163+ for (i = 0; i < VMA_MAX_CONFIGS; i++) {
1164+ uint32_t name_ptr = GUINT32_FROM_BE(h->config_names[i]);
1165+ uint32_t data_ptr = GUINT32_FROM_BE(h->config_data[i]);
1166+
1167+ if (!(name_ptr && data_ptr)) {
1168+ continue;
1169+ }
1170+ const char *name = get_header_str(vmar, name_ptr);
1171+ const VmaBlob *blob = get_header_blob(vmar, data_ptr);
1172+
1173+ if (!(name && blob)) {
1174+ error_setg(errp, "vma contains invalid data pointers");
1175+ return -1;
1176+ }
1177+
1178+ VmaConfigData *cdata = g_new0(VmaConfigData, 1);
1179+ cdata->name = name;
1180+ cdata->data = blob->data;
1181+ cdata->len = blob->len;
1182+
1183+ vmar->cdata_list = g_list_append(vmar->cdata_list, cdata);
1184+ }
1185+
1186+ return ret;
1187+};
1188+
1189+VmaReader *vma_reader_create(const char *filename, Error **errp)
1190+{
1191+ assert(filename);
1192+ assert(errp);
1193+
1194+ VmaReader *vmar = g_new0(VmaReader, 1);
1195+
1196+ if (strcmp(filename, "-") == 0) {
1197+ vmar->fd = dup(0);
1198+ } else {
1199+ vmar->fd = open(filename, O_RDONLY);
1200+ }
1201+
1202+ if (vmar->fd < 0) {
1203+ error_setg(errp, "can't open file %s - %s\n", filename,
1204+ g_strerror(errno));
1205+ goto err;
1206+ }
1207+
1208+ vmar->md5csum = g_checksum_new(G_CHECKSUM_MD5);
1209+ if (!vmar->md5csum) {
1210+ error_setg(errp, "can't allocate checksum\n");
1211+ goto err;
1212+ }
1213+
1214+ vmar->blob_hash = g_hash_table_new_full(g_int32_hash, g_int32_equal,
1215+ NULL, g_free);
1216+
1217+ if (vma_reader_read_head(vmar, errp) < 0) {
1218+ goto err;
1219+ }
1220+
1221+ return vmar;
1222+
1223+err:
1224+ if (vmar) {
1225+ vma_reader_destroy(vmar);
1226+ }
1227+
1228+ return NULL;
1229+}
1230+
1231+VmaHeader *vma_reader_get_header(VmaReader *vmar)
1232+{
1233+ assert(vmar);
1234+ assert(vmar->head_data);
1235+
1236+ return (VmaHeader *)(vmar->head_data);
1237+}
1238+
1239+GList *vma_reader_get_config_data(VmaReader *vmar)
1240+{
1241+ assert(vmar);
1242+ assert(vmar->head_data);
1243+
1244+ return vmar->cdata_list;
1245+}
1246+
1247+VmaDeviceInfo *vma_reader_get_device_info(VmaReader *vmar, guint8 dev_id)
1248+{
1249+ assert(vmar);
1250+ assert(dev_id);
1251+
1252+ if (vmar->devinfo[dev_id].size && vmar->devinfo[dev_id].devname) {
1253+ return &vmar->devinfo[dev_id];
1254+ }
1255+
1256+ return NULL;
1257+}
1258+
1259+static void allocate_rstate(VmaReader *vmar, guint8 dev_id,
1260+ BlockBackend *target, bool write_zeroes)
1261+{
1262+ assert(vmar);
1263+ assert(dev_id);
1264+
1265+ vmar->rstate[dev_id].target = target;
1266+ vmar->rstate[dev_id].write_zeroes = write_zeroes;
1267+
1268+ int64_t size = vmar->devinfo[dev_id].size;
1269+
1270+ int64_t bitmap_size = (size/BDRV_SECTOR_SIZE) +
1271+ (VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE) * BITS_PER_LONG - 1;
1272+ bitmap_size /= (VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE) * BITS_PER_LONG;
1273+
1274+ vmar->rstate[dev_id].bitmap_size = bitmap_size;
1275+ vmar->rstate[dev_id].bitmap = g_new0(unsigned long, bitmap_size);
1276+
1277+ vmar->cluster_count += size/VMA_CLUSTER_SIZE;
1278+}
1279+
1280+int vma_reader_register_bs(VmaReader *vmar, guint8 dev_id, BlockBackend *target,
1281+ bool write_zeroes, Error **errp)
1282+{
1283+ assert(vmar);
1284+ assert(target != NULL);
1285+ assert(dev_id);
1286+ assert(vmar->rstate[dev_id].target == NULL);
1287+
1288+ int64_t size = blk_getlength(target);
1289+ int64_t size_diff = size - vmar->devinfo[dev_id].size;
1290+
1291+ /* storage types can have different size restrictions, so it
1292+ * is not always possible to create an image with exact size.
1293+ * So we tolerate a size difference up to 4MB.
1294+ */
1295+ if ((size_diff < 0) || (size_diff > 4*1024*1024)) {
1296+ error_setg(errp, "vma_reader_register_bs for stream %s failed - "
1297+ "unexpected size %zd != %zd", vmar->devinfo[dev_id].devname,
1298+ size, vmar->devinfo[dev_id].size);
1299+ return -1;
1300+ }
1301+
1302+ allocate_rstate(vmar, dev_id, target, write_zeroes);
1303+
1304+ return 0;
1305+}
1306+
1307+static ssize_t safe_write(int fd, void *buf, size_t count)
1308+{
1309+ ssize_t n;
1310+
1311+ do {
1312+ n = write(fd, buf, count);
1313+ } while (n < 0 && errno == EINTR);
1314+
1315+ return n;
1316+}
1317+
1318+static size_t full_write(int fd, void *buf, size_t len)
1319+{
1320+ ssize_t n;
1321+ size_t total;
1322+
1323+ total = 0;
1324+
1325+ while (len > 0) {
1326+ n = safe_write(fd, buf, len);
1327+ if (n < 0) {
1328+ return n;
1329+ }
1330+ buf += n;
1331+ total += n;
1332+ len -= n;
1333+ }
1334+
1335+ if (len) {
1336+ /* incomplete write ? */
1337+ return -1;
1338+ }
1339+
1340+ return total;
1341+}
1342+
1343+static int restore_write_data(VmaReader *vmar, guint8 dev_id,
1344+ BlockBackend *target, int vmstate_fd,
1345+ unsigned char *buf, int64_t sector_num,
1346+ int nb_sectors, Error **errp)
1347+{
1348+ assert(vmar);
1349+
1350+ if (dev_id == vmar->vmstate_stream) {
1351+ if (vmstate_fd >= 0) {
1352+ int len = nb_sectors * BDRV_SECTOR_SIZE;
1353+ int res = full_write(vmstate_fd, buf, len);
1354+ if (res < 0) {
1355+ error_setg(errp, "write vmstate failed %d", res);
1356+ return -1;
1357+ }
1358+ }
1359+ } else {
1360+ int res = blk_pwrite(target, sector_num * BDRV_SECTOR_SIZE, buf, nb_sectors * BDRV_SECTOR_SIZE, 0);
1361+ if (res < 0) {
1362+ error_setg(errp, "blk_pwrite to %s failed (%d)",
1363+ bdrv_get_device_name(blk_bs(target)), res);
1364+ return -1;
1365+ }
1366+ }
1367+ return 0;
1368+}
1369+
1370+static int restore_extent(VmaReader *vmar, unsigned char *buf,
1371+ int extent_size, int vmstate_fd,
1372+ bool verbose, bool verify, Error **errp)
1373+{
1374+ assert(vmar);
1375+ assert(buf);
1376+
1377+ VmaExtentHeader *ehead = (VmaExtentHeader *)buf;
1378+ int start = VMA_EXTENT_HEADER_SIZE;
1379+ int i;
1380+
1381+ for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
1382+ uint64_t block_info = GUINT64_FROM_BE(ehead->blockinfo[i]);
1383+ uint64_t cluster_num = block_info & 0xffffffff;
1384+ uint8_t dev_id = (block_info >> 32) & 0xff;
1385+ uint16_t mask = block_info >> (32+16);
1386+ int64_t max_sector;
1387+
1388+ if (!dev_id) {
1389+ continue;
1390+ }
1391+
1392+ VmaRestoreState *rstate = &vmar->rstate[dev_id];
1393+ BlockBackend *target = NULL;
1394+
1395+ if (dev_id != vmar->vmstate_stream) {
1396+ target = rstate->target;
1397+ if (!verify && !target) {
1398+ error_setg(errp, "got wrong dev id %d", dev_id);
1399+ return -1;
1400+ }
1401+
1402+ if (vma_reader_get_bitmap(rstate, cluster_num)) {
1403+ error_setg(errp, "found duplicated cluster %zd for stream %s",
1404+ cluster_num, vmar->devinfo[dev_id].devname);
1405+ return -1;
1406+ }
1407+ vma_reader_set_bitmap(rstate, cluster_num, 1);
1408+
1409+ max_sector = vmar->devinfo[dev_id].size/BDRV_SECTOR_SIZE;
1410+ } else {
1411+ max_sector = G_MAXINT64;
1412+ if (cluster_num != vmar->vmstate_clusters) {
1413+ error_setg(errp, "found out of order vmstate data");
1414+ return -1;
1415+ }
1416+ vmar->vmstate_clusters++;
1417+ }
1418+
1419+ vmar->clusters_read++;
1420+
1421+ if (verbose) {
1422+ time_t duration = time(NULL) - vmar->start_time;
1423+ int percent = (vmar->clusters_read*100)/vmar->cluster_count;
1424+ if (percent != vmar->clusters_read_per) {
1425+ printf("progress %d%% (read %zd bytes, duration %zd sec)\n",
1426+ percent, vmar->clusters_read*VMA_CLUSTER_SIZE,
1427+ duration);
1428+ fflush(stdout);
1429+ vmar->clusters_read_per = percent;
1430+ }
1431+ }
1432+
1433+ /* try to write whole clusters to speedup restore */
1434+ if (mask == 0xffff) {
1435+ if ((start + VMA_CLUSTER_SIZE) > extent_size) {
1436+ error_setg(errp, "short vma extent - too many blocks");
1437+ return -1;
1438+ }
1439+ int64_t sector_num = (cluster_num * VMA_CLUSTER_SIZE) /
1440+ BDRV_SECTOR_SIZE;
1441+ int64_t end_sector = sector_num +
1442+ VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE;
1443+
1444+ if (end_sector > max_sector) {
1445+ end_sector = max_sector;
1446+ }
1447+
1448+ if (end_sector <= sector_num) {
1449+ error_setg(errp, "got wrong block address - write beyond end");
1450+ return -1;
1451+ }
1452+
1453+ if (!verify) {
1454+ int nb_sectors = end_sector - sector_num;
1455+ if (restore_write_data(vmar, dev_id, target, vmstate_fd,
1456+ buf + start, sector_num, nb_sectors,
1457+ errp) < 0) {
1458+ return -1;
1459+ }
1460+ }
1461+
1462+ start += VMA_CLUSTER_SIZE;
1463+ } else {
1464+ int j;
1465+ int bit = 1;
1466+
1467+ for (j = 0; j < 16; j++) {
1468+ int64_t sector_num = (cluster_num*VMA_CLUSTER_SIZE +
1469+ j*VMA_BLOCK_SIZE)/BDRV_SECTOR_SIZE;
1470+
1471+ int64_t end_sector = sector_num +
1472+ VMA_BLOCK_SIZE/BDRV_SECTOR_SIZE;
1473+ if (end_sector > max_sector) {
1474+ end_sector = max_sector;
1475+ }
1476+
1477+ if (mask & bit) {
1478+ if ((start + VMA_BLOCK_SIZE) > extent_size) {
1479+ error_setg(errp, "short vma extent - too many blocks");
1480+ return -1;
1481+ }
1482+
1483+ if (end_sector <= sector_num) {
1484+ error_setg(errp, "got wrong block address - "
1485+ "write beyond end");
1486+ return -1;
1487+ }
1488+
1489+ if (!verify) {
1490+ int nb_sectors = end_sector - sector_num;
1491+ if (restore_write_data(vmar, dev_id, target, vmstate_fd,
1492+ buf + start, sector_num,
1493+ nb_sectors, errp) < 0) {
1494+ return -1;
1495+ }
1496+ }
1497+
1498+ start += VMA_BLOCK_SIZE;
1499+
1500+ } else {
1501+
1502+
1503+ if (end_sector > sector_num) {
1504+ /* Todo: use bdrv_co_write_zeroes (but that need to
1505+ * be run inside coroutine?)
1506+ */
1507+ int nb_sectors = end_sector - sector_num;
1508+ int zero_size = BDRV_SECTOR_SIZE*nb_sectors;
1509+ vmar->zero_cluster_data += zero_size;
1510+ if (mask != 0) {
1511+ vmar->partial_zero_cluster_data += zero_size;
1512+ }
1513+
1514+ if (rstate->write_zeroes && !verify) {
1515+ if (restore_write_data(vmar, dev_id, target, vmstate_fd,
1516+ zero_vma_block, sector_num,
1517+ nb_sectors, errp) < 0) {
1518+ return -1;
1519+ }
1520+ }
1521+ }
1522+ }
1523+
1524+ bit = bit << 1;
1525+ }
1526+ }
1527+ }
1528+
1529+ if (start != extent_size) {
1530+ error_setg(errp, "vma extent error - missing blocks");
1531+ return -1;
1532+ }
1533+
1534+ return 0;
1535+}
1536+
1537+static int vma_reader_restore_full(VmaReader *vmar, int vmstate_fd,
1538+ bool verbose, bool verify,
1539+ Error **errp)
1540+{
1541+ assert(vmar);
1542+ assert(vmar->head_data);
1543+
1544+ int ret = 0;
1545+ unsigned char buf[VMA_MAX_EXTENT_SIZE];
1546+ int buf_pos = 0;
1547+ unsigned char md5sum[16];
1548+ VmaHeader *h = (VmaHeader *)vmar->head_data;
1549+
1550+ vmar->start_time = time(NULL);
1551+
1552+ while (1) {
1553+ int bytes = full_read(vmar->fd, buf + buf_pos, sizeof(buf) - buf_pos);
1554+ if (bytes < 0) {
1555+ error_setg(errp, "read failed - %s", g_strerror(errno));
1556+ return -1;
1557+ }
1558+
1559+ buf_pos += bytes;
1560+
1561+ if (!buf_pos) {
1562+ break; /* EOF */
1563+ }
1564+
1565+ if (buf_pos < VMA_EXTENT_HEADER_SIZE) {
1566+ error_setg(errp, "read short extent (%d bytes)", buf_pos);
1567+ return -1;
1568+ }
1569+
1570+ VmaExtentHeader *ehead = (VmaExtentHeader *)buf;
1571+
1572+ /* extract md5sum */
1573+ memcpy(md5sum, ehead->md5sum, sizeof(ehead->md5sum));
1574+ memset(ehead->md5sum, 0, sizeof(ehead->md5sum));
1575+
1576+ g_checksum_reset(vmar->md5csum);
1577+ g_checksum_update(vmar->md5csum, buf, VMA_EXTENT_HEADER_SIZE);
1578+ gsize csize = 16;
1579+ g_checksum_get_digest(vmar->md5csum, ehead->md5sum, &csize);
1580+
1581+ if (memcmp(md5sum, ehead->md5sum, 16) != 0) {
1582+ error_setg(errp, "wrong vma extent header checksum");
1583+ return -1;
1584+ }
1585+
1586+ if (memcmp(h->uuid, ehead->uuid, sizeof(ehead->uuid)) != 0) {
1587+ error_setg(errp, "wrong vma extent uuid");
1588+ return -1;
1589+ }
1590+
1591+ if (ehead->magic != VMA_EXTENT_MAGIC || ehead->reserved1 != 0) {
1592+ error_setg(errp, "wrong vma extent header magic");
1593+ return -1;
1594+ }
1595+
1596+ int block_count = GUINT16_FROM_BE(ehead->block_count);
1597+ int extent_size = VMA_EXTENT_HEADER_SIZE + block_count*VMA_BLOCK_SIZE;
1598+
1599+ if (buf_pos < extent_size) {
1600+ error_setg(errp, "short vma extent (%d < %d)", buf_pos,
1601+ extent_size);
1602+ return -1;
1603+ }
1604+
1605+ if (restore_extent(vmar, buf, extent_size, vmstate_fd, verbose,
1606+ verify, errp) < 0) {
1607+ return -1;
1608+ }
1609+
1610+ if (buf_pos > extent_size) {
1611+ memmove(buf, buf + extent_size, buf_pos - extent_size);
1612+ buf_pos = buf_pos - extent_size;
1613+ } else {
1614+ buf_pos = 0;
1615+ }
1616+ }
1617+
1618+ bdrv_drain_all();
1619+
1620+ int i;
1621+ for (i = 1; i < 256; i++) {
1622+ VmaRestoreState *rstate = &vmar->rstate[i];
1623+ if (!rstate->target) {
1624+ continue;
1625+ }
1626+
1627+ if (blk_flush(rstate->target) < 0) {
1628+ error_setg(errp, "vma blk_flush %s failed",
1629+ vmar->devinfo[i].devname);
1630+ return -1;
1631+ }
1632+
1633+ if (vmar->devinfo[i].size &&
1634+ (strcmp(vmar->devinfo[i].devname, "vmstate") != 0)) {
1635+ assert(rstate->bitmap);
1636+
1637+ int64_t cluster_num, end;
1638+
1639+ end = (vmar->devinfo[i].size + VMA_CLUSTER_SIZE - 1) /
1640+ VMA_CLUSTER_SIZE;
1641+
1642+ for (cluster_num = 0; cluster_num < end; cluster_num++) {
1643+ if (!vma_reader_get_bitmap(rstate, cluster_num)) {
1644+ error_setg(errp, "detected missing cluster %zd "
1645+ "for stream %s", cluster_num,
1646+ vmar->devinfo[i].devname);
1647+ return -1;
1648+ }
1649+ }
1650+ }
1651+ }
1652+
1653+ if (verbose) {
1654+ if (vmar->clusters_read) {
1655+ printf("total bytes read %zd, sparse bytes %zd (%.3g%%)\n",
1656+ vmar->clusters_read*VMA_CLUSTER_SIZE,
1657+ vmar->zero_cluster_data,
1658+ (double)(100.0*vmar->zero_cluster_data)/
1659+ (vmar->clusters_read*VMA_CLUSTER_SIZE));
1660+
1661+ int64_t datasize = vmar->clusters_read*VMA_CLUSTER_SIZE-vmar->zero_cluster_data;
1662+ if (datasize) { // this does not make sense for empty files
1663+ printf("space reduction due to 4K zero blocks %.3g%%\n",
1664+ (double)(100.0*vmar->partial_zero_cluster_data) / datasize);
1665+ }
1666+ } else {
1667+ printf("vma archive contains no image data\n");
1668+ }
1669+ }
1670+ return ret;
1671+}
1672+
1673+int vma_reader_restore(VmaReader *vmar, int vmstate_fd, bool verbose,
1674+ Error **errp)
1675+{
1676+ return vma_reader_restore_full(vmar, vmstate_fd, verbose, false, errp);
1677+}
1678+
1679+int vma_reader_verify(VmaReader *vmar, bool verbose, Error **errp)
1680+{
1681+ guint8 dev_id;
1682+
1683+ for (dev_id = 1; dev_id < 255; dev_id++) {
1684+ if (vma_reader_get_device_info(vmar, dev_id)) {
1685+ allocate_rstate(vmar, dev_id, NULL, false);
1686+ }
1687+ }
1688+
1689+ return vma_reader_restore_full(vmar, -1, verbose, true, errp);
1690+}
1691+
1692diff --git a/vma-writer.c b/vma-writer.c
1693new file mode 100644
1694index 0000000000..9001cbdd2b
1695--- /dev/null
1696+++ b/vma-writer.c
1697@@ -0,0 +1,771 @@
1698+/*
1699+ * VMA: Virtual Machine Archive
1700+ *
1701+ * Copyright (C) 2012 Proxmox Server Solutions
1702+ *
1703+ * Authors:
1704+ * Dietmar Maurer (dietmar@proxmox.com)
1705+ *
1706+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
1707+ * See the COPYING file in the top-level directory.
1708+ *
1709+ */
1710+
1711+#include "qemu/osdep.h"
1712+#include <glib.h>
1713+#include <uuid/uuid.h>
1714+
1715+#include "vma.h"
1716+#include "block/block.h"
1717+#include "monitor/monitor.h"
1718+#include "qemu/main-loop.h"
1719+#include "qemu/coroutine.h"
1720+#include "qemu/cutils.h"
1721+
1722+#define DEBUG_VMA 0
1723+
1724+#define DPRINTF(fmt, ...)\
1725+ do { if (DEBUG_VMA) { printf("vma: " fmt, ## __VA_ARGS__); } } while (0)
1726+
1727+#define WRITE_BUFFERS 5
1728+#define HEADER_CLUSTERS 8
1729+#define HEADERBUF_SIZE (VMA_CLUSTER_SIZE*HEADER_CLUSTERS)
1730+
1731+struct VmaWriter {
1732+ int fd;
1733+ FILE *cmd;
1734+ int status;
1735+ char errmsg[8192];
1736+ uuid_t uuid;
1737+ bool header_written;
1738+ bool closed;
1739+
1740+ /* we always write extents */
1741+ unsigned char *outbuf;
1742+ int outbuf_pos; /* in bytes */
1743+ int outbuf_count; /* in VMA_BLOCKS */
1744+ uint64_t outbuf_block_info[VMA_BLOCKS_PER_EXTENT];
1745+
1746+ unsigned char *headerbuf;
1747+
1748+ GChecksum *md5csum;
1749+ CoMutex flush_lock;
1750+ Coroutine *co_writer;
1751+
1752+ /* drive information */
1753+ VmaStreamInfo stream_info[256];
1754+ guint stream_count;
1755+
1756+ guint8 vmstate_stream;
1757+ uint32_t vmstate_clusters;
1758+
1759+ /* header blob table */
1760+ char *header_blob_table;
1761+ uint32_t header_blob_table_size;
1762+ uint32_t header_blob_table_pos;
1763+
1764+ /* store for config blobs */
1765+ uint32_t config_names[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
1766+ uint32_t config_data[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
1767+ uint32_t config_count;
1768+};
1769+
1770+void vma_writer_set_error(VmaWriter *vmaw, const char *fmt, ...)
1771+{
1772+ va_list ap;
1773+
1774+ if (vmaw->status < 0) {
1775+ return;
1776+ }
1777+
1778+ vmaw->status = -1;
1779+
1780+ va_start(ap, fmt);
1781+ g_vsnprintf(vmaw->errmsg, sizeof(vmaw->errmsg), fmt, ap);
1782+ va_end(ap);
1783+
1784+ DPRINTF("vma_writer_set_error: %s\n", vmaw->errmsg);
1785+}
1786+
1787+static uint32_t allocate_header_blob(VmaWriter *vmaw, const char *data,
1788+ size_t len)
1789+{
1790+ if (len > 65535) {
1791+ return 0;
1792+ }
1793+
1794+ if (!vmaw->header_blob_table ||
1795+ (vmaw->header_blob_table_size <
1796+ (vmaw->header_blob_table_pos + len + 2))) {
1797+ int newsize = vmaw->header_blob_table_size + ((len + 2 + 511)/512)*512;
1798+
1799+ vmaw->header_blob_table = g_realloc(vmaw->header_blob_table, newsize);
1800+ memset(vmaw->header_blob_table + vmaw->header_blob_table_size,
1801+ 0, newsize - vmaw->header_blob_table_size);
1802+ vmaw->header_blob_table_size = newsize;
1803+ }
1804+
1805+ uint32_t cpos = vmaw->header_blob_table_pos;
1806+ vmaw->header_blob_table[cpos] = len & 255;
1807+ vmaw->header_blob_table[cpos+1] = (len >> 8) & 255;
1808+ memcpy(vmaw->header_blob_table + cpos + 2, data, len);
1809+ vmaw->header_blob_table_pos += len + 2;
1810+ return cpos;
1811+}
1812+
1813+static uint32_t allocate_header_string(VmaWriter *vmaw, const char *str)
1814+{
1815+ assert(vmaw);
1816+
1817+ size_t len = strlen(str) + 1;
1818+
1819+ return allocate_header_blob(vmaw, str, len);
1820+}
1821+
1822+int vma_writer_add_config(VmaWriter *vmaw, const char *name, gpointer data,
1823+ gsize len)
1824+{
1825+ assert(vmaw);
1826+ assert(!vmaw->header_written);
1827+ assert(vmaw->config_count < VMA_MAX_CONFIGS);
1828+ assert(name);
1829+ assert(data);
1830+
1831+ gchar *basename = g_path_get_basename(name);
1832+ uint32_t name_ptr = allocate_header_string(vmaw, basename);
1833+ g_free(basename);
1834+
1835+ if (!name_ptr) {
1836+ return -1;
1837+ }
1838+
1839+ uint32_t data_ptr = allocate_header_blob(vmaw, data, len);
1840+ if (!data_ptr) {
1841+ return -1;
1842+ }
1843+
1844+ vmaw->config_names[vmaw->config_count] = name_ptr;
1845+ vmaw->config_data[vmaw->config_count] = data_ptr;
1846+
1847+ vmaw->config_count++;
1848+
1849+ return 0;
1850+}
1851+
1852+int vma_writer_register_stream(VmaWriter *vmaw, const char *devname,
1853+ size_t size)
1854+{
1855+ assert(vmaw);
1856+ assert(devname);
1857+ assert(!vmaw->status);
1858+
1859+ if (vmaw->header_written) {
1860+ vma_writer_set_error(vmaw, "vma_writer_register_stream: header "
1861+ "already written");
1862+ return -1;
1863+ }
1864+
1865+ guint n = vmaw->stream_count + 1;
1866+
1867+ /* we can have dev_ids from 1 to 255 (0 reserved)
1868+ * 255 (-1) reserved for safety
1869+ */
1870+ if (n > 254) {
1871+ vma_writer_set_error(vmaw, "vma_writer_register_stream: "
1872+ "too many drives");
1873+ return -1;
1874+ }
1875+
1876+ if (size <= 0) {
1877+ vma_writer_set_error(vmaw, "vma_writer_register_stream: "
1878+ "got strange size %zd", size);
1879+ return -1;
1880+ }
1881+
1882+ DPRINTF("vma_writer_register_stream %s %zu %d\n", devname, size, n);
1883+
1884+ vmaw->stream_info[n].devname = g_strdup(devname);
1885+ vmaw->stream_info[n].size = size;
1886+
1887+ vmaw->stream_info[n].cluster_count = (size + VMA_CLUSTER_SIZE - 1) /
1888+ VMA_CLUSTER_SIZE;
1889+
1890+ vmaw->stream_count = n;
1891+
1892+ if (strcmp(devname, "vmstate") == 0) {
1893+ vmaw->vmstate_stream = n;
1894+ }
1895+
1896+ return n;
1897+}
1898+
1899+static void vma_co_continue_write(void *opaque)
1900+{
1901+ VmaWriter *vmaw = opaque;
1902+
1903+ DPRINTF("vma_co_continue_write\n");
1904+ qemu_coroutine_enter(vmaw->co_writer);
1905+}
1906+
1907+static ssize_t coroutine_fn
67af0fa4 1908+vma_queue_write(VmaWriter *vmaw, const void *buf, size_t bytes)
95259824 1909+{
67af0fa4 1910+ DPRINTF("vma_queue_write enter %zd\n", bytes);
95259824 1911+
67af0fa4
WB
1912+ assert(vmaw);
1913+ assert(buf);
1914+ assert(bytes <= VMA_MAX_EXTENT_SIZE);
95259824 1915+
67af0fa4
WB
1916+ size_t done = 0;
1917+ ssize_t ret;
95259824
WB
1918+
1919+ assert(vmaw->co_writer == NULL);
1920+
1921+ vmaw->co_writer = qemu_coroutine_self();
1922+
95259824 1923+ while (done < bytes) {
67af0fa4
WB
1924+ aio_set_fd_handler(qemu_get_aio_context(), vmaw->fd, false, NULL, vma_co_continue_write, NULL, vmaw);
1925+ qemu_coroutine_yield();
1926+ aio_set_fd_handler(qemu_get_aio_context(), vmaw->fd, false, NULL, NULL, NULL, NULL);
1927+ if (vmaw->status < 0) {
1928+ DPRINTF("vma_queue_write detected canceled backup\n");
1929+ done = -1;
1930+ break;
1931+ }
95259824
WB
1932+ ret = write(vmaw->fd, buf + done, bytes - done);
1933+ if (ret > 0) {
1934+ done += ret;
67af0fa4 1935+ DPRINTF("vma_queue_write written %zd %zd\n", done, ret);
95259824
WB
1936+ } else if (ret < 0) {
1937+ if (errno == EAGAIN || errno == EWOULDBLOCK) {
67af0fa4
WB
1938+ /* try again */
1939+ } else {
1940+ vma_writer_set_error(vmaw, "vma_queue_write: write error - %s",
95259824
WB
1941+ g_strerror(errno));
1942+ done = -1; /* always return failure for partial writes */
1943+ break;
1944+ }
1945+ } else if (ret == 0) {
1946+ /* should not happen - simply try again */
1947+ }
1948+ }
1949+
95259824
WB
1950+ vmaw->co_writer = NULL;
1951+
67af0fa4 1952+ return (done == bytes) ? bytes : -1;
95259824
WB
1953+}
1954+
1955+VmaWriter *vma_writer_create(const char *filename, uuid_t uuid, Error **errp)
1956+{
1957+ const char *p;
1958+
1959+ assert(sizeof(VmaHeader) == (4096 + 8192));
1960+ assert(G_STRUCT_OFFSET(VmaHeader, config_names) == 2044);
1961+ assert(G_STRUCT_OFFSET(VmaHeader, config_data) == 3068);
1962+ assert(G_STRUCT_OFFSET(VmaHeader, dev_info) == 4096);
1963+ assert(sizeof(VmaExtentHeader) == 512);
1964+
1965+ VmaWriter *vmaw = g_new0(VmaWriter, 1);
1966+ vmaw->fd = -1;
1967+
1968+ vmaw->md5csum = g_checksum_new(G_CHECKSUM_MD5);
1969+ if (!vmaw->md5csum) {
1970+ error_setg(errp, "can't allocate checksum\n");
1971+ goto err;
1972+ }
1973+
1974+ if (strstart(filename, "exec:", &p)) {
1975+ vmaw->cmd = popen(p, "w");
1976+ if (vmaw->cmd == NULL) {
1977+ error_setg(errp, "can't popen command '%s' - %s\n", p,
1978+ g_strerror(errno));
1979+ goto err;
1980+ }
1981+ vmaw->fd = fileno(vmaw->cmd);
1982+
67af0fa4 1983+ /* try to use O_NONBLOCK */
95259824 1984+ fcntl(vmaw->fd, F_SETFL, fcntl(vmaw->fd, F_GETFL)|O_NONBLOCK);
95259824
WB
1985+
1986+ } else {
1987+ struct stat st;
1988+ int oflags;
1989+ const char *tmp_id_str;
1990+
1991+ if ((stat(filename, &st) == 0) && S_ISFIFO(st.st_mode)) {
67af0fa4 1992+ oflags = O_NONBLOCK|O_WRONLY;
95259824
WB
1993+ vmaw->fd = qemu_open(filename, oflags, 0644);
1994+ } else if (strstart(filename, "/dev/fdset/", &tmp_id_str)) {
67af0fa4 1995+ oflags = O_NONBLOCK|O_WRONLY;
95259824
WB
1996+ vmaw->fd = qemu_open(filename, oflags, 0644);
1997+ } else if (strstart(filename, "/dev/fdname/", &tmp_id_str)) {
1998+ vmaw->fd = monitor_get_fd(cur_mon, tmp_id_str, errp);
1999+ if (vmaw->fd < 0) {
2000+ goto err;
2001+ }
67af0fa4 2002+ /* try to use O_NONBLOCK */
95259824 2003+ fcntl(vmaw->fd, F_SETFL, fcntl(vmaw->fd, F_GETFL)|O_NONBLOCK);
95259824
WB
2004+ } else {
2005+ oflags = O_NONBLOCK|O_DIRECT|O_WRONLY|O_CREAT|O_EXCL;
2006+ vmaw->fd = qemu_open(filename, oflags, 0644);
2007+ }
2008+
2009+ if (vmaw->fd < 0) {
2010+ error_setg(errp, "can't open file %s - %s\n", filename,
2011+ g_strerror(errno));
2012+ goto err;
2013+ }
2014+ }
2015+
2016+ /* we use O_DIRECT, so we need to align IO buffers */
67af0fa4
WB
2017+
2018+ vmaw->outbuf = qemu_memalign(512, VMA_MAX_EXTENT_SIZE);
2019+ vmaw->headerbuf = qemu_memalign(512, HEADERBUF_SIZE);
95259824
WB
2020+
2021+ vmaw->outbuf_count = 0;
2022+ vmaw->outbuf_pos = VMA_EXTENT_HEADER_SIZE;
2023+
2024+ vmaw->header_blob_table_pos = 1; /* start at pos 1 */
2025+
95259824 2026+ qemu_co_mutex_init(&vmaw->flush_lock);
95259824
WB
2027+
2028+ uuid_copy(vmaw->uuid, uuid);
2029+
2030+ return vmaw;
2031+
2032+err:
2033+ if (vmaw) {
2034+ if (vmaw->cmd) {
2035+ pclose(vmaw->cmd);
2036+ } else if (vmaw->fd >= 0) {
2037+ close(vmaw->fd);
2038+ }
2039+
2040+ if (vmaw->md5csum) {
2041+ g_checksum_free(vmaw->md5csum);
2042+ }
2043+
2044+ g_free(vmaw);
2045+ }
2046+
2047+ return NULL;
2048+}
2049+
2050+static int coroutine_fn vma_write_header(VmaWriter *vmaw)
2051+{
2052+ assert(vmaw);
67af0fa4 2053+ unsigned char *buf = vmaw->headerbuf;
95259824
WB
2054+ VmaHeader *head = (VmaHeader *)buf;
2055+
2056+ int i;
2057+
2058+ DPRINTF("VMA WRITE HEADER\n");
2059+
2060+ if (vmaw->status < 0) {
2061+ return vmaw->status;
2062+ }
2063+
67af0fa4 2064+ memset(buf, 0, HEADERBUF_SIZE);
95259824
WB
2065+
2066+ head->magic = VMA_MAGIC;
2067+ head->version = GUINT32_TO_BE(1); /* v1 */
2068+ memcpy(head->uuid, vmaw->uuid, 16);
2069+
2070+ time_t ctime = time(NULL);
2071+ head->ctime = GUINT64_TO_BE(ctime);
2072+
95259824
WB
2073+ for (i = 0; i < VMA_MAX_CONFIGS; i++) {
2074+ head->config_names[i] = GUINT32_TO_BE(vmaw->config_names[i]);
2075+ head->config_data[i] = GUINT32_TO_BE(vmaw->config_data[i]);
2076+ }
2077+
2078+ /* 32 bytes per device (12 used currently) = 8192 bytes max */
2079+ for (i = 1; i <= 254; i++) {
2080+ VmaStreamInfo *si = &vmaw->stream_info[i];
2081+ if (si->size) {
2082+ assert(si->devname);
2083+ uint32_t devname_ptr = allocate_header_string(vmaw, si->devname);
2084+ if (!devname_ptr) {
2085+ return -1;
2086+ }
2087+ head->dev_info[i].devname_ptr = GUINT32_TO_BE(devname_ptr);
2088+ head->dev_info[i].size = GUINT64_TO_BE(si->size);
2089+ }
2090+ }
2091+
2092+ uint32_t header_size = sizeof(VmaHeader) + vmaw->header_blob_table_size;
2093+ head->header_size = GUINT32_TO_BE(header_size);
2094+
67af0fa4 2095+ if (header_size > HEADERBUF_SIZE) {
95259824
WB
2096+ return -1; /* just to be sure */
2097+ }
2098+
2099+ uint32_t blob_buffer_offset = sizeof(VmaHeader);
2100+ memcpy(buf + blob_buffer_offset, vmaw->header_blob_table,
2101+ vmaw->header_blob_table_size);
2102+ head->blob_buffer_offset = GUINT32_TO_BE(blob_buffer_offset);
2103+ head->blob_buffer_size = GUINT32_TO_BE(vmaw->header_blob_table_pos);
2104+
2105+ g_checksum_reset(vmaw->md5csum);
2106+ g_checksum_update(vmaw->md5csum, (const guchar *)buf, header_size);
2107+ gsize csize = 16;
2108+ g_checksum_get_digest(vmaw->md5csum, (guint8 *)(head->md5sum), &csize);
2109+
2110+ return vma_queue_write(vmaw, buf, header_size);
2111+}
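
Both the main header above and each extent header (see vma_writer_flush() below) carry an MD5 digest that is computed while the md5sum field itself still holds zeroes: the header buffer is cleared before hashing here, and the extent code explicitly zeroes ehead->md5sum first. A verifier therefore has to blank the field before recomputing. A minimal sketch, assuming the header bytes have already been read into memory; not part of the patch:

static bool vma_header_md5_ok(unsigned char *buf, uint32_t header_size)
{
    VmaHeader *head = (VmaHeader *)buf;
    unsigned char expect[16];
    guint8 digest[16];
    gsize dlen = 16;

    memcpy(expect, head->md5sum, 16);
    memset(head->md5sum, 0, 16);          /* field was zero when the digest was made */

    GChecksum *sum = g_checksum_new(G_CHECKSUM_MD5);
    g_checksum_update(sum, buf, header_size);
    g_checksum_get_digest(sum, digest, &dlen);
    g_checksum_free(sum);

    memcpy(head->md5sum, expect, 16);     /* restore the original bytes */
    return memcmp(digest, expect, 16) == 0;
}
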
2112+
2113+static int coroutine_fn vma_writer_flush(VmaWriter *vmaw)
2114+{
2115+ assert(vmaw);
2116+
2117+ int ret;
2118+ int i;
2119+
2120+ if (vmaw->status < 0) {
2121+ return vmaw->status;
2122+ }
2123+
2124+ if (!vmaw->header_written) {
2125+ vmaw->header_written = true;
2126+ ret = vma_write_header(vmaw);
2127+ if (ret < 0) {
2128+ vma_writer_set_error(vmaw, "vma_writer_flush: write header failed");
2129+ return ret;
2130+ }
2131+ }
2132+
2133+ DPRINTF("VMA WRITE FLUSH %d %d\n", vmaw->outbuf_count, vmaw->outbuf_pos);
2134+
2135+
2136+ VmaExtentHeader *ehead = (VmaExtentHeader *)vmaw->outbuf;
2137+
2138+ ehead->magic = VMA_EXTENT_MAGIC;
2139+ ehead->reserved1 = 0;
2140+
2141+ for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
2142+ ehead->blockinfo[i] = GUINT64_TO_BE(vmaw->outbuf_block_info[i]);
2143+ }
2144+
2145+ guint16 block_count = (vmaw->outbuf_pos - VMA_EXTENT_HEADER_SIZE) /
2146+ VMA_BLOCK_SIZE;
2147+
2148+ ehead->block_count = GUINT16_TO_BE(block_count);
2149+
2150+ memcpy(ehead->uuid, vmaw->uuid, sizeof(ehead->uuid));
2151+ memset(ehead->md5sum, 0, sizeof(ehead->md5sum));
2152+
2153+ g_checksum_reset(vmaw->md5csum);
2154+ g_checksum_update(vmaw->md5csum, vmaw->outbuf, VMA_EXTENT_HEADER_SIZE);
2155+ gsize csize = 16;
2156+ g_checksum_get_digest(vmaw->md5csum, ehead->md5sum, &csize);
2157+
2158+ int bytes = vmaw->outbuf_pos;
2159+ ret = vma_queue_write(vmaw, vmaw->outbuf, bytes);
2160+ if (ret != bytes) {
2161+ vma_writer_set_error(vmaw, "vma_writer_flush: failed write");
2162+ }
2163+
2164+ vmaw->outbuf_count = 0;
2165+ vmaw->outbuf_pos = VMA_EXTENT_HEADER_SIZE;
2166+
2167+ for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
2168+ vmaw->outbuf_block_info[i] = 0;
2169+ }
2170+
2171+ return vmaw->status;
2172+}
2173+
2174+static int vma_count_open_streams(VmaWriter *vmaw)
2175+{
2176+ g_assert(vmaw != NULL);
2177+
2178+ int i;
2179+ int open_drives = 0;
2180+ for (i = 0; i <= 255; i++) {
2181+ if (vmaw->stream_info[i].size && !vmaw->stream_info[i].finished) {
2182+ open_drives++;
2183+ }
2184+ }
2185+
2186+ return open_drives;
2187+}
2188+
67af0fa4
WB
2189+
2190+/**
2191+ * You need to call this if the vma archive does not contain
2192+ * any data stream.
2193+ */
2194+int coroutine_fn
2195+vma_writer_flush_output(VmaWriter *vmaw)
2196+{
2197+ qemu_co_mutex_lock(&vmaw->flush_lock);
2198+ int ret = vma_writer_flush(vmaw);
2199+ qemu_co_mutex_unlock(&vmaw->flush_lock);
2200+ if (ret < 0) {
2201+ vma_writer_set_error(vmaw, "vma_writer_flush_output: flush failed");
2202+ }
2203+ return ret;
2204+}
2205+
95259824
WB
2206+/**
2207+ * All jobs should call this when there is no more data.
2208+ * Returns: number of remaining streams (0 ==> finished)
2209+ */
2210+int coroutine_fn
2211+vma_writer_close_stream(VmaWriter *vmaw, uint8_t dev_id)
2212+{
2213+ g_assert(vmaw != NULL);
2214+
2215+ DPRINTF("vma_writer_close_stream %d\n", dev_id);
2216+ if (!vmaw->stream_info[dev_id].size) {
2217+ vma_writer_set_error(vmaw, "vma_writer_close_stream: "
2218+ "no such stream %d", dev_id);
2219+ return -1;
2220+ }
2221+ if (vmaw->stream_info[dev_id].finished) {
2222+ vma_writer_set_error(vmaw, "vma_writer_close_stream: "
2223+ "stream already closed %d", dev_id);
2224+ return -1;
2225+ }
2226+
2227+ vmaw->stream_info[dev_id].finished = true;
2228+
2229+ int open_drives = vma_count_open_streams(vmaw);
2230+
2231+ if (open_drives <= 0) {
2232+ DPRINTF("vma_writer_close_stream: all drives completed\n");
67af0fa4 2233+ vma_writer_flush_output(vmaw);
95259824
WB
2234+ }
2235+
2236+ return open_drives;
2237+}
2238+
2239+int vma_writer_get_status(VmaWriter *vmaw, VmaStatus *status)
2240+{
2241+ int i;
2242+
2243+ g_assert(vmaw != NULL);
2244+
2245+ if (status) {
2246+ status->status = vmaw->status;
2247+ g_strlcpy(status->errmsg, vmaw->errmsg, sizeof(status->errmsg));
2248+ for (i = 0; i <= 255; i++) {
2249+ status->stream_info[i] = vmaw->stream_info[i];
2250+ }
2251+
2252+ uuid_unparse_lower(vmaw->uuid, status->uuid_str);
2253+ status->closed = vmaw->closed;
2254+ }
2255+
2256+
2257+ return vmaw->status;
2258+}
2259+
2260+static int vma_writer_get_buffer(VmaWriter *vmaw)
2261+{
2262+ int ret = 0;
2263+
2264+ qemu_co_mutex_lock(&vmaw->flush_lock);
2265+
2266+ /* wait until buffer is available */
2267+ while (vmaw->outbuf_count >= (VMA_BLOCKS_PER_EXTENT - 1)) {
2268+ ret = vma_writer_flush(vmaw);
2269+ if (ret < 0) {
2270+ vma_writer_set_error(vmaw, "vma_writer_get_buffer: flush failed");
2271+ break;
2272+ }
2273+ }
2274+
2275+ qemu_co_mutex_unlock(&vmaw->flush_lock);
2276+
2277+ return ret;
2278+}
2279+
2280+
2281+int64_t coroutine_fn
2282+vma_writer_write(VmaWriter *vmaw, uint8_t dev_id, int64_t cluster_num,
2283+ unsigned char *buf, size_t *zero_bytes)
2284+{
2285+ g_assert(vmaw != NULL);
2286+ g_assert(zero_bytes != NULL);
2287+
2288+ *zero_bytes = 0;
2289+
2290+ if (vmaw->status < 0) {
2291+ return vmaw->status;
2292+ }
2293+
2294+ if (!dev_id || !vmaw->stream_info[dev_id].size) {
2295+ vma_writer_set_error(vmaw, "vma_writer_write: "
2296+ "no such stream %d", dev_id);
2297+ return -1;
2298+ }
2299+
2300+ if (vmaw->stream_info[dev_id].finished) {
2301+ vma_writer_set_error(vmaw, "vma_writer_write: "
2302+ "stream already closed %d", dev_id);
2303+ return -1;
2304+ }
2305+
2306+
2307+ if (cluster_num >= (((uint64_t)1)<<32)) {
2308+ vma_writer_set_error(vmaw, "vma_writer_write: "
2309+ "cluster number out of range");
2310+ return -1;
2311+ }
2312+
2313+ if (dev_id == vmaw->vmstate_stream) {
2314+ if (cluster_num != vmaw->vmstate_clusters) {
2315+ vma_writer_set_error(vmaw, "vma_writer_write: "
2316+ "non sequential vmstate write");
2317+ }
2318+ vmaw->vmstate_clusters++;
2319+ } else if (cluster_num >= vmaw->stream_info[dev_id].cluster_count) {
2320+ vma_writer_set_error(vmaw, "vma_writer_write: cluster number too big");
2321+ return -1;
2322+ }
2323+
2324+ /* wait until buffer is available */
2325+ if (vma_writer_get_buffer(vmaw) < 0) {
2326+ vma_writer_set_error(vmaw, "vma_writer_write: "
2327+ "vma_writer_get_buffer failed");
2328+ return -1;
2329+ }
2330+
2331+ DPRINTF("VMA WRITE %d %zd\n", dev_id, cluster_num);
2332+
2333+ uint16_t mask = 0;
2334+
2335+ if (buf) {
2336+ int i;
2337+ int bit = 1;
2338+ for (i = 0; i < 16; i++) {
2339+ unsigned char *vmablock = buf + (i*VMA_BLOCK_SIZE);
2340+ if (!buffer_is_zero(vmablock, VMA_BLOCK_SIZE)) {
2341+ mask |= bit;
2342+ memcpy(vmaw->outbuf + vmaw->outbuf_pos, vmablock,
2343+ VMA_BLOCK_SIZE);
2344+ vmaw->outbuf_pos += VMA_BLOCK_SIZE;
2345+ } else {
2346+ DPRINTF("VMA WRITE %zd ZERO BLOCK %d\n", cluster_num, i);
2347+ vmaw->stream_info[dev_id].zero_bytes += VMA_BLOCK_SIZE;
2348+ *zero_bytes += VMA_BLOCK_SIZE;
2349+ }
2350+
2351+ bit = bit << 1;
2352+ }
2353+ } else {
2354+ DPRINTF("VMA WRITE %zd ZERO CLUSTER\n", cluster_num);
2355+ vmaw->stream_info[dev_id].zero_bytes += VMA_CLUSTER_SIZE;
2356+ *zero_bytes += VMA_CLUSTER_SIZE;
2357+ }
2358+
2359+ uint64_t block_info = ((uint64_t)mask) << (32+16);
2360+ block_info |= ((uint64_t)dev_id) << 32;
2361+ block_info |= (cluster_num & 0xffffffff);
2362+ vmaw->outbuf_block_info[vmaw->outbuf_count] = block_info;
2363+
2364+ DPRINTF("VMA WRITE MASK %zd %zx\n", cluster_num, block_info);
2365+
2366+ vmaw->outbuf_count++;
2367+
2368+ /** NOTE: We always write whole clusters, but we correctly set
2369+ * transferred bytes. So transferred == size when everything
2370+ * went OK.
2371+ */
2372+ size_t transferred = VMA_CLUSTER_SIZE;
2373+
2374+ if (dev_id != vmaw->vmstate_stream) {
2375+ uint64_t last = (cluster_num + 1) * VMA_CLUSTER_SIZE;
2376+ if (last > vmaw->stream_info[dev_id].size) {
2377+ uint64_t diff = last - vmaw->stream_info[dev_id].size;
2378+ if (diff >= VMA_CLUSTER_SIZE) {
2379+ vma_writer_set_error(vmaw, "vma_writer_write: "
2380+ "read after last cluster");
2381+ return -1;
2382+ }
2383+ transferred -= diff;
2384+ }
2385+ }
2386+
2387+ vmaw->stream_info[dev_id].transferred += transferred;
2388+
2389+ return transferred;
2390+}
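
The 64-bit word built per cluster above packs three fields into the extent header: the 16-bit mask of non-zero 4K blocks in bits 48-63, the device id in bits 32-39, and the cluster number in bits 0-31. A small decode sketch of the inverse of that packing, for illustration only:

static inline void vma_decode_blockinfo(uint64_t block_info, uint16_t *mask,
                                        uint8_t *dev_id, uint32_t *cluster_num)
{
    *mask = (block_info >> 48) & 0xffff;    /* which 4K blocks follow in the extent */
    *dev_id = (block_info >> 32) & 0xff;    /* stream/device id, 1..254 */
    *cluster_num = block_info & 0xffffffff; /* 64K cluster index within the stream */
}
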
2391+
67af0fa4
WB
2392+void vma_writer_error_propagate(VmaWriter *vmaw, Error **errp)
2393+{
2394+ if (vmaw->status < 0 && *errp == NULL) {
2395+ error_setg(errp, "%s", vmaw->errmsg);
2396+ }
2397+}
2398+
95259824
WB
2399+int vma_writer_close(VmaWriter *vmaw, Error **errp)
2400+{
2401+ g_assert(vmaw != NULL);
2402+
2403+ int i;
2404+
67af0fa4
WB
2405+ while (vmaw->co_writer) {
2406+ aio_poll(qemu_get_aio_context(), true);
95259824
WB
2407+ }
2408+
67af0fa4
WB
2409+ assert(vmaw->co_writer == NULL);
2410+
95259824
WB
2411+ if (vmaw->cmd) {
2412+ if (pclose(vmaw->cmd) < 0) {
2413+ vma_writer_set_error(vmaw, "vma_writer_close: "
2414+ "pclose failed - %s", g_strerror(errno));
2415+ }
2416+ } else {
2417+ if (close(vmaw->fd) < 0) {
2418+ vma_writer_set_error(vmaw, "vma_writer_close: "
2419+ "close failed - %s", g_strerror(errno));
2420+ }
2421+ }
2422+
2423+ for (i = 0; i <= 255; i++) {
2424+ VmaStreamInfo *si = &vmaw->stream_info[i];
2425+ if (si->size) {
2426+ if (!si->finished) {
2427+ vma_writer_set_error(vmaw, "vma_writer_close: "
2428+ "detected open stream '%s'", si->devname);
2429+ } else if ((si->transferred != si->size) &&
2430+ (i != vmaw->vmstate_stream)) {
2431+ vma_writer_set_error(vmaw, "vma_writer_close: "
2432+ "incomplete stream '%s' (%zd != %zd)",
2433+ si->devname, si->transferred, si->size);
2434+ }
2435+ }
2436+ }
2437+
2438+ for (i = 0; i <= 255; i++) {
2439+ vmaw->stream_info[i].finished = 1; /* mark as closed */
2440+ }
2441+
2442+ vmaw->closed = 1;
2443+
2444+ if (vmaw->status < 0 && *errp == NULL) {
2445+ error_setg(errp, "%s", vmaw->errmsg);
2446+ }
2447+
2448+ return vmaw->status;
2449+}
2450+
2451+void vma_writer_destroy(VmaWriter *vmaw)
2452+{
2453+ assert(vmaw);
2454+
2455+ int i;
2456+
2457+ for (i = 0; i <= 255; i++) {
2458+ if (vmaw->stream_info[i].devname) {
2459+ g_free(vmaw->stream_info[i].devname);
2460+ }
2461+ }
2462+
2463+ if (vmaw->md5csum) {
2464+ g_checksum_free(vmaw->md5csum);
2465+ }
2466+
95259824
WB
2467+ g_free(vmaw);
2468+}
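
Taken together, the intended call sequence for this writer API is: create the writer, attach config blobs, register one stream per drive, push 64K clusters from a coroutine, close each stream, then close and destroy the writer. The vma tool below follows exactly this order in create_archive() and backup_run(); the following is only a condensed sketch with error handling omitted, and the archive name, config name, drive name, buffer and sizes are placeholders:

static void coroutine_fn vma_write_one_drive_sketch(const char *archive,
                                                    gpointer conf, gsize conf_len,
                                                    unsigned char *cluster_buf,
                                                    size_t disk_size)
{
    uuid_t uuid;
    Error *errp = NULL;

    uuid_generate(uuid);
    VmaWriter *vmaw = vma_writer_create(archive, uuid, &errp);
    vma_writer_add_config(vmaw, "qemu-server.conf", conf, conf_len);

    int dev_id = vma_writer_register_stream(vmaw, "drive-scsi0", disk_size);
    int64_t clusters = (disk_size + VMA_CLUSTER_SIZE - 1) / VMA_CLUSTER_SIZE;
    for (int64_t c = 0; c < clusters; c++) {
        size_t zero_bytes = 0;
        /* cluster_buf would be refilled from the source drive for each cluster */
        vma_writer_write(vmaw, dev_id, c, cluster_buf, &zero_bytes);
    }

    vma_writer_close_stream(vmaw, dev_id);
    vma_writer_close(vmaw, &errp);
    vma_writer_destroy(vmaw);
}
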
2469diff --git a/vma.c b/vma.c
2470new file mode 100644
67af0fa4 2471index 0000000000..04915427c8
95259824
WB
2472--- /dev/null
2473+++ b/vma.c
67af0fa4 2474@@ -0,0 +1,757 @@
95259824
WB
2475+/*
2476+ * VMA: Virtual Machine Archive
2477+ *
2478+ * Copyright (C) 2012-2013 Proxmox Server Solutions
2479+ *
2480+ * Authors:
2481+ * Dietmar Maurer (dietmar@proxmox.com)
2482+ *
2483+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
2484+ * See the COPYING file in the top-level directory.
2485+ *
2486+ */
2487+
2488+#include "qemu/osdep.h"
2489+#include <glib.h>
2490+
2491+#include "vma.h"
2492+#include "qemu-common.h"
2493+#include "qemu/error-report.h"
2494+#include "qemu/main-loop.h"
a544966d 2495+#include "qapi/qmp/qstring.h"
95259824 2496+#include "sysemu/char.h" /* qstring_from_str */
67af0fa4 2497+#include "sysemu/block-backend.h"
95259824
WB
2498+
2499+static void help(void)
2500+{
2501+ const char *help_msg =
2502+ "usage: vma command [command options]\n"
2503+ "\n"
2504+ "vma list <filename>\n"
67af0fa4
WB
2505+ "vma config <filename> [-c config]\n"
2506+ "vma create <filename> [-c config] pathname ...\n"
95259824 2507+ "vma extract <filename> [-r <fifo>] <targetdir>\n"
67af0fa4 2508+ "vma verify <filename> [-v]\n"
95259824
WB
2509+ ;
2510+
2511+ printf("%s", help_msg);
2512+ exit(1);
2513+}
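
For example (file names, drive names and paths are illustrative only): an archive with one config file and one drive can be created with "vma create backup.vma -c qemu-server.conf drive-scsi0=disk0.raw", restored through a mapping fifo with "vma extract backup.vma -r /run/restore.fifo /var/tmp/restore", and "vma config backup.vma" prints the embedded qemu-server.conf.
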
2514+
2515+static const char *extract_devname(const char *path, char **devname, int index)
2516+{
2517+ assert(path);
2518+
2519+ const char *sep = strchr(path, '=');
2520+
2521+ if (sep) {
2522+ *devname = g_strndup(path, sep - path);
2523+ path = sep + 1;
2524+ } else {
2525+ if (index >= 0) {
2526+ *devname = g_strdup_printf("disk%d", index);
2527+ } else {
2528+ *devname = NULL;
2529+ }
2530+ }
2531+
2532+ return path;
2533+}
2534+
2535+static void print_content(VmaReader *vmar)
2536+{
2537+ assert(vmar);
2538+
2539+ VmaHeader *head = vma_reader_get_header(vmar);
2540+
2541+ GList *l = vma_reader_get_config_data(vmar);
2542+ while (l && l->data) {
2543+ VmaConfigData *cdata = (VmaConfigData *)l->data;
2544+ l = g_list_next(l);
2545+ printf("CFG: size: %d name: %s\n", cdata->len, cdata->name);
2546+ }
2547+
2548+ int i;
2549+ VmaDeviceInfo *di;
2550+ for (i = 1; i < 255; i++) {
2551+ di = vma_reader_get_device_info(vmar, i);
2552+ if (di) {
2553+ if (strcmp(di->devname, "vmstate") == 0) {
2554+ printf("VMSTATE: dev_id=%d memory: %zd\n", i, di->size);
2555+ } else {
2556+ printf("DEV: dev_id=%d size: %zd devname: %s\n",
2557+ i, di->size, di->devname);
2558+ }
2559+ }
2560+ }
2561+ /* ctime is the last entry we print */
2562+ printf("CTIME: %s", ctime(&head->ctime));
2563+ fflush(stdout);
2564+}
2565+
2566+static int list_content(int argc, char **argv)
2567+{
2568+ int c, ret = 0;
2569+ const char *filename;
2570+
2571+ for (;;) {
2572+ c = getopt(argc, argv, "h");
2573+ if (c == -1) {
2574+ break;
2575+ }
2576+ switch (c) {
2577+ case '?':
2578+ case 'h':
2579+ help();
2580+ break;
2581+ default:
2582+ g_assert_not_reached();
2583+ }
2584+ }
2585+
2586+ /* Get the filename */
2587+ if ((optind + 1) != argc) {
2588+ help();
2589+ }
2590+ filename = argv[optind++];
2591+
2592+ Error *errp = NULL;
2593+ VmaReader *vmar = vma_reader_create(filename, &errp);
2594+
2595+ if (!vmar) {
2596+ g_error("%s", error_get_pretty(errp));
2597+ }
2598+
2599+ print_content(vmar);
2600+
2601+ vma_reader_destroy(vmar);
2602+
2603+ return ret;
2604+}
2605+
2606+typedef struct RestoreMap {
2607+ char *devname;
2608+ char *path;
67af0fa4 2609+ char *format;
95259824
WB
2610+ bool write_zero;
2611+} RestoreMap;
2612+
2613+static int extract_content(int argc, char **argv)
2614+{
2615+ int c, ret = 0;
2616+ int verbose = 0;
2617+ const char *filename;
2618+ const char *dirname;
2619+ const char *readmap = NULL;
2620+
2621+ for (;;) {
2622+ c = getopt(argc, argv, "hvr:");
2623+ if (c == -1) {
2624+ break;
2625+ }
2626+ switch (c) {
2627+ case '?':
2628+ case 'h':
2629+ help();
2630+ break;
2631+ case 'r':
2632+ readmap = optarg;
2633+ break;
2634+ case 'v':
2635+ verbose = 1;
2636+ break;
2637+ default:
2638+ help();
2639+ }
2640+ }
2641+
2642+ /* Get the filename */
2643+ if ((optind + 2) != argc) {
2644+ help();
2645+ }
2646+ filename = argv[optind++];
2647+ dirname = argv[optind++];
2648+
2649+ Error *errp = NULL;
2650+ VmaReader *vmar = vma_reader_create(filename, &errp);
2651+
2652+ if (!vmar) {
2653+ g_error("%s", error_get_pretty(errp));
2654+ }
2655+
2656+ if (mkdir(dirname, 0777) < 0) {
2657+ g_error("unable to create target directory %s - %s",
2658+ dirname, g_strerror(errno));
2659+ }
2660+
2661+ GList *l = vma_reader_get_config_data(vmar);
2662+ while (l && l->data) {
2663+ VmaConfigData *cdata = (VmaConfigData *)l->data;
2664+ l = g_list_next(l);
2665+ char *cfgfn = g_strdup_printf("%s/%s", dirname, cdata->name);
2666+ GError *err = NULL;
2667+ if (!g_file_set_contents(cfgfn, (gchar *)cdata->data, cdata->len,
2668+ &err)) {
2669+ g_error("unable to write file: %s", err->message);
2670+ }
2671+ }
2672+
2673+ GHashTable *devmap = g_hash_table_new(g_str_hash, g_str_equal);
2674+
2675+ if (readmap) {
2676+ print_content(vmar);
2677+
2678+ FILE *map = fopen(readmap, "r");
2679+ if (!map) {
2680+ g_error("unable to open fifo %s - %s", readmap, g_strerror(errno));
2681+ }
2682+
2683+ while (1) {
2684+ char inbuf[8192];
2685+ char *line = fgets(inbuf, sizeof(inbuf), map);
2686+ if (!line || line[0] == '\0' || !strcmp(line, "done\n")) {
2687+ break;
2688+ }
2689+ int len = strlen(line);
2690+ if (line[len - 1] == '\n') {
2691+ line[len - 1] = '\0';
2692+ if (len == 1) {
2693+ break;
2694+ }
2695+ }
2696+
67af0fa4
WB
2697+ char *format = NULL;
2698+ if (strncmp(line, "format=", sizeof("format=")-1) == 0) {
2699+ format = line + sizeof("format=")-1;
2700+ char *colon = strchr(format, ':');
2701+ if (!colon) {
2702+ g_error("read map failed - found only a format ('%s')", inbuf);
2703+ }
2704+ format = g_strndup(format, colon - format);
2705+ line = colon+1;
2706+ }
2707+
95259824
WB
2708+ const char *path;
2709+ bool write_zero;
2710+ if (line[0] == '0' && line[1] == ':') {
67af0fa4 2711+ path = line + 2;
95259824
WB
2712+ write_zero = false;
2713+ } else if (line[0] == '1' && line[1] == ':') {
67af0fa4 2714+ path = line + 2;
95259824
WB
2715+ write_zero = true;
2716+ } else {
2717+ g_error("read map failed - parse error ('%s')", inbuf);
2718+ }
2719+
2720+ char *devname = NULL;
2721+ path = extract_devname(path, &devname, -1);
2722+ if (!devname) {
2723+ g_error("read map failed - no dev name specified ('%s')",
2724+ inbuf);
2725+ }
2726+
2727+ RestoreMap *map = g_new0(RestoreMap, 1);
2728+ map->devname = g_strdup(devname);
2729+ map->path = g_strdup(path);
67af0fa4 2730+ map->format = format;
95259824
WB
2731+ map->write_zero = write_zero;
2732+
2733+ g_hash_table_insert(devmap, map->devname, map);
2734+
2735+ }
2736+ }
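
Each line read from the '-r' fifo above is of the form [format=<fmt>:]<0|1>:<devname>=<path>, where the leading 0 or 1 selects whether zero clusters are written to the target, and the stream is terminated by an empty line or a line reading "done". A producer-side sketch of what a restore driver could feed into that fifo; the fifo path, device names and targets are invented for illustration:

static void feed_restore_map_sketch(void)
{
    /* all paths and device names below are placeholders */
    FILE *out = fopen("/run/vma-restore.fifo", "w");
    if (!out) {
        return;
    }
    fprintf(out, "format=raw:1:drive-scsi0=/dev/vg0/vm-100-disk-0\n");          /* write zero clusters */
    fprintf(out, "format=qcow2:0:drive-virtio1=/images/vm-100-disk-1.qcow2\n"); /* skip zero clusters */
    fprintf(out, "done\n");
    fclose(out);
}
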
2737+
2738+ int i;
2739+ int vmstate_fd = -1;
2740+ guint8 vmstate_stream = 0;
2741+
67af0fa4
WB
2742+ BlockBackend *blk = NULL;
2743+
95259824
WB
2744+ for (i = 1; i < 255; i++) {
2745+ VmaDeviceInfo *di = vma_reader_get_device_info(vmar, i);
2746+ if (di && (strcmp(di->devname, "vmstate") == 0)) {
2747+ vmstate_stream = i;
2748+ char *statefn = g_strdup_printf("%s/vmstate.bin", dirname);
2749+ vmstate_fd = open(statefn, O_WRONLY|O_CREAT|O_EXCL, 0644);
2750+ if (vmstate_fd < 0) {
2751+ g_error("create vmstate file '%s' failed - %s", statefn,
2752+ g_strerror(errno));
2753+ }
2754+ g_free(statefn);
2755+ } else if (di) {
2756+ char *devfn = NULL;
67af0fa4
WB
2757+ const char *format = NULL;
2758+ int flags = BDRV_O_RDWR | BDRV_O_NO_FLUSH;
95259824
WB
2759+ bool write_zero = true;
2760+
2761+ if (readmap) {
2762+ RestoreMap *map;
2763+ map = (RestoreMap *)g_hash_table_lookup(devmap, di->devname);
2764+ if (map == NULL) {
2765+ g_error("no device name mapping for %s", di->devname);
2766+ }
2767+ devfn = map->path;
67af0fa4 2768+ format = map->format;
95259824
WB
2769+ write_zero = map->write_zero;
2770+ } else {
2771+ devfn = g_strdup_printf("%s/tmp-disk-%s.raw",
2772+ dirname, di->devname);
2773+ printf("DEVINFO %s %zd\n", devfn, di->size);
2774+
2775+ bdrv_img_create(devfn, "raw", NULL, NULL, NULL, di->size,
2776+ flags, &errp, 0);
2777+ if (errp) {
2778+ g_error("can't create file %s: %s", devfn,
2779+ error_get_pretty(errp));
2780+ }
2781+
2782+ /* Note: we created an empty file above, so there is no
2783+ * need to write zeroes (so we generate a sparse file)
2784+ */
2785+ write_zero = false;
2786+ }
2787+
67af0fa4
WB
2788+ size_t devlen = strlen(devfn);
2789+ QDict *options = NULL;
2790+ if (format) {
2791+ /* explicit format from commandline */
2792+ options = qdict_new();
2793+ qdict_put(options, "driver", qstring_from_str(format));
2794+ } else if ((devlen > 4 && strcmp(devfn+devlen-4, ".raw") == 0) ||
2795+ strncmp(devfn, "/dev/", 5) == 0)
2796+ {
2797+ /* This part is now deprecated for PVE as well (just as qemu
2798+ * deprecated not specifying an explicit raw format, too.)
2799+ */
2800+ /* explicit raw format */
2801+ options = qdict_new();
2802+ qdict_put(options, "driver", qstring_from_str("raw"));
2803+ }
2804+
2805+
2806+ if (errp || !(blk = blk_new_open(devfn, NULL, options, flags, &errp))) {
95259824
WB
2807+ g_error("can't open file %s - %s", devfn,
2808+ error_get_pretty(errp));
2809+ }
67af0fa4
WB
2810+
2811+ if (vma_reader_register_bs(vmar, i, blk, write_zero, &errp) < 0) {
95259824
WB
2812+ g_error("%s", error_get_pretty(errp));
2813+ }
2814+
2815+ if (!readmap) {
2816+ g_free(devfn);
2817+ }
2818+ }
2819+ }
2820+
2821+ if (vma_reader_restore(vmar, vmstate_fd, verbose, &errp) < 0) {
2822+ g_error("restore failed - %s", error_get_pretty(errp));
2823+ }
2824+
2825+ if (!readmap) {
2826+ for (i = 1; i < 255; i++) {
2827+ VmaDeviceInfo *di = vma_reader_get_device_info(vmar, i);
2828+ if (di && (i != vmstate_stream)) {
2829+ char *tmpfn = g_strdup_printf("%s/tmp-disk-%s.raw",
2830+ dirname, di->devname);
2831+ char *fn = g_strdup_printf("%s/disk-%s.raw",
2832+ dirname, di->devname);
2833+ if (rename(tmpfn, fn) != 0) {
2834+ g_error("rename %s to %s failed - %s",
2835+ tmpfn, fn, g_strerror(errno));
2836+ }
2837+ }
2838+ }
2839+ }
2840+
2841+ vma_reader_destroy(vmar);
2842+
67af0fa4
WB
2843+ blk_unref(blk);
2844+
2845+ bdrv_close_all();
2846+
2847+ return ret;
2848+}
2849+
2850+static int verify_content(int argc, char **argv)
2851+{
2852+ int c, ret = 0;
2853+ int verbose = 0;
2854+ const char *filename;
2855+
2856+ for (;;) {
2857+ c = getopt(argc, argv, "hv");
2858+ if (c == -1) {
2859+ break;
2860+ }
2861+ switch (c) {
2862+ case '?':
2863+ case 'h':
2864+ help();
2865+ break;
2866+ case 'v':
2867+ verbose = 1;
2868+ break;
2869+ default:
2870+ help();
2871+ }
2872+ }
2873+
2874+ /* Get the filename */
2875+ if ((optind + 1) != argc) {
2876+ help();
2877+ }
2878+ filename = argv[optind++];
2879+
2880+ Error *errp = NULL;
2881+ VmaReader *vmar = vma_reader_create(filename, &errp);
2882+
2883+ if (!vmar) {
2884+ g_error("%s", error_get_pretty(errp));
2885+ }
2886+
2887+ if (verbose) {
2888+ print_content(vmar);
2889+ }
2890+
2891+ if (vma_reader_verify(vmar, verbose, &errp) < 0) {
2892+ g_error("verify failed - %s", error_get_pretty(errp));
2893+ }
2894+
2895+ vma_reader_destroy(vmar);
2896+
95259824
WB
2897+ bdrv_close_all();
2898+
2899+ return ret;
2900+}
2901+
2902+typedef struct BackupJob {
67af0fa4 2903+ BlockBackend *target;
95259824
WB
2904+ int64_t len;
2905+ VmaWriter *vmaw;
2906+ uint8_t dev_id;
2907+} BackupJob;
2908+
2909+#define BACKUP_SECTORS_PER_CLUSTER (VMA_CLUSTER_SIZE / BDRV_SECTOR_SIZE)
2910+
67af0fa4
WB
2911+static void coroutine_fn backup_run_empty(void *opaque)
2912+{
2913+ VmaWriter *vmaw = (VmaWriter *)opaque;
2914+
2915+ vma_writer_flush_output(vmaw);
2916+
2917+ Error *err = NULL;
2918+ if (vma_writer_close(vmaw, &err) != 0) {
2919+ g_warning("vma_writer_close failed %s", error_get_pretty(err));
2920+ }
2921+}
2922+
95259824
WB
2923+static void coroutine_fn backup_run(void *opaque)
2924+{
2925+ BackupJob *job = (BackupJob *)opaque;
2926+ struct iovec iov;
2927+ QEMUIOVector qiov;
2928+
2929+ int64_t start, end;
2930+ int ret = 0;
2931+
67af0fa4 2932+ unsigned char *buf = blk_blockalign(job->target, VMA_CLUSTER_SIZE);
95259824
WB
2933+
2934+ start = 0;
2935+ end = DIV_ROUND_UP(job->len / BDRV_SECTOR_SIZE,
2936+ BACKUP_SECTORS_PER_CLUSTER);
2937+
2938+ for (; start < end; start++) {
2939+ iov.iov_base = buf;
2940+ iov.iov_len = VMA_CLUSTER_SIZE;
2941+ qemu_iovec_init_external(&qiov, &iov, 1);
2942+
67af0fa4
WB
2943+ ret = blk_co_preadv(job->target, start * VMA_CLUSTER_SIZE,
2944+ VMA_CLUSTER_SIZE, &qiov, 0);
95259824
WB
2945+ if (ret < 0) {
2946+ vma_writer_set_error(job->vmaw, "read error");
2947+ goto out;
2948+ }
2949+
2950+ size_t zb = 0;
2951+ if (vma_writer_write(job->vmaw, job->dev_id, start, buf, &zb) < 0) {
2952+ vma_writer_set_error(job->vmaw, "backup_run: vma_writer_write failed");
2953+ goto out;
2954+ }
2955+ }
2956+
2957+
2958+out:
2959+ if (vma_writer_close_stream(job->vmaw, job->dev_id) <= 0) {
2960+ Error *err = NULL;
2961+ if (vma_writer_close(job->vmaw, &err) != 0) {
2962+ g_warning("vma_writer_close failed %s", error_get_pretty(err));
2963+ }
2964+ }
2965+}
2966+
2967+static int create_archive(int argc, char **argv)
2968+{
2969+ int i, c;
2970+ int verbose = 0;
2971+ const char *archivename;
2972+ GList *config_files = NULL;
2973+
2974+ for (;;) {
2975+ c = getopt(argc, argv, "hvc:");
2976+ if (c == -1) {
2977+ break;
2978+ }
2979+ switch (c) {
2980+ case '?':
2981+ case 'h':
2982+ help();
2983+ break;
2984+ case 'c':
2985+ config_files = g_list_append(config_files, optarg);
2986+ break;
2987+ case 'v':
2988+ verbose = 1;
2989+ break;
2990+ default:
2991+ g_assert_not_reached();
2992+ }
2993+ }
2994+
2995+
67af0fa4
WB
2996+ /* make sure we have an archive name */
2997+ if ((optind + 1) > argc) {
95259824
WB
2998+ help();
2999+ }
3000+
3001+ archivename = argv[optind++];
3002+
3003+ uuid_t uuid;
3004+ uuid_generate(uuid);
3005+
3006+ Error *local_err = NULL;
3007+ VmaWriter *vmaw = vma_writer_create(archivename, uuid, &local_err);
3008+
3009+ if (vmaw == NULL) {
3010+ g_error("%s", error_get_pretty(local_err));
3011+ }
3012+
3013+ GList *l = config_files;
3014+ while (l && l->data) {
3015+ char *name = l->data;
3016+ char *cdata = NULL;
3017+ gsize clen = 0;
3018+ GError *err = NULL;
3019+ if (!g_file_get_contents(name, &cdata, &clen, &err)) {
3020+ unlink(archivename);
3021+ g_error("Unable to read file: %s", err->message);
3022+ }
3023+
3024+ if (vma_writer_add_config(vmaw, name, cdata, clen) != 0) {
3025+ unlink(archivename);
3026+ g_error("Unable to append config data %s (len = %zd)",
3027+ name, clen);
3028+ }
3029+ l = g_list_next(l);
3030+ }
3031+
67af0fa4 3032+ int devcount = 0;
95259824
WB
3033+ while (optind < argc) {
3034+ const char *path = argv[optind++];
3035+ char *devname = NULL;
67af0fa4 3036+ path = extract_devname(path, &devname, devcount++);
95259824
WB
3037+
3038+ Error *errp = NULL;
67af0fa4 3039+ BlockBackend *target;
95259824 3040+
67af0fa4
WB
3041+ target = blk_new_open(path, NULL, NULL, 0, &errp);
3042+ if (!target) {
95259824
WB
3043+ unlink(archivename);
3044+ g_error("blk_new_open '%s' failed - %s", path, error_get_pretty(errp));
3045+ }
67af0fa4 3046+ int64_t size = blk_getlength(target);
95259824
WB
3047+ int dev_id = vma_writer_register_stream(vmaw, devname, size);
3048+ if (dev_id <= 0) {
3049+ unlink(archivename);
3050+ g_error("vma_writer_register_stream '%s' failed", devname);
3051+ }
3052+
3053+ BackupJob *job = g_new0(BackupJob, 1);
3054+ job->len = size;
67af0fa4 3055+ job->target = target;
95259824
WB
3056+ job->vmaw = vmaw;
3057+ job->dev_id = dev_id;
3058+
3059+ Coroutine *co = qemu_coroutine_create(backup_run, job);
3060+ qemu_coroutine_enter(co);
3061+ }
3062+
3063+ VmaStatus vmastat;
3064+ int percent = 0;
3065+ int last_percent = -1;
3066+
67af0fa4
WB
3067+ if (devcount) {
3068+ while (1) {
3069+ main_loop_wait(false);
3070+ vma_writer_get_status(vmaw, &vmastat);
95259824 3071+
67af0fa4 3072+ if (verbose) {
95259824 3073+
67af0fa4
WB
3074+ uint64_t total = 0;
3075+ uint64_t transferred = 0;
3076+ uint64_t zero_bytes = 0;
95259824 3077+
67af0fa4
WB
3078+ int i;
3079+ for (i = 0; i < 256; i++) {
3080+ if (vmastat.stream_info[i].size) {
3081+ total += vmastat.stream_info[i].size;
3082+ transferred += vmastat.stream_info[i].transferred;
3083+ zero_bytes += vmastat.stream_info[i].zero_bytes;
3084+ }
95259824 3085+ }
67af0fa4
WB
3086+ percent = (transferred*100)/total;
3087+ if (percent != last_percent) {
3088+ fprintf(stderr, "progress %d%% %zd/%zd %zd\n", percent,
3089+ transferred, total, zero_bytes);
3090+ fflush(stderr);
95259824 3091+
67af0fa4
WB
3092+ last_percent = percent;
3093+ }
95259824 3094+ }
95259824 3095+
67af0fa4
WB
3096+ if (vmastat.closed) {
3097+ break;
3098+ }
95259824
WB
3099+ }
3100+ } else {
3101+ Coroutine *co = qemu_coroutine_create(backup_run_empty, vmaw);
3102+ qemu_coroutine_enter(co);
3103+ while (1) {
3104+ main_loop_wait(false);
3105+ vma_writer_get_status(vmaw, &vmastat);
3106+ if (vmastat.closed) {
3107+ break;
3108+ }
3109+ }
3110+ }
3111+
3112+ bdrv_drain_all();
3113+
3114+ vma_writer_get_status(vmaw, &vmastat);
3115+
3116+ if (verbose) {
3117+ for (i = 0; i < 256; i++) {
3118+ VmaStreamInfo *si = &vmastat.stream_info[i];
3119+ if (si->size) {
3120+ fprintf(stderr, "image %s: size=%zd zeros=%zd saved=%zd\n",
3121+ si->devname, si->size, si->zero_bytes,
3122+ si->size - si->zero_bytes);
3123+ }
3124+ }
3125+ }
3126+
3127+ if (vmastat.status < 0) {
3128+ unlink(archivename);
3129+ g_error("creating vma archive failed");
3130+ }
3131+
3132+ return 0;
3133+}
3134+
67af0fa4
WB
3135+static int dump_config(int argc, char **argv)
3136+{
3137+ int c, ret = 0;
3138+ const char *filename;
3139+ const char *config_name = "qemu-server.conf";
3140+
3141+ for (;;) {
3142+ c = getopt(argc, argv, "hc:");
3143+ if (c == -1) {
3144+ break;
3145+ }
3146+ switch (c) {
3147+ case '?':
3148+ case 'h':
3149+ help();
3150+ break;
3151+ case 'c':
3152+ config_name = optarg;
3153+ break;
3154+ default:
3155+ help();
3156+ }
3157+ }
3158+
3159+ /* Get the filename */
3160+ if ((optind + 1) != argc) {
3161+ help();
3162+ }
3163+ filename = argv[optind++];
3164+
3165+ Error *errp = NULL;
3166+ VmaReader *vmar = vma_reader_create(filename, &errp);
3167+
3168+ if (!vmar) {
3169+ g_error("%s", error_get_pretty(errp));
3170+ }
3171+
3172+ int found = 0;
3173+ GList *l = vma_reader_get_config_data(vmar);
3174+ while (l && l->data) {
3175+ VmaConfigData *cdata = (VmaConfigData *)l->data;
3176+ l = g_list_next(l);
3177+ if (strcmp(cdata->name, config_name) == 0) {
3178+ found = 1;
3179+ fwrite(cdata->data, cdata->len, 1, stdout);
3180+ break;
3181+ }
3182+ }
3183+
3184+ vma_reader_destroy(vmar);
3185+
3186+ bdrv_close_all();
3187+
3188+ if (!found) {
3189+ fprintf(stderr, "unable to find configuration data '%s'\n", config_name);
3190+ return -1;
3191+ }
3192+
3193+ return ret;
3194+}
3195+
95259824
WB
3196+int main(int argc, char **argv)
3197+{
3198+ const char *cmdname;
3199+ Error *main_loop_err = NULL;
3200+
3201+ error_set_progname(argv[0]);
3202+
3203+ if (qemu_init_main_loop(&main_loop_err)) {
3204+ g_error("%s", error_get_pretty(main_loop_err));
3205+ }
3206+
3207+ bdrv_init();
3208+
3209+ if (argc < 2) {
3210+ help();
3211+ }
3212+
3213+ cmdname = argv[1];
3214+ argc--; argv++;
3215+
3216+
3217+ if (!strcmp(cmdname, "list")) {
3218+ return list_content(argc, argv);
3219+ } else if (!strcmp(cmdname, "create")) {
3220+ return create_archive(argc, argv);
3221+ } else if (!strcmp(cmdname, "extract")) {
3222+ return extract_content(argc, argv);
67af0fa4
WB
3223+ } else if (!strcmp(cmdname, "verify")) {
3224+ return verify_content(argc, argv);
3225+ } else if (!strcmp(cmdname, "config")) {
3226+ return dump_config(argc, argv);
95259824
WB
3227+ }
3228+
3229+ help();
3230+ return 0;
3231+}
3232diff --git a/vma.h b/vma.h
3233new file mode 100644
67af0fa4 3234index 0000000000..fa6f4df7e7
95259824
WB
3235--- /dev/null
3236+++ b/vma.h
67af0fa4 3237@@ -0,0 +1,149 @@
95259824
WB
3238+/*
3239+ * VMA: Virtual Machine Archive
3240+ *
3241+ * Copyright (C) Proxmox Server Solutions
3242+ *
3243+ * Authors:
3244+ * Dietmar Maurer (dietmar@proxmox.com)
3245+ *
3246+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
3247+ * See the COPYING file in the top-level directory.
3248+ *
3249+ */
3250+
3251+#ifndef BACKUP_VMA_H
3252+#define BACKUP_VMA_H
3253+
3254+#include <uuid/uuid.h>
3255+#include "qapi/error.h"
3256+#include "block/block.h"
3257+
3258+#define VMA_BLOCK_BITS 12
3259+#define VMA_BLOCK_SIZE (1<<VMA_BLOCK_BITS)
3260+#define VMA_CLUSTER_BITS (VMA_BLOCK_BITS+4)
3261+#define VMA_CLUSTER_SIZE (1<<VMA_CLUSTER_BITS)
3262+
3263+#if VMA_CLUSTER_SIZE != 65536
3264+#error unexpected cluster size
3265+#endif
3266+
3267+#define VMA_EXTENT_HEADER_SIZE 512
3268+#define VMA_BLOCKS_PER_EXTENT 59
3269+#define VMA_MAX_CONFIGS 256
3270+
3271+#define VMA_MAX_EXTENT_SIZE \
3272+ (VMA_EXTENT_HEADER_SIZE+VMA_CLUSTER_SIZE*VMA_BLOCKS_PER_EXTENT)
3273+#if VMA_MAX_EXTENT_SIZE != 3867136
3274+#error unexpected VMA_EXTENT_SIZE
3275+#endif
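
These sizes are self-consistent: a block is 1<<12 = 4096 bytes, a cluster is 16 blocks = 65536 bytes, and a full extent is the 512-byte extent header plus up to 59 clusters, i.e. 512 + 59 * 65536 = 3867136 bytes, which is exactly the value the #error guard above pins down.
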
3276+
3277+/* File Format Definitions */
3278+
3279+#define VMA_MAGIC (GUINT32_TO_BE(('V'<<24)|('M'<<16)|('A'<<8)|0x00))
3280+#define VMA_EXTENT_MAGIC (GUINT32_TO_BE(('V'<<24)|('M'<<16)|('A'<<8)|'E'))
3281+
3282+typedef struct VmaDeviceInfoHeader {
3283+ uint32_t devname_ptr; /* offset into blob_buffer table */
3284+ uint32_t reserved0;
3285+ uint64_t size; /* device size in bytes */
3286+ uint64_t reserved1;
3287+ uint64_t reserved2;
3288+} VmaDeviceInfoHeader;
3289+
3290+typedef struct VmaHeader {
3291+ uint32_t magic;
3292+ uint32_t version;
3293+ unsigned char uuid[16];
3294+ int64_t ctime;
3295+ unsigned char md5sum[16];
3296+
3297+ uint32_t blob_buffer_offset;
3298+ uint32_t blob_buffer_size;
3299+ uint32_t header_size;
3300+
3301+ unsigned char reserved[1984];
3302+
3303+ uint32_t config_names[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
3304+ uint32_t config_data[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
3305+
3306+ uint32_t reserved1;
3307+
3308+ VmaDeviceInfoHeader dev_info[256];
3309+} VmaHeader;
3310+
3311+typedef struct VmaExtentHeader {
3312+ uint32_t magic;
3313+ uint16_t reserved1;
3314+ uint16_t block_count;
3315+ unsigned char uuid[16];
3316+ unsigned char md5sum[16];
3317+ uint64_t blockinfo[VMA_BLOCKS_PER_EXTENT];
3318+} VmaExtentHeader;
3319+
3320+/* functions/definitions to read/write vma files */
3321+
3322+typedef struct VmaReader VmaReader;
3323+
3324+typedef struct VmaWriter VmaWriter;
3325+
3326+typedef struct VmaConfigData {
3327+ const char *name;
3328+ const void *data;
3329+ uint32_t len;
3330+} VmaConfigData;
3331+
3332+typedef struct VmaStreamInfo {
3333+ uint64_t size;
3334+ uint64_t cluster_count;
3335+ uint64_t transferred;
3336+ uint64_t zero_bytes;
3337+ int finished;
3338+ char *devname;
3339+} VmaStreamInfo;
3340+
3341+typedef struct VmaStatus {
3342+ int status;
3343+ bool closed;
3344+ char errmsg[8192];
3345+ char uuid_str[37];
3346+ VmaStreamInfo stream_info[256];
3347+} VmaStatus;
3348+
3349+typedef struct VmaDeviceInfo {
3350+ uint64_t size; /* device size in bytes */
3351+ const char *devname;
3352+} VmaDeviceInfo;
3353+
3354+VmaWriter *vma_writer_create(const char *filename, uuid_t uuid, Error **errp);
3355+int vma_writer_close(VmaWriter *vmaw, Error **errp);
67af0fa4 3356+void vma_writer_error_propagate(VmaWriter *vmaw, Error **errp);
95259824
WB
3357+void vma_writer_destroy(VmaWriter *vmaw);
3358+int vma_writer_add_config(VmaWriter *vmaw, const char *name, gpointer data,
3359+ size_t len);
3360+int vma_writer_register_stream(VmaWriter *vmaw, const char *devname,
3361+ size_t size);
3362+
3363+int64_t coroutine_fn vma_writer_write(VmaWriter *vmaw, uint8_t dev_id,
3364+ int64_t cluster_num, unsigned char *buf,
3365+ size_t *zero_bytes);
3366+
3367+int coroutine_fn vma_writer_close_stream(VmaWriter *vmaw, uint8_t dev_id);
67af0fa4 3368+int coroutine_fn vma_writer_flush_output(VmaWriter *vmaw);
95259824
WB
3369+
3370+int vma_writer_get_status(VmaWriter *vmaw, VmaStatus *status);
3371+void vma_writer_set_error(VmaWriter *vmaw, const char *fmt, ...);
3372+
3373+
3374+VmaReader *vma_reader_create(const char *filename, Error **errp);
3375+void vma_reader_destroy(VmaReader *vmar);
3376+VmaHeader *vma_reader_get_header(VmaReader *vmar);
3377+GList *vma_reader_get_config_data(VmaReader *vmar);
3378+VmaDeviceInfo *vma_reader_get_device_info(VmaReader *vmar, guint8 dev_id);
3379+int vma_reader_register_bs(VmaReader *vmar, guint8 dev_id,
67af0fa4 3380+ BlockBackend *target, bool write_zeroes,
95259824
WB
3381+ Error **errp);
3382+int vma_reader_restore(VmaReader *vmar, int vmstate_fd, bool verbose,
3383+ Error **errp);
67af0fa4 3384+int vma_reader_verify(VmaReader *vmar, bool verbose, Error **errp);
95259824
WB
3385+
3386+#endif /* BACKUP_VMA_H */
3387--
45169293 33882.11.0
95259824 3389