1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Dietmar Maurer <dietmar@proxmox.com>
3 Date: Mon, 6 Apr 2020 12:16:57 +0200
4 Subject: [PATCH] PVE-Backup: add vma backup format code
5
6 ---
7 block/meson.build | 2 +
8 meson.build | 5 +
9 vma-reader.c | 857 ++++++++++++++++++++++++++++++++++++++++++++++
10 vma-writer.c | 790 ++++++++++++++++++++++++++++++++++++++++++
11 vma.c | 839 +++++++++++++++++++++++++++++++++++++++++++++
12 vma.h | 150 ++++++++
13 6 files changed, 2643 insertions(+)
14 create mode 100644 vma-reader.c
15 create mode 100644 vma-writer.c
16 create mode 100644 vma.c
17 create mode 100644 vma.h
18
19 diff --git a/block/meson.build b/block/meson.build
20 index cf9b278fa2..d0a8397edf 100644
21 --- a/block/meson.build
22 +++ b/block/meson.build
23 @@ -43,6 +43,8 @@ block_ss.add(files(
24 'zeroinit.c',
25 ), zstd, zlib, gnutls)
26
27 +block_ss.add(files('../vma-writer.c'), libuuid)
28 +
29 softmmu_ss.add(when: 'CONFIG_TCG', if_true: files('blkreplay.c'))
30
31 block_ss.add(when: 'CONFIG_QCOW1', if_true: files('qcow.c'))
32 diff --git a/meson.build b/meson.build
33 index c6f4b0cf5e..7db08406bd 100644
34 --- a/meson.build
35 +++ b/meson.build
36 @@ -945,6 +945,8 @@ keyutils = dependency('libkeyutils', required: false,
37
38 has_gettid = cc.has_function('gettid')
39
40 +libuuid = cc.find_library('uuid', required: true)
41 +
42 # Malloc tests
43
44 malloc = []
45 @@ -2344,6 +2346,9 @@ if have_tools
46 qemu_nbd = executable('qemu-nbd', files('qemu-nbd.c'),
47 dependencies: [blockdev, qemuutil, gnutls], install: true)
48
49 + vma = executable('vma', files('vma.c', 'vma-reader.c'),
50 + dependencies: [authz, block, crypto, io, qom], install: true)
51 +
52 subdir('storage-daemon')
53 subdir('contrib/rdmacm-mux')
54 subdir('contrib/elf2dmp')
55 diff --git a/vma-reader.c b/vma-reader.c
56 new file mode 100644
57 index 0000000000..2b1d1cdab3
58 --- /dev/null
59 +++ b/vma-reader.c
60 @@ -0,0 +1,857 @@
61 +/*
62 + * VMA: Virtual Machine Archive
63 + *
64 + * Copyright (C) 2012 Proxmox Server Solutions
65 + *
66 + * Authors:
67 + * Dietmar Maurer (dietmar@proxmox.com)
68 + *
69 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
70 + * See the COPYING file in the top-level directory.
71 + *
72 + */
73 +
74 +#include "qemu/osdep.h"
75 +#include <glib.h>
76 +#include <uuid/uuid.h>
77 +
78 +#include "qemu-common.h"
79 +#include "qemu/timer.h"
80 +#include "qemu/ratelimit.h"
81 +#include "vma.h"
82 +#include "block/block.h"
83 +#include "sysemu/block-backend.h"
84 +
85 +static unsigned char zero_vma_block[VMA_BLOCK_SIZE];
86 +
87 +typedef struct VmaRestoreState {
88 + BlockBackend *target;
89 + bool write_zeroes;
90 + unsigned long *bitmap;
91 + int bitmap_size;
92 +} VmaRestoreState;
93 +
94 +struct VmaReader {
95 + int fd;
96 + GChecksum *md5csum;
97 + GHashTable *blob_hash;
98 + unsigned char *head_data;
99 + VmaDeviceInfo devinfo[256];
100 + VmaRestoreState rstate[256];
101 + GList *cdata_list;
102 + guint8 vmstate_stream;
103 + uint32_t vmstate_clusters;
104 + /* to show restore percentage if run with -v */
105 + time_t start_time;
106 + int64_t cluster_count;
107 + int64_t clusters_read;
108 + int64_t zero_cluster_data;
109 + int64_t partial_zero_cluster_data;
110 + int clusters_read_per;
111 +};
112 +
113 +static guint
114 +g_int32_hash(gconstpointer v)
115 +{
116 + return *(const uint32_t *)v;
117 +}
118 +
119 +static gboolean
120 +g_int32_equal(gconstpointer v1, gconstpointer v2)
121 +{
122 + return *((const uint32_t *)v1) == *((const uint32_t *)v2);
123 +}
124 +
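+/*
+ * Restore bitmap helpers: one bit per cluster of a stream, set once the
+ * cluster has been restored; used to detect duplicate and missing clusters.
+ */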
125 +static int vma_reader_get_bitmap(VmaRestoreState *rstate, int64_t cluster_num)
126 +{
127 + assert(rstate);
128 + assert(rstate->bitmap);
129 +
130 + unsigned long val, idx, bit;
131 +
132 + idx = cluster_num / BITS_PER_LONG;
133 +
134 + assert(rstate->bitmap_size > idx);
135 +
136 + bit = cluster_num % BITS_PER_LONG;
137 + val = rstate->bitmap[idx];
138 +
139 + return !!(val & (1UL << bit));
140 +}
141 +
142 +static void vma_reader_set_bitmap(VmaRestoreState *rstate, int64_t cluster_num,
143 + int dirty)
144 +{
145 + assert(rstate);
146 + assert(rstate->bitmap);
147 +
148 + unsigned long val, idx, bit;
149 +
150 + idx = cluster_num / BITS_PER_LONG;
151 +
152 + assert(rstate->bitmap_size > idx);
153 +
154 + bit = cluster_num % BITS_PER_LONG;
155 + val = rstate->bitmap[idx];
156 + if (dirty) {
157 + if (!(val & (1UL << bit))) {
158 + val |= 1UL << bit;
159 + }
160 + } else {
161 + if (val & (1UL << bit)) {
162 + val &= ~(1UL << bit);
163 + }
164 + }
165 + rstate->bitmap[idx] = val;
166 +}
167 +
168 +typedef struct VmaBlob {
169 + uint32_t start;
170 + uint32_t len;
171 + void *data;
172 +} VmaBlob;
173 +
174 +static const VmaBlob *get_header_blob(VmaReader *vmar, uint32_t pos)
175 +{
176 + assert(vmar);
177 + assert(vmar->blob_hash);
178 +
179 + return g_hash_table_lookup(vmar->blob_hash, &pos);
180 +}
181 +
182 +static const char *get_header_str(VmaReader *vmar, uint32_t pos)
183 +{
184 + const VmaBlob *blob = get_header_blob(vmar, pos);
185 + if (!blob) {
186 + return NULL;
187 + }
188 + const char *res = (char *)blob->data;
189 + if (res[blob->len-1] != '\0') {
190 + return NULL;
191 + }
192 + return res;
193 +}
194 +
195 +static ssize_t
196 +safe_read(int fd, unsigned char *buf, size_t count)
197 +{
198 + ssize_t n;
199 +
200 + do {
201 + n = read(fd, buf, count);
202 + } while (n < 0 && errno == EINTR);
203 +
204 + return n;
205 +}
206 +
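+/* read exactly len bytes; returns a short count only on EOF, -1 on error */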
207 +static ssize_t
208 +full_read(int fd, unsigned char *buf, size_t len)
209 +{
210 + ssize_t n;
211 + size_t total;
212 +
213 + total = 0;
214 +
215 + while (len > 0) {
216 + n = safe_read(fd, buf, len);
217 +
218 + if (n == 0) {
219 + return total;
220 + }
221 +
222 + if (n <= 0) {
223 + break;
224 + }
225 +
226 + buf += n;
227 + total += n;
228 + len -= n;
229 + }
230 +
231 + if (len) {
232 + return -1;
233 + }
234 +
235 + return total;
236 +}
237 +
238 +void vma_reader_destroy(VmaReader *vmar)
239 +{
240 + assert(vmar);
241 +
242 + if (vmar->fd >= 0) {
243 + close(vmar->fd);
244 + }
245 +
246 + if (vmar->cdata_list) {
247 + g_list_free(vmar->cdata_list);
248 + }
249 +
250 + int i;
251 + for (i = 1; i < 256; i++) {
252 + if (vmar->rstate[i].bitmap) {
253 + g_free(vmar->rstate[i].bitmap);
254 + }
255 + }
256 +
257 + if (vmar->md5csum) {
258 + g_checksum_free(vmar->md5csum);
259 + }
260 +
261 + if (vmar->blob_hash) {
262 + g_hash_table_destroy(vmar->blob_hash);
263 + }
264 +
265 + if (vmar->head_data) {
266 + g_free(vmar->head_data);
267 + }
268 +
269 + g_free(vmar);
270 +
271 +};
272 +
273 +static int vma_reader_read_head(VmaReader *vmar, Error **errp)
274 +{
275 + assert(vmar);
276 + assert(errp);
277 + assert(*errp == NULL);
278 +
279 + unsigned char md5sum[16];
280 + int i;
281 + int ret = 0;
282 +
283 + vmar->head_data = g_malloc(sizeof(VmaHeader));
284 +
285 + if (full_read(vmar->fd, vmar->head_data, sizeof(VmaHeader)) !=
286 + sizeof(VmaHeader)) {
287 + error_setg(errp, "can't read vma header - %s",
288 + errno ? g_strerror(errno) : "got EOF");
289 + return -1;
290 + }
291 +
292 + VmaHeader *h = (VmaHeader *)vmar->head_data;
293 +
294 + if (h->magic != VMA_MAGIC) {
295 + error_setg(errp, "not a vma file - wrong magic number");
296 + return -1;
297 + }
298 +
299 + uint32_t header_size = GUINT32_FROM_BE(h->header_size);
300 + int need = header_size - sizeof(VmaHeader);
301 + if (need <= 0) {
302 + error_setg(errp, "wrong vma header size %d", header_size);
303 + return -1;
304 + }
305 +
306 + vmar->head_data = g_realloc(vmar->head_data, header_size);
307 + h = (VmaHeader *)vmar->head_data;
308 +
309 + if (full_read(vmar->fd, vmar->head_data + sizeof(VmaHeader), need) !=
310 + need) {
311 + error_setg(errp, "can't read vma header data - %s",
312 + errno ? g_strerror(errno) : "got EOF");
313 + return -1;
314 + }
315 +
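+    /* the header checksum is computed with the md5sum field zeroed: save the
+     * stored digest, recompute over the zeroed header, then compare */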
316 + memcpy(md5sum, h->md5sum, 16);
317 + memset(h->md5sum, 0, 16);
318 +
319 + g_checksum_reset(vmar->md5csum);
320 + g_checksum_update(vmar->md5csum, vmar->head_data, header_size);
321 + gsize csize = 16;
322 + g_checksum_get_digest(vmar->md5csum, (guint8 *)(h->md5sum), &csize);
323 +
324 + if (memcmp(md5sum, h->md5sum, 16) != 0) {
325 +        error_setg(errp, "wrong vma header checksum");
326 + return -1;
327 + }
328 +
329 + /* we can modify header data after checksum verify */
330 + h->header_size = header_size;
331 +
332 + h->version = GUINT32_FROM_BE(h->version);
333 + if (h->version != 1) {
334 + error_setg(errp, "wrong vma version %d", h->version);
335 + return -1;
336 + }
337 +
338 + h->ctime = GUINT64_FROM_BE(h->ctime);
339 + h->blob_buffer_offset = GUINT32_FROM_BE(h->blob_buffer_offset);
340 + h->blob_buffer_size = GUINT32_FROM_BE(h->blob_buffer_size);
341 +
342 + uint32_t bstart = h->blob_buffer_offset + 1;
343 + uint32_t bend = h->blob_buffer_offset + h->blob_buffer_size;
344 +
345 + if (bstart <= sizeof(VmaHeader)) {
346 + error_setg(errp, "wrong vma blob buffer offset %d",
347 + h->blob_buffer_offset);
348 + return -1;
349 + }
350 +
351 + if (bend > header_size) {
352 + error_setg(errp, "wrong vma blob buffer size %d/%d",
353 + h->blob_buffer_offset, h->blob_buffer_size);
354 + return -1;
355 + }
356 +
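+    /* parse the blob buffer: a sequence of entries, each prefixed with a
+     * 16-bit little-endian length; offsets into this buffer are used as
+     * devname/config pointers elsewhere in the header */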
357 + while ((bstart + 2) <= bend) {
358 + uint32_t size = vmar->head_data[bstart] +
359 + (vmar->head_data[bstart+1] << 8);
360 + if ((bstart + size + 2) <= bend) {
361 + VmaBlob *blob = g_new0(VmaBlob, 1);
362 + blob->start = bstart - h->blob_buffer_offset;
363 + blob->len = size;
364 + blob->data = vmar->head_data + bstart + 2;
365 + g_hash_table_insert(vmar->blob_hash, &blob->start, blob);
366 + }
367 + bstart += size + 2;
368 + }
369 +
370 +
371 + int count = 0;
372 + for (i = 1; i < 256; i++) {
373 + VmaDeviceInfoHeader *dih = &h->dev_info[i];
374 + uint32_t devname_ptr = GUINT32_FROM_BE(dih->devname_ptr);
375 + uint64_t size = GUINT64_FROM_BE(dih->size);
376 + const char *devname = get_header_str(vmar, devname_ptr);
377 +
378 + if (size && devname) {
379 + count++;
380 + vmar->devinfo[i].size = size;
381 + vmar->devinfo[i].devname = devname;
382 +
383 + if (strcmp(devname, "vmstate") == 0) {
384 + vmar->vmstate_stream = i;
385 + }
386 + }
387 + }
388 +
389 + for (i = 0; i < VMA_MAX_CONFIGS; i++) {
390 + uint32_t name_ptr = GUINT32_FROM_BE(h->config_names[i]);
391 + uint32_t data_ptr = GUINT32_FROM_BE(h->config_data[i]);
392 +
393 + if (!(name_ptr && data_ptr)) {
394 + continue;
395 + }
396 + const char *name = get_header_str(vmar, name_ptr);
397 + const VmaBlob *blob = get_header_blob(vmar, data_ptr);
398 +
399 + if (!(name && blob)) {
400 + error_setg(errp, "vma contains invalid data pointers");
401 + return -1;
402 + }
403 +
404 + VmaConfigData *cdata = g_new0(VmaConfigData, 1);
405 + cdata->name = name;
406 + cdata->data = blob->data;
407 + cdata->len = blob->len;
408 +
409 + vmar->cdata_list = g_list_append(vmar->cdata_list, cdata);
410 + }
411 +
412 + return ret;
413 +};
414 +
415 +VmaReader *vma_reader_create(const char *filename, Error **errp)
416 +{
417 + assert(filename);
418 + assert(errp);
419 +
420 + VmaReader *vmar = g_new0(VmaReader, 1);
421 +
422 + if (strcmp(filename, "-") == 0) {
423 + vmar->fd = dup(0);
424 + } else {
425 + vmar->fd = open(filename, O_RDONLY);
426 + }
427 +
428 + if (vmar->fd < 0) {
429 + error_setg(errp, "can't open file %s - %s\n", filename,
430 + g_strerror(errno));
431 + goto err;
432 + }
433 +
434 + vmar->md5csum = g_checksum_new(G_CHECKSUM_MD5);
435 + if (!vmar->md5csum) {
436 +        error_setg(errp, "can't allocate checksum\n");
437 + goto err;
438 + }
439 +
440 + vmar->blob_hash = g_hash_table_new_full(g_int32_hash, g_int32_equal,
441 + NULL, g_free);
442 +
443 + if (vma_reader_read_head(vmar, errp) < 0) {
444 + goto err;
445 + }
446 +
447 + return vmar;
448 +
449 +err:
450 + if (vmar) {
451 + vma_reader_destroy(vmar);
452 + }
453 +
454 + return NULL;
455 +}
456 +
457 +VmaHeader *vma_reader_get_header(VmaReader *vmar)
458 +{
459 + assert(vmar);
460 + assert(vmar->head_data);
461 +
462 + return (VmaHeader *)(vmar->head_data);
463 +}
464 +
465 +GList *vma_reader_get_config_data(VmaReader *vmar)
466 +{
467 + assert(vmar);
468 + assert(vmar->head_data);
469 +
470 + return vmar->cdata_list;
471 +}
472 +
473 +VmaDeviceInfo *vma_reader_get_device_info(VmaReader *vmar, guint8 dev_id)
474 +{
475 + assert(vmar);
476 + assert(dev_id);
477 +
478 + if (vmar->devinfo[dev_id].size && vmar->devinfo[dev_id].devname) {
479 + return &vmar->devinfo[dev_id];
480 + }
481 +
482 + return NULL;
483 +}
484 +
485 +static void allocate_rstate(VmaReader *vmar, guint8 dev_id,
486 + BlockBackend *target, bool write_zeroes)
487 +{
488 + assert(vmar);
489 + assert(dev_id);
490 +
491 + vmar->rstate[dev_id].target = target;
492 + vmar->rstate[dev_id].write_zeroes = write_zeroes;
493 +
494 + int64_t size = vmar->devinfo[dev_id].size;
495 +
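+    /* number of unsigned longs needed for one bit per cluster, rounded up */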
496 + int64_t bitmap_size = (size/BDRV_SECTOR_SIZE) +
497 + (VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE) * BITS_PER_LONG - 1;
498 + bitmap_size /= (VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE) * BITS_PER_LONG;
499 +
500 + vmar->rstate[dev_id].bitmap_size = bitmap_size;
501 + vmar->rstate[dev_id].bitmap = g_new0(unsigned long, bitmap_size);
502 +
503 + vmar->cluster_count += size/VMA_CLUSTER_SIZE;
504 +}
505 +
506 +int vma_reader_register_bs(VmaReader *vmar, guint8 dev_id, BlockBackend *target,
507 + bool write_zeroes, Error **errp)
508 +{
509 + assert(vmar);
510 + assert(target != NULL);
511 + assert(dev_id);
512 + assert(vmar->rstate[dev_id].target == NULL);
513 +
514 + int64_t size = blk_getlength(target);
515 + int64_t size_diff = size - vmar->devinfo[dev_id].size;
516 +
517 + /* storage types can have different size restrictions, so it
518 +     * is not always possible to create an image with the exact size.
519 +     * We therefore tolerate a size difference of up to 4 MB.
520 + */
521 + if ((size_diff < 0) || (size_diff > 4*1024*1024)) {
522 + error_setg(errp, "vma_reader_register_bs for stream %s failed - "
523 + "unexpected size %zd != %zd", vmar->devinfo[dev_id].devname,
524 + size, vmar->devinfo[dev_id].size);
525 + return -1;
526 + }
527 +
528 + allocate_rstate(vmar, dev_id, target, write_zeroes);
529 +
530 + return 0;
531 +}
532 +
533 +static ssize_t safe_write(int fd, void *buf, size_t count)
534 +{
535 + ssize_t n;
536 +
537 + do {
538 + n = write(fd, buf, count);
539 + } while (n < 0 && errno == EINTR);
540 +
541 + return n;
542 +}
543 +
544 +static size_t full_write(int fd, void *buf, size_t len)
545 +{
546 + ssize_t n;
547 + size_t total;
548 +
549 + total = 0;
550 +
551 + while (len > 0) {
552 + n = safe_write(fd, buf, len);
553 + if (n < 0) {
554 + return n;
555 + }
556 + buf += n;
557 + total += n;
558 + len -= n;
559 + }
560 +
561 + if (len) {
562 + /* incomplete write ? */
563 + return -1;
564 + }
565 +
566 + return total;
567 +}
568 +
569 +static int restore_write_data(VmaReader *vmar, guint8 dev_id,
570 + BlockBackend *target, int vmstate_fd,
571 + unsigned char *buf, int64_t sector_num,
572 + int nb_sectors, Error **errp)
573 +{
574 + assert(vmar);
575 +
576 + if (dev_id == vmar->vmstate_stream) {
577 + if (vmstate_fd >= 0) {
578 + int len = nb_sectors * BDRV_SECTOR_SIZE;
579 + int res = full_write(vmstate_fd, buf, len);
580 + if (res < 0) {
581 + error_setg(errp, "write vmstate failed %d", res);
582 + return -1;
583 + }
584 + }
585 + } else {
586 + int res = blk_pwrite(target, sector_num * BDRV_SECTOR_SIZE, buf, nb_sectors * BDRV_SECTOR_SIZE, 0);
587 + if (res < 0) {
588 + error_setg(errp, "blk_pwrite to %s failed (%d)",
589 + bdrv_get_device_name(blk_bs(target)), res);
590 + return -1;
591 + }
592 + }
593 + return 0;
594 +}
595 +
596 +static int restore_extent(VmaReader *vmar, unsigned char *buf,
597 + int extent_size, int vmstate_fd,
598 + bool verbose, bool verify, Error **errp)
599 +{
600 + assert(vmar);
601 + assert(buf);
602 +
603 + VmaExtentHeader *ehead = (VmaExtentHeader *)buf;
604 + int start = VMA_EXTENT_HEADER_SIZE;
605 + int i;
606 +
607 + for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
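+        /* blockinfo layout: bits 0-31 cluster number, bits 32-39 device id,
+         * bits 48-63 mask of non-zero 4K blocks inside the cluster */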
608 + uint64_t block_info = GUINT64_FROM_BE(ehead->blockinfo[i]);
609 + uint64_t cluster_num = block_info & 0xffffffff;
610 + uint8_t dev_id = (block_info >> 32) & 0xff;
611 + uint16_t mask = block_info >> (32+16);
612 + int64_t max_sector;
613 +
614 + if (!dev_id) {
615 + continue;
616 + }
617 +
618 + VmaRestoreState *rstate = &vmar->rstate[dev_id];
619 + BlockBackend *target = NULL;
620 +
621 + if (dev_id != vmar->vmstate_stream) {
622 + target = rstate->target;
623 + if (!verify && !target) {
624 + error_setg(errp, "got wrong dev id %d", dev_id);
625 + return -1;
626 + }
627 +
628 + if (vma_reader_get_bitmap(rstate, cluster_num)) {
629 + error_setg(errp, "found duplicated cluster %zd for stream %s",
630 + cluster_num, vmar->devinfo[dev_id].devname);
631 + return -1;
632 + }
633 + vma_reader_set_bitmap(rstate, cluster_num, 1);
634 +
635 + max_sector = vmar->devinfo[dev_id].size/BDRV_SECTOR_SIZE;
636 + } else {
637 + max_sector = G_MAXINT64;
638 + if (cluster_num != vmar->vmstate_clusters) {
639 + error_setg(errp, "found out of order vmstate data");
640 + return -1;
641 + }
642 + vmar->vmstate_clusters++;
643 + }
644 +
645 + vmar->clusters_read++;
646 +
647 + if (verbose) {
648 + time_t duration = time(NULL) - vmar->start_time;
649 + int percent = (vmar->clusters_read*100)/vmar->cluster_count;
650 + if (percent != vmar->clusters_read_per) {
651 + printf("progress %d%% (read %zd bytes, duration %zd sec)\n",
652 + percent, vmar->clusters_read*VMA_CLUSTER_SIZE,
653 + duration);
654 + fflush(stdout);
655 + vmar->clusters_read_per = percent;
656 + }
657 + }
658 +
659 +        /* try to write whole clusters to speed up the restore */
660 + if (mask == 0xffff) {
661 + if ((start + VMA_CLUSTER_SIZE) > extent_size) {
662 + error_setg(errp, "short vma extent - too many blocks");
663 + return -1;
664 + }
665 + int64_t sector_num = (cluster_num * VMA_CLUSTER_SIZE) /
666 + BDRV_SECTOR_SIZE;
667 + int64_t end_sector = sector_num +
668 + VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE;
669 +
670 + if (end_sector > max_sector) {
671 + end_sector = max_sector;
672 + }
673 +
674 + if (end_sector <= sector_num) {
675 + error_setg(errp, "got wrong block address - write beyond end");
676 + return -1;
677 + }
678 +
679 + if (!verify) {
680 + int nb_sectors = end_sector - sector_num;
681 + if (restore_write_data(vmar, dev_id, target, vmstate_fd,
682 + buf + start, sector_num, nb_sectors,
683 + errp) < 0) {
684 + return -1;
685 + }
686 + }
687 +
688 + start += VMA_CLUSTER_SIZE;
689 + } else {
690 + int j;
691 + int bit = 1;
692 +
693 + for (j = 0; j < 16; j++) {
694 + int64_t sector_num = (cluster_num*VMA_CLUSTER_SIZE +
695 + j*VMA_BLOCK_SIZE)/BDRV_SECTOR_SIZE;
696 +
697 + int64_t end_sector = sector_num +
698 + VMA_BLOCK_SIZE/BDRV_SECTOR_SIZE;
699 + if (end_sector > max_sector) {
700 + end_sector = max_sector;
701 + }
702 +
703 + if (mask & bit) {
704 + if ((start + VMA_BLOCK_SIZE) > extent_size) {
705 + error_setg(errp, "short vma extent - too many blocks");
706 + return -1;
707 + }
708 +
709 + if (end_sector <= sector_num) {
710 + error_setg(errp, "got wrong block address - "
711 + "write beyond end");
712 + return -1;
713 + }
714 +
715 + if (!verify) {
716 + int nb_sectors = end_sector - sector_num;
717 + if (restore_write_data(vmar, dev_id, target, vmstate_fd,
718 + buf + start, sector_num,
719 + nb_sectors, errp) < 0) {
720 + return -1;
721 + }
722 + }
723 +
724 + start += VMA_BLOCK_SIZE;
725 +
726 + } else {
727 +
728 +
729 + if (end_sector > sector_num) {
730 +                    /* Todo: use bdrv_co_write_zeroes (but that needs to
731 +                     * be run inside a coroutine?)
732 + */
733 + int nb_sectors = end_sector - sector_num;
734 + int zero_size = BDRV_SECTOR_SIZE*nb_sectors;
735 + vmar->zero_cluster_data += zero_size;
736 + if (mask != 0) {
737 + vmar->partial_zero_cluster_data += zero_size;
738 + }
739 +
740 + if (rstate->write_zeroes && !verify) {
741 + if (restore_write_data(vmar, dev_id, target, vmstate_fd,
742 + zero_vma_block, sector_num,
743 + nb_sectors, errp) < 0) {
744 + return -1;
745 + }
746 + }
747 + }
748 + }
749 +
750 + bit = bit << 1;
751 + }
752 + }
753 + }
754 +
755 + if (start != extent_size) {
756 + error_setg(errp, "vma extent error - missing blocks");
757 + return -1;
758 + }
759 +
760 + return 0;
761 +}
762 +
763 +static int vma_reader_restore_full(VmaReader *vmar, int vmstate_fd,
764 + bool verbose, bool verify,
765 + Error **errp)
766 +{
767 + assert(vmar);
768 + assert(vmar->head_data);
769 +
770 + int ret = 0;
771 + unsigned char buf[VMA_MAX_EXTENT_SIZE];
772 + int buf_pos = 0;
773 + unsigned char md5sum[16];
774 + VmaHeader *h = (VmaHeader *)vmar->head_data;
775 +
776 + vmar->start_time = time(NULL);
777 +
778 + while (1) {
779 + int bytes = full_read(vmar->fd, buf + buf_pos, sizeof(buf) - buf_pos);
780 + if (bytes < 0) {
781 + error_setg(errp, "read failed - %s", g_strerror(errno));
782 + return -1;
783 + }
784 +
785 + buf_pos += bytes;
786 +
787 + if (!buf_pos) {
788 + break; /* EOF */
789 + }
790 +
791 + if (buf_pos < VMA_EXTENT_HEADER_SIZE) {
792 + error_setg(errp, "read short extent (%d bytes)", buf_pos);
793 + return -1;
794 + }
795 +
796 + VmaExtentHeader *ehead = (VmaExtentHeader *)buf;
797 +
798 + /* extract md5sum */
799 + memcpy(md5sum, ehead->md5sum, sizeof(ehead->md5sum));
800 + memset(ehead->md5sum, 0, sizeof(ehead->md5sum));
801 +
802 + g_checksum_reset(vmar->md5csum);
803 + g_checksum_update(vmar->md5csum, buf, VMA_EXTENT_HEADER_SIZE);
804 + gsize csize = 16;
805 + g_checksum_get_digest(vmar->md5csum, ehead->md5sum, &csize);
806 +
807 + if (memcmp(md5sum, ehead->md5sum, 16) != 0) {
808 +            error_setg(errp, "wrong vma extent header checksum");
809 + return -1;
810 + }
811 +
812 + if (memcmp(h->uuid, ehead->uuid, sizeof(ehead->uuid)) != 0) {
813 + error_setg(errp, "wrong vma extent uuid");
814 + return -1;
815 + }
816 +
817 + if (ehead->magic != VMA_EXTENT_MAGIC || ehead->reserved1 != 0) {
818 + error_setg(errp, "wrong vma extent header magic");
819 + return -1;
820 + }
821 +
822 + int block_count = GUINT16_FROM_BE(ehead->block_count);
823 + int extent_size = VMA_EXTENT_HEADER_SIZE + block_count*VMA_BLOCK_SIZE;
824 +
825 + if (buf_pos < extent_size) {
826 + error_setg(errp, "short vma extent (%d < %d)", buf_pos,
827 + extent_size);
828 + return -1;
829 + }
830 +
831 + if (restore_extent(vmar, buf, extent_size, vmstate_fd, verbose,
832 + verify, errp) < 0) {
833 + return -1;
834 + }
835 +
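+        /* bytes read beyond this extent belong to the next one; keep them
+         * at the front of the buffer instead of re-reading */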
836 + if (buf_pos > extent_size) {
837 + memmove(buf, buf + extent_size, buf_pos - extent_size);
838 + buf_pos = buf_pos - extent_size;
839 + } else {
840 + buf_pos = 0;
841 + }
842 + }
843 +
844 + bdrv_drain_all();
845 +
846 + int i;
847 + for (i = 1; i < 256; i++) {
848 + VmaRestoreState *rstate = &vmar->rstate[i];
849 + if (!rstate->target) {
850 + continue;
851 + }
852 +
853 + if (blk_flush(rstate->target) < 0) {
854 + error_setg(errp, "vma blk_flush %s failed",
855 + vmar->devinfo[i].devname);
856 + return -1;
857 + }
858 +
859 + if (vmar->devinfo[i].size &&
860 + (strcmp(vmar->devinfo[i].devname, "vmstate") != 0)) {
861 + assert(rstate->bitmap);
862 +
863 + int64_t cluster_num, end;
864 +
865 + end = (vmar->devinfo[i].size + VMA_CLUSTER_SIZE - 1) /
866 + VMA_CLUSTER_SIZE;
867 +
868 + for (cluster_num = 0; cluster_num < end; cluster_num++) {
869 + if (!vma_reader_get_bitmap(rstate, cluster_num)) {
870 + error_setg(errp, "detected missing cluster %zd "
871 + "for stream %s", cluster_num,
872 + vmar->devinfo[i].devname);
873 + return -1;
874 + }
875 + }
876 + }
877 + }
878 +
879 + if (verbose) {
880 + if (vmar->clusters_read) {
881 + printf("total bytes read %zd, sparse bytes %zd (%.3g%%)\n",
882 + vmar->clusters_read*VMA_CLUSTER_SIZE,
883 + vmar->zero_cluster_data,
884 + (double)(100.0*vmar->zero_cluster_data)/
885 + (vmar->clusters_read*VMA_CLUSTER_SIZE));
886 +
887 + int64_t datasize = vmar->clusters_read*VMA_CLUSTER_SIZE-vmar->zero_cluster_data;
888 + if (datasize) { // this does not make sense for empty files
889 + printf("space reduction due to 4K zero blocks %.3g%%\n",
890 + (double)(100.0*vmar->partial_zero_cluster_data) / datasize);
891 + }
892 + } else {
893 + printf("vma archive contains no image data\n");
894 + }
895 + }
896 + return ret;
897 +}
898 +
899 +int vma_reader_restore(VmaReader *vmar, int vmstate_fd, bool verbose,
900 + Error **errp)
901 +{
902 + return vma_reader_restore_full(vmar, vmstate_fd, verbose, false, errp);
903 +}
904 +
905 +int vma_reader_verify(VmaReader *vmar, bool verbose, Error **errp)
906 +{
907 + guint8 dev_id;
908 +
909 + for (dev_id = 1; dev_id < 255; dev_id++) {
910 + if (vma_reader_get_device_info(vmar, dev_id)) {
911 + allocate_rstate(vmar, dev_id, NULL, false);
912 + }
913 + }
914 +
915 + return vma_reader_restore_full(vmar, -1, verbose, true, errp);
916 +}
917 +
918 diff --git a/vma-writer.c b/vma-writer.c
919 new file mode 100644
920 index 0000000000..11d8321ffd
921 --- /dev/null
922 +++ b/vma-writer.c
923 @@ -0,0 +1,790 @@
924 +/*
925 + * VMA: Virtual Machine Archive
926 + *
927 + * Copyright (C) 2012 Proxmox Server Solutions
928 + *
929 + * Authors:
930 + * Dietmar Maurer (dietmar@proxmox.com)
931 + *
932 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
933 + * See the COPYING file in the top-level directory.
934 + *
935 + */
936 +
937 +#include "qemu/osdep.h"
938 +#include <glib.h>
939 +#include <uuid/uuid.h>
940 +
941 +#include "vma.h"
942 +#include "block/block.h"
943 +#include "monitor/monitor.h"
944 +#include "qemu/main-loop.h"
945 +#include "qemu/coroutine.h"
946 +#include "qemu/cutils.h"
947 +
948 +#define DEBUG_VMA 0
949 +
950 +#define DPRINTF(fmt, ...)\
951 + do { if (DEBUG_VMA) { printf("vma: " fmt, ## __VA_ARGS__); } } while (0)
952 +
953 +#define WRITE_BUFFERS 5
954 +#define HEADER_CLUSTERS 8
955 +#define HEADERBUF_SIZE (VMA_CLUSTER_SIZE*HEADER_CLUSTERS)
956 +
957 +struct VmaWriter {
958 + int fd;
959 + FILE *cmd;
960 + int status;
961 + char errmsg[8192];
962 + uuid_t uuid;
963 + bool header_written;
964 + bool closed;
965 +
966 + /* we always write extents */
967 + unsigned char *outbuf;
968 + int outbuf_pos; /* in bytes */
969 + int outbuf_count; /* in VMA_BLOCKS */
970 + uint64_t outbuf_block_info[VMA_BLOCKS_PER_EXTENT];
971 +
972 + unsigned char *headerbuf;
973 +
974 + GChecksum *md5csum;
975 + CoMutex flush_lock;
976 + Coroutine *co_writer;
977 +
978 +    /* drive information */
979 + VmaStreamInfo stream_info[256];
980 + guint stream_count;
981 +
982 + guint8 vmstate_stream;
983 + uint32_t vmstate_clusters;
984 +
985 + /* header blob table */
986 + char *header_blob_table;
987 + uint32_t header_blob_table_size;
988 + uint32_t header_blob_table_pos;
989 +
990 + /* store for config blobs */
991 + uint32_t config_names[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
992 + uint32_t config_data[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
993 + uint32_t config_count;
994 +};
995 +
996 +void vma_writer_set_error(VmaWriter *vmaw, const char *fmt, ...)
997 +{
998 + va_list ap;
999 +
1000 + if (vmaw->status < 0) {
1001 + return;
1002 + }
1003 +
1004 + vmaw->status = -1;
1005 +
1006 + va_start(ap, fmt);
1007 + g_vsnprintf(vmaw->errmsg, sizeof(vmaw->errmsg), fmt, ap);
1008 + va_end(ap);
1009 +
1010 + DPRINTF("vma_writer_set_error: %s\n", vmaw->errmsg);
1011 +}
1012 +
1013 +static uint32_t allocate_header_blob(VmaWriter *vmaw, const char *data,
1014 + size_t len)
1015 +{
1016 + if (len > 65535) {
1017 + return 0;
1018 + }
1019 +
1020 + if (!vmaw->header_blob_table ||
1021 + (vmaw->header_blob_table_size <
1022 + (vmaw->header_blob_table_pos + len + 2))) {
1023 + int newsize = vmaw->header_blob_table_size + ((len + 2 + 511)/512)*512;
1024 +
1025 + vmaw->header_blob_table = g_realloc(vmaw->header_blob_table, newsize);
1026 + memset(vmaw->header_blob_table + vmaw->header_blob_table_size,
1027 + 0, newsize - vmaw->header_blob_table_size);
1028 + vmaw->header_blob_table_size = newsize;
1029 + }
1030 +
1031 + uint32_t cpos = vmaw->header_blob_table_pos;
1032 + vmaw->header_blob_table[cpos] = len & 255;
1033 + vmaw->header_blob_table[cpos+1] = (len >> 8) & 255;
1034 + memcpy(vmaw->header_blob_table + cpos + 2, data, len);
1035 + vmaw->header_blob_table_pos += len + 2;
1036 + return cpos;
1037 +}
1038 +
1039 +static uint32_t allocate_header_string(VmaWriter *vmaw, const char *str)
1040 +{
1041 + assert(vmaw);
1042 +
1043 + size_t len = strlen(str) + 1;
1044 +
1045 + return allocate_header_blob(vmaw, str, len);
1046 +}
1047 +
1048 +int vma_writer_add_config(VmaWriter *vmaw, const char *name, gpointer data,
1049 + gsize len)
1050 +{
1051 + assert(vmaw);
1052 + assert(!vmaw->header_written);
1053 + assert(vmaw->config_count < VMA_MAX_CONFIGS);
1054 + assert(name);
1055 + assert(data);
1056 +
1057 + gchar *basename = g_path_get_basename(name);
1058 + uint32_t name_ptr = allocate_header_string(vmaw, basename);
1059 + g_free(basename);
1060 +
1061 + if (!name_ptr) {
1062 + return -1;
1063 + }
1064 +
1065 + uint32_t data_ptr = allocate_header_blob(vmaw, data, len);
1066 + if (!data_ptr) {
1067 + return -1;
1068 + }
1069 +
1070 + vmaw->config_names[vmaw->config_count] = name_ptr;
1071 + vmaw->config_data[vmaw->config_count] = data_ptr;
1072 +
1073 + vmaw->config_count++;
1074 +
1075 + return 0;
1076 +}
1077 +
1078 +int vma_writer_register_stream(VmaWriter *vmaw, const char *devname,
1079 + size_t size)
1080 +{
1081 + assert(vmaw);
1082 + assert(devname);
1083 + assert(!vmaw->status);
1084 +
1085 + if (vmaw->header_written) {
1086 + vma_writer_set_error(vmaw, "vma_writer_register_stream: header "
1087 + "already written");
1088 + return -1;
1089 + }
1090 +
1091 + guint n = vmaw->stream_count + 1;
1092 +
1093 +    /* we can have dev_ids from 1 to 255 (0 reserved);
1094 +     * 255 (-1) is reserved for safety
1095 + */
1096 + if (n > 254) {
1097 + vma_writer_set_error(vmaw, "vma_writer_register_stream: "
1098 + "too many drives");
1099 + return -1;
1100 + }
1101 +
1102 + if (size <= 0) {
1103 + vma_writer_set_error(vmaw, "vma_writer_register_stream: "
1104 + "got strange size %zd", size);
1105 + return -1;
1106 + }
1107 +
1108 + DPRINTF("vma_writer_register_stream %s %zu %d\n", devname, size, n);
1109 +
1110 + vmaw->stream_info[n].devname = g_strdup(devname);
1111 + vmaw->stream_info[n].size = size;
1112 +
1113 + vmaw->stream_info[n].cluster_count = (size + VMA_CLUSTER_SIZE - 1) /
1114 + VMA_CLUSTER_SIZE;
1115 +
1116 + vmaw->stream_count = n;
1117 +
1118 + if (strcmp(devname, "vmstate") == 0) {
1119 + vmaw->vmstate_stream = n;
1120 + }
1121 +
1122 + return n;
1123 +}
1124 +
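+/*
+ * Yield the current coroutine until fd becomes writable; a temporary write
+ * handler simply re-enters the coroutine and is removed again afterwards.
+ */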
1125 +static void coroutine_fn yield_until_fd_writable(int fd)
1126 +{
1127 + assert(qemu_in_coroutine());
1128 + AioContext *ctx = qemu_get_current_aio_context();
1129 + aio_set_fd_handler(ctx, fd, false, NULL, (IOHandler *)qemu_coroutine_enter,
1130 + NULL, qemu_coroutine_self());
1131 + qemu_coroutine_yield();
1132 + aio_set_fd_handler(ctx, fd, false, NULL, NULL, NULL, NULL);
1133 +}
1134 +
1135 +static ssize_t coroutine_fn
1136 +vma_queue_write(VmaWriter *vmaw, const void *buf, size_t bytes)
1137 +{
1138 + DPRINTF("vma_queue_write enter %zd\n", bytes);
1139 +
1140 + assert(vmaw);
1141 + assert(buf);
1142 + assert(bytes <= VMA_MAX_EXTENT_SIZE);
1143 +
1144 + size_t done = 0;
1145 + ssize_t ret;
1146 +
1147 + assert(vmaw->co_writer == NULL);
1148 +
1149 + vmaw->co_writer = qemu_coroutine_self();
1150 +
1151 + while (done < bytes) {
1152 + if (vmaw->status < 0) {
1153 + DPRINTF("vma_queue_write detected canceled backup\n");
1154 + done = -1;
1155 + break;
1156 + }
1157 + yield_until_fd_writable(vmaw->fd);
1158 + ret = write(vmaw->fd, buf + done, bytes - done);
1159 + if (ret > 0) {
1160 + done += ret;
1161 + DPRINTF("vma_queue_write written %zd %zd\n", done, ret);
1162 + } else if (ret < 0) {
1163 + if (errno == EAGAIN || errno == EWOULDBLOCK) {
1164 + /* try again */
1165 + } else {
1166 + vma_writer_set_error(vmaw, "vma_queue_write: write error - %s",
1167 + g_strerror(errno));
1168 + done = -1; /* always return failure for partial writes */
1169 + break;
1170 + }
1171 + } else if (ret == 0) {
1172 + /* should not happen - simply try again */
1173 + }
1174 + }
1175 +
1176 + vmaw->co_writer = NULL;
1177 +
1178 + return (done == bytes) ? bytes : -1;
1179 +}
1180 +
1181 +VmaWriter *vma_writer_create(const char *filename, uuid_t uuid, Error **errp)
1182 +{
1183 + const char *p;
1184 +
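+    /* sanity check the fixed on-disk layout: 12 KiB main header with the
+     * device table at offset 4096, and 512-byte extent headers */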
1185 + assert(sizeof(VmaHeader) == (4096 + 8192));
1186 + assert(G_STRUCT_OFFSET(VmaHeader, config_names) == 2044);
1187 + assert(G_STRUCT_OFFSET(VmaHeader, config_data) == 3068);
1188 + assert(G_STRUCT_OFFSET(VmaHeader, dev_info) == 4096);
1189 + assert(sizeof(VmaExtentHeader) == 512);
1190 +
1191 + VmaWriter *vmaw = g_new0(VmaWriter, 1);
1192 + vmaw->fd = -1;
1193 +
1194 + vmaw->md5csum = g_checksum_new(G_CHECKSUM_MD5);
1195 + if (!vmaw->md5csum) {
1196 +        error_setg(errp, "can't allocate checksum\n");
1197 + goto err;
1198 + }
1199 +
1200 + if (strstart(filename, "exec:", &p)) {
1201 + vmaw->cmd = popen(p, "w");
1202 + if (vmaw->cmd == NULL) {
1203 + error_setg(errp, "can't popen command '%s' - %s\n", p,
1204 + g_strerror(errno));
1205 + goto err;
1206 + }
1207 + vmaw->fd = fileno(vmaw->cmd);
1208 +
1209 + /* try to use O_NONBLOCK */
1210 + fcntl(vmaw->fd, F_SETFL, fcntl(vmaw->fd, F_GETFL)|O_NONBLOCK);
1211 +
1212 + } else {
1213 + struct stat st;
1214 + int oflags;
1215 + const char *tmp_id_str;
1216 +
1217 + if ((stat(filename, &st) == 0) && S_ISFIFO(st.st_mode)) {
1218 + oflags = O_NONBLOCK|O_WRONLY;
1219 + vmaw->fd = qemu_open(filename, oflags, errp);
1220 + } else if (strstart(filename, "/dev/fdset/", &tmp_id_str)) {
1221 + oflags = O_NONBLOCK|O_WRONLY;
1222 + vmaw->fd = qemu_open(filename, oflags, errp);
1223 + } else if (strstart(filename, "/dev/fdname/", &tmp_id_str)) {
1224 + vmaw->fd = monitor_get_fd(monitor_cur(), tmp_id_str, errp);
1225 + if (vmaw->fd < 0) {
1226 + goto err;
1227 + }
1228 + /* try to use O_NONBLOCK */
1229 + fcntl(vmaw->fd, F_SETFL, fcntl(vmaw->fd, F_GETFL)|O_NONBLOCK);
1230 + } else {
1231 + oflags = O_NONBLOCK|O_DIRECT|O_WRONLY|O_EXCL;
1232 + vmaw->fd = qemu_create(filename, oflags, 0644, errp);
1233 + }
1234 +
1235 + if (vmaw->fd < 0) {
1236 + error_setg(errp, "can't open file %s - %s\n", filename,
1237 + g_strerror(errno));
1238 + goto err;
1239 + }
1240 + }
1241 +
1242 + /* we use O_DIRECT, so we need to align IO buffers */
1243 +
1244 + vmaw->outbuf = qemu_memalign(512, VMA_MAX_EXTENT_SIZE);
1245 + vmaw->headerbuf = qemu_memalign(512, HEADERBUF_SIZE);
1246 +
1247 + vmaw->outbuf_count = 0;
1248 + vmaw->outbuf_pos = VMA_EXTENT_HEADER_SIZE;
1249 +
1250 + vmaw->header_blob_table_pos = 1; /* start at pos 1 */
1251 +
1252 + qemu_co_mutex_init(&vmaw->flush_lock);
1253 +
1254 + uuid_copy(vmaw->uuid, uuid);
1255 +
1256 + return vmaw;
1257 +
1258 +err:
1259 + if (vmaw) {
1260 + if (vmaw->cmd) {
1261 + pclose(vmaw->cmd);
1262 + } else if (vmaw->fd >= 0) {
1263 + close(vmaw->fd);
1264 + }
1265 +
1266 + if (vmaw->md5csum) {
1267 + g_checksum_free(vmaw->md5csum);
1268 + }
1269 +
1270 + g_free(vmaw);
1271 + }
1272 +
1273 + return NULL;
1274 +}
1275 +
1276 +static int coroutine_fn vma_write_header(VmaWriter *vmaw)
1277 +{
1278 + assert(vmaw);
1279 + unsigned char *buf = vmaw->headerbuf;
1280 + VmaHeader *head = (VmaHeader *)buf;
1281 +
1282 + int i;
1283 +
1284 + DPRINTF("VMA WRITE HEADER\n");
1285 +
1286 + if (vmaw->status < 0) {
1287 + return vmaw->status;
1288 + }
1289 +
1290 + memset(buf, 0, HEADERBUF_SIZE);
1291 +
1292 + head->magic = VMA_MAGIC;
1293 + head->version = GUINT32_TO_BE(1); /* v1 */
1294 + memcpy(head->uuid, vmaw->uuid, 16);
1295 +
1296 + time_t ctime = time(NULL);
1297 + head->ctime = GUINT64_TO_BE(ctime);
1298 +
1299 + for (i = 0; i < VMA_MAX_CONFIGS; i++) {
1300 + head->config_names[i] = GUINT32_TO_BE(vmaw->config_names[i]);
1301 + head->config_data[i] = GUINT32_TO_BE(vmaw->config_data[i]);
1302 + }
1303 +
1304 + /* 32 bytes per device (12 used currently) = 8192 bytes max */
1305 + for (i = 1; i <= 254; i++) {
1306 + VmaStreamInfo *si = &vmaw->stream_info[i];
1307 + if (si->size) {
1308 + assert(si->devname);
1309 + uint32_t devname_ptr = allocate_header_string(vmaw, si->devname);
1310 + if (!devname_ptr) {
1311 + return -1;
1312 + }
1313 + head->dev_info[i].devname_ptr = GUINT32_TO_BE(devname_ptr);
1314 + head->dev_info[i].size = GUINT64_TO_BE(si->size);
1315 + }
1316 + }
1317 +
1318 + uint32_t header_size = sizeof(VmaHeader) + vmaw->header_blob_table_size;
1319 + head->header_size = GUINT32_TO_BE(header_size);
1320 +
1321 + if (header_size > HEADERBUF_SIZE) {
1322 + return -1; /* just to be sure */
1323 + }
1324 +
1325 + uint32_t blob_buffer_offset = sizeof(VmaHeader);
1326 + memcpy(buf + blob_buffer_offset, vmaw->header_blob_table,
1327 + vmaw->header_blob_table_size);
1328 + head->blob_buffer_offset = GUINT32_TO_BE(blob_buffer_offset);
1329 + head->blob_buffer_size = GUINT32_TO_BE(vmaw->header_blob_table_pos);
1330 +
1331 + g_checksum_reset(vmaw->md5csum);
1332 + g_checksum_update(vmaw->md5csum, (const guchar *)buf, header_size);
1333 + gsize csize = 16;
1334 + g_checksum_get_digest(vmaw->md5csum, (guint8 *)(head->md5sum), &csize);
1335 +
1336 + return vma_queue_write(vmaw, buf, header_size);
1337 +}
1338 +
1339 +static int coroutine_fn vma_writer_flush(VmaWriter *vmaw)
1340 +{
1341 + assert(vmaw);
1342 +
1343 + int ret;
1344 + int i;
1345 +
1346 + if (vmaw->status < 0) {
1347 + return vmaw->status;
1348 + }
1349 +
1350 + if (!vmaw->header_written) {
1351 + vmaw->header_written = true;
1352 + ret = vma_write_header(vmaw);
1353 + if (ret < 0) {
1354 + vma_writer_set_error(vmaw, "vma_writer_flush: write header failed");
1355 + return ret;
1356 + }
1357 + }
1358 +
1359 + DPRINTF("VMA WRITE FLUSH %d %d\n", vmaw->outbuf_count, vmaw->outbuf_pos);
1360 +
1361 +
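+    /* finalize the extent header in front of the buffered blocks: block info
+     * table, block count, archive uuid and MD5 digest */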
1362 + VmaExtentHeader *ehead = (VmaExtentHeader *)vmaw->outbuf;
1363 +
1364 + ehead->magic = VMA_EXTENT_MAGIC;
1365 + ehead->reserved1 = 0;
1366 +
1367 + for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
1368 + ehead->blockinfo[i] = GUINT64_TO_BE(vmaw->outbuf_block_info[i]);
1369 + }
1370 +
1371 + guint16 block_count = (vmaw->outbuf_pos - VMA_EXTENT_HEADER_SIZE) /
1372 + VMA_BLOCK_SIZE;
1373 +
1374 + ehead->block_count = GUINT16_TO_BE(block_count);
1375 +
1376 + memcpy(ehead->uuid, vmaw->uuid, sizeof(ehead->uuid));
1377 + memset(ehead->md5sum, 0, sizeof(ehead->md5sum));
1378 +
1379 + g_checksum_reset(vmaw->md5csum);
1380 + g_checksum_update(vmaw->md5csum, vmaw->outbuf, VMA_EXTENT_HEADER_SIZE);
1381 + gsize csize = 16;
1382 + g_checksum_get_digest(vmaw->md5csum, ehead->md5sum, &csize);
1383 +
1384 + int bytes = vmaw->outbuf_pos;
1385 + ret = vma_queue_write(vmaw, vmaw->outbuf, bytes);
1386 + if (ret != bytes) {
1387 + vma_writer_set_error(vmaw, "vma_writer_flush: failed write");
1388 + }
1389 +
1390 + vmaw->outbuf_count = 0;
1391 + vmaw->outbuf_pos = VMA_EXTENT_HEADER_SIZE;
1392 +
1393 + for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
1394 + vmaw->outbuf_block_info[i] = 0;
1395 + }
1396 +
1397 + return vmaw->status;
1398 +}
1399 +
1400 +static int vma_count_open_streams(VmaWriter *vmaw)
1401 +{
1402 + g_assert(vmaw != NULL);
1403 +
1404 + int i;
1405 + int open_drives = 0;
1406 + for (i = 0; i <= 255; i++) {
1407 + if (vmaw->stream_info[i].size && !vmaw->stream_info[i].finished) {
1408 + open_drives++;
1409 + }
1410 + }
1411 +
1412 + return open_drives;
1413 +}
1414 +
1415 +
1416 +/**
1417 + * You need to call this if the vma archive does not contain
1418 + * any data stream.
1419 + */
1420 +int coroutine_fn
1421 +vma_writer_flush_output(VmaWriter *vmaw)
1422 +{
1423 + qemu_co_mutex_lock(&vmaw->flush_lock);
1424 + int ret = vma_writer_flush(vmaw);
1425 + qemu_co_mutex_unlock(&vmaw->flush_lock);
1426 + if (ret < 0) {
1427 + vma_writer_set_error(vmaw, "vma_writer_flush_header failed");
1428 + }
1429 + return ret;
1430 +}
1431 +
1432 +/**
1433 + * All jobs should call this when there is no more data.
1434 + * Returns: number of remaining streams (0 ==> finished)
1435 + */
1436 +int coroutine_fn
1437 +vma_writer_close_stream(VmaWriter *vmaw, uint8_t dev_id)
1438 +{
1439 + g_assert(vmaw != NULL);
1440 +
1441 + DPRINTF("vma_writer_set_status %d\n", dev_id);
1442 + if (!vmaw->stream_info[dev_id].size) {
1443 + vma_writer_set_error(vmaw, "vma_writer_close_stream: "
1444 + "no such stream %d", dev_id);
1445 + return -1;
1446 + }
1447 + if (vmaw->stream_info[dev_id].finished) {
1448 + vma_writer_set_error(vmaw, "vma_writer_close_stream: "
1449 + "stream already closed %d", dev_id);
1450 + return -1;
1451 + }
1452 +
1453 + vmaw->stream_info[dev_id].finished = true;
1454 +
1455 + int open_drives = vma_count_open_streams(vmaw);
1456 +
1457 + if (open_drives <= 0) {
1458 + DPRINTF("vma_writer_set_status all drives completed\n");
1459 + vma_writer_flush_output(vmaw);
1460 + }
1461 +
1462 + return open_drives;
1463 +}
1464 +
1465 +int vma_writer_get_status(VmaWriter *vmaw, VmaStatus *status)
1466 +{
1467 + int i;
1468 +
1469 + g_assert(vmaw != NULL);
1470 +
1471 + if (status) {
1472 + status->status = vmaw->status;
1473 + g_strlcpy(status->errmsg, vmaw->errmsg, sizeof(status->errmsg));
1474 + for (i = 0; i <= 255; i++) {
1475 + status->stream_info[i] = vmaw->stream_info[i];
1476 + }
1477 +
1478 + uuid_unparse_lower(vmaw->uuid, status->uuid_str);
1479 + }
1480 +
1481 + status->closed = vmaw->closed;
1482 +
1483 + return vmaw->status;
1484 +}
1485 +
1486 +static int vma_writer_get_buffer(VmaWriter *vmaw)
1487 +{
1488 + int ret = 0;
1489 +
1490 + qemu_co_mutex_lock(&vmaw->flush_lock);
1491 +
1492 + /* wait until buffer is available */
1493 + while (vmaw->outbuf_count >= (VMA_BLOCKS_PER_EXTENT - 1)) {
1494 + ret = vma_writer_flush(vmaw);
1495 + if (ret < 0) {
1496 + vma_writer_set_error(vmaw, "vma_writer_get_buffer: flush failed");
1497 + break;
1498 + }
1499 + }
1500 +
1501 + qemu_co_mutex_unlock(&vmaw->flush_lock);
1502 +
1503 + return ret;
1504 +}
1505 +
1506 +
1507 +int64_t coroutine_fn
1508 +vma_writer_write(VmaWriter *vmaw, uint8_t dev_id, int64_t cluster_num,
1509 + const unsigned char *buf, size_t *zero_bytes)
1510 +{
1511 + g_assert(vmaw != NULL);
1512 + g_assert(zero_bytes != NULL);
1513 +
1514 + *zero_bytes = 0;
1515 +
1516 + if (vmaw->status < 0) {
1517 + return vmaw->status;
1518 + }
1519 +
1520 + if (!dev_id || !vmaw->stream_info[dev_id].size) {
1521 + vma_writer_set_error(vmaw, "vma_writer_write: "
1522 + "no such stream %d", dev_id);
1523 + return -1;
1524 + }
1525 +
1526 + if (vmaw->stream_info[dev_id].finished) {
1527 + vma_writer_set_error(vmaw, "vma_writer_write: "
1528 + "stream already closed %d", dev_id);
1529 + return -1;
1530 + }
1531 +
1532 +
1533 + if (cluster_num >= (((uint64_t)1)<<32)) {
1534 + vma_writer_set_error(vmaw, "vma_writer_write: "
1535 + "cluster number out of range");
1536 + return -1;
1537 + }
1538 +
1539 + if (dev_id == vmaw->vmstate_stream) {
1540 + if (cluster_num != vmaw->vmstate_clusters) {
1541 + vma_writer_set_error(vmaw, "vma_writer_write: "
1542 + "non sequential vmstate write");
1543 + }
1544 + vmaw->vmstate_clusters++;
1545 + } else if (cluster_num >= vmaw->stream_info[dev_id].cluster_count) {
1546 + vma_writer_set_error(vmaw, "vma_writer_write: cluster number too big");
1547 + return -1;
1548 + }
1549 +
1550 + /* wait until buffer is available */
1551 + if (vma_writer_get_buffer(vmaw) < 0) {
1552 + vma_writer_set_error(vmaw, "vma_writer_write: "
1553 + "vma_writer_get_buffer failed");
1554 + return -1;
1555 + }
1556 +
1557 + DPRINTF("VMA WRITE %d %zd\n", dev_id, cluster_num);
1558 +
1559 + uint64_t dev_size = vmaw->stream_info[dev_id].size;
1560 + uint16_t mask = 0;
1561 +
1562 + if (buf) {
1563 + int i;
1564 + int bit = 1;
1565 + uint64_t byte_offset = cluster_num * VMA_CLUSTER_SIZE;
1566 + for (i = 0; i < 16; i++) {
1567 + const unsigned char *vmablock = buf + (i*VMA_BLOCK_SIZE);
1568 +
1569 + // Note: If the source is not 64k-aligned, we might reach 4k blocks
1570 + // after the end of the device. Always mark these as zero in the
1571 + // mask, so the restore handles them correctly.
1572 + if (byte_offset < dev_size &&
1573 + !buffer_is_zero(vmablock, VMA_BLOCK_SIZE))
1574 + {
1575 + mask |= bit;
1576 + memcpy(vmaw->outbuf + vmaw->outbuf_pos, vmablock,
1577 + VMA_BLOCK_SIZE);
1578 +
1579 +                // don't leak stale buffer data from beyond the device end
1580 +                // into the archive on an unaligned last block
1580 + if (byte_offset + VMA_BLOCK_SIZE > dev_size) {
1581 + uint64_t real_data_in_block = dev_size - byte_offset;
1582 + memset(vmaw->outbuf + vmaw->outbuf_pos + real_data_in_block,
1583 + 0, VMA_BLOCK_SIZE - real_data_in_block);
1584 + }
1585 +
1586 + vmaw->outbuf_pos += VMA_BLOCK_SIZE;
1587 + } else {
1588 + DPRINTF("VMA WRITE %zd ZERO BLOCK %d\n", cluster_num, i);
1589 + vmaw->stream_info[dev_id].zero_bytes += VMA_BLOCK_SIZE;
1590 + *zero_bytes += VMA_BLOCK_SIZE;
1591 + }
1592 +
1593 + byte_offset += VMA_BLOCK_SIZE;
1594 + bit = bit << 1;
1595 + }
1596 + } else {
1597 + DPRINTF("VMA WRITE %zd ZERO CLUSTER\n", cluster_num);
1598 + vmaw->stream_info[dev_id].zero_bytes += VMA_CLUSTER_SIZE;
1599 + *zero_bytes += VMA_CLUSTER_SIZE;
1600 + }
1601 +
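+    /* pack cluster number (bits 0-31), device id (bits 32-39) and the
+     * non-zero-block mask (bits 48-63) into one block info entry;
+     * restore_extent() in vma-reader.c decodes the same layout */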
1602 + uint64_t block_info = ((uint64_t)mask) << (32+16);
1603 + block_info |= ((uint64_t)dev_id) << 32;
1604 + block_info |= (cluster_num & 0xffffffff);
1605 + vmaw->outbuf_block_info[vmaw->outbuf_count] = block_info;
1606 +
1607 + DPRINTF("VMA WRITE MASK %zd %zx\n", cluster_num, block_info);
1608 +
1609 + vmaw->outbuf_count++;
1610 +
1611 +    /** NOTE: We always write whole clusters, but we correctly set
1612 +     * transferred bytes. So transferred == size when everything
1613 + * went OK.
1614 + */
1615 + size_t transferred = VMA_CLUSTER_SIZE;
1616 +
1617 + if (dev_id != vmaw->vmstate_stream) {
1618 + uint64_t last = (cluster_num + 1) * VMA_CLUSTER_SIZE;
1619 + if (last > dev_size) {
1620 + uint64_t diff = last - dev_size;
1621 + if (diff >= VMA_CLUSTER_SIZE) {
1622 + vma_writer_set_error(vmaw, "vma_writer_write: "
1623 + "read after last cluster");
1624 + return -1;
1625 + }
1626 + transferred -= diff;
1627 + }
1628 + }
1629 +
1630 + vmaw->stream_info[dev_id].transferred += transferred;
1631 +
1632 + return transferred;
1633 +}
1634 +
1635 +void vma_writer_error_propagate(VmaWriter *vmaw, Error **errp)
1636 +{
1637 + if (vmaw->status < 0 && *errp == NULL) {
1638 + error_setg(errp, "%s", vmaw->errmsg);
1639 + }
1640 +}
1641 +
1642 +int vma_writer_close(VmaWriter *vmaw, Error **errp)
1643 +{
1644 + g_assert(vmaw != NULL);
1645 +
1646 + int i;
1647 +
1648 + qemu_co_mutex_lock(&vmaw->flush_lock); // wait for pending writes
1649 +
1650 + assert(vmaw->co_writer == NULL);
1651 +
1652 + if (vmaw->cmd) {
1653 + if (pclose(vmaw->cmd) < 0) {
1654 + vma_writer_set_error(vmaw, "vma_writer_close: "
1655 + "pclose failed - %s", g_strerror(errno));
1656 + }
1657 + } else {
1658 + if (close(vmaw->fd) < 0) {
1659 + vma_writer_set_error(vmaw, "vma_writer_close: "
1660 + "close failed - %s", g_strerror(errno));
1661 + }
1662 + }
1663 +
1664 + for (i = 0; i <= 255; i++) {
1665 + VmaStreamInfo *si = &vmaw->stream_info[i];
1666 + if (si->size) {
1667 + if (!si->finished) {
1668 + vma_writer_set_error(vmaw, "vma_writer_close: "
1669 + "detected open stream '%s'", si->devname);
1670 + } else if ((si->transferred != si->size) &&
1671 + (i != vmaw->vmstate_stream)) {
1672 + vma_writer_set_error(vmaw, "vma_writer_close: "
1673 + "incomplete stream '%s' (%zd != %zd)",
1674 + si->devname, si->transferred, si->size);
1675 + }
1676 + }
1677 + }
1678 +
1679 + for (i = 0; i <= 255; i++) {
1680 + vmaw->stream_info[i].finished = 1; /* mark as closed */
1681 + }
1682 +
1683 + vmaw->closed = 1;
1684 +
1685 + if (vmaw->status < 0 && *errp == NULL) {
1686 + error_setg(errp, "%s", vmaw->errmsg);
1687 + }
1688 +
1689 + qemu_co_mutex_unlock(&vmaw->flush_lock);
1690 +
1691 + return vmaw->status;
1692 +}
1693 +
1694 +void vma_writer_destroy(VmaWriter *vmaw)
1695 +{
1696 + assert(vmaw);
1697 +
1698 + int i;
1699 +
1700 + for (i = 0; i <= 255; i++) {
1701 + if (vmaw->stream_info[i].devname) {
1702 + g_free(vmaw->stream_info[i].devname);
1703 + }
1704 + }
1705 +
1706 + if (vmaw->md5csum) {
1707 + g_checksum_free(vmaw->md5csum);
1708 + }
1709 +
1710 + qemu_vfree(vmaw->headerbuf);
1711 + qemu_vfree(vmaw->outbuf);
1712 + g_free(vmaw);
1713 +}
1714 diff --git a/vma.c b/vma.c
1715 new file mode 100644
1716 index 0000000000..2eea2fc281
1717 --- /dev/null
1718 +++ b/vma.c
1719 @@ -0,0 +1,839 @@
1720 +/*
1721 + * VMA: Virtual Machine Archive
1722 + *
1723 + * Copyright (C) 2012-2013 Proxmox Server Solutions
1724 + *
1725 + * Authors:
1726 + * Dietmar Maurer (dietmar@proxmox.com)
1727 + *
1728 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
1729 + * See the COPYING file in the top-level directory.
1730 + *
1731 + */
1732 +
1733 +#include "qemu/osdep.h"
1734 +#include <glib.h>
1735 +
1736 +#include "vma.h"
1737 +#include "qemu-common.h"
1738 +#include "qemu/module.h"
1739 +#include "qemu/error-report.h"
1740 +#include "qemu/main-loop.h"
1741 +#include "qemu/cutils.h"
1742 +#include "qapi/qmp/qdict.h"
1743 +#include "sysemu/block-backend.h"
1744 +
1745 +static void help(void)
1746 +{
1747 + const char *help_msg =
1748 + "usage: vma command [command options]\n"
1749 + "\n"
1750 + "vma list <filename>\n"
1751 + "vma config <filename> [-c config]\n"
1752 + "vma create <filename> [-c config] pathname ...\n"
1753 + "vma extract <filename> [-r <fifo>] <targetdir>\n"
1754 + "vma verify <filename> [-v]\n"
1755 + ;
1756 +
1757 + printf("%s", help_msg);
1758 + exit(1);
1759 +}
1760 +
1761 +static const char *extract_devname(const char *path, char **devname, int index)
1762 +{
1763 + assert(path);
1764 +
1765 + const char *sep = strchr(path, '=');
1766 +
1767 + if (sep) {
1768 + *devname = g_strndup(path, sep - path);
1769 + path = sep + 1;
1770 + } else {
1771 + if (index >= 0) {
1772 + *devname = g_strdup_printf("disk%d", index);
1773 + } else {
1774 + *devname = NULL;
1775 + }
1776 + }
1777 +
1778 + return path;
1779 +}
1780 +
1781 +static void print_content(VmaReader *vmar)
1782 +{
1783 + assert(vmar);
1784 +
1785 + VmaHeader *head = vma_reader_get_header(vmar);
1786 +
1787 + GList *l = vma_reader_get_config_data(vmar);
1788 + while (l && l->data) {
1789 + VmaConfigData *cdata = (VmaConfigData *)l->data;
1790 + l = g_list_next(l);
1791 + printf("CFG: size: %d name: %s\n", cdata->len, cdata->name);
1792 + }
1793 +
1794 + int i;
1795 + VmaDeviceInfo *di;
1796 + for (i = 1; i < 255; i++) {
1797 + di = vma_reader_get_device_info(vmar, i);
1798 + if (di) {
1799 + if (strcmp(di->devname, "vmstate") == 0) {
1800 + printf("VMSTATE: dev_id=%d memory: %zd\n", i, di->size);
1801 + } else {
1802 + printf("DEV: dev_id=%d size: %zd devname: %s\n",
1803 + i, di->size, di->devname);
1804 + }
1805 + }
1806 + }
1807 + /* ctime is the last entry we print */
1808 + printf("CTIME: %s", ctime(&head->ctime));
1809 + fflush(stdout);
1810 +}
1811 +
1812 +static int list_content(int argc, char **argv)
1813 +{
1814 + int c, ret = 0;
1815 + const char *filename;
1816 +
1817 + for (;;) {
1818 + c = getopt(argc, argv, "h");
1819 + if (c == -1) {
1820 + break;
1821 + }
1822 + switch (c) {
1823 + case '?':
1824 + case 'h':
1825 + help();
1826 + break;
1827 + default:
1828 + g_assert_not_reached();
1829 + }
1830 + }
1831 +
1832 + /* Get the filename */
1833 + if ((optind + 1) != argc) {
1834 + help();
1835 + }
1836 + filename = argv[optind++];
1837 +
1838 + Error *errp = NULL;
1839 + VmaReader *vmar = vma_reader_create(filename, &errp);
1840 +
1841 + if (!vmar) {
1842 + g_error("%s", error_get_pretty(errp));
1843 + }
1844 +
1845 + print_content(vmar);
1846 +
1847 + vma_reader_destroy(vmar);
1848 +
1849 + return ret;
1850 +}
1851 +
1852 +typedef struct RestoreMap {
1853 + char *devname;
1854 + char *path;
1855 + char *format;
1856 + uint64_t throttling_bps;
1857 + char *throttling_group;
1858 + char *cache;
1859 + bool write_zero;
1860 +} RestoreMap;
1861 +
1862 +static bool try_parse_option(char **line, const char *optname, char **out, const char *inbuf) {
1863 + size_t optlen = strlen(optname);
1864 + if (strncmp(*line, optname, optlen) != 0 || (*line)[optlen] != '=') {
1865 + return false;
1866 + }
1867 + if (*out) {
1868 + g_error("read map failed - duplicate value for option '%s'", optname);
1869 + }
1870 + char *value = (*line) + optlen + 1; /* including a '=' */
1871 + char *colon = strchr(value, ':');
1872 + if (!colon) {
1873 + g_error("read map failed - option '%s' not terminated ('%s')",
1874 + optname, inbuf);
1875 + }
1876 + *line = colon+1;
1877 + *out = g_strndup(value, colon - value);
1878 + return true;
1879 +}
1880 +
1881 +static uint64_t verify_u64(const char *text) {
1882 + uint64_t value;
1883 + const char *endptr = NULL;
1884 + if (qemu_strtou64(text, &endptr, 0, &value) != 0 || !endptr || *endptr) {
1885 + g_error("read map failed - not a number: %s", text);
1886 + }
1887 + return value;
1888 +}
1889 +
1890 +static int extract_content(int argc, char **argv)
1891 +{
1892 + int c, ret = 0;
1893 + int verbose = 0;
1894 + const char *filename;
1895 + const char *dirname;
1896 + const char *readmap = NULL;
1897 +
1898 + for (;;) {
1899 + c = getopt(argc, argv, "hvr:");
1900 + if (c == -1) {
1901 + break;
1902 + }
1903 + switch (c) {
1904 + case '?':
1905 + case 'h':
1906 + help();
1907 + break;
1908 + case 'r':
1909 + readmap = optarg;
1910 + break;
1911 + case 'v':
1912 + verbose = 1;
1913 + break;
1914 + default:
1915 + help();
1916 + }
1917 + }
1918 +
1919 + /* Get the filename */
1920 + if ((optind + 2) != argc) {
1921 + help();
1922 + }
1923 + filename = argv[optind++];
1924 + dirname = argv[optind++];
1925 +
1926 + Error *errp = NULL;
1927 + VmaReader *vmar = vma_reader_create(filename, &errp);
1928 +
1929 + if (!vmar) {
1930 + g_error("%s", error_get_pretty(errp));
1931 + }
1932 +
1933 + if (mkdir(dirname, 0777) < 0) {
1934 + g_error("unable to create target directory %s - %s",
1935 + dirname, g_strerror(errno));
1936 + }
1937 +
1938 + GList *l = vma_reader_get_config_data(vmar);
1939 + while (l && l->data) {
1940 + VmaConfigData *cdata = (VmaConfigData *)l->data;
1941 + l = g_list_next(l);
1942 + char *cfgfn = g_strdup_printf("%s/%s", dirname, cdata->name);
1943 + GError *err = NULL;
1944 + if (!g_file_set_contents(cfgfn, (gchar *)cdata->data, cdata->len,
1945 + &err)) {
1946 + g_error("unable to write file: %s", err->message);
1947 + }
1948 + }
1949 +
1950 + GHashTable *devmap = g_hash_table_new(g_str_hash, g_str_equal);
1951 +
1952 + if (readmap) {
1953 + print_content(vmar);
1954 +
1955 + FILE *map = fopen(readmap, "r");
1956 + if (!map) {
1957 + g_error("unable to open fifo %s - %s", readmap, g_strerror(errno));
1958 + }
1959 +
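+        /* each map line is a colon-separated list of optional
+         * format=/throttling.bps=/throttling.group=/cache= options followed
+         * by <0|1>:<devname>=<path>, where the leading flag selects whether
+         * zero blocks are written to the target */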
1960 + while (1) {
1961 + char inbuf[8192];
1962 + char *line = fgets(inbuf, sizeof(inbuf), map);
1963 + char *format = NULL;
1964 + char *bps = NULL;
1965 + char *group = NULL;
1966 + char *cache = NULL;
1967 + if (!line || line[0] == '\0' || !strcmp(line, "done\n")) {
1968 + break;
1969 + }
1970 + int len = strlen(line);
1971 + if (line[len - 1] == '\n') {
1972 + line[len - 1] = '\0';
1973 + if (len == 1) {
1974 + break;
1975 + }
1976 + }
1977 +
1978 + while (1) {
1979 + if (!try_parse_option(&line, "format", &format, inbuf) &&
1980 + !try_parse_option(&line, "throttling.bps", &bps, inbuf) &&
1981 + !try_parse_option(&line, "throttling.group", &group, inbuf) &&
1982 + !try_parse_option(&line, "cache", &cache, inbuf))
1983 + {
1984 + break;
1985 + }
1986 + }
1987 +
1988 + uint64_t bps_value = 0;
1989 + if (bps) {
1990 + bps_value = verify_u64(bps);
1991 + g_free(bps);
1992 + }
1993 +
1994 + const char *path;
1995 + bool write_zero;
1996 + if (line[0] == '0' && line[1] == ':') {
1997 + path = line + 2;
1998 + write_zero = false;
1999 + } else if (line[0] == '1' && line[1] == ':') {
2000 + path = line + 2;
2001 + write_zero = true;
2002 + } else {
2003 + g_error("read map failed - parse error ('%s')", inbuf);
2004 + }
2005 +
2006 + char *devname = NULL;
2007 + path = extract_devname(path, &devname, -1);
2008 + if (!devname) {
2009 + g_error("read map failed - no dev name specified ('%s')",
2010 + inbuf);
2011 + }
2012 +
2013 + RestoreMap *map = g_new0(RestoreMap, 1);
2014 + map->devname = g_strdup(devname);
2015 + map->path = g_strdup(path);
2016 + map->format = format;
2017 + map->throttling_bps = bps_value;
2018 + map->throttling_group = group;
2019 + map->cache = cache;
2020 + map->write_zero = write_zero;
2021 +
2022 + g_hash_table_insert(devmap, map->devname, map);
2023 +
2024 + };
2025 + }
2026 +
2027 + int i;
2028 + int vmstate_fd = -1;
2029 + guint8 vmstate_stream = 0;
2030 +
2031 + BlockBackend *blk = NULL;
2032 +
2033 + for (i = 1; i < 255; i++) {
2034 + VmaDeviceInfo *di = vma_reader_get_device_info(vmar, i);
2035 + if (di && (strcmp(di->devname, "vmstate") == 0)) {
2036 + vmstate_stream = i;
2037 + char *statefn = g_strdup_printf("%s/vmstate.bin", dirname);
2038 + vmstate_fd = open(statefn, O_WRONLY|O_CREAT|O_EXCL, 0644);
2039 + if (vmstate_fd < 0) {
2040 + g_error("create vmstate file '%s' failed - %s", statefn,
2041 + g_strerror(errno));
2042 + }
2043 + g_free(statefn);
2044 + } else if (di) {
2045 + char *devfn = NULL;
2046 + const char *format = NULL;
2047 + uint64_t throttling_bps = 0;
2048 + const char *throttling_group = NULL;
2049 + const char *cache = NULL;
2050 + int flags = BDRV_O_RDWR;
2051 + bool write_zero = true;
2052 +
2053 + if (readmap) {
2054 + RestoreMap *map;
2055 + map = (RestoreMap *)g_hash_table_lookup(devmap, di->devname);
2056 + if (map == NULL) {
2057 + g_error("no device name mapping for %s", di->devname);
2058 + }
2059 + devfn = map->path;
2060 + format = map->format;
2061 + throttling_bps = map->throttling_bps;
2062 + throttling_group = map->throttling_group;
2063 + cache = map->cache;
2064 + write_zero = map->write_zero;
2065 + } else {
2066 + devfn = g_strdup_printf("%s/tmp-disk-%s.raw",
2067 + dirname, di->devname);
2068 + printf("DEVINFO %s %zd\n", devfn, di->size);
2069 +
2070 + bdrv_img_create(devfn, "raw", NULL, NULL, NULL, di->size,
2071 + flags, true, &errp);
2072 + if (errp) {
2073 + g_error("can't create file %s: %s", devfn,
2074 + error_get_pretty(errp));
2075 + }
2076 +
2077 + /* Note: we created an empty file above, so there is no
2078 + * need to write zeroes (so we generate a sparse file)
2079 + */
2080 + write_zero = false;
2081 + }
2082 +
2083 + size_t devlen = strlen(devfn);
2084 + QDict *options = NULL;
2085 + bool writethrough;
2086 + if (format) {
2087 + /* explicit format from commandline */
2088 + options = qdict_new();
2089 + qdict_put_str(options, "driver", format);
2090 + } else if ((devlen > 4 && strcmp(devfn+devlen-4, ".raw") == 0) ||
2091 + strncmp(devfn, "/dev/", 5) == 0)
2092 + {
2093 +            /* This part is now deprecated for PVE as well (just as qemu
2094 +             * deprecated not specifying an explicit raw format, too).
2095 +             */
2096 + /* explicit raw format */
2097 + options = qdict_new();
2098 + qdict_put_str(options, "driver", "raw");
2099 + }
2100 + if (cache && bdrv_parse_cache_mode(cache, &flags, &writethrough)) {
2101 + g_error("invalid cache option: %s\n", cache);
2102 + }
2103 +
2104 + if (errp || !(blk = blk_new_open(devfn, NULL, options, flags, &errp))) {
2105 + g_error("can't open file %s - %s", devfn,
2106 + error_get_pretty(errp));
2107 + }
2108 +
2109 + if (cache) {
2110 + blk_set_enable_write_cache(blk, !writethrough);
2111 + }
2112 +
2113 + if (throttling_group) {
2114 + blk_io_limits_enable(blk, throttling_group);
2115 + }
2116 +
2117 + if (throttling_bps) {
2118 + if (!throttling_group) {
2119 + blk_io_limits_enable(blk, devfn);
2120 + }
2121 +
2122 + ThrottleConfig cfg;
2123 + throttle_config_init(&cfg);
2124 + cfg.buckets[THROTTLE_BPS_WRITE].avg = throttling_bps;
2125 + Error *err = NULL;
2126 + if (!throttle_is_valid(&cfg, &err)) {
2127 + error_report_err(err);
2128 + g_error("failed to apply throttling");
2129 + }
2130 + blk_set_io_limits(blk, &cfg);
2131 + }
2132 +
2133 + if (vma_reader_register_bs(vmar, i, blk, write_zero, &errp) < 0) {
2134 + g_error("%s", error_get_pretty(errp));
2135 + }
2136 +
2137 + if (!readmap) {
2138 + g_free(devfn);
2139 + }
2140 + }
2141 + }
2142 +
2143 + if (vma_reader_restore(vmar, vmstate_fd, verbose, &errp) < 0) {
2144 + g_error("restore failed - %s", error_get_pretty(errp));
2145 + }
2146 +
2147 + if (!readmap) {
2148 + for (i = 1; i < 255; i++) {
2149 + VmaDeviceInfo *di = vma_reader_get_device_info(vmar, i);
2150 + if (di && (i != vmstate_stream)) {
2151 + char *tmpfn = g_strdup_printf("%s/tmp-disk-%s.raw",
2152 + dirname, di->devname);
2153 + char *fn = g_strdup_printf("%s/disk-%s.raw",
2154 + dirname, di->devname);
2155 + if (rename(tmpfn, fn) != 0) {
2156 + g_error("rename %s to %s failed - %s",
2157 + tmpfn, fn, g_strerror(errno));
2158 + }
2159 + }
2160 + }
2161 + }
2162 +
2163 + vma_reader_destroy(vmar);
2164 +
2165 + blk_unref(blk);
2166 +
2167 + bdrv_close_all();
2168 +
2169 + return ret;
2170 +}
2171 +
2172 +static int verify_content(int argc, char **argv)
2173 +{
2174 + int c, ret = 0;
2175 + int verbose = 0;
2176 + const char *filename;
2177 +
2178 + for (;;) {
2179 + c = getopt(argc, argv, "hv");
2180 + if (c == -1) {
2181 + break;
2182 + }
2183 + switch (c) {
2184 + case '?':
2185 + case 'h':
2186 + help();
2187 + break;
2188 + case 'v':
2189 + verbose = 1;
2190 + break;
2191 + default:
2192 + help();
2193 + }
2194 + }
2195 +
2196 + /* Get the filename */
2197 + if ((optind + 1) != argc) {
2198 + help();
2199 + }
2200 + filename = argv[optind++];
2201 +
2202 + Error *errp = NULL;
2203 + VmaReader *vmar = vma_reader_create(filename, &errp);
2204 +
2205 + if (!vmar) {
2206 + g_error("%s", error_get_pretty(errp));
2207 + }
2208 +
2209 + if (verbose) {
2210 + print_content(vmar);
2211 + }
2212 +
2213 + if (vma_reader_verify(vmar, verbose, &errp) < 0) {
2214 + g_error("verify failed - %s", error_get_pretty(errp));
2215 + }
2216 +
2217 + vma_reader_destroy(vmar);
2218 +
2219 + bdrv_close_all();
2220 +
2221 + return ret;
2222 +}
2223 +
2224 +typedef struct BackupJob {
2225 + BlockBackend *target;
2226 + int64_t len;
2227 + VmaWriter *vmaw;
2228 + uint8_t dev_id;
2229 +} BackupJob;
2230 +
2231 +#define BACKUP_SECTORS_PER_CLUSTER (VMA_CLUSTER_SIZE / BDRV_SECTOR_SIZE)
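+/* with VMA_CLUSTER_SIZE = 65536 and BDRV_SECTOR_SIZE = 512 this is
+ * 128 sectors per cluster */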
2232 +
2233 +static void coroutine_fn backup_run_empty(void *opaque)
2234 +{
2235 + VmaWriter *vmaw = (VmaWriter *)opaque;
2236 +
2237 + vma_writer_flush_output(vmaw);
2238 +
2239 + Error *err = NULL;
2240 + if (vma_writer_close(vmaw, &err) != 0) {
2241 + g_warning("vma_writer_close failed %s", error_get_pretty(err));
2242 + }
2243 +}
2244 +
2245 +static void coroutine_fn backup_run(void *opaque)
2246 +{
2247 + BackupJob *job = (BackupJob *)opaque;
2248 + struct iovec iov;
2249 + QEMUIOVector qiov;
2250 +
2251 + int64_t start, end;
2252 + int ret = 0;
2253 +
2254 + unsigned char *buf = blk_blockalign(job->target, VMA_CLUSTER_SIZE);
2255 +
2256 + start = 0;
2257 + end = DIV_ROUND_UP(job->len / BDRV_SECTOR_SIZE,
2258 + BACKUP_SECTORS_PER_CLUSTER);
2259 +
2260 + for (; start < end; start++) {
2261 + iov.iov_base = buf;
2262 + iov.iov_len = VMA_CLUSTER_SIZE;
2263 + qemu_iovec_init_external(&qiov, &iov, 1);
2264 +
2265 + ret = blk_co_preadv(job->target, start * VMA_CLUSTER_SIZE,
2266 + VMA_CLUSTER_SIZE, &qiov, 0);
2267 + if (ret < 0) {
2268 + vma_writer_set_error(job->vmaw, "read error", -1);
2269 + goto out;
2270 + }
2271 +
2272 + size_t zb = 0;
2273 + if (vma_writer_write(job->vmaw, job->dev_id, start, buf, &zb) < 0) {
2274 + vma_writer_set_error(job->vmaw, "backup_dump_cb vma_writer_write failed", -1);
2275 + goto out;
2276 + }
2277 + }
2278 +
2279 +
2280 +out:
2281 + if (vma_writer_close_stream(job->vmaw, job->dev_id) <= 0) {
2282 + Error *err = NULL;
2283 + if (vma_writer_close(job->vmaw, &err) != 0) {
2284 + g_warning("vma_writer_close failed %s", error_get_pretty(err));
2285 + }
2286 + }
2287 + qemu_vfree(buf);
2288 +}
2289 +
2290 +static int create_archive(int argc, char **argv)
2291 +{
2292 + int i, c;
2293 + int verbose = 0;
2294 + const char *archivename;
2295 + GList *config_files = NULL;
2296 +
2297 + for (;;) {
2298 + c = getopt(argc, argv, "hvc:");
2299 + if (c == -1) {
2300 + break;
2301 + }
2302 + switch (c) {
2303 + case '?':
2304 + case 'h':
2305 + help();
2306 + break;
2307 + case 'c':
2308 + config_files = g_list_append(config_files, optarg);
2309 + break;
2310 + case 'v':
2311 + verbose = 1;
2312 + break;
2313 + default:
2314 + g_assert_not_reached();
2315 + }
2316 + }
2317 +
2318 +
2319 +    /* make sure we have an archive name */
2320 + if ((optind + 1) > argc) {
2321 + help();
2322 + }
2323 +
2324 + archivename = argv[optind++];
2325 +
2326 + uuid_t uuid;
2327 + uuid_generate(uuid);
2328 +
2329 + Error *local_err = NULL;
2330 + VmaWriter *vmaw = vma_writer_create(archivename, uuid, &local_err);
2331 +
2332 + if (vmaw == NULL) {
2333 + g_error("%s", error_get_pretty(local_err));
2334 + }
2335 +
2336 + GList *l = config_files;
2337 + while (l && l->data) {
2338 + char *name = l->data;
2339 + char *cdata = NULL;
2340 + gsize clen = 0;
2341 + GError *err = NULL;
2342 + if (!g_file_get_contents(name, &cdata, &clen, &err)) {
2343 + unlink(archivename);
2344 + g_error("Unable to read file: %s", err->message);
2345 + }
2346 +
2347 + if (vma_writer_add_config(vmaw, name, cdata, clen) != 0) {
2348 + unlink(archivename);
2349 + g_error("Unable to append config data %s (len = %zd)",
2350 + name, clen);
2351 + }
2352 + l = g_list_next(l);
2353 + }
2354 +
2355 + int devcount = 0;
2356 + while (optind < argc) {
2357 + const char *path = argv[optind++];
2358 + char *devname = NULL;
2359 + path = extract_devname(path, &devname, devcount++);
2360 +
2361 + Error *errp = NULL;
2362 + BlockBackend *target;
2363 +
2364 + target = blk_new_open(path, NULL, NULL, 0, &errp);
2365 + if (!target) {
2366 + unlink(archivename);
2367 + g_error("bdrv_open '%s' failed - %s", path, error_get_pretty(errp));
2368 + }
2369 + int64_t size = blk_getlength(target);
2370 + int dev_id = vma_writer_register_stream(vmaw, devname, size);
2371 + if (dev_id <= 0) {
2372 + unlink(archivename);
2373 + g_error("vma_writer_register_stream '%s' failed", devname);
2374 + }
2375 +
2376 + BackupJob *job = g_new0(BackupJob, 1);
2377 + job->len = size;
2378 + job->target = target;
2379 + job->vmaw = vmaw;
2380 + job->dev_id = dev_id;
2381 +
2382 + Coroutine *co = qemu_coroutine_create(backup_run, job);
2383 + qemu_coroutine_enter(co);
2384 + }
2385 +
2386 + VmaStatus vmastat;
2387 + int percent = 0;
2388 + int last_percent = -1;
2389 +
2390 + if (devcount) {
2391 + while (1) {
2392 + main_loop_wait(false);
2393 + vma_writer_get_status(vmaw, &vmastat);
2394 +
2395 + if (verbose) {
2396 +
2397 + uint64_t total = 0;
2398 + uint64_t transferred = 0;
2399 + uint64_t zero_bytes = 0;
2400 +
2401 + int i;
2402 + for (i = 0; i < 256; i++) {
2403 + if (vmastat.stream_info[i].size) {
2404 + total += vmastat.stream_info[i].size;
2405 + transferred += vmastat.stream_info[i].transferred;
2406 + zero_bytes += vmastat.stream_info[i].zero_bytes;
2407 + }
2408 + }
2409 + percent = (transferred*100)/total;
2410 + if (percent != last_percent) {
2411 + fprintf(stderr, "progress %d%% %zd/%zd %zd\n", percent,
2412 + transferred, total, zero_bytes);
2413 + fflush(stderr);
2414 +
2415 + last_percent = percent;
2416 + }
2417 + }
2418 +
2419 + if (vmastat.closed) {
2420 + break;
2421 + }
2422 + }
2423 + } else {
2424 + Coroutine *co = qemu_coroutine_create(backup_run_empty, vmaw);
2425 + qemu_coroutine_enter(co);
2426 + while (1) {
2427 + main_loop_wait(false);
2428 + vma_writer_get_status(vmaw, &vmastat);
2429 + if (vmastat.closed) {
2430 + break;
2431 + }
2432 + }
2433 + }
2434 +
2435 + bdrv_drain_all();
2436 +
2437 + vma_writer_get_status(vmaw, &vmastat);
2438 +
2439 + if (verbose) {
2440 + for (i = 0; i < 256; i++) {
2441 + VmaStreamInfo *si = &vmastat.stream_info[i];
2442 + if (si->size) {
2443 + fprintf(stderr, "image %s: size=%zd zeros=%zd saved=%zd\n",
2444 + si->devname, si->size, si->zero_bytes,
2445 + si->size - si->zero_bytes);
2446 + }
2447 + }
2448 + }
2449 +
2450 + if (vmastat.status < 0) {
2451 + unlink(archivename);
2452 + g_error("creating vma archive failed");
2453 + }
2454 +
2455 + vma_writer_destroy(vmaw);
2456 + return 0;
2457 +}
2458 +
2459 +static int dump_config(int argc, char **argv)
2460 +{
2461 + int c, ret = 0;
2462 + const char *filename;
2463 + const char *config_name = "qemu-server.conf";
2464 +
2465 + for (;;) {
2466 + c = getopt(argc, argv, "hc:");
2467 + if (c == -1) {
2468 + break;
2469 + }
2470 + switch (c) {
2471 + case '?':
2472 + case 'h':
2473 + help();
2474 + break;
2475 + case 'c':
2476 + config_name = optarg;
2477 + break;
2478 + default:
2479 + help();
2480 + }
2481 + }
2482 +
2483 + /* Get the filename */
2484 + if ((optind + 1) != argc) {
2485 + help();
2486 + }
2487 + filename = argv[optind++];
2488 +
2489 + Error *errp = NULL;
2490 + VmaReader *vmar = vma_reader_create(filename, &errp);
2491 +
2492 + if (!vmar) {
2493 + g_error("%s", error_get_pretty(errp));
2494 + }
2495 +
2496 + int found = 0;
2497 + GList *l = vma_reader_get_config_data(vmar);
2498 + while (l && l->data) {
2499 + VmaConfigData *cdata = (VmaConfigData *)l->data;
2500 + l = g_list_next(l);
2501 + if (strcmp(cdata->name, config_name) == 0) {
2502 + found = 1;
2503 + fwrite(cdata->data, cdata->len, 1, stdout);
2504 + break;
2505 + }
2506 + }
2507 +
2508 + vma_reader_destroy(vmar);
2509 +
2510 + bdrv_close_all();
2511 +
2512 + if (!found) {
2513 + fprintf(stderr, "unable to find configuration data '%s'\n", config_name);
2514 + return -1;
2515 + }
2516 +
2517 + return ret;
2518 +}
2519 +
2520 +int main(int argc, char **argv)
2521 +{
2522 + const char *cmdname;
2523 + Error *main_loop_err = NULL;
2524 +
2525 + error_init(argv[0]);
2526 + module_call_init(MODULE_INIT_TRACE);
2527 + qemu_init_exec_dir(argv[0]);
2528 +
2529 + if (qemu_init_main_loop(&main_loop_err)) {
2530 + g_error("%s", error_get_pretty(main_loop_err));
2531 + }
2532 +
2533 + bdrv_init();
2534 + module_call_init(MODULE_INIT_QOM);
2535 +
2536 + if (argc < 2) {
2537 + help();
2538 + }
2539 +
2540 + cmdname = argv[1];
2541 + argc--; argv++;
2542 +
2543 +
2544 + if (!strcmp(cmdname, "list")) {
2545 + return list_content(argc, argv);
2546 + } else if (!strcmp(cmdname, "create")) {
2547 + return create_archive(argc, argv);
2548 + } else if (!strcmp(cmdname, "extract")) {
2549 + return extract_content(argc, argv);
2550 + } else if (!strcmp(cmdname, "verify")) {
2551 + return verify_content(argc, argv);
2552 + } else if (!strcmp(cmdname, "config")) {
2553 + return dump_config(argc, argv);
2554 + }
2555 +
2556 + help();
2557 + return 0;
2558 +}
2559 diff --git a/vma.h b/vma.h
2560 new file mode 100644
2561 index 0000000000..c895c97f6d
2562 --- /dev/null
2563 +++ b/vma.h
2564 @@ -0,0 +1,150 @@
2565 +/*
2566 + * VMA: Virtual Machine Archive
2567 + *
2568 + * Copyright (C) Proxmox Server Solutions
2569 + *
2570 + * Authors:
2571 + * Dietmar Maurer (dietmar@proxmox.com)
2572 + *
2573 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
2574 + * See the COPYING file in the top-level directory.
2575 + *
2576 + */
2577 +
2578 +#ifndef BACKUP_VMA_H
2579 +#define BACKUP_VMA_H
2580 +
2581 +#include <uuid/uuid.h>
2582 +#include "qapi/error.h"
2583 +#include "block/block.h"
2584 +
2585 +#define VMA_BLOCK_BITS 12
2586 +#define VMA_BLOCK_SIZE (1<<VMA_BLOCK_BITS)
2587 +#define VMA_CLUSTER_BITS (VMA_BLOCK_BITS+4)
2588 +#define VMA_CLUSTER_SIZE (1<<VMA_CLUSTER_BITS)
2589 +
2590 +#if VMA_CLUSTER_SIZE != 65536
2591 +#error unexpected cluster size
2592 +#endif
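+/* i.e. 4096 byte blocks, 65536 byte clusters, 16 blocks per cluster */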
2593 +
2594 +#define VMA_EXTENT_HEADER_SIZE 512
2595 +#define VMA_BLOCKS_PER_EXTENT 59
2596 +#define VMA_MAX_CONFIGS 256
2597 +
2598 +#define VMA_MAX_EXTENT_SIZE \
2599 + (VMA_EXTENT_HEADER_SIZE+VMA_CLUSTER_SIZE*VMA_BLOCKS_PER_EXTENT)
2600 +#if VMA_MAX_EXTENT_SIZE != 3867136
2601 +#error unexpected VMA_EXTENT_SIZE
2602 +#endif
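+/* a full extent is the 512 byte extent header plus 59 clusters of
+ * 64 KiB data: 512 + 59 * 65536 = 3867136 bytes */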
2603 +
2604 +/* File Format Definitions */
2605 +
2606 +#define VMA_MAGIC (GUINT32_TO_BE(('V'<<24)|('M'<<16)|('A'<<8)|0x00))
2607 +#define VMA_EXTENT_MAGIC (GUINT32_TO_BE(('V'<<24)|('M'<<16)|('A'<<8)|'E'))
2608 +
2609 +typedef struct VmaDeviceInfoHeader {
2610 + uint32_t devname_ptr; /* offset into blob_buffer table */
2611 + uint32_t reserved0;
2612 + uint64_t size; /* device size in bytes */
2613 + uint64_t reserved1;
2614 + uint64_t reserved2;
2615 +} VmaDeviceInfoHeader;
2616 +
2617 +typedef struct VmaHeader {
2618 + uint32_t magic;
2619 + uint32_t version;
2620 + unsigned char uuid[16];
2621 + int64_t ctime;
2622 + unsigned char md5sum[16];
2623 +
2624 + uint32_t blob_buffer_offset;
2625 + uint32_t blob_buffer_size;
2626 + uint32_t header_size;
2627 +
2628 + unsigned char reserved[1984];
2629 +
2630 + uint32_t config_names[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
2631 + uint32_t config_data[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
2632 +
2633 + uint32_t reserved1;
2634 +
2635 + VmaDeviceInfoHeader dev_info[256];
2636 +} VmaHeader;
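+/* all members are naturally aligned, so VmaHeader has no implicit padding:
+ * 4096 bytes of fixed fields plus 256 * 32 byte dev_info entries = 12288
+ * bytes; device and config names live in a separate blob buffer located
+ * via blob_buffer_offset/blob_buffer_size */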
2637 +
2638 +typedef struct VmaExtentHeader {
2639 + uint32_t magic;
2640 + uint16_t reserved1;
2641 + uint16_t block_count;
2642 + unsigned char uuid[16];
2643 + unsigned char md5sum[16];
2644 + uint64_t blockinfo[VMA_BLOCKS_PER_EXTENT];
2645 +} VmaExtentHeader;
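+/* 4 + 2 + 2 + 16 + 16 + 59 * 8 = 512 bytes, i.e. VMA_EXTENT_HEADER_SIZE */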
2646 +
2647 +/* functions/definitions to read/write vma files */
2648 +
2649 +typedef struct VmaReader VmaReader;
2650 +
2651 +typedef struct VmaWriter VmaWriter;
2652 +
2653 +typedef struct VmaConfigData {
2654 + const char *name;
2655 + const void *data;
2656 + uint32_t len;
2657 +} VmaConfigData;
2658 +
2659 +typedef struct VmaStreamInfo {
2660 + uint64_t size;
2661 + uint64_t cluster_count;
2662 + uint64_t transferred;
2663 + uint64_t zero_bytes;
2664 + int finished;
2665 + char *devname;
2666 +} VmaStreamInfo;
2667 +
2668 +typedef struct VmaStatus {
2669 + int status;
2670 + bool closed;
2671 + char errmsg[8192];
2672 + char uuid_str[37];
2673 + VmaStreamInfo stream_info[256];
2674 +} VmaStatus;
2675 +
2676 +typedef struct VmaDeviceInfo {
2677 + uint64_t size; /* device size in bytes */
2678 + const char *devname;
2679 +} VmaDeviceInfo;
2680 +
2681 +VmaWriter *vma_writer_create(const char *filename, uuid_t uuid, Error **errp);
2682 +int vma_writer_close(VmaWriter *vmaw, Error **errp);
2683 +void vma_writer_error_propagate(VmaWriter *vmaw, Error **errp);
2684 +void vma_writer_destroy(VmaWriter *vmaw);
2685 +int vma_writer_add_config(VmaWriter *vmaw, const char *name, gpointer data,
2686 + size_t len);
2687 +int vma_writer_register_stream(VmaWriter *vmaw, const char *devname,
2688 + size_t size);
2689 +
2690 +int64_t coroutine_fn vma_writer_write(VmaWriter *vmaw, uint8_t dev_id,
2691 + int64_t cluster_num,
2692 + const unsigned char *buf,
2693 + size_t *zero_bytes);
2694 +
2695 +int coroutine_fn vma_writer_close_stream(VmaWriter *vmaw, uint8_t dev_id);
2696 +int coroutine_fn vma_writer_flush_output(VmaWriter *vmaw);
2697 +
2698 +int vma_writer_get_status(VmaWriter *vmaw, VmaStatus *status);
2699 +void vma_writer_set_error(VmaWriter *vmaw, const char *fmt, ...);
2700 +
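+/*
+ * Typical writer sequence (see create_archive() in vma.c):
+ * vma_writer_create(), optionally vma_writer_add_config() per config file,
+ * vma_writer_register_stream() per device; from a coroutine,
+ * vma_writer_write() for each cluster and vma_writer_close_stream() at the
+ * end of a stream; once all streams are closed, vma_writer_close() and
+ * vma_writer_destroy().
+ */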
2701 +
2702 +VmaReader *vma_reader_create(const char *filename, Error **errp);
2703 +void vma_reader_destroy(VmaReader *vmar);
2704 +VmaHeader *vma_reader_get_header(VmaReader *vmar);
2705 +GList *vma_reader_get_config_data(VmaReader *vmar);
2706 +VmaDeviceInfo *vma_reader_get_device_info(VmaReader *vmar, guint8 dev_id);
2707 +int vma_reader_register_bs(VmaReader *vmar, guint8 dev_id,
2708 + BlockBackend *target, bool write_zeroes,
2709 + Error **errp);
2710 +int vma_reader_restore(VmaReader *vmar, int vmstate_fd, bool verbose,
2711 + Error **errp);
2712 +int vma_reader_verify(VmaReader *vmar, bool verbose, Error **errp);
2713 +
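+/*
+ * Typical reader sequence (see extract_content() and verify_content() in
+ * vma.c): vma_reader_create(), then either vma_reader_verify(), or
+ * vma_reader_register_bs() for each device listed by
+ * vma_reader_get_device_info() followed by vma_reader_restore(); finish
+ * with vma_reader_destroy().
+ */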
2714 +#endif /* BACKUP_VMA_H */