1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Dietmar Maurer <dietmar@proxmox.com>
3 Date: Mon, 6 Apr 2020 12:16:57 +0200
4 Subject: [PATCH] PVE-Backup: add vma backup format code
5
6 Notes about partial restoring: skipping a certain drive is done via a
7 map line of the form skip=drive-scsi0. Since in PVE, most archives are
8 compressed and piped to vma for restore, it's not easily possible to
9 skip reads.
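
For illustration only (drive names and paths below are made up), a map
piped to 'vma extract -r <fifo>' could look like this, following the
map parser added in vma.c: a leading 1: writes zero clusters to the
target, 0: leaves it sparse, and a final "done" line ends the map:

    skip=drive-scsi0
    format=raw:1:drive-scsi1=/path/to/restored-disk.raw
    done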
10
11 Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
12 [FE: improvements during create
13 allow partial restore]
14 Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
15 ---
16 block/meson.build | 2 +
17 meson.build | 5 +
18 vma-reader.c | 867 +++++++++++++++++++++++++++++++++++++++++++++
19 vma-writer.c | 793 +++++++++++++++++++++++++++++++++++++++++
20 vma.c | 878 ++++++++++++++++++++++++++++++++++++++++++++++
21 vma.h | 150 ++++++++
22 6 files changed, 2695 insertions(+)
23 create mode 100644 vma-reader.c
24 create mode 100644 vma-writer.c
25 create mode 100644 vma.c
26 create mode 100644 vma.h
27
28 diff --git a/block/meson.build b/block/meson.build
29 index 253fe49fa2..744b698a82 100644
30 --- a/block/meson.build
31 +++ b/block/meson.build
32 @@ -47,6 +47,8 @@ block_ss.add(files(
33 'zeroinit.c',
34 ), zstd, zlib, gnutls)
35
36 +block_ss.add(files('../vma-writer.c'), libuuid)
37 +
38 softmmu_ss.add(when: 'CONFIG_TCG', if_true: files('blkreplay.c'))
39 softmmu_ss.add(files('block-ram-registrar.c'))
40
41 diff --git a/meson.build b/meson.build
42 index d964e741e7..603cdb97bb 100644
43 --- a/meson.build
44 +++ b/meson.build
45 @@ -1527,6 +1527,8 @@ keyutils = dependency('libkeyutils', required: false,
46
47 has_gettid = cc.has_function('gettid')
48
49 +libuuid = cc.find_library('uuid', required: true)
50 +
51 # libselinux
52 selinux = dependency('libselinux',
53 required: get_option('selinux'),
54 @@ -3646,6 +3648,9 @@ if have_tools
55 dependencies: [blockdev, qemuutil, gnutls, selinux],
56 install: true)
57
58 + vma = executable('vma', files('vma.c', 'vma-reader.c') + genh,
59 + dependencies: [authz, block, crypto, io, qom], install: true)
60 +
61 subdir('storage-daemon')
62 subdir('contrib/rdmacm-mux')
63 subdir('contrib/elf2dmp')
64 diff --git a/vma-reader.c b/vma-reader.c
65 new file mode 100644
66 index 0000000000..81a891c6b1
67 --- /dev/null
68 +++ b/vma-reader.c
69 @@ -0,0 +1,867 @@
70 +/*
71 + * VMA: Virtual Machine Archive
72 + *
73 + * Copyright (C) 2012 Proxmox Server Solutions
74 + *
75 + * Authors:
76 + * Dietmar Maurer (dietmar@proxmox.com)
77 + *
78 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
79 + * See the COPYING file in the top-level directory.
80 + *
81 + */
82 +
83 +#include "qemu/osdep.h"
84 +#include <glib.h>
85 +#include <uuid/uuid.h>
86 +
87 +#include "qemu/timer.h"
88 +#include "qemu/ratelimit.h"
89 +#include "vma.h"
90 +#include "block/block.h"
91 +#include "sysemu/block-backend.h"
92 +
93 +static unsigned char zero_vma_block[VMA_BLOCK_SIZE];
94 +
95 +typedef struct VmaRestoreState {
96 + BlockBackend *target;
97 + bool write_zeroes;
98 + unsigned long *bitmap;
99 + int bitmap_size;
100 + bool skip;
101 +} VmaRestoreState;
102 +
103 +struct VmaReader {
104 + int fd;
105 + GChecksum *md5csum;
106 + GHashTable *blob_hash;
107 + unsigned char *head_data;
108 + VmaDeviceInfo devinfo[256];
109 + VmaRestoreState rstate[256];
110 + GList *cdata_list;
111 + guint8 vmstate_stream;
112 + uint32_t vmstate_clusters;
113 + /* to show restore percentage if run with -v */
114 + time_t start_time;
115 + int64_t cluster_count;
116 + int64_t clusters_read;
117 + int64_t zero_cluster_data;
118 + int64_t partial_zero_cluster_data;
119 + int clusters_read_per;
120 +};
121 +
122 +static guint
123 +g_int32_hash(gconstpointer v)
124 +{
125 + return *(const uint32_t *)v;
126 +}
127 +
128 +static gboolean
129 +g_int32_equal(gconstpointer v1, gconstpointer v2)
130 +{
131 + return *((const uint32_t *)v1) == *((const uint32_t *)v2);
132 +}
133 +
134 +static int vma_reader_get_bitmap(VmaRestoreState *rstate, int64_t cluster_num)
135 +{
136 + assert(rstate);
137 + assert(rstate->bitmap);
138 +
139 + unsigned long val, idx, bit;
140 +
141 + idx = cluster_num / BITS_PER_LONG;
142 +
143 + assert(rstate->bitmap_size > idx);
144 +
145 + bit = cluster_num % BITS_PER_LONG;
146 + val = rstate->bitmap[idx];
147 +
148 + return !!(val & (1UL << bit));
149 +}
150 +
151 +static void vma_reader_set_bitmap(VmaRestoreState *rstate, int64_t cluster_num,
152 + int dirty)
153 +{
154 + assert(rstate);
155 + assert(rstate->bitmap);
156 +
157 + unsigned long val, idx, bit;
158 +
159 + idx = cluster_num / BITS_PER_LONG;
160 +
161 + assert(rstate->bitmap_size > idx);
162 +
163 + bit = cluster_num % BITS_PER_LONG;
164 + val = rstate->bitmap[idx];
165 + if (dirty) {
166 + if (!(val & (1UL << bit))) {
167 + val |= 1UL << bit;
168 + }
169 + } else {
170 + if (val & (1UL << bit)) {
171 + val &= ~(1UL << bit);
172 + }
173 + }
174 + rstate->bitmap[idx] = val;
175 +}
176 +
177 +typedef struct VmaBlob {
178 + uint32_t start;
179 + uint32_t len;
180 + void *data;
181 +} VmaBlob;
182 +
183 +static const VmaBlob *get_header_blob(VmaReader *vmar, uint32_t pos)
184 +{
185 + assert(vmar);
186 + assert(vmar->blob_hash);
187 +
188 + return g_hash_table_lookup(vmar->blob_hash, &pos);
189 +}
190 +
191 +static const char *get_header_str(VmaReader *vmar, uint32_t pos)
192 +{
193 + const VmaBlob *blob = get_header_blob(vmar, pos);
194 + if (!blob) {
195 + return NULL;
196 + }
197 + const char *res = (char *)blob->data;
198 + if (res[blob->len-1] != '\0') {
199 + return NULL;
200 + }
201 + return res;
202 +}
203 +
204 +static ssize_t
205 +safe_read(int fd, unsigned char *buf, size_t count)
206 +{
207 + ssize_t n;
208 +
209 + do {
210 + n = read(fd, buf, count);
211 + } while (n < 0 && errno == EINTR);
212 +
213 + return n;
214 +}
215 +
216 +static ssize_t
217 +full_read(int fd, unsigned char *buf, size_t len)
218 +{
219 + ssize_t n;
220 + size_t total;
221 +
222 + total = 0;
223 +
224 + while (len > 0) {
225 + n = safe_read(fd, buf, len);
226 +
227 + if (n == 0) {
228 + return total;
229 + }
230 +
231 + if (n <= 0) {
232 + break;
233 + }
234 +
235 + buf += n;
236 + total += n;
237 + len -= n;
238 + }
239 +
240 + if (len) {
241 + return -1;
242 + }
243 +
244 + return total;
245 +}
246 +
247 +void vma_reader_destroy(VmaReader *vmar)
248 +{
249 + assert(vmar);
250 +
251 + if (vmar->fd >= 0) {
252 + close(vmar->fd);
253 + }
254 +
255 + if (vmar->cdata_list) {
256 + g_list_free(vmar->cdata_list);
257 + }
258 +
259 + int i;
260 + for (i = 1; i < 256; i++) {
261 + if (vmar->rstate[i].bitmap) {
262 + g_free(vmar->rstate[i].bitmap);
263 + }
264 + if (vmar->rstate[i].target) {
265 + blk_unref(vmar->rstate[i].target);
266 + }
267 + }
268 +
269 + if (vmar->md5csum) {
270 + g_checksum_free(vmar->md5csum);
271 + }
272 +
273 + if (vmar->blob_hash) {
274 + g_hash_table_destroy(vmar->blob_hash);
275 + }
276 +
277 + if (vmar->head_data) {
278 + g_free(vmar->head_data);
279 + }
280 +
281 + g_free(vmar);
282 +
283 +}
284 +
285 +static int vma_reader_read_head(VmaReader *vmar, Error **errp)
286 +{
287 + assert(vmar);
288 + assert(errp);
289 + assert(*errp == NULL);
290 +
291 + unsigned char md5sum[16];
292 + int i;
293 + int ret = 0;
294 +
295 + vmar->head_data = g_malloc(sizeof(VmaHeader));
296 +
297 + if (full_read(vmar->fd, vmar->head_data, sizeof(VmaHeader)) !=
298 + sizeof(VmaHeader)) {
299 + error_setg(errp, "can't read vma header - %s",
300 + errno ? g_strerror(errno) : "got EOF");
301 + return -1;
302 + }
303 +
304 + VmaHeader *h = (VmaHeader *)vmar->head_data;
305 +
306 + if (h->magic != VMA_MAGIC) {
307 + error_setg(errp, "not a vma file - wrong magic number");
308 + return -1;
309 + }
310 +
311 + uint32_t header_size = GUINT32_FROM_BE(h->header_size);
312 + int need = header_size - sizeof(VmaHeader);
313 + if (need <= 0) {
314 + error_setg(errp, "wrong vma header size %d", header_size);
315 + return -1;
316 + }
317 +
318 + vmar->head_data = g_realloc(vmar->head_data, header_size);
319 + h = (VmaHeader *)vmar->head_data;
320 +
321 + if (full_read(vmar->fd, vmar->head_data + sizeof(VmaHeader), need) !=
322 + need) {
323 + error_setg(errp, "can't read vma header data - %s",
324 + errno ? g_strerror(errno) : "got EOF");
325 + return -1;
326 + }
327 +
328 + memcpy(md5sum, h->md5sum, 16);
329 + memset(h->md5sum, 0, 16);
330 +
331 + g_checksum_reset(vmar->md5csum);
332 + g_checksum_update(vmar->md5csum, vmar->head_data, header_size);
333 + gsize csize = 16;
334 + g_checksum_get_digest(vmar->md5csum, (guint8 *)(h->md5sum), &csize);
335 +
336 + if (memcmp(md5sum, h->md5sum, 16) != 0) {
337 +        error_setg(errp, "wrong vma header checksum");
338 + return -1;
339 + }
340 +
341 + /* we can modify header data after checksum verify */
342 + h->header_size = header_size;
343 +
344 + h->version = GUINT32_FROM_BE(h->version);
345 + if (h->version != 1) {
346 + error_setg(errp, "wrong vma version %d", h->version);
347 + return -1;
348 + }
349 +
350 + h->ctime = GUINT64_FROM_BE(h->ctime);
351 + h->blob_buffer_offset = GUINT32_FROM_BE(h->blob_buffer_offset);
352 + h->blob_buffer_size = GUINT32_FROM_BE(h->blob_buffer_size);
353 +
354 + uint32_t bstart = h->blob_buffer_offset + 1;
355 + uint32_t bend = h->blob_buffer_offset + h->blob_buffer_size;
356 +
357 + if (bstart <= sizeof(VmaHeader)) {
358 + error_setg(errp, "wrong vma blob buffer offset %d",
359 + h->blob_buffer_offset);
360 + return -1;
361 + }
362 +
363 + if (bend > header_size) {
364 + error_setg(errp, "wrong vma blob buffer size %d/%d",
365 + h->blob_buffer_offset, h->blob_buffer_size);
366 + return -1;
367 + }
368 +
369 + while ((bstart + 2) <= bend) {
370 + uint32_t size = vmar->head_data[bstart] +
371 + (vmar->head_data[bstart+1] << 8);
372 + if ((bstart + size + 2) <= bend) {
373 + VmaBlob *blob = g_new0(VmaBlob, 1);
374 + blob->start = bstart - h->blob_buffer_offset;
375 + blob->len = size;
376 + blob->data = vmar->head_data + bstart + 2;
377 + g_hash_table_insert(vmar->blob_hash, &blob->start, blob);
378 + }
379 + bstart += size + 2;
380 + }
381 +
382 +
383 + int count = 0;
384 + for (i = 1; i < 256; i++) {
385 + VmaDeviceInfoHeader *dih = &h->dev_info[i];
386 + uint32_t devname_ptr = GUINT32_FROM_BE(dih->devname_ptr);
387 + uint64_t size = GUINT64_FROM_BE(dih->size);
388 + const char *devname = get_header_str(vmar, devname_ptr);
389 +
390 + if (size && devname) {
391 + count++;
392 + vmar->devinfo[i].size = size;
393 + vmar->devinfo[i].devname = devname;
394 +
395 + if (strcmp(devname, "vmstate") == 0) {
396 + vmar->vmstate_stream = i;
397 + }
398 + }
399 + }
400 +
401 + for (i = 0; i < VMA_MAX_CONFIGS; i++) {
402 + uint32_t name_ptr = GUINT32_FROM_BE(h->config_names[i]);
403 + uint32_t data_ptr = GUINT32_FROM_BE(h->config_data[i]);
404 +
405 + if (!(name_ptr && data_ptr)) {
406 + continue;
407 + }
408 + const char *name = get_header_str(vmar, name_ptr);
409 + const VmaBlob *blob = get_header_blob(vmar, data_ptr);
410 +
411 + if (!(name && blob)) {
412 + error_setg(errp, "vma contains invalid data pointers");
413 + return -1;
414 + }
415 +
416 + VmaConfigData *cdata = g_new0(VmaConfigData, 1);
417 + cdata->name = name;
418 + cdata->data = blob->data;
419 + cdata->len = blob->len;
420 +
421 + vmar->cdata_list = g_list_append(vmar->cdata_list, cdata);
422 + }
423 +
424 + return ret;
425 +}
426 +
427 +VmaReader *vma_reader_create(const char *filename, Error **errp)
428 +{
429 + assert(filename);
430 + assert(errp);
431 +
432 + VmaReader *vmar = g_new0(VmaReader, 1);
433 +
434 + if (strcmp(filename, "-") == 0) {
435 + vmar->fd = dup(0);
436 + } else {
437 + vmar->fd = open(filename, O_RDONLY);
438 + }
439 +
440 + if (vmar->fd < 0) {
441 + error_setg(errp, "can't open file %s - %s\n", filename,
442 + g_strerror(errno));
443 + goto err;
444 + }
445 +
446 + vmar->md5csum = g_checksum_new(G_CHECKSUM_MD5);
447 + if (!vmar->md5csum) {
448 +        error_setg(errp, "can't allocate checksum\n");
449 + goto err;
450 + }
451 +
452 + vmar->blob_hash = g_hash_table_new_full(g_int32_hash, g_int32_equal,
453 + NULL, g_free);
454 +
455 + if (vma_reader_read_head(vmar, errp) < 0) {
456 + goto err;
457 + }
458 +
459 + return vmar;
460 +
461 +err:
462 + if (vmar) {
463 + vma_reader_destroy(vmar);
464 + }
465 +
466 + return NULL;
467 +}
468 +
469 +VmaHeader *vma_reader_get_header(VmaReader *vmar)
470 +{
471 + assert(vmar);
472 + assert(vmar->head_data);
473 +
474 + return (VmaHeader *)(vmar->head_data);
475 +}
476 +
477 +GList *vma_reader_get_config_data(VmaReader *vmar)
478 +{
479 + assert(vmar);
480 + assert(vmar->head_data);
481 +
482 + return vmar->cdata_list;
483 +}
484 +
485 +VmaDeviceInfo *vma_reader_get_device_info(VmaReader *vmar, guint8 dev_id)
486 +{
487 + assert(vmar);
488 + assert(dev_id);
489 +
490 + if (vmar->devinfo[dev_id].size && vmar->devinfo[dev_id].devname) {
491 + return &vmar->devinfo[dev_id];
492 + }
493 +
494 + return NULL;
495 +}
496 +
497 +static void allocate_rstate(VmaReader *vmar, guint8 dev_id,
498 + BlockBackend *target, bool write_zeroes, bool skip)
499 +{
500 + assert(vmar);
501 + assert(dev_id);
502 +
503 + vmar->rstate[dev_id].target = target;
504 + vmar->rstate[dev_id].write_zeroes = write_zeroes;
505 + vmar->rstate[dev_id].skip = skip;
506 +
507 + int64_t size = vmar->devinfo[dev_id].size;
508 +
509 + int64_t bitmap_size = (size/BDRV_SECTOR_SIZE) +
510 + (VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE) * BITS_PER_LONG - 1;
511 + bitmap_size /= (VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE) * BITS_PER_LONG;
512 +
513 + vmar->rstate[dev_id].bitmap_size = bitmap_size;
514 + vmar->rstate[dev_id].bitmap = g_new0(unsigned long, bitmap_size);
515 +
516 + vmar->cluster_count += size/VMA_CLUSTER_SIZE;
517 +}
518 +
519 +int vma_reader_register_bs(VmaReader *vmar, guint8 dev_id, BlockBackend *target,
520 + bool write_zeroes, bool skip, Error **errp)
521 +{
522 + assert(vmar);
523 + assert(target != NULL || skip);
524 + assert(dev_id);
525 + assert(vmar->rstate[dev_id].target == NULL && !vmar->rstate[dev_id].skip);
526 +
527 + if (target != NULL) {
528 + int64_t size = blk_getlength(target);
529 + int64_t size_diff = size - vmar->devinfo[dev_id].size;
530 +
531 +        /* storage types can have different size restrictions, so it
532 +         * is not always possible to create an image with the exact size.
533 +         * So we tolerate a size difference of up to 4MB.
534 + */
535 + if ((size_diff < 0) || (size_diff > 4*1024*1024)) {
536 + error_setg(errp, "vma_reader_register_bs for stream %s failed - "
537 + "unexpected size %zd != %zd", vmar->devinfo[dev_id].devname,
538 + size, vmar->devinfo[dev_id].size);
539 + return -1;
540 + }
541 + }
542 +
543 + allocate_rstate(vmar, dev_id, target, write_zeroes, skip);
544 +
545 + return 0;
546 +}
547 +
548 +static ssize_t safe_write(int fd, void *buf, size_t count)
549 +{
550 + ssize_t n;
551 +
552 + do {
553 + n = write(fd, buf, count);
554 + } while (n < 0 && errno == EINTR);
555 +
556 + return n;
557 +}
558 +
559 +static size_t full_write(int fd, void *buf, size_t len)
560 +{
561 + ssize_t n;
562 + size_t total;
563 +
564 + total = 0;
565 +
566 + while (len > 0) {
567 + n = safe_write(fd, buf, len);
568 + if (n < 0) {
569 + return n;
570 + }
571 + buf += n;
572 + total += n;
573 + len -= n;
574 + }
575 +
576 + if (len) {
577 + /* incomplete write ? */
578 + return -1;
579 + }
580 +
581 + return total;
582 +}
583 +
584 +static int restore_write_data(VmaReader *vmar, guint8 dev_id,
585 + BlockBackend *target, int vmstate_fd,
586 + unsigned char *buf, int64_t sector_num,
587 + int nb_sectors, Error **errp)
588 +{
589 + assert(vmar);
590 +
591 + if (dev_id == vmar->vmstate_stream) {
592 + if (vmstate_fd >= 0) {
593 + int len = nb_sectors * BDRV_SECTOR_SIZE;
594 + int res = full_write(vmstate_fd, buf, len);
595 + if (res < 0) {
596 + error_setg(errp, "write vmstate failed %d", res);
597 + return -1;
598 + }
599 + }
600 + } else {
601 + int res = blk_pwrite(target, sector_num * BDRV_SECTOR_SIZE, nb_sectors * BDRV_SECTOR_SIZE, buf, 0);
602 + if (res < 0) {
603 + error_setg(errp, "blk_pwrite to %s failed (%d)",
604 + bdrv_get_device_name(blk_bs(target)), res);
605 + return -1;
606 + }
607 + }
608 + return 0;
609 +}
610 +
611 +static int restore_extent(VmaReader *vmar, unsigned char *buf,
612 + int extent_size, int vmstate_fd,
613 + bool verbose, bool verify, Error **errp)
614 +{
615 + assert(vmar);
616 + assert(buf);
617 +
618 + VmaExtentHeader *ehead = (VmaExtentHeader *)buf;
619 + int start = VMA_EXTENT_HEADER_SIZE;
620 + int i;
621 +
622 + for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
623 + uint64_t block_info = GUINT64_FROM_BE(ehead->blockinfo[i]);
624 + uint64_t cluster_num = block_info & 0xffffffff;
625 + uint8_t dev_id = (block_info >> 32) & 0xff;
626 + uint16_t mask = block_info >> (32+16);
627 + int64_t max_sector;
628 +
629 + if (!dev_id) {
630 + continue;
631 + }
632 +
633 + VmaRestoreState *rstate = &vmar->rstate[dev_id];
634 + BlockBackend *target = NULL;
635 +
636 + bool skip = rstate->skip;
637 +
638 + if (dev_id != vmar->vmstate_stream) {
639 + target = rstate->target;
640 + if (!verify && !target && !skip) {
641 + error_setg(errp, "got wrong dev id %d", dev_id);
642 + return -1;
643 + }
644 +
645 + if (!skip) {
646 + if (vma_reader_get_bitmap(rstate, cluster_num)) {
647 + error_setg(errp, "found duplicated cluster %zd for stream %s",
648 + cluster_num, vmar->devinfo[dev_id].devname);
649 + return -1;
650 + }
651 + vma_reader_set_bitmap(rstate, cluster_num, 1);
652 + }
653 +
654 + max_sector = vmar->devinfo[dev_id].size/BDRV_SECTOR_SIZE;
655 + } else {
656 + max_sector = G_MAXINT64;
657 + if (cluster_num != vmar->vmstate_clusters) {
658 + error_setg(errp, "found out of order vmstate data");
659 + return -1;
660 + }
661 + vmar->vmstate_clusters++;
662 + }
663 +
664 + vmar->clusters_read++;
665 +
666 + if (verbose) {
667 + time_t duration = time(NULL) - vmar->start_time;
668 + int percent = (vmar->clusters_read*100)/vmar->cluster_count;
669 + if (percent != vmar->clusters_read_per) {
670 + printf("progress %d%% (read %zd bytes, duration %zd sec)\n",
671 + percent, vmar->clusters_read*VMA_CLUSTER_SIZE,
672 + duration);
673 + fflush(stdout);
674 + vmar->clusters_read_per = percent;
675 + }
676 + }
677 +
678 +        /* try to write whole clusters to speed up restore */
679 + if (mask == 0xffff) {
680 + if ((start + VMA_CLUSTER_SIZE) > extent_size) {
681 + error_setg(errp, "short vma extent - too many blocks");
682 + return -1;
683 + }
684 + int64_t sector_num = (cluster_num * VMA_CLUSTER_SIZE) /
685 + BDRV_SECTOR_SIZE;
686 + int64_t end_sector = sector_num +
687 + VMA_CLUSTER_SIZE/BDRV_SECTOR_SIZE;
688 +
689 + if (end_sector > max_sector) {
690 + end_sector = max_sector;
691 + }
692 +
693 + if (end_sector <= sector_num) {
694 + error_setg(errp, "got wrong block address - write beyond end");
695 + return -1;
696 + }
697 +
698 + if (!verify && !skip) {
699 + int nb_sectors = end_sector - sector_num;
700 + if (restore_write_data(vmar, dev_id, target, vmstate_fd,
701 + buf + start, sector_num, nb_sectors,
702 + errp) < 0) {
703 + return -1;
704 + }
705 + }
706 +
707 + start += VMA_CLUSTER_SIZE;
708 + } else {
709 + int j;
710 + int bit = 1;
711 +
712 + for (j = 0; j < 16; j++) {
713 + int64_t sector_num = (cluster_num*VMA_CLUSTER_SIZE +
714 + j*VMA_BLOCK_SIZE)/BDRV_SECTOR_SIZE;
715 +
716 + int64_t end_sector = sector_num +
717 + VMA_BLOCK_SIZE/BDRV_SECTOR_SIZE;
718 + if (end_sector > max_sector) {
719 + end_sector = max_sector;
720 + }
721 +
722 + if (mask & bit) {
723 + if ((start + VMA_BLOCK_SIZE) > extent_size) {
724 + error_setg(errp, "short vma extent - too many blocks");
725 + return -1;
726 + }
727 +
728 + if (end_sector <= sector_num) {
729 + error_setg(errp, "got wrong block address - "
730 + "write beyond end");
731 + return -1;
732 + }
733 +
734 + if (!verify && !skip) {
735 + int nb_sectors = end_sector - sector_num;
736 + if (restore_write_data(vmar, dev_id, target, vmstate_fd,
737 + buf + start, sector_num,
738 + nb_sectors, errp) < 0) {
739 + return -1;
740 + }
741 + }
742 +
743 + start += VMA_BLOCK_SIZE;
744 +
745 + } else {
746 +
747 +
748 + if (end_sector > sector_num) {
749 +                    /* TODO: use bdrv_co_write_zeroes (but that needs to
750 +                     * be run inside a coroutine?)
751 + */
752 + int nb_sectors = end_sector - sector_num;
753 + int zero_size = BDRV_SECTOR_SIZE*nb_sectors;
754 + vmar->zero_cluster_data += zero_size;
755 + if (mask != 0) {
756 + vmar->partial_zero_cluster_data += zero_size;
757 + }
758 +
759 + if (rstate->write_zeroes && !verify && !skip) {
760 + if (restore_write_data(vmar, dev_id, target, vmstate_fd,
761 + zero_vma_block, sector_num,
762 + nb_sectors, errp) < 0) {
763 + return -1;
764 + }
765 + }
766 + }
767 + }
768 +
769 + bit = bit << 1;
770 + }
771 + }
772 + }
773 +
774 + if (start != extent_size) {
775 + error_setg(errp, "vma extent error - missing blocks");
776 + return -1;
777 + }
778 +
779 + return 0;
780 +}
781 +
782 +static int vma_reader_restore_full(VmaReader *vmar, int vmstate_fd,
783 + bool verbose, bool verify,
784 + Error **errp)
785 +{
786 + assert(vmar);
787 + assert(vmar->head_data);
788 +
789 + int ret = 0;
790 + unsigned char buf[VMA_MAX_EXTENT_SIZE];
791 + int buf_pos = 0;
792 + unsigned char md5sum[16];
793 + VmaHeader *h = (VmaHeader *)vmar->head_data;
794 +
795 + vmar->start_time = time(NULL);
796 +
797 + while (1) {
798 + int bytes = full_read(vmar->fd, buf + buf_pos, sizeof(buf) - buf_pos);
799 + if (bytes < 0) {
800 + error_setg(errp, "read failed - %s", g_strerror(errno));
801 + return -1;
802 + }
803 +
804 + buf_pos += bytes;
805 +
806 + if (!buf_pos) {
807 + break; /* EOF */
808 + }
809 +
810 + if (buf_pos < VMA_EXTENT_HEADER_SIZE) {
811 + error_setg(errp, "read short extent (%d bytes)", buf_pos);
812 + return -1;
813 + }
814 +
815 + VmaExtentHeader *ehead = (VmaExtentHeader *)buf;
816 +
817 + /* extract md5sum */
818 + memcpy(md5sum, ehead->md5sum, sizeof(ehead->md5sum));
819 + memset(ehead->md5sum, 0, sizeof(ehead->md5sum));
820 +
821 + g_checksum_reset(vmar->md5csum);
822 + g_checksum_update(vmar->md5csum, buf, VMA_EXTENT_HEADER_SIZE);
823 + gsize csize = 16;
824 + g_checksum_get_digest(vmar->md5csum, ehead->md5sum, &csize);
825 +
826 + if (memcmp(md5sum, ehead->md5sum, 16) != 0) {
827 +            error_setg(errp, "wrong vma extent header checksum");
828 + return -1;
829 + }
830 +
831 + if (memcmp(h->uuid, ehead->uuid, sizeof(ehead->uuid)) != 0) {
832 + error_setg(errp, "wrong vma extent uuid");
833 + return -1;
834 + }
835 +
836 + if (ehead->magic != VMA_EXTENT_MAGIC || ehead->reserved1 != 0) {
837 + error_setg(errp, "wrong vma extent header magic");
838 + return -1;
839 + }
840 +
841 + int block_count = GUINT16_FROM_BE(ehead->block_count);
842 + int extent_size = VMA_EXTENT_HEADER_SIZE + block_count*VMA_BLOCK_SIZE;
843 +
844 + if (buf_pos < extent_size) {
845 + error_setg(errp, "short vma extent (%d < %d)", buf_pos,
846 + extent_size);
847 + return -1;
848 + }
849 +
850 + if (restore_extent(vmar, buf, extent_size, vmstate_fd, verbose,
851 + verify, errp) < 0) {
852 + return -1;
853 + }
854 +
855 + if (buf_pos > extent_size) {
856 + memmove(buf, buf + extent_size, buf_pos - extent_size);
857 + buf_pos = buf_pos - extent_size;
858 + } else {
859 + buf_pos = 0;
860 + }
861 + }
862 +
863 + bdrv_drain_all();
864 +
865 + int i;
866 + for (i = 1; i < 256; i++) {
867 + VmaRestoreState *rstate = &vmar->rstate[i];
868 + if (!rstate->target) {
869 + continue;
870 + }
871 +
872 + if (blk_flush(rstate->target) < 0) {
873 + error_setg(errp, "vma blk_flush %s failed",
874 + vmar->devinfo[i].devname);
875 + return -1;
876 + }
877 +
878 + if (vmar->devinfo[i].size &&
879 + (strcmp(vmar->devinfo[i].devname, "vmstate") != 0)) {
880 + assert(rstate->bitmap);
881 +
882 + int64_t cluster_num, end;
883 +
884 + end = (vmar->devinfo[i].size + VMA_CLUSTER_SIZE - 1) /
885 + VMA_CLUSTER_SIZE;
886 +
887 + for (cluster_num = 0; cluster_num < end; cluster_num++) {
888 + if (!vma_reader_get_bitmap(rstate, cluster_num)) {
889 + error_setg(errp, "detected missing cluster %zd "
890 + "for stream %s", cluster_num,
891 + vmar->devinfo[i].devname);
892 + return -1;
893 + }
894 + }
895 + }
896 + }
897 +
898 + if (verbose) {
899 + if (vmar->clusters_read) {
900 + printf("total bytes read %zd, sparse bytes %zd (%.3g%%)\n",
901 + vmar->clusters_read*VMA_CLUSTER_SIZE,
902 + vmar->zero_cluster_data,
903 + (double)(100.0*vmar->zero_cluster_data)/
904 + (vmar->clusters_read*VMA_CLUSTER_SIZE));
905 +
906 + int64_t datasize = vmar->clusters_read*VMA_CLUSTER_SIZE-vmar->zero_cluster_data;
907 + if (datasize) { // this does not make sense for empty files
908 + printf("space reduction due to 4K zero blocks %.3g%%\n",
909 + (double)(100.0*vmar->partial_zero_cluster_data) / datasize);
910 + }
911 + } else {
912 + printf("vma archive contains no image data\n");
913 + }
914 + }
915 + return ret;
916 +}
917 +
918 +int vma_reader_restore(VmaReader *vmar, int vmstate_fd, bool verbose,
919 + Error **errp)
920 +{
921 + return vma_reader_restore_full(vmar, vmstate_fd, verbose, false, errp);
922 +}
923 +
924 +int vma_reader_verify(VmaReader *vmar, bool verbose, Error **errp)
925 +{
926 + guint8 dev_id;
927 +
928 + for (dev_id = 1; dev_id < 255; dev_id++) {
929 + if (vma_reader_get_device_info(vmar, dev_id)) {
930 + allocate_rstate(vmar, dev_id, NULL, false, false);
931 + }
932 + }
933 +
934 + return vma_reader_restore_full(vmar, -1, verbose, true, errp);
935 +}
936 +
937 diff --git a/vma-writer.c b/vma-writer.c
938 new file mode 100644
939 index 0000000000..ac7da237d0
940 --- /dev/null
941 +++ b/vma-writer.c
942 @@ -0,0 +1,793 @@
943 +/*
944 + * VMA: Virtual Machine Archive
945 + *
946 + * Copyright (C) 2012 Proxmox Server Solutions
947 + *
948 + * Authors:
949 + * Dietmar Maurer (dietmar@proxmox.com)
950 + *
951 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
952 + * See the COPYING file in the top-level directory.
953 + *
954 + */
955 +
956 +#include "qemu/osdep.h"
957 +#include <glib.h>
958 +#include <uuid/uuid.h>
959 +
960 +#include "vma.h"
961 +#include "block/block.h"
962 +#include "monitor/monitor.h"
963 +#include "qemu/main-loop.h"
964 +#include "qemu/coroutine.h"
965 +#include "qemu/cutils.h"
966 +#include "qemu/memalign.h"
967 +
968 +#define DEBUG_VMA 0
969 +
970 +#define DPRINTF(fmt, ...)\
971 + do { if (DEBUG_VMA) { printf("vma: " fmt, ## __VA_ARGS__); } } while (0)
972 +
973 +#define WRITE_BUFFERS 5
974 +#define HEADER_CLUSTERS 8
975 +#define HEADERBUF_SIZE (VMA_CLUSTER_SIZE*HEADER_CLUSTERS)
976 +
977 +struct VmaWriter {
978 + int fd;
979 + FILE *cmd;
980 + int status;
981 + char errmsg[8192];
982 + uuid_t uuid;
983 + bool header_written;
984 + bool closed;
985 +
986 + /* we always write extents */
987 + unsigned char *outbuf;
988 + int outbuf_pos; /* in bytes */
989 + int outbuf_count; /* in VMA_BLOCKS */
990 + uint64_t outbuf_block_info[VMA_BLOCKS_PER_EXTENT];
991 +
992 + unsigned char *headerbuf;
993 +
994 + GChecksum *md5csum;
995 + CoMutex flush_lock;
996 + Coroutine *co_writer;
997 +
998 +    /* drive information */
999 + VmaStreamInfo stream_info[256];
1000 + guint stream_count;
1001 +
1002 + guint8 vmstate_stream;
1003 + uint32_t vmstate_clusters;
1004 +
1005 + /* header blob table */
1006 + char *header_blob_table;
1007 + uint32_t header_blob_table_size;
1008 + uint32_t header_blob_table_pos;
1009 +
1010 + /* store for config blobs */
1011 + uint32_t config_names[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
1012 + uint32_t config_data[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
1013 + uint32_t config_count;
1014 +};
1015 +
1016 +void vma_writer_set_error(VmaWriter *vmaw, const char *fmt, ...)
1017 +{
1018 + va_list ap;
1019 +
1020 + if (vmaw->status < 0) {
1021 + return;
1022 + }
1023 +
1024 + vmaw->status = -1;
1025 +
1026 + va_start(ap, fmt);
1027 + g_vsnprintf(vmaw->errmsg, sizeof(vmaw->errmsg), fmt, ap);
1028 + va_end(ap);
1029 +
1030 + DPRINTF("vma_writer_set_error: %s\n", vmaw->errmsg);
1031 +}
1032 +
1033 +static uint32_t allocate_header_blob(VmaWriter *vmaw, const char *data,
1034 + size_t len)
1035 +{
1036 + if (len > 65535) {
1037 + return 0;
1038 + }
1039 +
1040 + if (!vmaw->header_blob_table ||
1041 + (vmaw->header_blob_table_size <
1042 + (vmaw->header_blob_table_pos + len + 2))) {
1043 + int newsize = vmaw->header_blob_table_size + ((len + 2 + 511)/512)*512;
1044 +
1045 + vmaw->header_blob_table = g_realloc(vmaw->header_blob_table, newsize);
1046 + memset(vmaw->header_blob_table + vmaw->header_blob_table_size,
1047 + 0, newsize - vmaw->header_blob_table_size);
1048 + vmaw->header_blob_table_size = newsize;
1049 + }
1050 +
1051 + uint32_t cpos = vmaw->header_blob_table_pos;
1052 + vmaw->header_blob_table[cpos] = len & 255;
1053 + vmaw->header_blob_table[cpos+1] = (len >> 8) & 255;
1054 + memcpy(vmaw->header_blob_table + cpos + 2, data, len);
1055 + vmaw->header_blob_table_pos += len + 2;
1056 + return cpos;
1057 +}
1058 +
1059 +static uint32_t allocate_header_string(VmaWriter *vmaw, const char *str)
1060 +{
1061 + assert(vmaw);
1062 +
1063 + size_t len = strlen(str) + 1;
1064 +
1065 + return allocate_header_blob(vmaw, str, len);
1066 +}
1067 +
1068 +int vma_writer_add_config(VmaWriter *vmaw, const char *name, gpointer data,
1069 + gsize len)
1070 +{
1071 + assert(vmaw);
1072 + assert(!vmaw->header_written);
1073 + assert(vmaw->config_count < VMA_MAX_CONFIGS);
1074 + assert(name);
1075 + assert(data);
1076 +
1077 + gchar *basename = g_path_get_basename(name);
1078 + uint32_t name_ptr = allocate_header_string(vmaw, basename);
1079 + g_free(basename);
1080 +
1081 + if (!name_ptr) {
1082 + return -1;
1083 + }
1084 +
1085 + uint32_t data_ptr = allocate_header_blob(vmaw, data, len);
1086 + if (!data_ptr) {
1087 + return -1;
1088 + }
1089 +
1090 + vmaw->config_names[vmaw->config_count] = name_ptr;
1091 + vmaw->config_data[vmaw->config_count] = data_ptr;
1092 +
1093 + vmaw->config_count++;
1094 +
1095 + return 0;
1096 +}
1097 +
1098 +int vma_writer_register_stream(VmaWriter *vmaw, const char *devname,
1099 + size_t size)
1100 +{
1101 + assert(vmaw);
1102 + assert(devname);
1103 + assert(!vmaw->status);
1104 +
1105 + if (vmaw->header_written) {
1106 + vma_writer_set_error(vmaw, "vma_writer_register_stream: header "
1107 + "already written");
1108 + return -1;
1109 + }
1110 +
1111 + guint n = vmaw->stream_count + 1;
1112 +
1113 +    /* we can have dev_ids from 1 to 255 (0 reserved)
1114 +     * 255(-1) reserved for safety
1115 + */
1116 + if (n > 254) {
1117 + vma_writer_set_error(vmaw, "vma_writer_register_stream: "
1118 + "too many drives");
1119 + return -1;
1120 + }
1121 +
1122 + if (size <= 0) {
1123 + vma_writer_set_error(vmaw, "vma_writer_register_stream: "
1124 + "got strange size %zd", size);
1125 + return -1;
1126 + }
1127 +
1128 + DPRINTF("vma_writer_register_stream %s %zu %d\n", devname, size, n);
1129 +
1130 + vmaw->stream_info[n].devname = g_strdup(devname);
1131 + vmaw->stream_info[n].size = size;
1132 +
1133 + vmaw->stream_info[n].cluster_count = (size + VMA_CLUSTER_SIZE - 1) /
1134 + VMA_CLUSTER_SIZE;
1135 +
1136 + vmaw->stream_count = n;
1137 +
1138 + if (strcmp(devname, "vmstate") == 0) {
1139 + vmaw->vmstate_stream = n;
1140 + }
1141 +
1142 + return n;
1143 +}
1144 +
1145 +static void coroutine_fn yield_until_fd_writable(int fd)
1146 +{
1147 + assert(qemu_in_coroutine());
1148 + AioContext *ctx = qemu_get_current_aio_context();
1149 + aio_set_fd_handler(ctx, fd, false, NULL, (IOHandler *)qemu_coroutine_enter,
1150 + NULL, NULL, qemu_coroutine_self());
1151 + qemu_coroutine_yield();
1152 + aio_set_fd_handler(ctx, fd, false, NULL, NULL, NULL, NULL, NULL);
1153 +}
1154 +
1155 +static ssize_t coroutine_fn
1156 +vma_queue_write(VmaWriter *vmaw, const void *buf, size_t bytes)
1157 +{
1158 + DPRINTF("vma_queue_write enter %zd\n", bytes);
1159 +
1160 + assert(vmaw);
1161 + assert(buf);
1162 + assert(bytes <= VMA_MAX_EXTENT_SIZE);
1163 +
1164 + size_t done = 0;
1165 + ssize_t ret;
1166 +
1167 + assert(vmaw->co_writer == NULL);
1168 +
1169 + vmaw->co_writer = qemu_coroutine_self();
1170 +
1171 + while (done < bytes) {
1172 + if (vmaw->status < 0) {
1173 + DPRINTF("vma_queue_write detected canceled backup\n");
1174 + done = -1;
1175 + break;
1176 + }
1177 + yield_until_fd_writable(vmaw->fd);
1178 + ret = write(vmaw->fd, buf + done, bytes - done);
1179 + if (ret > 0) {
1180 + done += ret;
1181 + DPRINTF("vma_queue_write written %zd %zd\n", done, ret);
1182 + } else if (ret < 0) {
1183 + if (errno == EAGAIN || errno == EWOULDBLOCK) {
1184 + /* try again */
1185 + } else {
1186 + vma_writer_set_error(vmaw, "vma_queue_write: write error - %s",
1187 + g_strerror(errno));
1188 + done = -1; /* always return failure for partial writes */
1189 + break;
1190 + }
1191 + } else if (ret == 0) {
1192 + /* should not happen - simply try again */
1193 + }
1194 + }
1195 +
1196 + vmaw->co_writer = NULL;
1197 +
1198 + return (done == bytes) ? bytes : -1;
1199 +}
1200 +
1201 +VmaWriter *vma_writer_create(const char *filename, uuid_t uuid, Error **errp)
1202 +{
1203 + const char *p;
1204 +
1205 + assert(sizeof(VmaHeader) == (4096 + 8192));
1206 + assert(G_STRUCT_OFFSET(VmaHeader, config_names) == 2044);
1207 + assert(G_STRUCT_OFFSET(VmaHeader, config_data) == 3068);
1208 + assert(G_STRUCT_OFFSET(VmaHeader, dev_info) == 4096);
1209 + assert(sizeof(VmaExtentHeader) == 512);
1210 +
1211 + VmaWriter *vmaw = g_new0(VmaWriter, 1);
1212 + vmaw->fd = -1;
1213 +
1214 + vmaw->md5csum = g_checksum_new(G_CHECKSUM_MD5);
1215 + if (!vmaw->md5csum) {
1216 +        error_setg(errp, "can't allocate checksum\n");
1217 + goto err;
1218 + }
1219 +
1220 + if (strstart(filename, "exec:", &p)) {
1221 + vmaw->cmd = popen(p, "w");
1222 + if (vmaw->cmd == NULL) {
1223 + error_setg(errp, "can't popen command '%s' - %s\n", p,
1224 + g_strerror(errno));
1225 + goto err;
1226 + }
1227 + vmaw->fd = fileno(vmaw->cmd);
1228 +
1229 + /* try to use O_NONBLOCK */
1230 + fcntl(vmaw->fd, F_SETFL, fcntl(vmaw->fd, F_GETFL)|O_NONBLOCK);
1231 +
1232 + } else {
1233 + struct stat st;
1234 + int oflags;
1235 + const char *tmp_id_str;
1236 +
1237 + if ((stat(filename, &st) == 0) && S_ISFIFO(st.st_mode)) {
1238 + oflags = O_NONBLOCK|O_WRONLY;
1239 + vmaw->fd = qemu_open(filename, oflags, errp);
1240 + } else if (strstart(filename, "/dev/fdset/", &tmp_id_str)) {
1241 + oflags = O_NONBLOCK|O_WRONLY;
1242 + vmaw->fd = qemu_open(filename, oflags, errp);
1243 + } else if (strstart(filename, "/dev/fdname/", &tmp_id_str)) {
1244 + vmaw->fd = monitor_get_fd(monitor_cur(), tmp_id_str, errp);
1245 + if (vmaw->fd < 0) {
1246 + goto err;
1247 + }
1248 + /* try to use O_NONBLOCK */
1249 + fcntl(vmaw->fd, F_SETFL, fcntl(vmaw->fd, F_GETFL)|O_NONBLOCK);
1250 + } else {
1251 + oflags = O_NONBLOCK|O_DIRECT|O_WRONLY|O_EXCL;
1252 + vmaw->fd = qemu_create(filename, oflags, 0644, errp);
1253 + }
1254 +
1255 + if (vmaw->fd < 0) {
1256 + error_free(*errp);
1257 + *errp = NULL;
1258 + error_setg(errp, "can't open file %s - %s\n", filename,
1259 + g_strerror(errno));
1260 + goto err;
1261 + }
1262 + }
1263 +
1264 + /* we use O_DIRECT, so we need to align IO buffers */
1265 +
1266 + vmaw->outbuf = qemu_memalign(512, VMA_MAX_EXTENT_SIZE);
1267 + vmaw->headerbuf = qemu_memalign(512, HEADERBUF_SIZE);
1268 +
1269 + vmaw->outbuf_count = 0;
1270 + vmaw->outbuf_pos = VMA_EXTENT_HEADER_SIZE;
1271 +
1272 + vmaw->header_blob_table_pos = 1; /* start at pos 1 */
1273 +
1274 + qemu_co_mutex_init(&vmaw->flush_lock);
1275 +
1276 + uuid_copy(vmaw->uuid, uuid);
1277 +
1278 + return vmaw;
1279 +
1280 +err:
1281 + if (vmaw) {
1282 + if (vmaw->cmd) {
1283 + pclose(vmaw->cmd);
1284 + } else if (vmaw->fd >= 0) {
1285 + close(vmaw->fd);
1286 + }
1287 +
1288 + if (vmaw->md5csum) {
1289 + g_checksum_free(vmaw->md5csum);
1290 + }
1291 +
1292 + g_free(vmaw);
1293 + }
1294 +
1295 + return NULL;
1296 +}
1297 +
1298 +static int coroutine_fn vma_write_header(VmaWriter *vmaw)
1299 +{
1300 + assert(vmaw);
1301 + unsigned char *buf = vmaw->headerbuf;
1302 + VmaHeader *head = (VmaHeader *)buf;
1303 +
1304 + int i;
1305 +
1306 + DPRINTF("VMA WRITE HEADER\n");
1307 +
1308 + if (vmaw->status < 0) {
1309 + return vmaw->status;
1310 + }
1311 +
1312 + memset(buf, 0, HEADERBUF_SIZE);
1313 +
1314 + head->magic = VMA_MAGIC;
1315 + head->version = GUINT32_TO_BE(1); /* v1 */
1316 + memcpy(head->uuid, vmaw->uuid, 16);
1317 +
1318 + time_t ctime = time(NULL);
1319 + head->ctime = GUINT64_TO_BE(ctime);
1320 +
1321 + for (i = 0; i < VMA_MAX_CONFIGS; i++) {
1322 + head->config_names[i] = GUINT32_TO_BE(vmaw->config_names[i]);
1323 + head->config_data[i] = GUINT32_TO_BE(vmaw->config_data[i]);
1324 + }
1325 +
1326 + /* 32 bytes per device (12 used currently) = 8192 bytes max */
1327 + for (i = 1; i <= 254; i++) {
1328 + VmaStreamInfo *si = &vmaw->stream_info[i];
1329 + if (si->size) {
1330 + assert(si->devname);
1331 + uint32_t devname_ptr = allocate_header_string(vmaw, si->devname);
1332 + if (!devname_ptr) {
1333 + return -1;
1334 + }
1335 + head->dev_info[i].devname_ptr = GUINT32_TO_BE(devname_ptr);
1336 + head->dev_info[i].size = GUINT64_TO_BE(si->size);
1337 + }
1338 + }
1339 +
1340 + uint32_t header_size = sizeof(VmaHeader) + vmaw->header_blob_table_size;
1341 + head->header_size = GUINT32_TO_BE(header_size);
1342 +
1343 + if (header_size > HEADERBUF_SIZE) {
1344 + return -1; /* just to be sure */
1345 + }
1346 +
1347 + uint32_t blob_buffer_offset = sizeof(VmaHeader);
1348 + memcpy(buf + blob_buffer_offset, vmaw->header_blob_table,
1349 + vmaw->header_blob_table_size);
1350 + head->blob_buffer_offset = GUINT32_TO_BE(blob_buffer_offset);
1351 + head->blob_buffer_size = GUINT32_TO_BE(vmaw->header_blob_table_pos);
1352 +
1353 + g_checksum_reset(vmaw->md5csum);
1354 + g_checksum_update(vmaw->md5csum, (const guchar *)buf, header_size);
1355 + gsize csize = 16;
1356 + g_checksum_get_digest(vmaw->md5csum, (guint8 *)(head->md5sum), &csize);
1357 +
1358 + return vma_queue_write(vmaw, buf, header_size);
1359 +}
1360 +
1361 +static int coroutine_fn vma_writer_flush(VmaWriter *vmaw)
1362 +{
1363 + assert(vmaw);
1364 +
1365 + int ret;
1366 + int i;
1367 +
1368 + if (vmaw->status < 0) {
1369 + return vmaw->status;
1370 + }
1371 +
1372 + if (!vmaw->header_written) {
1373 + vmaw->header_written = true;
1374 + ret = vma_write_header(vmaw);
1375 + if (ret < 0) {
1376 + vma_writer_set_error(vmaw, "vma_writer_flush: write header failed");
1377 + return ret;
1378 + }
1379 + }
1380 +
1381 + DPRINTF("VMA WRITE FLUSH %d %d\n", vmaw->outbuf_count, vmaw->outbuf_pos);
1382 +
1383 +
1384 + VmaExtentHeader *ehead = (VmaExtentHeader *)vmaw->outbuf;
1385 +
1386 + ehead->magic = VMA_EXTENT_MAGIC;
1387 + ehead->reserved1 = 0;
1388 +
1389 + for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
1390 + ehead->blockinfo[i] = GUINT64_TO_BE(vmaw->outbuf_block_info[i]);
1391 + }
1392 +
1393 + guint16 block_count = (vmaw->outbuf_pos - VMA_EXTENT_HEADER_SIZE) /
1394 + VMA_BLOCK_SIZE;
1395 +
1396 + ehead->block_count = GUINT16_TO_BE(block_count);
1397 +
1398 + memcpy(ehead->uuid, vmaw->uuid, sizeof(ehead->uuid));
1399 + memset(ehead->md5sum, 0, sizeof(ehead->md5sum));
1400 +
1401 + g_checksum_reset(vmaw->md5csum);
1402 + g_checksum_update(vmaw->md5csum, vmaw->outbuf, VMA_EXTENT_HEADER_SIZE);
1403 + gsize csize = 16;
1404 + g_checksum_get_digest(vmaw->md5csum, ehead->md5sum, &csize);
1405 +
1406 + int bytes = vmaw->outbuf_pos;
1407 + ret = vma_queue_write(vmaw, vmaw->outbuf, bytes);
1408 + if (ret != bytes) {
1409 + vma_writer_set_error(vmaw, "vma_writer_flush: failed write");
1410 + }
1411 +
1412 + vmaw->outbuf_count = 0;
1413 + vmaw->outbuf_pos = VMA_EXTENT_HEADER_SIZE;
1414 +
1415 + for (i = 0; i < VMA_BLOCKS_PER_EXTENT; i++) {
1416 + vmaw->outbuf_block_info[i] = 0;
1417 + }
1418 +
1419 + return vmaw->status;
1420 +}
1421 +
1422 +static int vma_count_open_streams(VmaWriter *vmaw)
1423 +{
1424 + g_assert(vmaw != NULL);
1425 +
1426 + int i;
1427 + int open_drives = 0;
1428 + for (i = 0; i <= 255; i++) {
1429 + if (vmaw->stream_info[i].size && !vmaw->stream_info[i].finished) {
1430 + open_drives++;
1431 + }
1432 + }
1433 +
1434 + return open_drives;
1435 +}
1436 +
1437 +
1438 +/**
1439 + * You need to call this if the vma archive does not contain
1440 + * any data stream.
1441 + */
1442 +int coroutine_fn
1443 +vma_writer_flush_output(VmaWriter *vmaw)
1444 +{
1445 + qemu_co_mutex_lock(&vmaw->flush_lock);
1446 + int ret = vma_writer_flush(vmaw);
1447 + qemu_co_mutex_unlock(&vmaw->flush_lock);
1448 + if (ret < 0) {
1449 + vma_writer_set_error(vmaw, "vma_writer_flush_header failed");
1450 + }
1451 + return ret;
1452 +}
1453 +
1454 +/**
1455 + * all jobs should call this when there is no more data
1456 + * All jobs should call this when there is no more data.
1457 + * Returns: number of remaining streams (0 ==> finished)
1458 +int coroutine_fn
1459 +vma_writer_close_stream(VmaWriter *vmaw, uint8_t dev_id)
1460 +{
1461 + g_assert(vmaw != NULL);
1462 +
1463 + DPRINTF("vma_writer_set_status %d\n", dev_id);
1464 + if (!vmaw->stream_info[dev_id].size) {
1465 + vma_writer_set_error(vmaw, "vma_writer_close_stream: "
1466 + "no such stream %d", dev_id);
1467 + return -1;
1468 + }
1469 + if (vmaw->stream_info[dev_id].finished) {
1470 + vma_writer_set_error(vmaw, "vma_writer_close_stream: "
1471 + "stream already closed %d", dev_id);
1472 + return -1;
1473 + }
1474 +
1475 + vmaw->stream_info[dev_id].finished = true;
1476 +
1477 + int open_drives = vma_count_open_streams(vmaw);
1478 +
1479 + if (open_drives <= 0) {
1480 + DPRINTF("vma_writer_set_status all drives completed\n");
1481 + vma_writer_flush_output(vmaw);
1482 + }
1483 +
1484 + return open_drives;
1485 +}
1486 +
1487 +int vma_writer_get_status(VmaWriter *vmaw, VmaStatus *status)
1488 +{
1489 + int i;
1490 +
1491 + g_assert(vmaw != NULL);
1492 +
1493 + if (status) {
1494 + status->status = vmaw->status;
1495 + g_strlcpy(status->errmsg, vmaw->errmsg, sizeof(status->errmsg));
1496 + for (i = 0; i <= 255; i++) {
1497 + status->stream_info[i] = vmaw->stream_info[i];
1498 + }
1499 +
1500 + uuid_unparse_lower(vmaw->uuid, status->uuid_str);
1501 + }
1502 +
1503 + status->closed = vmaw->closed;
1504 +
1505 + return vmaw->status;
1506 +}
1507 +
1508 +static int vma_writer_get_buffer(VmaWriter *vmaw)
1509 +{
1510 + int ret = 0;
1511 +
1512 + qemu_co_mutex_lock(&vmaw->flush_lock);
1513 +
1514 + /* wait until buffer is available */
1515 + while (vmaw->outbuf_count >= (VMA_BLOCKS_PER_EXTENT - 1)) {
1516 + ret = vma_writer_flush(vmaw);
1517 + if (ret < 0) {
1518 + vma_writer_set_error(vmaw, "vma_writer_get_buffer: flush failed");
1519 + break;
1520 + }
1521 + }
1522 +
1523 + qemu_co_mutex_unlock(&vmaw->flush_lock);
1524 +
1525 + return ret;
1526 +}
1527 +
1528 +
1529 +int64_t coroutine_fn
1530 +vma_writer_write(VmaWriter *vmaw, uint8_t dev_id, int64_t cluster_num,
1531 + const unsigned char *buf, size_t *zero_bytes)
1532 +{
1533 + g_assert(vmaw != NULL);
1534 + g_assert(zero_bytes != NULL);
1535 +
1536 + *zero_bytes = 0;
1537 +
1538 + if (vmaw->status < 0) {
1539 + return vmaw->status;
1540 + }
1541 +
1542 + if (!dev_id || !vmaw->stream_info[dev_id].size) {
1543 + vma_writer_set_error(vmaw, "vma_writer_write: "
1544 + "no such stream %d", dev_id);
1545 + return -1;
1546 + }
1547 +
1548 + if (vmaw->stream_info[dev_id].finished) {
1549 + vma_writer_set_error(vmaw, "vma_writer_write: "
1550 + "stream already closed %d", dev_id);
1551 + return -1;
1552 + }
1553 +
1554 +
1555 + if (cluster_num >= (((uint64_t)1)<<32)) {
1556 + vma_writer_set_error(vmaw, "vma_writer_write: "
1557 + "cluster number out of range");
1558 + return -1;
1559 + }
1560 +
1561 + if (dev_id == vmaw->vmstate_stream) {
1562 + if (cluster_num != vmaw->vmstate_clusters) {
1563 + vma_writer_set_error(vmaw, "vma_writer_write: "
1564 + "non sequential vmstate write");
1565 + }
1566 + vmaw->vmstate_clusters++;
1567 + } else if (cluster_num >= vmaw->stream_info[dev_id].cluster_count) {
1568 + vma_writer_set_error(vmaw, "vma_writer_write: cluster number too big");
1569 + return -1;
1570 + }
1571 +
1572 + /* wait until buffer is available */
1573 + if (vma_writer_get_buffer(vmaw) < 0) {
1574 + vma_writer_set_error(vmaw, "vma_writer_write: "
1575 + "vma_writer_get_buffer failed");
1576 + return -1;
1577 + }
1578 +
1579 + DPRINTF("VMA WRITE %d %zd\n", dev_id, cluster_num);
1580 +
1581 + uint64_t dev_size = vmaw->stream_info[dev_id].size;
1582 + uint16_t mask = 0;
1583 +
1584 + if (buf) {
1585 + int i;
1586 + int bit = 1;
1587 + uint64_t byte_offset = cluster_num * VMA_CLUSTER_SIZE;
1588 + for (i = 0; i < 16; i++) {
1589 + const unsigned char *vmablock = buf + (i*VMA_BLOCK_SIZE);
1590 +
1591 + // Note: If the source is not 64k-aligned, we might reach 4k blocks
1592 + // after the end of the device. Always mark these as zero in the
1593 + // mask, so the restore handles them correctly.
1594 + if (byte_offset < dev_size &&
1595 + !buffer_is_zero(vmablock, VMA_BLOCK_SIZE))
1596 + {
1597 + mask |= bit;
1598 + memcpy(vmaw->outbuf + vmaw->outbuf_pos, vmablock,
1599 + VMA_BLOCK_SIZE);
1600 +
1601 +                // avoid leaking stale memory contents on an unaligned last block
1602 + if (byte_offset + VMA_BLOCK_SIZE > dev_size) {
1603 + uint64_t real_data_in_block = dev_size - byte_offset;
1604 + memset(vmaw->outbuf + vmaw->outbuf_pos + real_data_in_block,
1605 + 0, VMA_BLOCK_SIZE - real_data_in_block);
1606 + }
1607 +
1608 + vmaw->outbuf_pos += VMA_BLOCK_SIZE;
1609 + } else {
1610 + DPRINTF("VMA WRITE %zd ZERO BLOCK %d\n", cluster_num, i);
1611 + vmaw->stream_info[dev_id].zero_bytes += VMA_BLOCK_SIZE;
1612 + *zero_bytes += VMA_BLOCK_SIZE;
1613 + }
1614 +
1615 + byte_offset += VMA_BLOCK_SIZE;
1616 + bit = bit << 1;
1617 + }
1618 + } else {
1619 + DPRINTF("VMA WRITE %zd ZERO CLUSTER\n", cluster_num);
1620 + vmaw->stream_info[dev_id].zero_bytes += VMA_CLUSTER_SIZE;
1621 + *zero_bytes += VMA_CLUSTER_SIZE;
1622 + }
1623 +
1624 + uint64_t block_info = ((uint64_t)mask) << (32+16);
1625 + block_info |= ((uint64_t)dev_id) << 32;
1626 + block_info |= (cluster_num & 0xffffffff);
1627 + vmaw->outbuf_block_info[vmaw->outbuf_count] = block_info;
1628 +
1629 + DPRINTF("VMA WRITE MASK %zd %zx\n", cluster_num, block_info);
1630 +
1631 + vmaw->outbuf_count++;
1632 +
1633 +    /** NOTE: We always write whole clusters, but we correctly set
1634 +     * transferred bytes. So transferred == size when everything
1635 + * went OK.
1636 + */
1637 + size_t transferred = VMA_CLUSTER_SIZE;
1638 +
1639 + if (dev_id != vmaw->vmstate_stream) {
1640 + uint64_t last = (cluster_num + 1) * VMA_CLUSTER_SIZE;
1641 + if (last > dev_size) {
1642 + uint64_t diff = last - dev_size;
1643 + if (diff >= VMA_CLUSTER_SIZE) {
1644 + vma_writer_set_error(vmaw, "vma_writer_write: "
1645 + "read after last cluster");
1646 + return -1;
1647 + }
1648 + transferred -= diff;
1649 + }
1650 + }
1651 +
1652 + vmaw->stream_info[dev_id].transferred += transferred;
1653 +
1654 + return transferred;
1655 +}
1656 +
1657 +void vma_writer_error_propagate(VmaWriter *vmaw, Error **errp)
1658 +{
1659 + if (vmaw->status < 0 && *errp == NULL) {
1660 + error_setg(errp, "%s", vmaw->errmsg);
1661 + }
1662 +}
1663 +
1664 +int vma_writer_close(VmaWriter *vmaw, Error **errp)
1665 +{
1666 + g_assert(vmaw != NULL);
1667 +
1668 + int i;
1669 +
1670 + qemu_co_mutex_lock(&vmaw->flush_lock); // wait for pending writes
1671 +
1672 + assert(vmaw->co_writer == NULL);
1673 +
1674 + if (vmaw->cmd) {
1675 + if (pclose(vmaw->cmd) < 0) {
1676 + vma_writer_set_error(vmaw, "vma_writer_close: "
1677 + "pclose failed - %s", g_strerror(errno));
1678 + }
1679 + } else {
1680 + if (close(vmaw->fd) < 0) {
1681 + vma_writer_set_error(vmaw, "vma_writer_close: "
1682 + "close failed - %s", g_strerror(errno));
1683 + }
1684 + }
1685 +
1686 + for (i = 0; i <= 255; i++) {
1687 + VmaStreamInfo *si = &vmaw->stream_info[i];
1688 + if (si->size) {
1689 + if (!si->finished) {
1690 + vma_writer_set_error(vmaw, "vma_writer_close: "
1691 + "detected open stream '%s'", si->devname);
1692 + } else if ((si->transferred != si->size) &&
1693 + (i != vmaw->vmstate_stream)) {
1694 + vma_writer_set_error(vmaw, "vma_writer_close: "
1695 + "incomplete stream '%s' (%zd != %zd)",
1696 + si->devname, si->transferred, si->size);
1697 + }
1698 + }
1699 + }
1700 +
1701 + for (i = 0; i <= 255; i++) {
1702 + vmaw->stream_info[i].finished = 1; /* mark as closed */
1703 + }
1704 +
1705 + vmaw->closed = 1;
1706 +
1707 + if (vmaw->status < 0 && *errp == NULL) {
1708 + error_setg(errp, "%s", vmaw->errmsg);
1709 + }
1710 +
1711 + qemu_co_mutex_unlock(&vmaw->flush_lock);
1712 +
1713 + return vmaw->status;
1714 +}
1715 +
1716 +void vma_writer_destroy(VmaWriter *vmaw)
1717 +{
1718 + assert(vmaw);
1719 +
1720 + int i;
1721 +
1722 + for (i = 0; i <= 255; i++) {
1723 + if (vmaw->stream_info[i].devname) {
1724 + g_free(vmaw->stream_info[i].devname);
1725 + }
1726 + }
1727 +
1728 + if (vmaw->md5csum) {
1729 + g_checksum_free(vmaw->md5csum);
1730 + }
1731 +
1732 + qemu_vfree(vmaw->headerbuf);
1733 + qemu_vfree(vmaw->outbuf);
1734 + g_free(vmaw);
1735 +}
1736 diff --git a/vma.c b/vma.c
1737 new file mode 100644
1738 index 0000000000..304f02bc84
1739 --- /dev/null
1740 +++ b/vma.c
1741 @@ -0,0 +1,878 @@
1742 +/*
1743 + * VMA: Virtual Machine Archive
1744 + *
1745 + * Copyright (C) 2012-2013 Proxmox Server Solutions
1746 + *
1747 + * Authors:
1748 + * Dietmar Maurer (dietmar@proxmox.com)
1749 + *
1750 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
1751 + * See the COPYING file in the top-level directory.
1752 + *
1753 + */
1754 +
1755 +#include "qemu/osdep.h"
1756 +#include <glib.h>
1757 +
1758 +#include "vma.h"
1759 +#include "qemu/module.h"
1760 +#include "qemu/error-report.h"
1761 +#include "qemu/main-loop.h"
1762 +#include "qemu/cutils.h"
1763 +#include "qemu/memalign.h"
1764 +#include "qapi/qmp/qdict.h"
1765 +#include "sysemu/block-backend.h"
1766 +
1767 +static void help(void)
1768 +{
1769 + const char *help_msg =
1770 + "usage: vma command [command options]\n"
1771 + "\n"
1772 + "vma list <filename>\n"
1773 + "vma config <filename> [-c config]\n"
1774 + "vma create <filename> [-c config] pathname ...\n"
1775 + "vma extract <filename> [-r <fifo>] <targetdir>\n"
1776 + "vma verify <filename> [-v]\n"
1777 + ;
1778 +
1779 + printf("%s", help_msg);
1780 + exit(1);
1781 +}
1782 +
1783 +static const char *extract_devname(const char *path, char **devname, int index)
1784 +{
1785 + assert(path);
1786 +
1787 + const char *sep = strchr(path, '=');
1788 +
1789 + if (sep) {
1790 + *devname = g_strndup(path, sep - path);
1791 + path = sep + 1;
1792 + } else {
1793 + if (index >= 0) {
1794 + *devname = g_strdup_printf("disk%d", index);
1795 + } else {
1796 + *devname = NULL;
1797 + }
1798 + }
1799 +
1800 + return path;
1801 +}
1802 +
1803 +static void print_content(VmaReader *vmar)
1804 +{
1805 + assert(vmar);
1806 +
1807 + VmaHeader *head = vma_reader_get_header(vmar);
1808 +
1809 + GList *l = vma_reader_get_config_data(vmar);
1810 + while (l && l->data) {
1811 + VmaConfigData *cdata = (VmaConfigData *)l->data;
1812 + l = g_list_next(l);
1813 + printf("CFG: size: %d name: %s\n", cdata->len, cdata->name);
1814 + }
1815 +
1816 + int i;
1817 + VmaDeviceInfo *di;
1818 + for (i = 1; i < 255; i++) {
1819 + di = vma_reader_get_device_info(vmar, i);
1820 + if (di) {
1821 + if (strcmp(di->devname, "vmstate") == 0) {
1822 + printf("VMSTATE: dev_id=%d memory: %zd\n", i, di->size);
1823 + } else {
1824 + printf("DEV: dev_id=%d size: %zd devname: %s\n",
1825 + i, di->size, di->devname);
1826 + }
1827 + }
1828 + }
1829 + /* ctime is the last entry we print */
1830 + printf("CTIME: %s", ctime(&head->ctime));
1831 + fflush(stdout);
1832 +}
1833 +
1834 +static int list_content(int argc, char **argv)
1835 +{
1836 + int c, ret = 0;
1837 + const char *filename;
1838 +
1839 + for (;;) {
1840 + c = getopt(argc, argv, "h");
1841 + if (c == -1) {
1842 + break;
1843 + }
1844 + switch (c) {
1845 + case '?':
1846 + case 'h':
1847 + help();
1848 + break;
1849 + default:
1850 + g_assert_not_reached();
1851 + }
1852 + }
1853 +
1854 + /* Get the filename */
1855 + if ((optind + 1) != argc) {
1856 + help();
1857 + }
1858 + filename = argv[optind++];
1859 +
1860 + Error *errp = NULL;
1861 + VmaReader *vmar = vma_reader_create(filename, &errp);
1862 +
1863 + if (!vmar) {
1864 + g_error("%s", error_get_pretty(errp));
1865 + }
1866 +
1867 + print_content(vmar);
1868 +
1869 + vma_reader_destroy(vmar);
1870 +
1871 + return ret;
1872 +}
1873 +
1874 +typedef struct RestoreMap {
1875 + char *devname;
1876 + char *path;
1877 + char *format;
1878 + uint64_t throttling_bps;
1879 + char *throttling_group;
1880 + char *cache;
1881 + bool write_zero;
1882 + bool skip;
1883 +} RestoreMap;
1884 +
1885 +static bool try_parse_option(char **line, const char *optname, char **out, const char *inbuf) {
1886 + size_t optlen = strlen(optname);
1887 + if (strncmp(*line, optname, optlen) != 0 || (*line)[optlen] != '=') {
1888 + return false;
1889 + }
1890 + if (*out) {
1891 + g_error("read map failed - duplicate value for option '%s'", optname);
1892 + }
1893 +    char *value = (*line) + optlen + 1; /* skip past the '=' */
1894 + char *colon = strchr(value, ':');
1895 + if (!colon) {
1896 + g_error("read map failed - option '%s' not terminated ('%s')",
1897 + optname, inbuf);
1898 + }
1899 + *line = colon+1;
1900 + *out = g_strndup(value, colon - value);
1901 + return true;
1902 +}
1903 +
1904 +static uint64_t verify_u64(const char *text) {
1905 + uint64_t value;
1906 + const char *endptr = NULL;
1907 + if (qemu_strtou64(text, &endptr, 0, &value) != 0 || !endptr || *endptr) {
1908 + g_error("read map failed - not a number: %s", text);
1909 + }
1910 + return value;
1911 +}
1912 +
1913 +static int extract_content(int argc, char **argv)
1914 +{
1915 + int c, ret = 0;
1916 + int verbose = 0;
1917 + const char *filename;
1918 + const char *dirname;
1919 + const char *readmap = NULL;
1920 +
1921 + for (;;) {
1922 + c = getopt(argc, argv, "hvr:");
1923 + if (c == -1) {
1924 + break;
1925 + }
1926 + switch (c) {
1927 + case '?':
1928 + case 'h':
1929 + help();
1930 + break;
1931 + case 'r':
1932 + readmap = optarg;
1933 + break;
1934 + case 'v':
1935 + verbose = 1;
1936 + break;
1937 + default:
1938 + help();
1939 + }
1940 + }
1941 +
1942 + /* Get the filename */
1943 + if ((optind + 2) != argc) {
1944 + help();
1945 + }
1946 + filename = argv[optind++];
1947 + dirname = argv[optind++];
1948 +
1949 + Error *errp = NULL;
1950 + VmaReader *vmar = vma_reader_create(filename, &errp);
1951 +
1952 + if (!vmar) {
1953 + g_error("%s", error_get_pretty(errp));
1954 + }
1955 +
1956 + if (mkdir(dirname, 0777) < 0) {
1957 + g_error("unable to create target directory %s - %s",
1958 + dirname, g_strerror(errno));
1959 + }
1960 +
1961 + GList *l = vma_reader_get_config_data(vmar);
1962 + while (l && l->data) {
1963 + VmaConfigData *cdata = (VmaConfigData *)l->data;
1964 + l = g_list_next(l);
1965 + char *cfgfn = g_strdup_printf("%s/%s", dirname, cdata->name);
1966 + GError *err = NULL;
1967 + if (!g_file_set_contents(cfgfn, (gchar *)cdata->data, cdata->len,
1968 + &err)) {
1969 + g_error("unable to write file: %s", err->message);
1970 + }
1971 + }
1972 +
1973 + GHashTable *devmap = g_hash_table_new(g_str_hash, g_str_equal);
1974 +
1975 + if (readmap) {
1976 + print_content(vmar);
1977 +
1978 + FILE *map = fopen(readmap, "r");
1979 + if (!map) {
1980 + g_error("unable to open fifo %s - %s", readmap, g_strerror(errno));
1981 + }
1982 +
1983 + while (1) {
1984 + char inbuf[8192];
1985 + char *line = fgets(inbuf, sizeof(inbuf), map);
1986 + char *format = NULL;
1987 + char *bps = NULL;
1988 + char *group = NULL;
1989 + char *cache = NULL;
1990 + char *devname = NULL;
1991 + bool skip = false;
1992 + uint64_t bps_value = 0;
1993 + const char *path = NULL;
1994 + bool write_zero = true;
1995 +
1996 + if (!line || line[0] == '\0' || !strcmp(line, "done\n")) {
1997 + break;
1998 + }
1999 + int len = strlen(line);
2000 + if (line[len - 1] == '\n') {
2001 + line[len - 1] = '\0';
2002 + len = len - 1;
2003 + if (len == 0) {
2004 + break;
2005 + }
2006 + }
2007 +
2008 + if (strncmp(line, "skip", 4) == 0) {
2009 + if (len < 6 || line[4] != '=') {
2010 + g_error("read map failed - option 'skip' has no value ('%s')",
2011 + inbuf);
2012 + } else {
2013 + devname = line + 5;
2014 + skip = true;
2015 + }
2016 + } else {
2017 + while (1) {
2018 + if (!try_parse_option(&line, "format", &format, inbuf) &&
2019 + !try_parse_option(&line, "throttling.bps", &bps, inbuf) &&
2020 + !try_parse_option(&line, "throttling.group", &group, inbuf) &&
2021 + !try_parse_option(&line, "cache", &cache, inbuf))
2022 + {
2023 + break;
2024 + }
2025 + }
2026 +
2027 + if (bps) {
2028 + bps_value = verify_u64(bps);
2029 + g_free(bps);
2030 + }
2031 +
2032 + if (line[0] == '0' && line[1] == ':') {
2033 + path = line + 2;
2034 + write_zero = false;
2035 + } else if (line[0] == '1' && line[1] == ':') {
2036 + path = line + 2;
2037 + write_zero = true;
2038 + } else {
2039 + g_error("read map failed - parse error ('%s')", inbuf);
2040 + }
2041 +
2042 + path = extract_devname(path, &devname, -1);
2043 + }
2044 +
2045 + if (!devname) {
2046 + g_error("read map failed - no dev name specified ('%s')",
2047 + inbuf);
2048 + }
2049 +
2050 + RestoreMap *map = g_new0(RestoreMap, 1);
2051 + map->devname = g_strdup(devname);
2052 + map->path = g_strdup(path);
2053 + map->format = format;
2054 + map->throttling_bps = bps_value;
2055 + map->throttling_group = group;
2056 + map->cache = cache;
2057 + map->write_zero = write_zero;
2058 + map->skip = skip;
2059 +
2060 + g_hash_table_insert(devmap, map->devname, map);
2061 +
2062 +        }
2063 + }
2064 +
2065 + int i;
2066 + int vmstate_fd = -1;
2067 + guint8 vmstate_stream = 0;
2068 +
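+    /*
+     * Walk all device streams: the stream named "vmstate" is written to
+     * <dirname>/vmstate.bin; every other stream is restored to the target
+     * given in the map file, or to a freshly created sparse raw image when
+     * no map was given.
+     */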
2069 + for (i = 1; i < 255; i++) {
2070 + VmaDeviceInfo *di = vma_reader_get_device_info(vmar, i);
2071 + if (di && (strcmp(di->devname, "vmstate") == 0)) {
2072 + vmstate_stream = i;
2073 + char *statefn = g_strdup_printf("%s/vmstate.bin", dirname);
2074 + vmstate_fd = open(statefn, O_WRONLY|O_CREAT|O_EXCL, 0644);
2075 + if (vmstate_fd < 0) {
2076 + g_error("create vmstate file '%s' failed - %s", statefn,
2077 + g_strerror(errno));
2078 + }
2079 + g_free(statefn);
2080 + } else if (di) {
2081 + char *devfn = NULL;
2082 + const char *format = NULL;
2083 + uint64_t throttling_bps = 0;
2084 + const char *throttling_group = NULL;
2085 + const char *cache = NULL;
2086 + int flags = BDRV_O_RDWR;
2087 + bool write_zero = true;
2088 + bool skip = false;
2089 +
2090 + BlockBackend *blk = NULL;
2091 +
2092 + if (readmap) {
2093 + RestoreMap *map;
2094 + map = (RestoreMap *)g_hash_table_lookup(devmap, di->devname);
2095 + if (map == NULL) {
2096 + g_error("no device name mapping for %s", di->devname);
2097 + }
2098 + devfn = map->path;
2099 + format = map->format;
2100 + throttling_bps = map->throttling_bps;
2101 + throttling_group = map->throttling_group;
2102 + cache = map->cache;
2103 + write_zero = map->write_zero;
2104 + skip = map->skip;
2105 + } else {
2106 + devfn = g_strdup_printf("%s/tmp-disk-%s.raw",
2107 + dirname, di->devname);
2108 + printf("DEVINFO %s %zd\n", devfn, di->size);
2109 +
2110 + bdrv_img_create(devfn, "raw", NULL, NULL, NULL, di->size,
2111 + flags, true, &errp);
2112 + if (errp) {
2113 + g_error("can't create file %s: %s", devfn,
2114 + error_get_pretty(errp));
2115 + }
2116 +
2117 + /* Note: we created an empty file above, so there is no
2118 + * need to write zeroes (so we generate a sparse file)
2119 + */
2120 + write_zero = false;
2121 + }
2122 +
2123 + if (!skip) {
2124 + size_t devlen = strlen(devfn);
2125 + QDict *options = NULL;
2126 + bool writethrough;
2127 + if (format) {
2128 + /* explicit format from commandline */
2129 + options = qdict_new();
2130 + qdict_put_str(options, "driver", format);
2131 + } else if ((devlen > 4 && strcmp(devfn+devlen-4, ".raw") == 0) ||
2132 + strncmp(devfn, "/dev/", 5) == 0)
2133 + {
2134 +                /* This part is now deprecated for PVE as well (just as qemu
2135 +                 * deprecated not specifying an explicit raw format, too).
2136 +                 */
2137 + /* explicit raw format */
2138 + options = qdict_new();
2139 + qdict_put_str(options, "driver", "raw");
2140 + }
2141 +
2142 + if (cache && bdrv_parse_cache_mode(cache, &flags, &writethrough)) {
2143 +                    g_error("invalid cache option: %s", cache);
2144 + }
2145 +
2146 + if (errp || !(blk = blk_new_open(devfn, NULL, options, flags, &errp))) {
2147 + g_error("can't open file %s - %s", devfn,
2148 + error_get_pretty(errp));
2149 + }
2150 +
2151 + if (cache) {
2152 + blk_set_enable_write_cache(blk, !writethrough);
2153 + }
2154 +
2155 + if (throttling_group) {
2156 + blk_io_limits_enable(blk, throttling_group);
2157 + }
2158 +
2159 + if (throttling_bps) {
2160 + if (!throttling_group) {
2161 + blk_io_limits_enable(blk, devfn);
2162 + }
2163 +
2164 + ThrottleConfig cfg;
2165 + throttle_config_init(&cfg);
2166 + cfg.buckets[THROTTLE_BPS_WRITE].avg = throttling_bps;
2167 + Error *err = NULL;
2168 + if (!throttle_is_valid(&cfg, &err)) {
2169 + error_report_err(err);
2170 + g_error("failed to apply throttling");
2171 + }
2172 + blk_set_io_limits(blk, &cfg);
2173 + }
2174 + }
2175 +
2176 + if (vma_reader_register_bs(vmar, i, blk, write_zero, skip, &errp) < 0) {
2177 + g_error("%s", error_get_pretty(errp));
2178 + }
2179 +
2180 + if (!readmap) {
2181 + g_free(devfn);
2182 + }
2183 + }
2184 + }
2185 +
2186 + if (vma_reader_restore(vmar, vmstate_fd, verbose, &errp) < 0) {
2187 + g_error("restore failed - %s", error_get_pretty(errp));
2188 + }
2189 +
2190 + if (!readmap) {
2191 + for (i = 1; i < 255; i++) {
2192 + VmaDeviceInfo *di = vma_reader_get_device_info(vmar, i);
2193 + if (di && (i != vmstate_stream)) {
2194 + char *tmpfn = g_strdup_printf("%s/tmp-disk-%s.raw",
2195 + dirname, di->devname);
2196 + char *fn = g_strdup_printf("%s/disk-%s.raw",
2197 + dirname, di->devname);
2198 + if (rename(tmpfn, fn) != 0) {
2199 + g_error("rename %s to %s failed - %s",
2200 + tmpfn, fn, g_strerror(errno));
2201 + }
2202 + }
2203 + }
2204 + }
2205 +
2206 + vma_reader_destroy(vmar);
2207 +
2208 + bdrv_close_all();
2209 +
2210 + return ret;
2211 +}
2212 +
2213 +static int verify_content(int argc, char **argv)
2214 +{
2215 + int c, ret = 0;
2216 + int verbose = 0;
2217 + const char *filename;
2218 +
2219 + for (;;) {
2220 + c = getopt(argc, argv, "hv");
2221 + if (c == -1) {
2222 + break;
2223 + }
2224 + switch (c) {
2225 + case '?':
2226 + case 'h':
2227 + help();
2228 + break;
2229 + case 'v':
2230 + verbose = 1;
2231 + break;
2232 + default:
2233 + help();
2234 + }
2235 + }
2236 +
2237 + /* Get the filename */
2238 + if ((optind + 1) != argc) {
2239 + help();
2240 + }
2241 + filename = argv[optind++];
2242 +
2243 + Error *errp = NULL;
2244 + VmaReader *vmar = vma_reader_create(filename, &errp);
2245 +
2246 + if (!vmar) {
2247 + g_error("%s", error_get_pretty(errp));
2248 + }
2249 +
2250 + if (verbose) {
2251 + print_content(vmar);
2252 + }
2253 +
2254 + if (vma_reader_verify(vmar, verbose, &errp) < 0) {
2255 + g_error("verify failed - %s", error_get_pretty(errp));
2256 + }
2257 +
2258 + vma_reader_destroy(vmar);
2259 +
2260 + bdrv_close_all();
2261 +
2262 + return ret;
2263 +}
2264 +
2265 +typedef struct BackupJob {
2266 + BlockBackend *target;
2267 + int64_t len;
2268 + VmaWriter *vmaw;
2269 + uint8_t dev_id;
2270 +} BackupJob;
2271 +
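+/* Number of 512-byte sectors covered by one 64 KiB VMA cluster (128). */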
2272 +#define BACKUP_SECTORS_PER_CLUSTER (VMA_CLUSTER_SIZE / BDRV_SECTOR_SIZE)
2273 +
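+/*
+ * Used when the archive contains no disk streams (config data only):
+ * flush any buffered output and close the writer.
+ */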
2274 +static void coroutine_fn backup_run_empty(void *opaque)
2275 +{
2276 + VmaWriter *vmaw = (VmaWriter *)opaque;
2277 +
2278 + vma_writer_flush_output(vmaw);
2279 +
2280 + Error *err = NULL;
2281 + if (vma_writer_close(vmaw, &err) != 0) {
2282 + g_warning("vma_writer_close failed %s", error_get_pretty(err));
2283 + }
2284 +}
2285 +
2286 +static void coroutine_fn backup_run(void *opaque)
2287 +{
2288 + BackupJob *job = (BackupJob *)opaque;
2289 + struct iovec iov;
2290 + QEMUIOVector qiov;
2291 +
2292 + int64_t start, end, readlen;
2293 + int ret = 0;
2294 +
2295 + unsigned char *buf = blk_blockalign(job->target, VMA_CLUSTER_SIZE);
2296 +
2297 + start = 0;
2298 + end = DIV_ROUND_UP(job->len / BDRV_SECTOR_SIZE,
2299 + BACKUP_SECTORS_PER_CLUSTER);
2300 +
2301 + for (; start < end; start++) {
2302 + iov.iov_base = buf;
2303 + iov.iov_len = VMA_CLUSTER_SIZE;
2304 + qemu_iovec_init_external(&qiov, &iov, 1);
2305 +
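+        /*
+         * The final cluster may extend past the end of the device: zero the
+         * buffer first and read only the remaining bytes.
+         */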
2306 + if (start + 1 == end) {
2307 + memset(buf, 0, VMA_CLUSTER_SIZE);
2308 + readlen = job->len - start * VMA_CLUSTER_SIZE;
2309 + assert(readlen > 0 && readlen <= VMA_CLUSTER_SIZE);
2310 + } else {
2311 + readlen = VMA_CLUSTER_SIZE;
2312 + }
2313 +
2314 + ret = blk_co_preadv(job->target, start * VMA_CLUSTER_SIZE,
2315 + readlen, &qiov, 0);
2316 + if (ret < 0) {
2317 + vma_writer_set_error(job->vmaw, "read error", -1);
2318 + goto out;
2319 + }
2320 +
2321 + size_t zb = 0;
2322 + if (vma_writer_write(job->vmaw, job->dev_id, start, buf, &zb) < 0) {
2323 +            vma_writer_set_error(job->vmaw, "backup_run vma_writer_write failed", -1);
2324 + goto out;
2325 + }
2326 + }
2327 +
2329 +out:
2330 + if (vma_writer_close_stream(job->vmaw, job->dev_id) <= 0) {
2331 + Error *err = NULL;
2332 + if (vma_writer_close(job->vmaw, &err) != 0) {
2333 + g_warning("vma_writer_close failed %s", error_get_pretty(err));
2334 + }
2335 + }
2336 + qemu_vfree(buf);
2337 +}
2338 +
2339 +static int create_archive(int argc, char **argv)
2340 +{
2341 + int i, c;
2342 + int verbose = 0;
2343 + const char *archivename;
2344 + GList *backup_coroutines = NULL;
2345 + GList *config_files = NULL;
2346 +
2347 + for (;;) {
2348 + c = getopt(argc, argv, "hvc:");
2349 + if (c == -1) {
2350 + break;
2351 + }
2352 + switch (c) {
2353 + case '?':
2354 + case 'h':
2355 + help();
2356 + break;
2357 + case 'c':
2358 + config_files = g_list_append(config_files, optarg);
2359 + break;
2360 + case 'v':
2361 + verbose = 1;
2362 + break;
2363 + default:
2364 + g_assert_not_reached();
2365 + }
2366 + }
2367 +
2368 +    /* make sure we have an archive name */
2370 + if ((optind + 1) > argc) {
2371 + help();
2372 + }
2373 +
2374 + archivename = argv[optind++];
2375 +
2376 + uuid_t uuid;
2377 + uuid_generate(uuid);
2378 +
2379 + Error *local_err = NULL;
2380 + VmaWriter *vmaw = vma_writer_create(archivename, uuid, &local_err);
2381 +
2382 + if (vmaw == NULL) {
2383 + g_error("%s", error_get_pretty(local_err));
2384 + }
2385 +
2386 + GList *l = config_files;
2387 + while (l && l->data) {
2388 + char *name = l->data;
2389 + char *cdata = NULL;
2390 + gsize clen = 0;
2391 + GError *err = NULL;
2392 + if (!g_file_get_contents(name, &cdata, &clen, &err)) {
2393 + unlink(archivename);
2394 + g_error("Unable to read file: %s", err->message);
2395 + }
2396 +
2397 + if (vma_writer_add_config(vmaw, name, cdata, clen) != 0) {
2398 + unlink(archivename);
2399 + g_error("Unable to append config data %s (len = %zd)",
2400 + name, clen);
2401 + }
2402 + l = g_list_next(l);
2403 + }
2404 +
2405 + int devcount = 0;
2406 + while (optind < argc) {
2407 + const char *path = argv[optind++];
2408 + char *devname = NULL;
2409 + path = extract_devname(path, &devname, devcount++);
2410 +
2411 + Error *errp = NULL;
2412 + BlockBackend *target;
2413 +
2414 + target = blk_new_open(path, NULL, NULL, 0, &errp);
2415 + if (!target) {
2416 + unlink(archivename);
2417 + g_error("bdrv_open '%s' failed - %s", path, error_get_pretty(errp));
2418 + }
2419 + int64_t size = blk_getlength(target);
2420 + int dev_id = vma_writer_register_stream(vmaw, devname, size);
2421 + if (dev_id <= 0) {
2422 + unlink(archivename);
2423 + g_error("vma_writer_register_stream '%s' failed", devname);
2424 + }
2425 +
2426 + BackupJob *job = g_new0(BackupJob, 1);
2427 + job->len = size;
2428 + job->target = target;
2429 + job->vmaw = vmaw;
2430 + job->dev_id = dev_id;
2431 +
2432 + Coroutine *co = qemu_coroutine_create(backup_run, job);
2433 + // Don't enter coroutine yet, because it might write the header before
2434 + // all streams can be registered.
2435 + backup_coroutines = g_list_append(backup_coroutines, co);
2436 + }
2437 +
2438 + VmaStatus vmastat;
2439 + int percent = 0;
2440 + int last_percent = -1;
2441 +
2442 + if (devcount) {
2443 + GList *entry = backup_coroutines;
2444 + while (entry && entry->data) {
2445 + Coroutine *co = entry->data;
2446 + qemu_coroutine_enter(co);
2447 + entry = g_list_next(entry);
2448 + }
2449 +
2450 + while (1) {
2451 + main_loop_wait(false);
2452 + vma_writer_get_status(vmaw, &vmastat);
2453 +
2454 + if (verbose) {
2455 +
2456 + uint64_t total = 0;
2457 + uint64_t transferred = 0;
2458 + uint64_t zero_bytes = 0;
2459 +
2460 + int i;
2461 + for (i = 0; i < 256; i++) {
2462 + if (vmastat.stream_info[i].size) {
2463 + total += vmastat.stream_info[i].size;
2464 + transferred += vmastat.stream_info[i].transferred;
2465 + zero_bytes += vmastat.stream_info[i].zero_bytes;
2466 + }
2467 + }
2468 + percent = (transferred*100)/total;
2469 + if (percent != last_percent) {
2470 + fprintf(stderr, "progress %d%% %zd/%zd %zd\n", percent,
2471 + transferred, total, zero_bytes);
2472 + fflush(stderr);
2473 +
2474 + last_percent = percent;
2475 + }
2476 + }
2477 +
2478 + if (vmastat.closed) {
2479 + break;
2480 + }
2481 + }
2482 + } else {
2483 + Coroutine *co = qemu_coroutine_create(backup_run_empty, vmaw);
2484 + qemu_coroutine_enter(co);
2485 + while (1) {
2486 + main_loop_wait(false);
2487 + vma_writer_get_status(vmaw, &vmastat);
2488 + if (vmastat.closed) {
2489 + break;
2490 + }
2491 + }
2492 + }
2493 +
2494 + bdrv_drain_all();
2495 +
2496 + vma_writer_get_status(vmaw, &vmastat);
2497 +
2498 + if (verbose) {
2499 + for (i = 0; i < 256; i++) {
2500 + VmaStreamInfo *si = &vmastat.stream_info[i];
2501 + if (si->size) {
2502 + fprintf(stderr, "image %s: size=%zd zeros=%zd saved=%zd\n",
2503 + si->devname, si->size, si->zero_bytes,
2504 + si->size - si->zero_bytes);
2505 + }
2506 + }
2507 + }
2508 +
2509 + if (vmastat.status < 0) {
2510 + unlink(archivename);
2511 + g_error("creating vma archive failed");
2512 + }
2513 +
2514 + g_list_free(backup_coroutines);
2515 + g_list_free(config_files);
2516 + vma_writer_destroy(vmaw);
2517 + return 0;
2518 +}
2519 +
2520 +static int dump_config(int argc, char **argv)
2521 +{
2522 + int c, ret = 0;
2523 + const char *filename;
2524 + const char *config_name = "qemu-server.conf";
2525 +
2526 + for (;;) {
2527 + c = getopt(argc, argv, "hc:");
2528 + if (c == -1) {
2529 + break;
2530 + }
2531 + switch (c) {
2532 + case '?':
2533 + case 'h':
2534 + help();
2535 + break;
2536 + case 'c':
2537 + config_name = optarg;
2538 + break;
2539 + default:
2540 + help();
2541 + }
2542 + }
2543 +
2544 + /* Get the filename */
2545 + if ((optind + 1) != argc) {
2546 + help();
2547 + }
2548 + filename = argv[optind++];
2549 +
2550 + Error *errp = NULL;
2551 + VmaReader *vmar = vma_reader_create(filename, &errp);
2552 +
2553 + if (!vmar) {
2554 + g_error("%s", error_get_pretty(errp));
2555 + }
2556 +
2557 + int found = 0;
2558 + GList *l = vma_reader_get_config_data(vmar);
2559 + while (l && l->data) {
2560 + VmaConfigData *cdata = (VmaConfigData *)l->data;
2561 + l = g_list_next(l);
2562 + if (strcmp(cdata->name, config_name) == 0) {
2563 + found = 1;
2564 + fwrite(cdata->data, cdata->len, 1, stdout);
2565 + break;
2566 + }
2567 + }
2568 +
2569 + vma_reader_destroy(vmar);
2570 +
2571 + bdrv_close_all();
2572 +
2573 + if (!found) {
2574 + fprintf(stderr, "unable to find configuration data '%s'\n", config_name);
2575 + return -1;
2576 + }
2577 +
2578 + return ret;
2579 +}
2580 +
2581 +int main(int argc, char **argv)
2582 +{
2583 + const char *cmdname;
2584 + Error *main_loop_err = NULL;
2585 +
2586 + error_init(argv[0]);
2587 + module_call_init(MODULE_INIT_TRACE);
2588 + qemu_init_exec_dir(argv[0]);
2589 +
2590 + if (qemu_init_main_loop(&main_loop_err)) {
2591 + g_error("%s", error_get_pretty(main_loop_err));
2592 + }
2593 +
2594 + bdrv_init();
2595 + module_call_init(MODULE_INIT_QOM);
2596 +
2597 + if (argc < 2) {
2598 + help();
2599 + }
2600 +
2601 + cmdname = argv[1];
2602 + argc--; argv++;
2603 +
2605 + if (!strcmp(cmdname, "list")) {
2606 + return list_content(argc, argv);
2607 + } else if (!strcmp(cmdname, "create")) {
2608 + return create_archive(argc, argv);
2609 + } else if (!strcmp(cmdname, "extract")) {
2610 + return extract_content(argc, argv);
2611 + } else if (!strcmp(cmdname, "verify")) {
2612 + return verify_content(argc, argv);
2613 + } else if (!strcmp(cmdname, "config")) {
2614 + return dump_config(argc, argv);
2615 + }
2616 +
2617 + help();
2618 + return 0;
2619 +}
2620 diff --git a/vma.h b/vma.h
2621 new file mode 100644
2622 index 0000000000..1b62859165
2623 --- /dev/null
2624 +++ b/vma.h
2625 @@ -0,0 +1,150 @@
2626 +/*
2627 + * VMA: Virtual Machine Archive
2628 + *
2629 + * Copyright (C) Proxmox Server Solutions
2630 + *
2631 + * Authors:
2632 + * Dietmar Maurer (dietmar@proxmox.com)
2633 + *
2634 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
2635 + * See the COPYING file in the top-level directory.
2636 + *
2637 + */
2638 +
2639 +#ifndef BACKUP_VMA_H
2640 +#define BACKUP_VMA_H
2641 +
2642 +#include <uuid/uuid.h>
2643 +#include "qapi/error.h"
2644 +#include "block/block.h"
2645 +
2646 +#define VMA_BLOCK_BITS 12
2647 +#define VMA_BLOCK_SIZE (1<<VMA_BLOCK_BITS)
2648 +#define VMA_CLUSTER_BITS (VMA_BLOCK_BITS+4)
2649 +#define VMA_CLUSTER_SIZE (1<<VMA_CLUSTER_BITS)
2650 +
2651 +#if VMA_CLUSTER_SIZE != 65536
2652 +#error unexpected cluster size
2653 +#endif
2654 +
2655 +#define VMA_EXTENT_HEADER_SIZE 512
2656 +#define VMA_BLOCKS_PER_EXTENT 59
2657 +#define VMA_MAX_CONFIGS 256
2658 +
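+/* 512-byte extent header plus 59 clusters of 64 KiB: 512 + 59 * 65536 = 3867136 */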
2659 +#define VMA_MAX_EXTENT_SIZE \
2660 + (VMA_EXTENT_HEADER_SIZE+VMA_CLUSTER_SIZE*VMA_BLOCKS_PER_EXTENT)
2661 +#if VMA_MAX_EXTENT_SIZE != 3867136
2662 +#error unexpected VMA_EXTENT_SIZE
2663 +#endif
2664 +
2665 +/* File Format Definitions */
2666 +
2667 +#define VMA_MAGIC (GUINT32_TO_BE(('V'<<24)|('M'<<16)|('A'<<8)|0x00))
2668 +#define VMA_EXTENT_MAGIC (GUINT32_TO_BE(('V'<<24)|('M'<<16)|('A'<<8)|'E'))
2669 +
2670 +typedef struct VmaDeviceInfoHeader {
2671 + uint32_t devname_ptr; /* offset into blob_buffer table */
2672 + uint32_t reserved0;
2673 + uint64_t size; /* device size in bytes */
2674 + uint64_t reserved1;
2675 + uint64_t reserved2;
2676 +} VmaDeviceInfoHeader;
2677 +
2678 +typedef struct VmaHeader {
2679 + uint32_t magic;
2680 + uint32_t version;
2681 + unsigned char uuid[16];
2682 + int64_t ctime;
2683 + unsigned char md5sum[16];
2684 +
2685 + uint32_t blob_buffer_offset;
2686 + uint32_t blob_buffer_size;
2687 + uint32_t header_size;
2688 +
2689 + unsigned char reserved[1984];
2690 +
2691 + uint32_t config_names[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
2692 + uint32_t config_data[VMA_MAX_CONFIGS]; /* offset into blob_buffer table */
2693 +
2694 + uint32_t reserved1;
2695 +
2696 + VmaDeviceInfoHeader dev_info[256];
2697 +} VmaHeader;
2698 +
2699 +typedef struct VmaExtentHeader {
2700 + uint32_t magic;
2701 + uint16_t reserved1;
2702 + uint16_t block_count;
2703 + unsigned char uuid[16];
2704 + unsigned char md5sum[16];
2705 + uint64_t blockinfo[VMA_BLOCKS_PER_EXTENT];
2706 +} VmaExtentHeader;
2707 +
2708 +/* functions/definitions to read/write vma files */
2709 +
2710 +typedef struct VmaReader VmaReader;
2711 +
2712 +typedef struct VmaWriter VmaWriter;
2713 +
2714 +typedef struct VmaConfigData {
2715 + const char *name;
2716 + const void *data;
2717 + uint32_t len;
2718 +} VmaConfigData;
2719 +
2720 +typedef struct VmaStreamInfo {
2721 + uint64_t size;
2722 + uint64_t cluster_count;
2723 + uint64_t transferred;
2724 + uint64_t zero_bytes;
2725 + int finished;
2726 + char *devname;
2727 +} VmaStreamInfo;
2728 +
2729 +typedef struct VmaStatus {
2730 + int status;
2731 + bool closed;
2732 + char errmsg[8192];
2733 + char uuid_str[37];
2734 + VmaStreamInfo stream_info[256];
2735 +} VmaStatus;
2736 +
2737 +typedef struct VmaDeviceInfo {
2738 + uint64_t size; /* device size in bytes */
2739 + const char *devname;
2740 +} VmaDeviceInfo;
2741 +
2742 +VmaWriter *vma_writer_create(const char *filename, uuid_t uuid, Error **errp);
2743 +int vma_writer_close(VmaWriter *vmaw, Error **errp);
2744 +void vma_writer_error_propagate(VmaWriter *vmaw, Error **errp);
2745 +void vma_writer_destroy(VmaWriter *vmaw);
2746 +int vma_writer_add_config(VmaWriter *vmaw, const char *name, gpointer data,
2747 + size_t len);
2748 +int vma_writer_register_stream(VmaWriter *vmaw, const char *devname,
2749 + size_t size);
2750 +
2751 +int64_t coroutine_fn vma_writer_write(VmaWriter *vmaw, uint8_t dev_id,
2752 + int64_t cluster_num,
2753 + const unsigned char *buf,
2754 + size_t *zero_bytes);
2755 +
2756 +int coroutine_fn vma_writer_close_stream(VmaWriter *vmaw, uint8_t dev_id);
2757 +int coroutine_fn vma_writer_flush_output(VmaWriter *vmaw);
2758 +
2759 +int vma_writer_get_status(VmaWriter *vmaw, VmaStatus *status);
2760 +void vma_writer_set_error(VmaWriter *vmaw, const char *fmt, ...);
2761 +
2762 +
2763 +VmaReader *vma_reader_create(const char *filename, Error **errp);
2764 +void vma_reader_destroy(VmaReader *vmar);
2765 +VmaHeader *vma_reader_get_header(VmaReader *vmar);
2766 +GList *vma_reader_get_config_data(VmaReader *vmar);
2767 +VmaDeviceInfo *vma_reader_get_device_info(VmaReader *vmar, guint8 dev_id);
2768 +int vma_reader_register_bs(VmaReader *vmar, guint8 dev_id,
2769 + BlockBackend *target, bool write_zeroes,
2770 + bool skip, Error **errp);
2771 +int vma_reader_restore(VmaReader *vmar, int vmstate_fd, bool verbose,
2772 + Error **errp);
2773 +int vma_reader_verify(VmaReader *vmar, bool verbose, Error **errp);
2774 +
2775 +#endif /* BACKUP_VMA_H */