1 From 577b000e947d817cf4e0189615c0d0257cb20259 Mon Sep 17 00:00:00 2001
2 From: Dietmar Maurer <dietmar@proxmox.com>
3 Date: Tue, 13 Nov 2012 10:03:52 +0100
4 Subject: [PATCH v3 2/7] add basic backup support to block driver
6 Function backup_job_create() creates a block job to backup a block device.
7 The coroutine is started with backup_job_start().
9 We call backup_do_cow() for each write during backup. That function
10 reads the original data and passes it to backup_dump_cb().
12 The tracked_request infrastructure is used to serialize access.
14 Currently backup cluster size is hardcoded to 65536 bytes.
16 Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>
19 backup.c | 308 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
21 block.c | 71 ++++++++++++-
24 6 files changed, 418 insertions(+), 6 deletions(-)
25 create mode 100644 backup.c
26 create mode 100644 backup.h
28 diff --git a/Makefile.objs b/Makefile.objs
29 index 3c7abca..cb46be5 100644
32 @@ -48,6 +48,7 @@ coroutine-obj-$(CONFIG_WIN32) += coroutine-win32.o
33 block-obj-y = iov.o cache-utils.o qemu-option.o module.o async.o
34 block-obj-y += nbd.o block.o blockjob.o aes.o qemu-config.o
35 block-obj-y += thread-pool.o qemu-progress.o qemu-sockets.o uri.o notify.o
36 +block-obj-y += backup.o
37 block-obj-y += $(coroutine-obj-y) $(qobject-obj-y) $(version-obj-y)
38 block-obj-$(CONFIG_POSIX) += event_notifier-posix.o aio-posix.o
39 block-obj-$(CONFIG_WIN32) += event_notifier-win32.o aio-win32.o
40 diff --git a/backup.c b/backup.c
42 index 0000000..2c13e21
49 + * Copyright (C) 2012 Proxmox Server Solutions
52 + * Dietmar Maurer (dietmar@proxmox.com)
54 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
55 + * See the COPYING file in the top-level directory.
64 +#include "block_int.h"
65 +#include "blockjob.h"
68 +#define DEBUG_BACKUP 0
70 +#define DPRINTF(fmt, ...) \
71 + do { if (DEBUG_BACKUP) { printf("backup: " fmt, ## __VA_ARGS__); } } \
75 +#define BITS_PER_LONG (sizeof(unsigned long) * 8)
77 +typedef struct BackupBlockJob {
79 + unsigned long *bitmap;
81 + BackupDumpFunc *backup_dump_cb;
82 + BlockDriverCompletionFunc *backup_complete_cb;
86 +static int backup_get_bitmap(BackupBlockJob *job, int64_t cluster_num)
89 + assert(job->bitmap);
91 + unsigned long val, idx, bit;
93 + idx = cluster_num / BITS_PER_LONG;
95 + assert(job->bitmap_size > idx);
97 + bit = cluster_num % BITS_PER_LONG;
98 + val = job->bitmap[idx];
100 + return !!(val & (1UL << bit));
103 +static void backup_set_bitmap(BackupBlockJob *job, int64_t cluster_num,
107 + assert(job->bitmap);
109 + unsigned long val, idx, bit;
111 + idx = cluster_num / BITS_PER_LONG;
113 + assert(job->bitmap_size > idx);
115 + bit = cluster_num % BITS_PER_LONG;
116 + val = job->bitmap[idx];
118 + if (!(val & (1UL << bit))) {
122 + if (val & (1UL << bit)) {
123 + val &= ~(1UL << bit);
126 + job->bitmap[idx] = val;
129 +static int backup_in_progress_count;
131 +static int coroutine_fn backup_do_cow(BlockDriverState *bs,
132 + int64_t sector_num, int nb_sectors)
135 + BackupBlockJob *job = (BackupBlockJob *)bs->job;
138 + BlockDriver *drv = bs->drv;
140 + QEMUIOVector bounce_qiov;
141 + void *bounce_buffer = NULL;
144 + backup_in_progress_count++;
146 + int64_t start, end;
148 + start = sector_num / BACKUP_BLOCKS_PER_CLUSTER;
149 + end = (sector_num + nb_sectors + BACKUP_BLOCKS_PER_CLUSTER - 1) /
150 + BACKUP_BLOCKS_PER_CLUSTER;
152 + DPRINTF("brdv_co_backup_cow enter %s C%zd %zd %d\n",
153 + bdrv_get_device_name(bs), start, sector_num, nb_sectors);
155 + for (; start < end; start++) {
156 + if (backup_get_bitmap(job, start)) {
157 + DPRINTF("brdv_co_backup_cow skip C%zd\n", start);
158 + continue; /* already copied */
161 + /* immediately set bitmap (avoid coroutine race) */
162 + backup_set_bitmap(job, start, 1);
164 + DPRINTF("brdv_co_backup_cow C%zd\n", start);
166 + if (!bounce_buffer) {
167 + iov.iov_len = BACKUP_CLUSTER_SIZE;
168 + iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
169 + qemu_iovec_init_external(&bounce_qiov, &iov, 1);
172 + ret = drv->bdrv_co_readv(bs, start * BACKUP_BLOCKS_PER_CLUSTER,
173 + BACKUP_BLOCKS_PER_CLUSTER,
176 + DPRINTF("brdv_co_backup_cow bdrv_read C%zd failed\n", start);
180 + ret = job->backup_dump_cb(job->opaque, bs, start, bounce_buffer);
182 + DPRINTF("brdv_co_backup_cow dump_cluster_cb C%zd failed\n", start);
186 + DPRINTF("brdv_co_backup_cow done C%zd\n", start);
190 + if (bounce_buffer) {
191 + qemu_vfree(bounce_buffer);
194 + backup_in_progress_count--;
199 +static int coroutine_fn backup_before_read(BlockDriverState *bs,
200 + int64_t sector_num,
201 + int nb_sectors, QEMUIOVector *qiov)
203 + return backup_do_cow(bs, sector_num, nb_sectors);
206 +static int coroutine_fn backup_before_write(BlockDriverState *bs,
207 + int64_t sector_num,
208 + int nb_sectors, QEMUIOVector *qiov)
210 + return backup_do_cow(bs, sector_num, nb_sectors);
214 +static BlockJobType backup_job_type = {
215 + .instance_size = sizeof(BackupBlockJob),
216 + .before_read = backup_before_read,
217 + .before_write = backup_before_write,
218 + .job_type = "backup",
221 +static void coroutine_fn backup_run(void *opaque)
223 + BackupBlockJob *job = opaque;
224 + BlockDriverState *bs = job->common.bs;
227 + int64_t start, end;
230 + end = (bs->total_sectors + BACKUP_BLOCKS_PER_CLUSTER - 1) /
231 + BACKUP_BLOCKS_PER_CLUSTER;
233 + DPRINTF("backup_run start %s %zd %zd\n", bdrv_get_device_name(bs),
238 + for (; start < end; start++) {
239 + if (block_job_is_cancelled(&job->common)) {
244 + if (backup_get_bitmap(job, start)) {
245 + continue; /* already copied */
248 + /* we need to yield so that qemu_aio_flush() returns.
249 + * (without it, the VM does not reboot)
250 + * todo: can we avoid that?
251 + * Note: use 1000 instead of 0 (0 prioritizes this task too much)
253 + block_job_sleep_ns(&job->common, rt_clock, 1000);
254 + if (block_job_is_cancelled(&job->common)) {
258 + DPRINTF("backup_run loop C%zd\n", start);
261 + * This triggers a cluster copy
262 + * Note: avoid direct call to backup_do_cow, because
263 + * this does not call tracked_request_begin()
265 + ret = bdrv_co_backup(bs, start*BACKUP_BLOCKS_PER_CLUSTER, 1);
269 + /* Publish progress */
270 + job->common.offset += BACKUP_CLUSTER_SIZE;
273 + while (backup_in_progress_count > 0) {
274 + DPRINTF("backup_run backup_in_progress_count != 0 (%d)",
275 + backup_in_progress_count);
276 + co_sleep_ns(rt_clock, 10000);
279 + DPRINTF("backup_run complete %d\n", ret);
280 + block_job_completed(&job->common, ret);
283 +static void backup_job_cleanup_cb(void *opaque, int ret)
285 + BlockDriverState *bs = opaque;
287 + BackupBlockJob *job = (BackupBlockJob *)bs->job;
290 + DPRINTF("backup_job_cleanup_cb start %d\n", ret);
292 + job->backup_complete_cb(job->opaque, ret);
294 + DPRINTF("backup_job_cleanup_cb end\n");
296 + g_free(job->bitmap);
300 +backup_job_start(BlockDriverState *bs)
304 + assert(bs->job->co == NULL);
306 + bs->job->co = qemu_coroutine_create(backup_run);
307 + qemu_coroutine_enter(bs->job->co, bs->job);
311 +backup_job_create(BlockDriverState *bs, BackupDumpFunc *backup_dump_cb,
312 + BlockDriverCompletionFunc *backup_complete_cb,
316 + assert(backup_dump_cb);
317 + assert(backup_complete_cb);
320 + DPRINTF("bdrv_backup_init failed - running job on %s\n",
321 + bdrv_get_device_name(bs));
325 + int64_t bitmap_size;
326 + const char *devname = bdrv_get_device_name(bs);
328 + if (!devname || !devname[0]) {
332 + DPRINTF("bdrv_backup_init %s\n", bdrv_get_device_name(bs));
335 + BackupBlockJob *job = block_job_create(&backup_job_type, bs, 0,
336 + backup_job_cleanup_cb, bs, &errp);
338 + job->common.cluster_size = BACKUP_CLUSTER_SIZE;
340 + bitmap_size = bs->total_sectors +
341 + BACKUP_BLOCKS_PER_CLUSTER * BITS_PER_LONG - 1;
342 + bitmap_size /= BACKUP_BLOCKS_PER_CLUSTER * BITS_PER_LONG;
344 + job->backup_dump_cb = backup_dump_cb;
345 + job->backup_complete_cb = backup_complete_cb;
346 + job->opaque = opaque;
347 + job->bitmap_size = bitmap_size;
348 + job->bitmap = g_new0(unsigned long, bitmap_size);
350 + job->common.len = bs->total_sectors*BDRV_SECTOR_SIZE;
354 diff --git a/backup.h b/backup.h
356 index 0000000..87b9942
361 + * QEMU backup related definitions
363 + * Copyright (C) Proxmox Server Solutions
366 + * Dietmar Maurer (dietmar@proxmox.com)
368 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
369 + * See the COPYING file in the top-level directory.
373 +#ifndef QEMU_BACKUP_H
374 +#define QEMU_BACKUP_H
376 +#include <uuid/uuid.h>
378 +#define BACKUP_CLUSTER_BITS 16
379 +#define BACKUP_CLUSTER_SIZE (1<<BACKUP_CLUSTER_BITS)
380 +#define BACKUP_BLOCKS_PER_CLUSTER (BACKUP_CLUSTER_SIZE/BDRV_SECTOR_SIZE)
382 +typedef int BackupDumpFunc(void *opaque, BlockDriverState *bs,
383 + int64_t cluster_num, unsigned char *buf);
385 +void backup_job_start(BlockDriverState *bs);
387 +int backup_job_create(BlockDriverState *bs, BackupDumpFunc *backup_dump_cb,
388 + BlockDriverCompletionFunc *backup_complete_cb,
391 +#endif /* QEMU_BACKUP_H */
392 diff --git a/block.c b/block.c
393 index c05875f..4de7fbd 100644
398 BDRV_REQ_COPY_ON_READ = 0x1,
399 BDRV_REQ_ZERO_WRITE = 0x2,
400 + BDRV_REQ_BACKUP_ONLY = 0x4,
403 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
404 @@ -1542,7 +1543,7 @@ int bdrv_commit(BlockDriverState *bs)
410 if (!bs->backing_hd) {
413 @@ -1679,6 +1680,22 @@ static void round_to_clusters(BlockDriverState *bs,
418 + * Round a region to job cluster boundaries
420 +static void round_to_job_clusters(BlockDriverState *bs,
421 + int64_t sector_num, int nb_sectors,
422 + int job_cluster_size,
423 + int64_t *cluster_sector_num,
424 + int *cluster_nb_sectors)
426 + int64_t c = job_cluster_size/BDRV_SECTOR_SIZE;
428 + *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
429 + *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
433 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
434 int64_t sector_num, int nb_sectors) {
436 @@ -1693,7 +1710,9 @@ static bool tracked_request_overlaps(BdrvTrackedRequest *req,
439 static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
440 - int64_t sector_num, int nb_sectors)
441 + int64_t sector_num,
443 + int job_cluster_size)
445 BdrvTrackedRequest *req;
446 int64_t cluster_sector_num;
447 @@ -1709,6 +1728,11 @@ static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
448 round_to_clusters(bs, sector_num, nb_sectors,
449 &cluster_sector_num, &cluster_nb_sectors);
451 + if (job_cluster_size) {
452 + round_to_job_clusters(bs, sector_num, nb_sectors, job_cluster_size,
453 + &cluster_sector_num, &cluster_nb_sectors);
458 QLIST_FOREACH(req, &bs->tracked_requests, list) {
459 @@ -2278,12 +2302,24 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
460 bs->copy_on_read_in_flight++;
463 - if (bs->copy_on_read_in_flight) {
464 - wait_for_overlapping_requests(bs, sector_num, nb_sectors);
465 + int job_cluster_size = bs->job && bs->job->cluster_size ?
466 + bs->job->cluster_size : 0;
468 + if (bs->copy_on_read_in_flight || job_cluster_size) {
469 + wait_for_overlapping_requests(bs, sector_num, nb_sectors,
473 tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
475 + if (bs->job && bs->job->job_type->before_read) {
476 + ret = bs->job->job_type->before_read(bs, sector_num, nb_sectors, qiov);
477 + if ((ret < 0) || (flags & BDRV_REQ_BACKUP_ONLY)) {
478 + /* Note: We do not return any data to the caller */
483 if (flags & BDRV_REQ_COPY_ON_READ) {
486 @@ -2327,6 +2363,17 @@ int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
487 BDRV_REQ_COPY_ON_READ);
490 +int coroutine_fn bdrv_co_backup(BlockDriverState *bs,
491 + int64_t sector_num, int nb_sectors)
497 + return bdrv_co_do_readv(bs, sector_num, nb_sectors, NULL,
498 + BDRV_REQ_BACKUP_ONLY);
501 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
502 int64_t sector_num, int nb_sectors)
504 @@ -2384,12 +2431,23 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
505 bdrv_io_limits_intercept(bs, true, nb_sectors);
508 - if (bs->copy_on_read_in_flight) {
509 - wait_for_overlapping_requests(bs, sector_num, nb_sectors);
510 + int job_cluster_size = bs->job && bs->job->cluster_size ?
511 + bs->job->cluster_size : 0;
513 + if (bs->copy_on_read_in_flight || job_cluster_size) {
514 + wait_for_overlapping_requests(bs, sector_num, nb_sectors,
518 tracked_request_begin(&req, bs, sector_num, nb_sectors, true);
520 + if (bs->job && bs->job->job_type->before_write) {
521 + ret = bs->job->job_type->before_write(bs, sector_num, nb_sectors, qiov);
527 if (flags & BDRV_REQ_ZERO_WRITE) {
528 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
530 @@ -2408,6 +2466,7 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
531 bs->wr_highest_sector = sector_num + nb_sectors - 1;
535 tracked_request_end(&req);
538 diff --git a/block.h b/block.h
539 index 722c620..94e5903 100644
542 @@ -172,6 +172,8 @@ int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
543 int nb_sectors, QEMUIOVector *qiov);
544 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
545 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
546 +int coroutine_fn bdrv_co_backup(BlockDriverState *bs,
547 + int64_t sector_num, int nb_sectors);
548 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
549 int nb_sectors, QEMUIOVector *qiov);
551 diff --git a/blockjob.h b/blockjob.h
552 index 3792b73..6621173 100644
555 @@ -50,6 +50,13 @@ typedef struct BlockJobType {
558 void (*complete)(BlockJob *job, Error **errp);
560 + /** tracked requests */
561 + int coroutine_fn (*before_read)(BlockDriverState *bs, int64_t sector_num,
562 + int nb_sectors, QEMUIOVector *qiov);
563 + int coroutine_fn (*before_write)(BlockDriverState *bs, int64_t sector_num,
564 + int nb_sectors, QEMUIOVector *qiov);
569 @@ -103,6 +110,9 @@ struct BlockJob {
570 /** Speed that was set with @block_job_set_speed. */
573 + /** tracked requests */
576 /** The completion function that will be called when the job completes. */
577 BlockDriverCompletionFunc *cb;