]> git.proxmox.com Git - pve-qemu-kvm.git/blob - debian/patches/0002-add-basic-backup-support-to-block-driver.patch
053f60a55fab3635b7285cdff245d3769ecc9aa7
[pve-qemu-kvm.git] / debian / patches / 0002-add-basic-backup-support-to-block-driver.patch
1 From 4374768a3a4d92f0ac0a77688cb7f65ff108ef83 Mon Sep 17 00:00:00 2001
2 From: Dietmar Maurer <dietmar@proxmox.com>
3 Date: Tue, 13 Nov 2012 10:03:52 +0100
4 Subject: [PATCH v3 2/6] add basic backup support to block driver
5
6 Function backup_job_create() creates a block job to backup a block device.
7 The coroutine is started with backup_job_start().
8
9 We call backup_do_cow() for each write during backup. That function
10 reads the original data and passes it to backup_dump_cb().
11
12 The tracked_request infrastructure is used to serialize access.
13
14 Currently backup cluster size is hardcoded to 65536 bytes.
15
16 Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>
17 ---
18 Makefile.objs | 1 +
19 backup.c | 334 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
20 backup.h | 32 ++++++
21 block.c | 71 +++++++++++-
22 block.h | 2 +
23 blockjob.h | 10 ++
24 6 files changed, 444 insertions(+), 6 deletions(-)
25 create mode 100644 backup.c
26 create mode 100644 backup.h
27
28 diff --git a/Makefile.objs b/Makefile.objs
29 index 3c7abca..cb46be5 100644
30 --- a/Makefile.objs
31 +++ b/Makefile.objs
32 @@ -48,6 +48,7 @@ coroutine-obj-$(CONFIG_WIN32) += coroutine-win32.o
33 block-obj-y = iov.o cache-utils.o qemu-option.o module.o async.o
34 block-obj-y += nbd.o block.o blockjob.o aes.o qemu-config.o
35 block-obj-y += thread-pool.o qemu-progress.o qemu-sockets.o uri.o notify.o
36 +block-obj-y += backup.o
37 block-obj-y += $(coroutine-obj-y) $(qobject-obj-y) $(version-obj-y)
38 block-obj-$(CONFIG_POSIX) += event_notifier-posix.o aio-posix.o
39 block-obj-$(CONFIG_WIN32) += event_notifier-win32.o aio-win32.o
40 diff --git a/backup.c b/backup.c
41 new file mode 100644
42 index 0000000..af511c7
43 --- /dev/null
44 +++ b/backup.c
45 @@ -0,0 +1,334 @@
46 +/*
47 + * QEMU backup
48 + *
49 + * Copyright (C) 2012 Proxmox Server Solutions
50 + *
51 + * Authors:
52 + * Dietmar Maurer (dietmar@proxmox.com)
53 + *
54 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
55 + * See the COPYING file in the top-level directory.
56 + *
57 + */
58 +
59 +#include <stdio.h>
60 +#include <errno.h>
61 +#include <unistd.h>
62 +
63 +#include "block.h"
64 +#include "block_int.h"
65 +#include "blockjob.h"
66 +#include "qemu/ratelimit.h"
67 +#include "backup.h"
68 +
69 +#define DEBUG_BACKUP 0
70 +
71 +#define DPRINTF(fmt, ...) \
72 + do { if (DEBUG_BACKUP) { printf("backup: " fmt, ## __VA_ARGS__); } } \
73 + while (0)
74 +
75 +
76 +#define BITS_PER_LONG (sizeof(unsigned long) * 8)
77 +#define SLICE_TIME 100000000ULL /* ns */
78 +
79 +typedef struct BackupBlockJob {
80 + BlockJob common;
81 + RateLimit limit;
82 + uint64_t sectors_read;
83 + unsigned long *bitmap;
84 + int bitmap_size;
85 + BackupDumpFunc *backup_dump_cb;
86 + BlockDriverCompletionFunc *backup_complete_cb;
87 + void *opaque;
88 +} BackupBlockJob;
89 +
90 +static int backup_get_bitmap(BackupBlockJob *job, int64_t cluster_num)
91 +{
92 + assert(job);
93 + assert(job->bitmap);
94 +
95 + unsigned long val, idx, bit;
96 +
97 + idx = cluster_num / BITS_PER_LONG;
98 +
99 + assert(job->bitmap_size > idx);
100 +
101 + bit = cluster_num % BITS_PER_LONG;
102 + val = job->bitmap[idx];
103 +
104 + return !!(val & (1UL << bit));
105 +}
106 +
107 +static void backup_set_bitmap(BackupBlockJob *job, int64_t cluster_num,
108 + int dirty)
109 +{
110 + assert(job);
111 + assert(job->bitmap);
112 +
113 + unsigned long val, idx, bit;
114 +
115 + idx = cluster_num / BITS_PER_LONG;
116 +
117 + assert(job->bitmap_size > idx);
118 +
119 + bit = cluster_num % BITS_PER_LONG;
120 + val = job->bitmap[idx];
121 + if (dirty) {
122 + if (!(val & (1UL << bit))) {
123 + val |= 1UL << bit;
124 + }
125 + } else {
126 + if (val & (1UL << bit)) {
127 + val &= ~(1UL << bit);
128 + }
129 + }
130 + job->bitmap[idx] = val;
131 +}
132 +
133 +static int backup_in_progress_count;
134 +
135 +static int coroutine_fn backup_do_cow(BlockDriverState *bs,
136 + int64_t sector_num, int nb_sectors)
137 +{
138 + assert(bs);
139 + BackupBlockJob *job = (BackupBlockJob *)bs->job;
140 + assert(job);
141 +
142 + BlockDriver *drv = bs->drv;
143 + struct iovec iov;
144 + QEMUIOVector bounce_qiov;
145 + void *bounce_buffer = NULL;
146 + int ret = 0;
147 +
148 + backup_in_progress_count++;
149 +
150 + int64_t start, end;
151 +
152 + start = sector_num / BACKUP_BLOCKS_PER_CLUSTER;
153 + end = (sector_num + nb_sectors + BACKUP_BLOCKS_PER_CLUSTER - 1) /
154 + BACKUP_BLOCKS_PER_CLUSTER;
155 +
156 + DPRINTF("brdv_co_backup_cow enter %s C%zd %zd %d\n",
157 + bdrv_get_device_name(bs), start, sector_num, nb_sectors);
158 +
159 + for (; start < end; start++) {
160 + if (backup_get_bitmap(job, start)) {
161 + DPRINTF("brdv_co_backup_cow skip C%zd\n", start);
162 + continue; /* already copied */
163 + }
164 +
165 + /* immediately set bitmap (avoid coroutine race) */
166 + backup_set_bitmap(job, start, 1);
167 +
168 + DPRINTF("brdv_co_backup_cow C%zd\n", start);
169 +
170 + if (!bounce_buffer) {
171 + iov.iov_len = BACKUP_CLUSTER_SIZE;
172 + iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
173 + qemu_iovec_init_external(&bounce_qiov, &iov, 1);
174 + }
175 +
176 + ret = drv->bdrv_co_readv(bs, start * BACKUP_BLOCKS_PER_CLUSTER,
177 + BACKUP_BLOCKS_PER_CLUSTER,
178 + &bounce_qiov);
179 +
180 + job->sectors_read += BACKUP_BLOCKS_PER_CLUSTER;
181 +
182 + if (ret < 0) {
183 + DPRINTF("brdv_co_backup_cow bdrv_read C%zd failed\n", start);
184 + goto out;
185 + }
186 +
187 + ret = job->backup_dump_cb(job->opaque, bs, start, bounce_buffer);
188 + if (ret < 0) {
189 + DPRINTF("brdv_co_backup_cow dump_cluster_cb C%zd failed\n", start);
190 + goto out;
191 + }
192 +
193 + DPRINTF("brdv_co_backup_cow done C%zd\n", start);
194 + }
195 +
196 +out:
197 + if (bounce_buffer) {
198 + qemu_vfree(bounce_buffer);
199 + }
200 +
201 + backup_in_progress_count--;
202 +
203 + return ret;
204 +}
205 +
206 +static int coroutine_fn backup_before_read(BlockDriverState *bs,
207 + int64_t sector_num,
208 + int nb_sectors, QEMUIOVector *qiov)
209 +{
210 + return backup_do_cow(bs, sector_num, nb_sectors);
211 +}
212 +
213 +static int coroutine_fn backup_before_write(BlockDriverState *bs,
214 + int64_t sector_num,
215 + int nb_sectors, QEMUIOVector *qiov)
216 +{
217 + return backup_do_cow(bs, sector_num, nb_sectors);
218 +}
219 +
220 +static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
221 +{
222 + BackupBlockJob *s = container_of(job, BackupBlockJob, common);
223 +
224 + if (speed < 0) {
225 + error_set(errp, QERR_INVALID_PARAMETER, "speed");
226 + return;
227 + }
228 + ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
229 +}
230 +
231 +static BlockJobType backup_job_type = {
232 + .instance_size = sizeof(BackupBlockJob),
233 + .before_read = backup_before_read,
234 + .before_write = backup_before_write,
235 + .job_type = "backup",
236 + .set_speed = backup_set_speed,
237 +};
238 +
239 +static void coroutine_fn backup_run(void *opaque)
240 +{
241 + BackupBlockJob *job = opaque;
242 + BlockDriverState *bs = job->common.bs;
243 + assert(bs);
244 +
245 + int64_t start, end;
246 +
247 + start = 0;
248 + end = (bs->total_sectors + BACKUP_BLOCKS_PER_CLUSTER - 1) /
249 + BACKUP_BLOCKS_PER_CLUSTER;
250 +
251 + DPRINTF("backup_run start %s %zd %zd\n", bdrv_get_device_name(bs),
252 + start, end);
253 +
254 + int ret = 0;
255 +
256 + for (; start < end; start++) {
257 + if (block_job_is_cancelled(&job->common)) {
258 + ret = -1;
259 + break;
260 + }
261 +
262 + /* we need to yield so that qemu_aio_flush() returns.
263 + * (without, VM does not reboot)
264 + * Note: use 1000 instead of 0 (0 prioritizes this task too much)
265 + */
266 + if (job->common.speed) {
267 + uint64_t delay_ns = ratelimit_calculate_delay(
268 + &job->limit, job->sectors_read);
269 + job->sectors_read = 0;
270 + block_job_sleep_ns(&job->common, rt_clock, delay_ns);
271 + } else {
272 + block_job_sleep_ns(&job->common, rt_clock, 1000);
273 + }
274 +
275 + if (block_job_is_cancelled(&job->common)) {
276 + ret = -1;
277 + break;
278 + }
279 +
280 + if (backup_get_bitmap(job, start)) {
281 + continue; /* already copied */
282 + }
283 +
284 + DPRINTF("backup_run loop C%zd\n", start);
285 +
286 + /**
287 + * This triggers a cluster copy
288 + * Note: avoid direct call to brdv_co_backup_cow, because
289 + * this does not call tracked_request_begin()
290 + */
291 + ret = bdrv_co_backup(bs, start*BACKUP_BLOCKS_PER_CLUSTER, 1);
292 + if (ret < 0) {
293 + break;
294 + }
295 + /* Publish progress */
296 + job->common.offset += BACKUP_CLUSTER_SIZE;
297 + }
298 +
299 + while (backup_in_progress_count > 0) {
300 + DPRINTF("backup_run backup_in_progress_count != 0 (%d)",
301 + backup_in_progress_count);
302 + co_sleep_ns(rt_clock, 10000);
303 + }
304 +
305 + DPRINTF("backup_run complete %d\n", ret);
306 + block_job_completed(&job->common, ret);
307 +}
308 +
309 +static void backup_job_cleanup_cb(void *opaque, int ret)
310 +{
311 + BlockDriverState *bs = opaque;
312 + assert(bs);
313 + BackupBlockJob *job = (BackupBlockJob *)bs->job;
314 + assert(job);
315 +
316 + DPRINTF("backup_job_cleanup_cb start %d\n", ret);
317 +
318 + job->backup_complete_cb(job->opaque, ret);
319 +
320 + DPRINTF("backup_job_cleanup_cb end\n");
321 +
322 + g_free(job->bitmap);
323 +}
324 +
325 +void
326 +backup_job_start(BlockDriverState *bs)
327 +{
328 + assert(bs);
329 + assert(bs->job);
330 + assert(bs->job->co == NULL);
331 +
332 + bs->job->co = qemu_coroutine_create(backup_run);
333 + qemu_coroutine_enter(bs->job->co, bs->job);
334 +}
335 +
336 +int
337 +backup_job_create(BlockDriverState *bs, BackupDumpFunc *backup_dump_cb,
338 + BlockDriverCompletionFunc *backup_complete_cb,
339 + void *opaque, int64_t speed)
340 +{
341 + assert(bs);
342 + assert(backup_dump_cb);
343 + assert(backup_complete_cb);
344 +
345 + if (bs->job) {
346 + DPRINTF("bdrv_backup_init failed - running job on %s\n",
347 + bdrv_get_device_name(bs));
348 + return -1;
349 + }
350 +
351 + int64_t bitmap_size;
352 + const char *devname = bdrv_get_device_name(bs);
353 +
354 + if (!devname || !devname[0]) {
355 + return -1;
356 + }
357 +
358 + DPRINTF("bdrv_backup_init %s\n", bdrv_get_device_name(bs));
359 +
360 + Error *errp;
361 + BackupBlockJob *job = block_job_create(&backup_job_type, bs, speed,
362 + backup_job_cleanup_cb, bs, &errp);
363 +
364 + job->common.cluster_size = BACKUP_CLUSTER_SIZE;
365 +
366 + bitmap_size = bs->total_sectors +
367 + BACKUP_BLOCKS_PER_CLUSTER * BITS_PER_LONG - 1;
368 + bitmap_size /= BACKUP_BLOCKS_PER_CLUSTER * BITS_PER_LONG;
369 +
370 + job->backup_dump_cb = backup_dump_cb;
371 + job->backup_complete_cb = backup_complete_cb;
372 + job->opaque = opaque;
373 + job->bitmap_size = bitmap_size;
374 + job->bitmap = g_new0(unsigned long, bitmap_size);
375 +
376 + job->common.len = bs->total_sectors*BDRV_SECTOR_SIZE;
377 +
378 + return 0;
379 +}
380 diff --git a/backup.h b/backup.h
381 new file mode 100644
382 index 0000000..a5f85e6
383 --- /dev/null
384 +++ b/backup.h
385 @@ -0,0 +1,32 @@
386 +/*
387 + * QEMU backup related definitions
388 + *
389 + * Copyright (C) Proxmox Server Solutions
390 + *
391 + * Authors:
392 + * Dietmar Maurer (dietmar@proxmox.com)
393 + *
394 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
395 + * See the COPYING file in the top-level directory.
396 + *
397 + */
398 +
399 +#ifndef QEMU_BACKUP_H
400 +#define QEMU_BACKUP_H
401 +
402 +#include <uuid/uuid.h>
403 +
404 +#define BACKUP_CLUSTER_BITS 16
405 +#define BACKUP_CLUSTER_SIZE (1<<BACKUP_CLUSTER_BITS)
406 +#define BACKUP_BLOCKS_PER_CLUSTER (BACKUP_CLUSTER_SIZE/BDRV_SECTOR_SIZE)
407 +
408 +typedef int BackupDumpFunc(void *opaque, BlockDriverState *bs,
409 + int64_t cluster_num, unsigned char *buf);
410 +
411 +void backup_job_start(BlockDriverState *bs);
412 +
413 +int backup_job_create(BlockDriverState *bs, BackupDumpFunc *backup_dump_cb,
414 + BlockDriverCompletionFunc *backup_complete_cb,
415 + void *opaque, int64_t speed);
416 +
417 +#endif /* QEMU_BACKUP_H */
418 diff --git a/block.c b/block.c
419 index c05875f..4de7fbd 100644
420 --- a/block.c
421 +++ b/block.c
422 @@ -54,6 +54,7 @@
423 typedef enum {
424 BDRV_REQ_COPY_ON_READ = 0x1,
425 BDRV_REQ_ZERO_WRITE = 0x2,
426 + BDRV_REQ_BACKUP_ONLY = 0x4,
427 } BdrvRequestFlags;
428
429 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
430 @@ -1542,7 +1543,7 @@ int bdrv_commit(BlockDriverState *bs)
431
432 if (!drv)
433 return -ENOMEDIUM;
434 -
435 +
436 if (!bs->backing_hd) {
437 return -ENOTSUP;
438 }
439 @@ -1679,6 +1680,22 @@ static void round_to_clusters(BlockDriverState *bs,
440 }
441 }
442
443 +/**
444 + * Round a region to job cluster boundaries
445 + */
446 +static void round_to_job_clusters(BlockDriverState *bs,
447 + int64_t sector_num, int nb_sectors,
448 + int job_cluster_size,
449 + int64_t *cluster_sector_num,
450 + int *cluster_nb_sectors)
451 +{
452 + int64_t c = job_cluster_size/BDRV_SECTOR_SIZE;
453 +
454 + *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
455 + *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
456 + nb_sectors, c);
457 +}
458 +
459 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
460 int64_t sector_num, int nb_sectors) {
461 /* aaaa bbbb */
462 @@ -1693,7 +1710,9 @@ static bool tracked_request_overlaps(BdrvTrackedRequest *req,
463 }
464
465 static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
466 - int64_t sector_num, int nb_sectors)
467 + int64_t sector_num,
468 + int nb_sectors,
469 + int job_cluster_size)
470 {
471 BdrvTrackedRequest *req;
472 int64_t cluster_sector_num;
473 @@ -1709,6 +1728,11 @@ static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
474 round_to_clusters(bs, sector_num, nb_sectors,
475 &cluster_sector_num, &cluster_nb_sectors);
476
477 + if (job_cluster_size) {
478 + round_to_job_clusters(bs, sector_num, nb_sectors, job_cluster_size,
479 + &cluster_sector_num, &cluster_nb_sectors);
480 + }
481 +
482 do {
483 retry = false;
484 QLIST_FOREACH(req, &bs->tracked_requests, list) {
485 @@ -2278,12 +2302,24 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
486 bs->copy_on_read_in_flight++;
487 }
488
489 - if (bs->copy_on_read_in_flight) {
490 - wait_for_overlapping_requests(bs, sector_num, nb_sectors);
491 + int job_cluster_size = bs->job && bs->job->cluster_size ?
492 + bs->job->cluster_size : 0;
493 +
494 + if (bs->copy_on_read_in_flight || job_cluster_size) {
495 + wait_for_overlapping_requests(bs, sector_num, nb_sectors,
496 + job_cluster_size);
497 }
498
499 tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
500
501 + if (bs->job && bs->job->job_type->before_read) {
502 + ret = bs->job->job_type->before_read(bs, sector_num, nb_sectors, qiov);
503 + if ((ret < 0) || (flags & BDRV_REQ_BACKUP_ONLY)) {
504 + /* Note: We do not return any data to the caller */
505 + goto out;
506 + }
507 + }
508 +
509 if (flags & BDRV_REQ_COPY_ON_READ) {
510 int pnum;
511
512 @@ -2327,6 +2363,17 @@ int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
513 BDRV_REQ_COPY_ON_READ);
514 }
515
516 +int coroutine_fn bdrv_co_backup(BlockDriverState *bs,
517 + int64_t sector_num, int nb_sectors)
518 +{
519 + if (!bs->job) {
520 + return -ENOTSUP;
521 + }
522 +
523 + return bdrv_co_do_readv(bs, sector_num, nb_sectors, NULL,
524 + BDRV_REQ_BACKUP_ONLY);
525 +}
526 +
527 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
528 int64_t sector_num, int nb_sectors)
529 {
530 @@ -2384,12 +2431,23 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
531 bdrv_io_limits_intercept(bs, true, nb_sectors);
532 }
533
534 - if (bs->copy_on_read_in_flight) {
535 - wait_for_overlapping_requests(bs, sector_num, nb_sectors);
536 + int job_cluster_size = bs->job && bs->job->cluster_size ?
537 + bs->job->cluster_size : 0;
538 +
539 + if (bs->copy_on_read_in_flight || job_cluster_size) {
540 + wait_for_overlapping_requests(bs, sector_num, nb_sectors,
541 + job_cluster_size);
542 }
543
544 tracked_request_begin(&req, bs, sector_num, nb_sectors, true);
545
546 + if (bs->job && bs->job->job_type->before_write) {
547 + ret = bs->job->job_type->before_write(bs, sector_num, nb_sectors, qiov);
548 + if (ret < 0) {
549 + goto out;
550 + }
551 + }
552 +
553 if (flags & BDRV_REQ_ZERO_WRITE) {
554 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
555 } else {
556 @@ -2408,6 +2466,7 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
557 bs->wr_highest_sector = sector_num + nb_sectors - 1;
558 }
559
560 +out:
561 tracked_request_end(&req);
562
563 return ret;
564 diff --git a/block.h b/block.h
565 index 722c620..94e5903 100644
566 --- a/block.h
567 +++ b/block.h
568 @@ -172,6 +172,8 @@ int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
569 int nb_sectors, QEMUIOVector *qiov);
570 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
571 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
572 +int coroutine_fn bdrv_co_backup(BlockDriverState *bs,
573 + int64_t sector_num, int nb_sectors);
574 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
575 int nb_sectors, QEMUIOVector *qiov);
576 /*
577 diff --git a/blockjob.h b/blockjob.h
578 index 3792b73..6621173 100644
579 --- a/blockjob.h
580 +++ b/blockjob.h
581 @@ -50,6 +50,13 @@ typedef struct BlockJobType {
582 * manually.
583 */
584 void (*complete)(BlockJob *job, Error **errp);
585 +
586 + /** tracked requests */
587 + int coroutine_fn (*before_read)(BlockDriverState *bs, int64_t sector_num,
588 + int nb_sectors, QEMUIOVector *qiov);
589 + int coroutine_fn (*before_write)(BlockDriverState *bs, int64_t sector_num,
590 + int nb_sectors, QEMUIOVector *qiov);
591 +
592 } BlockJobType;
593
594 /**
595 @@ -103,6 +110,9 @@ struct BlockJob {
596 /** Speed that was set with @block_job_set_speed. */
597 int64_t speed;
598
599 + /** tracked requests */
600 + int cluster_size;
601 +
602 /** The completion function that will be called when the job completes. */
603 BlockDriverCompletionFunc *cb;
604
605 --
606 1.7.2.5
607