From 1d0c6dfc9616c0dd17986cab6744c10cb748de1e Mon Sep 17 00:00:00 2001
From: Dietmar Maurer <dietmar@proxmox.com>
Date: Tue, 13 Nov 2012 10:03:52 +0100
Subject: [PATCH v5 2/6] add basic backup support to block driver

Function backup_job_create() creates a block job to back up a block device.
The coroutine is started with backup_job_start().

We call backup_do_cow() for each write during backup. That function
reads the original data and passes it to backup_dump_cb().

The tracked_request infrastructure is used to serialize access.

Currently the backup cluster size is hardcoded to 65536 bytes.

Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>
---
 Makefile.objs            |    1 +
 backup.c                 |  355 ++++++++++++++++++++++++++++++++++++++++++++++
 backup.h                 |   30 ++
 block.c                  |   71 +++++++++-
 include/block/block.h    |    2 +
 include/block/blockjob.h |   10 ++
 6 files changed, 463 insertions(+), 6 deletions(-)
 create mode 100644 backup.c
 create mode 100644 backup.h

diff --git a/Makefile.objs b/Makefile.objs
index a68cdac..df64f70 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -13,6 +13,7 @@ block-obj-$(CONFIG_POSIX) += aio-posix.o
 block-obj-$(CONFIG_WIN32) += aio-win32.o
 block-obj-y += block/
 block-obj-y += qapi-types.o qapi-visit.o
+block-obj-y += backup.o

 block-obj-y += qemu-coroutine.o qemu-coroutine-lock.o qemu-coroutine-io.o
 block-obj-y += qemu-coroutine-sleep.o
diff --git a/backup.c b/backup.c
new file mode 100644
index 0000000..8955e1a
--- /dev/null
+++ b/backup.c
@@ -0,0 +1,355 @@
+/*
+ * QEMU backup
+ *
+ * Copyright (C) 2013 Proxmox Server Solutions
+ *
+ * Authors:
+ *  Dietmar Maurer (dietmar@proxmox.com)
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include "block/block.h"
+#include "block/block_int.h"
+#include "block/blockjob.h"
+#include "qemu/ratelimit.h"
+#include "backup.h"
+
+#define DEBUG_BACKUP 0
+
+#define USE_ALLOCATION_CHECK 0
+
+#define DPRINTF(fmt, ...) \
+    do { if (DEBUG_BACKUP) { printf("backup: " fmt, ## __VA_ARGS__); } } \
+    while (0)
+
+
+#define SLICE_TIME 100000000ULL /* ns */
+
+typedef struct BackupBlockJob {
+    BlockJob common;
+    RateLimit limit;
+    CoRwlock rwlock;
+    uint64_t sectors_read;
+    unsigned long *bitmap;
+    int bitmap_size;
+    BackupDumpFunc *backup_dump_cb;
+    BlockDriverCompletionFunc *backup_complete_cb;
+    void *opaque;
+} BackupBlockJob;
+
+static bool backup_get_bitmap(BackupBlockJob *job, int64_t cluster_num)
+{
+    assert(job);
+    assert(job->bitmap);
+
+    unsigned long val, idx, bit;
+
+    idx = cluster_num / BITS_PER_LONG;
+
+    assert(job->bitmap_size > idx);
+
+    bit = cluster_num % BITS_PER_LONG;
+    val = job->bitmap[idx];
+
+    return !!(val & (1UL << bit));
+}
+
+static void backup_set_bitmap(BackupBlockJob *job, int64_t cluster_num,
+                              bool dirty)
+{
+    assert(job);
+    assert(job->bitmap);
+
+    unsigned long val, idx, bit;
+
+    idx = cluster_num / BITS_PER_LONG;
+
+    assert(job->bitmap_size > idx);
+
+    bit = cluster_num % BITS_PER_LONG;
+    val = job->bitmap[idx];
+    if (dirty) {
+        val |= 1UL << bit;
+    } else {
+        val &= ~(1UL << bit);
+    }
+    job->bitmap[idx] = val;
+}
+
+static int coroutine_fn backup_do_cow(BlockDriverState *bs,
+                                      int64_t sector_num, int nb_sectors)
+{
+    assert(bs);
+    BackupBlockJob *job = (BackupBlockJob *)bs->job;
+    assert(job);
+
+    BlockDriver *drv = bs->drv;
+    struct iovec iov;
+    QEMUIOVector bounce_qiov;
+    void *bounce_buffer = NULL;
+    int ret = 0;
+
+    qemu_co_rwlock_rdlock(&job->rwlock);
+
+    int64_t start, end;
+
+    start = sector_num / BACKUP_BLOCKS_PER_CLUSTER;
+    end = (sector_num + nb_sectors + BACKUP_BLOCKS_PER_CLUSTER - 1) /
+        BACKUP_BLOCKS_PER_CLUSTER;
+
+    DPRINTF("brdv_co_backup_cow enter %s C%" PRId64 " %" PRId64 " %d\n",
+            bdrv_get_device_name(bs), start, sector_num, nb_sectors);
+
+    for (; start < end; start++) {
+        bool zero = 0;
+
+        if (backup_get_bitmap(job, start)) {
+            DPRINTF("brdv_co_backup_cow skip C%" PRId64 "\n", start);
+            continue; /* already copied */
+        }
+
+        /* immediately set bitmap (avoid coroutine race) */
+        backup_set_bitmap(job, start, 1);
+
+        DPRINTF("brdv_co_backup_cow C%" PRId64 "\n", start);
+
+        if (!bounce_buffer) {
+            iov.iov_len = BACKUP_CLUSTER_SIZE;
+            iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
+            qemu_iovec_init_external(&bounce_qiov, &iov, 1);
+        }
+
+#if USE_ALLOCATION_CHECK
+        int n = 0;
+        ret = bdrv_co_is_allocated_above(bs, NULL,
+                                         start * BACKUP_BLOCKS_PER_CLUSTER,
+                                         BACKUP_BLOCKS_PER_CLUSTER, &n);
+        if (ret < 0) {
+            DPRINTF("brdv_co_backup_cow is_allocated C%" PRId64 " failed\n",
+                    start);
+            goto out;
+        }
+
+        zero = (ret == 0) && (n == BACKUP_BLOCKS_PER_CLUSTER);
+
+        if (!zero) {
+#endif
+            ret = drv->bdrv_co_readv(bs, start * BACKUP_BLOCKS_PER_CLUSTER,
+                                     BACKUP_BLOCKS_PER_CLUSTER,
+                                     &bounce_qiov);
+            if (ret < 0) {
+                DPRINTF("brdv_co_backup_cow bdrv_read C%" PRId64 " failed\n",
+                        start);
+                goto out;
+            }
+#if USE_ALLOCATION_CHECK
+        }
+#endif
+        job->sectors_read += BACKUP_BLOCKS_PER_CLUSTER;
+
+        ret = job->backup_dump_cb(job->opaque, bs, start,
+                                  zero ? NULL : bounce_buffer);
+        if (ret < 0) {
+            DPRINTF("brdv_co_backup_cow dump_cluster_cb C%" PRId64 " failed\n",
+                    start);
+            goto out;
+        }
+
+        DPRINTF("brdv_co_backup_cow done C%" PRId64 "\n", start);
+    }
+
+out:
+    if (bounce_buffer) {
+        qemu_vfree(bounce_buffer);
+    }
+
+    qemu_co_rwlock_unlock(&job->rwlock);
+
+    return ret;
+}
+
+static int coroutine_fn backup_before_read(BlockDriverState *bs,
+                                           int64_t sector_num,
+                                           int nb_sectors, QEMUIOVector *qiov)
+{
+    return backup_do_cow(bs, sector_num, nb_sectors);
+}
+
+static int coroutine_fn backup_before_write(BlockDriverState *bs,
+                                            int64_t sector_num,
+                                            int nb_sectors, QEMUIOVector *qiov)
+{
+    return backup_do_cow(bs, sector_num, nb_sectors);
+}
+
+static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
+{
+    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+
+    if (speed < 0) {
+        error_set(errp, QERR_INVALID_PARAMETER, "speed");
+        return;
+    }
+    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
+}
+
+static BlockJobType backup_job_type = {
+    .instance_size = sizeof(BackupBlockJob),
+    .before_read = backup_before_read,
+    .before_write = backup_before_write,
+    .job_type = "backup",
+    .set_speed = backup_set_speed,
+};
+
+static void coroutine_fn backup_run(void *opaque)
+{
+    BackupBlockJob *job = opaque;
+    BlockDriverState *bs = job->common.bs;
+    assert(bs);
+
+    int64_t start, end;
+
+    start = 0;
+    end = (bs->total_sectors + BACKUP_BLOCKS_PER_CLUSTER - 1) /
+        BACKUP_BLOCKS_PER_CLUSTER;
+
+    DPRINTF("backup_run start %s %" PRId64 " %" PRId64 "\n",
+            bdrv_get_device_name(bs), start, end);
+
+    int ret = 0;
+
+    for (; start < end; start++) {
+        if (block_job_is_cancelled(&job->common)) {
+            ret = -1;
+            break;
+        }
+
+        /* we need to yield so that qemu_aio_flush() returns.
+         * (without it, the VM does not reboot)
+         * Note: use 1000 instead of 0 (0 prioritizes this task too much)
+         */
+        if (job->common.speed) {
+            uint64_t delay_ns = ratelimit_calculate_delay(
+                &job->limit, job->sectors_read);
+            job->sectors_read = 0;
+            block_job_sleep_ns(&job->common, rt_clock, delay_ns);
+        } else {
+            block_job_sleep_ns(&job->common, rt_clock, 1000);
+        }
+
+        if (block_job_is_cancelled(&job->common)) {
+            ret = -1;
+            break;
+        }
+
+        if (backup_get_bitmap(job, start)) {
+            continue; /* already copied */
+        }
+
+        DPRINTF("backup_run loop C%" PRId64 "\n", start);
+
+        /**
+         * This triggers a cluster copy
+         * Note: avoid a direct call to backup_do_cow(), because
+         * this does not call tracked_request_begin()
+         */
+        ret = bdrv_co_backup(bs, start*BACKUP_BLOCKS_PER_CLUSTER, 1);
+        if (ret < 0) {
+            break;
+        }
+        /* Publish progress */
+        job->common.offset += BACKUP_CLUSTER_SIZE;
+    }
+
+    /* wait until pending backup_do_cow() calls have completed */
+    qemu_co_rwlock_wrlock(&job->rwlock);
+    qemu_co_rwlock_unlock(&job->rwlock);
+
+    DPRINTF("backup_run complete %d\n", ret);
+    block_job_completed(&job->common, ret);
+}
+
+static void backup_job_cleanup_cb(void *opaque, int ret)
+{
+    BlockDriverState *bs = opaque;
+    assert(bs);
+    BackupBlockJob *job = (BackupBlockJob *)bs->job;
+    assert(job);
+
+    DPRINTF("backup_job_cleanup_cb start %d\n", ret);
+
+    job->backup_complete_cb(job->opaque, ret);
+
+    DPRINTF("backup_job_cleanup_cb end\n");
+
+    g_free(job->bitmap);
+}
+
+void
+backup_job_start(BlockDriverState *bs, bool cancel)
+{
+    assert(bs);
+    assert(bs->job);
+    assert(bs->job->co == NULL);
+
+    if (cancel) {
+        block_job_cancel(bs->job); /* set cancel flag */
+    }
+
+    bs->job->co = qemu_coroutine_create(backup_run);
+    qemu_coroutine_enter(bs->job->co, bs->job);
+}
+
+int
+backup_job_create(BlockDriverState *bs, BackupDumpFunc *backup_dump_cb,
+                  BlockDriverCompletionFunc *backup_complete_cb,
+                  void *opaque, int64_t speed)
+{
+    assert(bs);
+    assert(backup_dump_cb);
+    assert(backup_complete_cb);
+
+    if (bs->job) {
+        DPRINTF("bdrv_backup_init failed - running job on %s\n",
+                bdrv_get_device_name(bs));
+        return -1;
+    }
+
+    int64_t bitmap_size;
+    const char *devname = bdrv_get_device_name(bs);
+
+    if (!devname || !devname[0]) {
+        return -1;
+    }
+
+    DPRINTF("bdrv_backup_init %s\n", bdrv_get_device_name(bs));
+
+    Error *errp;
+    BackupBlockJob *job = block_job_create(&backup_job_type, bs, speed,
+                                           backup_job_cleanup_cb, bs, &errp);
+
+    qemu_co_rwlock_init(&job->rwlock);
+
+    job->common.cluster_size = BACKUP_CLUSTER_SIZE;
+
+    bitmap_size = bs->total_sectors +
+        BACKUP_BLOCKS_PER_CLUSTER * BITS_PER_LONG - 1;
+    bitmap_size /= BACKUP_BLOCKS_PER_CLUSTER * BITS_PER_LONG;
+
+    job->backup_dump_cb = backup_dump_cb;
+    job->backup_complete_cb = backup_complete_cb;
+    job->opaque = opaque;
+    job->bitmap_size = bitmap_size;
+    job->bitmap = g_new0(unsigned long, bitmap_size);
+
+    job->common.len = bs->total_sectors*BDRV_SECTOR_SIZE;
+
+    return 0;
+}
diff --git a/backup.h b/backup.h
new file mode 100644
index 0000000..9b1ea1c
--- /dev/null
+++ b/backup.h
@@ -0,0 +1,30 @@
+/*
+ * QEMU backup related definitions
+ *
+ * Copyright (C) 2013 Proxmox Server Solutions
+ *
+ * Authors:
+ *  Dietmar Maurer (dietmar@proxmox.com)
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_BACKUP_H
+#define QEMU_BACKUP_H
+
+#define BACKUP_CLUSTER_BITS 16
+#define BACKUP_CLUSTER_SIZE (1<<BACKUP_CLUSTER_BITS)
+#define BACKUP_BLOCKS_PER_CLUSTER (BACKUP_CLUSTER_SIZE/BDRV_SECTOR_SIZE)
+
+typedef int BackupDumpFunc(void *opaque, BlockDriverState *bs,
+                           int64_t cluster_num, unsigned char *buf);
+
+void backup_job_start(BlockDriverState *bs, bool cancel);
+
+int backup_job_create(BlockDriverState *bs, BackupDumpFunc *backup_dump_cb,
+                      BlockDriverCompletionFunc *backup_complete_cb,
+                      void *opaque, int64_t speed);
+
+#endif /* QEMU_BACKUP_H */
diff --git a/block.c b/block.c
index 50dab8e..6e6d08f 100644
--- a/block.c
+++ b/block.c
@@ -54,6 +54,7 @@
 typedef enum {
     BDRV_REQ_COPY_ON_READ = 0x1,
     BDRV_REQ_ZERO_WRITE   = 0x2,
+    BDRV_REQ_BACKUP_ONLY  = 0x4,
 } BdrvRequestFlags;

 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
@@ -1554,7 +1555,7 @@ int bdrv_commit(BlockDriverState *bs)

     if (!drv)
         return -ENOMEDIUM;
-
+
     if (!bs->backing_hd) {
         return -ENOTSUP;
     }
@@ -1691,6 +1692,22 @@ void bdrv_round_to_clusters(BlockDriverState *bs,
     }
 }

+/**
+ * Round a region to job cluster boundaries
+ */
+static void round_to_job_clusters(BlockDriverState *bs,
+                                  int64_t sector_num, int nb_sectors,
+                                  int job_cluster_size,
+                                  int64_t *cluster_sector_num,
+                                  int *cluster_nb_sectors)
+{
+    int64_t c = job_cluster_size/BDRV_SECTOR_SIZE;
+
+    *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
+    *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
+                                        nb_sectors, c);
+}
+
 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                      int64_t sector_num, int nb_sectors) {
     /* aaaa   bbbb */
@@ -1705,7 +1722,9 @@ static bool tracked_request_overlaps(BdrvTrackedRequest *req,
 }

 static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
-        int64_t sector_num, int nb_sectors)
+                                                        int64_t sector_num,
+                                                        int nb_sectors,
+                                                        int job_cluster_size)
 {
     BdrvTrackedRequest *req;
     int64_t cluster_sector_num;
@@ -1721,6 +1740,11 @@ static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
     bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                            &cluster_sector_num, &cluster_nb_sectors);

+    if (job_cluster_size) {
+        round_to_job_clusters(bs, sector_num, nb_sectors, job_cluster_size,
+                              &cluster_sector_num, &cluster_nb_sectors);
+    }
+
     do {
         retry = false;
         QLIST_FOREACH(req, &bs->tracked_requests, list) {
@@ -2260,12 +2284,24 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
         bs->copy_on_read_in_flight++;
     }

-    if (bs->copy_on_read_in_flight) {
-        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
+    int job_cluster_size = bs->job && bs->job->cluster_size ?
+        bs->job->cluster_size : 0;
+
+    if (bs->copy_on_read_in_flight || job_cluster_size) {
+        wait_for_overlapping_requests(bs, sector_num, nb_sectors,
+                                      job_cluster_size);
     }

     tracked_request_begin(&req, bs, sector_num, nb_sectors, false);

+    if (bs->job && bs->job->job_type->before_read) {
+        ret = bs->job->job_type->before_read(bs, sector_num, nb_sectors, qiov);
+        if ((ret < 0) || (flags & BDRV_REQ_BACKUP_ONLY)) {
+            /* Note: We do not return any data to the caller */
+            goto out;
+        }
+    }
+
     if (flags & BDRV_REQ_COPY_ON_READ) {
         int pnum;

@@ -2309,6 +2345,17 @@ int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
                             BDRV_REQ_COPY_ON_READ);
 }

+int coroutine_fn bdrv_co_backup(BlockDriverState *bs,
+                                int64_t sector_num, int nb_sectors)
+{
+    if (!bs->job) {
+        return -ENOTSUP;
+    }
+
+    return bdrv_co_do_readv(bs, sector_num, nb_sectors, NULL,
+                            BDRV_REQ_BACKUP_ONLY);
+}
+
 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
     int64_t sector_num, int nb_sectors)
 {
@@ -2366,12 +2413,23 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
         bdrv_io_limits_intercept(bs, true, nb_sectors);
     }

-    if (bs->copy_on_read_in_flight) {
-        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
+    int job_cluster_size = bs->job && bs->job->cluster_size ?
+        bs->job->cluster_size : 0;
+
+    if (bs->copy_on_read_in_flight || job_cluster_size) {
+        wait_for_overlapping_requests(bs, sector_num, nb_sectors,
+                                      job_cluster_size);
     }

     tracked_request_begin(&req, bs, sector_num, nb_sectors, true);

+    if (bs->job && bs->job->job_type->before_write) {
+        ret = bs->job->job_type->before_write(bs, sector_num, nb_sectors, qiov);
+        if (ret < 0) {
+            goto out;
+        }
+    }
+
     if (flags & BDRV_REQ_ZERO_WRITE) {
         ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
     } else {
@@ -2390,6 +2448,7 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
         bs->wr_highest_sector = sector_num + nb_sectors - 1;
     }

+out:
     tracked_request_end(&req);

     return ret;
diff --git a/include/block/block.h b/include/block/block.h
index 5c3b911..b6144be 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -172,6 +172,8 @@ int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
     int nb_sectors, QEMUIOVector *qiov);
 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+int coroutine_fn bdrv_co_backup(BlockDriverState *bs,
+    int64_t sector_num, int nb_sectors);
 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
     int nb_sectors, QEMUIOVector *qiov);
 /*
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index c290d07..6f42495 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -50,6 +50,13 @@ typedef struct BlockJobType {
      * manually.
      */
     void (*complete)(BlockJob *job, Error **errp);
+
+    /** tracked requests */
+    int coroutine_fn (*before_read)(BlockDriverState *bs, int64_t sector_num,
+                                    int nb_sectors, QEMUIOVector *qiov);
+    int coroutine_fn (*before_write)(BlockDriverState *bs, int64_t sector_num,
+                                     int nb_sectors, QEMUIOVector *qiov);
+
 } BlockJobType;

 /**
@@ -103,6 +110,9 @@ struct BlockJob {
     /** Speed that was set with @block_job_set_speed. */
     int64_t speed;

+    /** tracked requests */
+    int cluster_size;
+
     /** The completion function that will be called when the job completes. */
     BlockDriverCompletionFunc *cb;

-- 
1.7.2.5
