From 940afda26b17f3d5776e4809e6dfce5cee44c102 Mon Sep 17 00:00:00 2001
From: Dietmar Maurer <dietmar@proxmox.com>
Date: Tue, 13 Nov 2012 10:03:52 +0100
Subject: [PATCH v4 2/6] add basic backup support to block driver

Function backup_job_create() creates a block job to back up a block device.
The coroutine is started with backup_job_start().

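As an illustration only (not part of this patch): a caller is expected to wire
the two entry points together roughly as sketched below. The names my_dump_cb,
my_complete_cb and my_target are made-up placeholders; bs and speed come from
the caller's context.

    /* hypothetical caller sketch -- callback names are not from this patch */
    static int my_dump_cb(void *opaque, BlockDriverState *bs,
                          int64_t cluster_num, unsigned char *buf)
    {
        /* buf holds one BACKUP_CLUSTER_SIZE cluster; write it to the
         * backup target referenced by opaque, return < 0 on error */
        return 0;
    }

    static void my_complete_cb(void *opaque, int ret)
    {
        /* invoked through backup_job_cleanup_cb() when the job finishes */
    }

    if (backup_job_create(bs, my_dump_cb, my_complete_cb, my_target, speed) == 0) {
        backup_job_start(bs, false); /* false: do not start in cancelled state */
    }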

We call backup_do_cow() for each write during backup. That function
reads the original data and passes it to backup_dump_cb().

The tracked_request infrastructure is used to serialize access.

Currently the backup cluster size is hardcoded to 65536 bytes.

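For reference (these are the definitions added in backup.h below, with the
arithmetic spelled out; BDRV_SECTOR_SIZE is 512 bytes), the hardcoded size
implies:

    #define BACKUP_CLUSTER_BITS 16
    #define BACKUP_CLUSTER_SIZE (1 << BACKUP_CLUSTER_BITS)     /* 65536 bytes */
    #define BACKUP_BLOCKS_PER_CLUSTER \
        (BACKUP_CLUSTER_SIZE / BDRV_SECTOR_SIZE)                /* 65536 / 512 = 128 */

so each copy-on-write operation covers one cluster of 128 sectors.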

Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>
---
 Makefile.objs            |    1 +
 backup.c                 |  338 ++++++++++++++++++++++++++++++++++++++++++++++
 backup.h                 |   32 +++++
 block.c                  |   71 +++++++++-
 include/block/block.h    |    2 +
 include/block/blockjob.h |   10 ++
 6 files changed, 448 insertions(+), 6 deletions(-)
 create mode 100644 backup.c
 create mode 100644 backup.h

diff --git a/Makefile.objs b/Makefile.objs
index a68cdac..df64f70 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -13,6 +13,7 @@ block-obj-$(CONFIG_POSIX) += aio-posix.o
 block-obj-$(CONFIG_WIN32) += aio-win32.o
 block-obj-y += block/
 block-obj-y += qapi-types.o qapi-visit.o
+block-obj-y += backup.o
 
 block-obj-y += qemu-coroutine.o qemu-coroutine-lock.o qemu-coroutine-io.o
 block-obj-y += qemu-coroutine-sleep.o
diff --git a/backup.c b/backup.c
new file mode 100644
index 0000000..c9576d5
--- /dev/null
+++ b/backup.c
@@ -0,0 +1,338 @@
+/*
+ * QEMU backup
+ *
+ * Copyright (C) 2013 Proxmox Server Solutions
+ *
+ * Authors:
+ *  Dietmar Maurer (dietmar@proxmox.com)
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include "block/block.h"
+#include "block/block_int.h"
+#include "block/blockjob.h"
+#include "qemu/ratelimit.h"
+#include "backup.h"
+
+#define DEBUG_BACKUP 0
+
+#define DPRINTF(fmt, ...) \
+    do { if (DEBUG_BACKUP) { printf("backup: " fmt, ## __VA_ARGS__); } } \
+    while (0)
+
+
+#define SLICE_TIME 100000000ULL /* ns */
+
+typedef struct BackupBlockJob {
+    BlockJob common;
+    RateLimit limit;
+    uint64_t sectors_read;
+    unsigned long *bitmap;
+    int bitmap_size;
+    BackupDumpFunc *backup_dump_cb;
+    BlockDriverCompletionFunc *backup_complete_cb;
+    void *opaque;
+} BackupBlockJob;
+
+static int backup_get_bitmap(BackupBlockJob *job, int64_t cluster_num)
+{
+    assert(job);
+    assert(job->bitmap);
+
+    unsigned long val, idx, bit;
+
+    idx = cluster_num / BITS_PER_LONG;
+
+    assert(job->bitmap_size > idx);
+
+    bit = cluster_num % BITS_PER_LONG;
+    val = job->bitmap[idx];
+
+    return !!(val & (1UL << bit));
+}
+
+static void backup_set_bitmap(BackupBlockJob *job, int64_t cluster_num,
+                              int dirty)
+{
+    assert(job);
+    assert(job->bitmap);
+
+    unsigned long val, idx, bit;
+
+    idx = cluster_num / BITS_PER_LONG;
+
+    assert(job->bitmap_size > idx);
+
+    bit = cluster_num % BITS_PER_LONG;
+    val = job->bitmap[idx];
+    if (dirty) {
+        if (!(val & (1UL << bit))) {
+            val |= 1UL << bit;
+        }
+    } else {
+        if (val & (1UL << bit)) {
+            val &= ~(1UL << bit);
+        }
+    }
+    job->bitmap[idx] = val;
+}
+
+static int backup_in_progress_count;
+
+static int coroutine_fn backup_do_cow(BlockDriverState *bs,
+                                      int64_t sector_num, int nb_sectors)
+{
+    assert(bs);
+    BackupBlockJob *job = (BackupBlockJob *)bs->job;
+    assert(job);
+
+    BlockDriver *drv = bs->drv;
+    struct iovec iov;
+    QEMUIOVector bounce_qiov;
+    void *bounce_buffer = NULL;
+    int ret = 0;
+
+    backup_in_progress_count++;
+
+    int64_t start, end;
+
+    start = sector_num / BACKUP_BLOCKS_PER_CLUSTER;
+    end = (sector_num + nb_sectors + BACKUP_BLOCKS_PER_CLUSTER - 1) /
+        BACKUP_BLOCKS_PER_CLUSTER;
+
+    DPRINTF("brdv_co_backup_cow enter %s C%zd %zd %d\n",
+            bdrv_get_device_name(bs), start, sector_num, nb_sectors);
+
+    for (; start < end; start++) {
+        if (backup_get_bitmap(job, start)) {
+            DPRINTF("brdv_co_backup_cow skip C%zd\n", start);
+            continue; /* already copied */
+        }
+
+        /* immediately set bitmap (avoid coroutine race) */
+        backup_set_bitmap(job, start, 1);
+
+        DPRINTF("brdv_co_backup_cow C%zd\n", start);
+
+        if (!bounce_buffer) {
+            iov.iov_len = BACKUP_CLUSTER_SIZE;
+            iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
+            qemu_iovec_init_external(&bounce_qiov, &iov, 1);
+        }
+
+        ret = drv->bdrv_co_readv(bs, start * BACKUP_BLOCKS_PER_CLUSTER,
+                                 BACKUP_BLOCKS_PER_CLUSTER,
+                                 &bounce_qiov);
+
+        job->sectors_read += BACKUP_BLOCKS_PER_CLUSTER;
+
+        if (ret < 0) {
+            DPRINTF("brdv_co_backup_cow bdrv_read C%zd failed\n", start);
+            goto out;
+        }
+
+        ret = job->backup_dump_cb(job->opaque, bs, start, bounce_buffer);
+        if (ret < 0) {
+            DPRINTF("brdv_co_backup_cow dump_cluster_cb C%zd failed\n", start);
+            goto out;
+        }
+
+        DPRINTF("brdv_co_backup_cow done C%zd\n", start);
+    }
+
+out:
+    if (bounce_buffer) {
+        qemu_vfree(bounce_buffer);
+    }
+
+    backup_in_progress_count--;
+
+    return ret;
+}
+
+static int coroutine_fn backup_before_read(BlockDriverState *bs,
+                                           int64_t sector_num,
+                                           int nb_sectors, QEMUIOVector *qiov)
+{
+    return backup_do_cow(bs, sector_num, nb_sectors);
+}
+
+static int coroutine_fn backup_before_write(BlockDriverState *bs,
+                                            int64_t sector_num,
+                                            int nb_sectors, QEMUIOVector *qiov)
+{
+    return backup_do_cow(bs, sector_num, nb_sectors);
+}
+
+static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
+{
+    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+
+    if (speed < 0) {
+        error_set(errp, QERR_INVALID_PARAMETER, "speed");
+        return;
+    }
+    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
+}
+
+static BlockJobType backup_job_type = {
+    .instance_size = sizeof(BackupBlockJob),
+    .before_read = backup_before_read,
+    .before_write = backup_before_write,
+    .job_type = "backup",
+    .set_speed = backup_set_speed,
+};
+
+static void coroutine_fn backup_run(void *opaque)
+{
+    BackupBlockJob *job = opaque;
+    BlockDriverState *bs = job->common.bs;
+    assert(bs);
+
+    int64_t start, end;
+
+    start = 0;
+    end = (bs->total_sectors + BACKUP_BLOCKS_PER_CLUSTER - 1) /
+        BACKUP_BLOCKS_PER_CLUSTER;
+
+    DPRINTF("backup_run start %s %zd %zd\n", bdrv_get_device_name(bs),
+            start, end);
+
+    int ret = 0;
+
+    for (; start < end; start++) {
+        if (block_job_is_cancelled(&job->common)) {
+            ret = -1;
+            break;
+        }
+
+        /* we need to yield so that qemu_aio_flush() returns.
+         * (without, VM does not reboot)
+         * Note: use 1000 instead of 0 (0 prioritize this task too much)
+         */
+        if (job->common.speed) {
+            uint64_t delay_ns = ratelimit_calculate_delay(
+                &job->limit, job->sectors_read);
+            job->sectors_read = 0;
+            block_job_sleep_ns(&job->common, rt_clock, delay_ns);
+        } else {
+            block_job_sleep_ns(&job->common, rt_clock, 1000);
+        }
+
+        if (block_job_is_cancelled(&job->common)) {
+            ret = -1;
+            break;
+        }
+
+        if (backup_get_bitmap(job, start)) {
+            continue; /* already copied */
+        }
+
+        DPRINTF("backup_run loop C%zd\n", start);
+
+        /**
+         * This triggers a cluster copy
+         * Note: avoid direct call to brdv_co_backup_cow, because
+         * this does not call tracked_request_begin()
+         */
+        ret = bdrv_co_backup(bs, start*BACKUP_BLOCKS_PER_CLUSTER, 1);
+        if (ret < 0) {
+            break;
+        }
+        /* Publish progress */
+        job->common.offset += BACKUP_CLUSTER_SIZE;
+    }
+
+    while (backup_in_progress_count > 0) {
+        DPRINTF("backup_run backup_in_progress_count != 0 (%d)",
+                backup_in_progress_count);
+        block_job_sleep_ns(&job->common, rt_clock, 10000);
+
+    }
+
+    DPRINTF("backup_run complete %d\n", ret);
+    block_job_completed(&job->common, ret);
+}
+
+static void backup_job_cleanup_cb(void *opaque, int ret)
+{
+    BlockDriverState *bs = opaque;
+    assert(bs);
+    BackupBlockJob *job = (BackupBlockJob *)bs->job;
+    assert(job);
+
+    DPRINTF("backup_job_cleanup_cb start %d\n", ret);
+
+    job->backup_complete_cb(job->opaque, ret);
+
+    DPRINTF("backup_job_cleanup_cb end\n");
+
+    g_free(job->bitmap);
+}
+
+void
+backup_job_start(BlockDriverState *bs, bool cancel)
+{
+    assert(bs);
+    assert(bs->job);
+    assert(bs->job->co == NULL);
+
+    if (cancel) {
+        block_job_cancel(bs->job); /* set cancel flag */
+    }
+
+    bs->job->co = qemu_coroutine_create(backup_run);
+    qemu_coroutine_enter(bs->job->co, bs->job);
+}
+
+int
+backup_job_create(BlockDriverState *bs, BackupDumpFunc *backup_dump_cb,
+                  BlockDriverCompletionFunc *backup_complete_cb,
+                  void *opaque, int64_t speed)
+{
+    assert(bs);
+    assert(backup_dump_cb);
+    assert(backup_complete_cb);
+
+    if (bs->job) {
+        DPRINTF("bdrv_backup_init failed - running job on %s\n",
+                bdrv_get_device_name(bs));
+        return -1;
+    }
+
+    int64_t bitmap_size;
+    const char *devname = bdrv_get_device_name(bs);
+
+    if (!devname || !devname[0]) {
+        return -1;
+    }
+
+    DPRINTF("bdrv_backup_init %s\n", bdrv_get_device_name(bs));
+
+    Error *errp;
+    BackupBlockJob *job = block_job_create(&backup_job_type, bs, speed,
+                                           backup_job_cleanup_cb, bs, &errp);
+
+    job->common.cluster_size = BACKUP_CLUSTER_SIZE;
+
+    bitmap_size = bs->total_sectors +
+        BACKUP_BLOCKS_PER_CLUSTER * BITS_PER_LONG - 1;
+    bitmap_size /= BACKUP_BLOCKS_PER_CLUSTER * BITS_PER_LONG;
+
+    job->backup_dump_cb = backup_dump_cb;
+    job->backup_complete_cb = backup_complete_cb;
+    job->opaque = opaque;
+    job->bitmap_size = bitmap_size;
+    job->bitmap = g_new0(unsigned long, bitmap_size);
+
+    job->common.len = bs->total_sectors*BDRV_SECTOR_SIZE;
+
+    return 0;
+}
diff --git a/backup.h b/backup.h
new file mode 100644
index 0000000..d9395bc
--- /dev/null
+++ b/backup.h
@@ -0,0 +1,32 @@
+/*
+ * QEMU backup related definitions
+ *
+ * Copyright (C) 2013 Proxmox Server Solutions
+ *
+ * Authors:
+ *  Dietmar Maurer (dietmar@proxmox.com)
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_BACKUP_H
+#define QEMU_BACKUP_H
+
+#include <uuid/uuid.h>
+
+#define BACKUP_CLUSTER_BITS 16
+#define BACKUP_CLUSTER_SIZE (1<<BACKUP_CLUSTER_BITS)
+#define BACKUP_BLOCKS_PER_CLUSTER (BACKUP_CLUSTER_SIZE/BDRV_SECTOR_SIZE)
+
+typedef int BackupDumpFunc(void *opaque, BlockDriverState *bs,
+                           int64_t cluster_num, unsigned char *buf);
+
+void backup_job_start(BlockDriverState *bs, bool cancel);
+
+int backup_job_create(BlockDriverState *bs, BackupDumpFunc *backup_dump_cb,
+                      BlockDriverCompletionFunc *backup_complete_cb,
+                      void *opaque, int64_t speed);
+
+#endif /* QEMU_BACKUP_H */
diff --git a/block.c b/block.c
index 50dab8e..6e6d08f 100644
--- a/block.c
+++ b/block.c
@@ -54,6 +54,7 @@
 typedef enum {
     BDRV_REQ_COPY_ON_READ = 0x1,
     BDRV_REQ_ZERO_WRITE = 0x2,
+    BDRV_REQ_BACKUP_ONLY = 0x4,
 } BdrvRequestFlags;
 
 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
@@ -1554,7 +1555,7 @@ int bdrv_commit(BlockDriverState *bs)
 
     if (!drv)
         return -ENOMEDIUM;
-
+
     if (!bs->backing_hd) {
         return -ENOTSUP;
     }
@@ -1691,6 +1692,22 @@ void bdrv_round_to_clusters(BlockDriverState *bs,
     }
 }
 
+/**
+ * Round a region to job cluster boundaries
+ */
+static void round_to_job_clusters(BlockDriverState *bs,
+                                  int64_t sector_num, int nb_sectors,
+                                  int job_cluster_size,
+                                  int64_t *cluster_sector_num,
+                                  int *cluster_nb_sectors)
+{
+    int64_t c = job_cluster_size/BDRV_SECTOR_SIZE;
+
+    *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
+    *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
+                                        nb_sectors, c);
+}
+
 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                      int64_t sector_num, int nb_sectors) {
     /* aaaa   bbbb */
@@ -1705,7 +1722,9 @@ static bool tracked_request_overlaps(BdrvTrackedRequest *req,
 }
 
 static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
-        int64_t sector_num, int nb_sectors)
+                                                        int64_t sector_num,
+                                                        int nb_sectors,
+                                                        int job_cluster_size)
 {
     BdrvTrackedRequest *req;
     int64_t cluster_sector_num;
@@ -1721,6 +1740,11 @@ static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
     bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                            &cluster_sector_num, &cluster_nb_sectors);
 
+    if (job_cluster_size) {
+        round_to_job_clusters(bs, sector_num, nb_sectors, job_cluster_size,
+                              &cluster_sector_num, &cluster_nb_sectors);
+    }
+
     do {
         retry = false;
         QLIST_FOREACH(req, &bs->tracked_requests, list) {
@@ -2260,12 +2284,24 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
         bs->copy_on_read_in_flight++;
     }
 
-    if (bs->copy_on_read_in_flight) {
-        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
+    int job_cluster_size = bs->job && bs->job->cluster_size ?
+        bs->job->cluster_size : 0;
+
+    if (bs->copy_on_read_in_flight || job_cluster_size) {
+        wait_for_overlapping_requests(bs, sector_num, nb_sectors,
+                                      job_cluster_size);
     }
 
     tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
 
+    if (bs->job && bs->job->job_type->before_read) {
+        ret = bs->job->job_type->before_read(bs, sector_num, nb_sectors, qiov);
+        if ((ret < 0) || (flags & BDRV_REQ_BACKUP_ONLY)) {
+            /* Note: We do not return any data to the caller */
+            goto out;
+        }
+    }
+
     if (flags & BDRV_REQ_COPY_ON_READ) {
         int pnum;
 
@@ -2309,6 +2345,17 @@ int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
                             BDRV_REQ_COPY_ON_READ);
 }
 
+int coroutine_fn bdrv_co_backup(BlockDriverState *bs,
+    int64_t sector_num, int nb_sectors)
+{
+    if (!bs->job) {
+        return -ENOTSUP;
+    }
+
+    return bdrv_co_do_readv(bs, sector_num, nb_sectors, NULL,
+                            BDRV_REQ_BACKUP_ONLY);
+}
+
 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
     int64_t sector_num, int nb_sectors)
 {
@@ -2366,12 +2413,23 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
         bdrv_io_limits_intercept(bs, true, nb_sectors);
     }
 
-    if (bs->copy_on_read_in_flight) {
-        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
+    int job_cluster_size = bs->job && bs->job->cluster_size ?
+        bs->job->cluster_size : 0;
+
+    if (bs->copy_on_read_in_flight || job_cluster_size) {
+        wait_for_overlapping_requests(bs, sector_num, nb_sectors,
+                                      job_cluster_size);
     }
 
     tracked_request_begin(&req, bs, sector_num, nb_sectors, true);
 
+    if (bs->job && bs->job->job_type->before_write) {
+        ret = bs->job->job_type->before_write(bs, sector_num, nb_sectors, qiov);
+        if (ret < 0) {
+            goto out;
+        }
+    }
+
     if (flags & BDRV_REQ_ZERO_WRITE) {
         ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
     } else {
@@ -2390,6 +2448,7 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
         bs->wr_highest_sector = sector_num + nb_sectors - 1;
     }
 
+out:
     tracked_request_end(&req);
 
     return ret;
diff --git a/include/block/block.h b/include/block/block.h
index 5c3b911..b6144be 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -172,6 +172,8 @@ int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
     int nb_sectors, QEMUIOVector *qiov);
 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+int coroutine_fn bdrv_co_backup(BlockDriverState *bs,
+    int64_t sector_num, int nb_sectors);
 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
     int nb_sectors, QEMUIOVector *qiov);
 /*
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index c290d07..6f42495 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -50,6 +50,13 @@ typedef struct BlockJobType {
      * manually.
      */
     void (*complete)(BlockJob *job, Error **errp);
+
+    /** tracked requests */
+    int coroutine_fn (*before_read)(BlockDriverState *bs, int64_t sector_num,
+                                    int nb_sectors, QEMUIOVector *qiov);
+    int coroutine_fn (*before_write)(BlockDriverState *bs, int64_t sector_num,
+                                     int nb_sectors, QEMUIOVector *qiov);
+
 } BlockJobType;
 
 /**
@@ -103,6 +110,9 @@ struct BlockJob {
     /** Speed that was set with @block_job_set_speed. */
     int64_t speed;
 
+    /** tracked requests */
+    int cluster_size;
+
     /** The completion function that will be called when the job completes. */
     BlockDriverCompletionFunc *cb;
 
-- 
1.7.2.5