From c51e1c591401926d7a5c3a41011736c329c6ef82 Mon Sep 17 00:00:00 2001
From: Dietmar Maurer <dietmar@proxmox.com>
Date: Tue, 13 Nov 2012 10:03:52 +0100
Subject: [PATCH v3 2/6] add basic backup support to block driver

backup_job_create() creates a block job to back up a block device.
The coroutine is started with backup_job_start().

We call backup_do_cow() for each write during backup. That function
reads the original data and passes it to backup_dump_cb().

The tracked_request infrastructure is used to serialize access.

Currently, the backup cluster size is hardcoded to 65536 bytes.

Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>
---
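A minimal caller sketch for the API this patch adds (not part of the
commit): MyBackupState, my_target_write() and my_target_close() are
hypothetical stand-ins for whatever consumes the dumped clusters; only
backup_job_create() and backup_job_start() below are real.

    #include "block.h"
    #include "backup.h"

    typedef struct MyBackupState MyBackupState;          /* hypothetical */
    int my_target_write(MyBackupState *s, int64_t cluster_num,
                        const unsigned char *buf);        /* hypothetical */
    void my_target_close(MyBackupState *s, int ret);      /* hypothetical */

    /* Receives one BACKUP_CLUSTER_SIZE (64 KiB) cluster at a time,
     * either from backup_run() or from the copy-before-write path. */
    static int my_dump_cb(void *opaque, BlockDriverState *bs,
                          int64_t cluster_num, unsigned char *buf)
    {
        MyBackupState *s = opaque;
        return my_target_write(s, cluster_num, buf);
    }

    /* Invoked via backup_job_cleanup_cb() once the job completes;
     * ret < 0 means an error occurred or the job was cancelled. */
    static void my_complete_cb(void *opaque, int ret)
    {
        MyBackupState *s = opaque;
        my_target_close(s, ret);
    }

    static int start_backup(BlockDriverState *bs, MyBackupState *s)
    {
        /* Registers the callbacks and allocates the cluster bitmap;
         * fails if another block job is already running on bs. */
        if (backup_job_create(bs, my_dump_cb, my_complete_cb, s,
                              0 /* speed: 0 = no rate limit */) < 0) {
            return -1;
        }
        backup_job_start(bs, false);  /* enter the backup_run() coroutine */
        return 0;
    }
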
 Makefile.objs |   1 +
 backup.c      | 339 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 backup.h      |  32 +++
 block.c       |  71 +++++++++++-
 block.h       |   2 +
 blockjob.h    |  10 ++
 6 files changed, 449 insertions(+), 6 deletions(-)
 create mode 100644 backup.c
 create mode 100644 backup.h

diff --git a/Makefile.objs b/Makefile.objs
index 3c7abca..cb46be5 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -48,6 +48,7 @@ coroutine-obj-$(CONFIG_WIN32) += coroutine-win32.o
 block-obj-y = iov.o cache-utils.o qemu-option.o module.o async.o
 block-obj-y += nbd.o block.o blockjob.o aes.o qemu-config.o
 block-obj-y += thread-pool.o qemu-progress.o qemu-sockets.o uri.o notify.o
+block-obj-y += backup.o
 block-obj-y += $(coroutine-obj-y) $(qobject-obj-y) $(version-obj-y)
 block-obj-$(CONFIG_POSIX) += event_notifier-posix.o aio-posix.o
 block-obj-$(CONFIG_WIN32) += event_notifier-win32.o aio-win32.o
diff --git a/backup.c b/backup.c
new file mode 100644
index 0000000..5dcbd11
--- /dev/null
+++ b/backup.c
@@ -0,0 +1,339 @@
+/*
+ * QEMU backup
+ *
+ * Copyright (C) 2012 Proxmox Server Solutions
+ *
+ * Authors:
+ *  Dietmar Maurer (dietmar@proxmox.com)
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include "block.h"
+#include "block_int.h"
+#include "blockjob.h"
+#include "qemu/ratelimit.h"
+#include "backup.h"
+
+#define DEBUG_BACKUP 0
+
+#define DPRINTF(fmt, ...) \
+    do { if (DEBUG_BACKUP) { printf("backup: " fmt, ## __VA_ARGS__); } } \
+    while (0)
+
+
+#define BITS_PER_LONG (sizeof(unsigned long) * 8)
+#define SLICE_TIME 100000000ULL /* ns */
+
+typedef struct BackupBlockJob {
+    BlockJob common;
+    RateLimit limit;
+    uint64_t sectors_read;
+    unsigned long *bitmap;
+    int bitmap_size;
+    BackupDumpFunc *backup_dump_cb;
+    BlockDriverCompletionFunc *backup_complete_cb;
+    void *opaque;
+} BackupBlockJob;
+
+static int backup_get_bitmap(BackupBlockJob *job, int64_t cluster_num)
+{
+    assert(job);
+    assert(job->bitmap);
+
+    unsigned long val, idx, bit;
+
+    idx = cluster_num / BITS_PER_LONG;
+
+    assert(job->bitmap_size > idx);
+
+    bit = cluster_num % BITS_PER_LONG;
+    val = job->bitmap[idx];
+
+    return !!(val & (1UL << bit));
+}
+
+static void backup_set_bitmap(BackupBlockJob *job, int64_t cluster_num,
+                              int dirty)
+{
+    assert(job);
+    assert(job->bitmap);
+
+    unsigned long val, idx, bit;
+
+    idx = cluster_num / BITS_PER_LONG;
+
+    assert(job->bitmap_size > idx);
+
+    bit = cluster_num % BITS_PER_LONG;
+    val = job->bitmap[idx];
+    if (dirty) {
+        if (!(val & (1UL << bit))) {
+            val |= 1UL << bit;
+        }
+    } else {
+        if (val & (1UL << bit)) {
+            val &= ~(1UL << bit);
+        }
+    }
+    job->bitmap[idx] = val;
+}
+
+static int backup_in_progress_count;
+
+static int coroutine_fn backup_do_cow(BlockDriverState *bs,
+                                      int64_t sector_num, int nb_sectors)
+{
+    assert(bs);
+    BackupBlockJob *job = (BackupBlockJob *)bs->job;
+    assert(job);
+
+    BlockDriver *drv = bs->drv;
+    struct iovec iov;
+    QEMUIOVector bounce_qiov;
+    void *bounce_buffer = NULL;
+    int ret = 0;
+
+    backup_in_progress_count++;
+
+    int64_t start, end;
+
+    start = sector_num / BACKUP_BLOCKS_PER_CLUSTER;
+    end = (sector_num + nb_sectors + BACKUP_BLOCKS_PER_CLUSTER - 1) /
+        BACKUP_BLOCKS_PER_CLUSTER;
+
+    DPRINTF("brdv_co_backup_cow enter %s C%zd %zd %d\n",
+            bdrv_get_device_name(bs), start, sector_num, nb_sectors);
+
+    for (; start < end; start++) {
+        if (backup_get_bitmap(job, start)) {
+            DPRINTF("brdv_co_backup_cow skip C%zd\n", start);
+            continue; /* already copied */
+        }
+
+        /* immediately set bitmap (avoid coroutine race) */
+        backup_set_bitmap(job, start, 1);
+
+        DPRINTF("brdv_co_backup_cow C%zd\n", start);
+
+        if (!bounce_buffer) {
+            iov.iov_len = BACKUP_CLUSTER_SIZE;
+            iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
+            qemu_iovec_init_external(&bounce_qiov, &iov, 1);
+        }
+
+        ret = drv->bdrv_co_readv(bs, start * BACKUP_BLOCKS_PER_CLUSTER,
+                                 BACKUP_BLOCKS_PER_CLUSTER,
+                                 &bounce_qiov);
+
+        job->sectors_read += BACKUP_BLOCKS_PER_CLUSTER;
+
+        if (ret < 0) {
+            DPRINTF("brdv_co_backup_cow bdrv_read C%zd failed\n", start);
+            goto out;
+        }
+
+        ret = job->backup_dump_cb(job->opaque, bs, start, bounce_buffer);
+        if (ret < 0) {
+            DPRINTF("brdv_co_backup_cow dump_cluster_cb C%zd failed\n", start);
+            goto out;
+        }
+
+        DPRINTF("brdv_co_backup_cow done C%zd\n", start);
+    }
+
+out:
+    if (bounce_buffer) {
+        qemu_vfree(bounce_buffer);
+    }
+
+    backup_in_progress_count--;
+
+    return ret;
+}
+
+static int coroutine_fn backup_before_read(BlockDriverState *bs,
+                                           int64_t sector_num,
+                                           int nb_sectors, QEMUIOVector *qiov)
+{
+    return backup_do_cow(bs, sector_num, nb_sectors);
+}
+
+static int coroutine_fn backup_before_write(BlockDriverState *bs,
+                                            int64_t sector_num,
+                                            int nb_sectors, QEMUIOVector *qiov)
+{
+    return backup_do_cow(bs, sector_num, nb_sectors);
+}
+
+static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
+{
+    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+
+    if (speed < 0) {
+        error_set(errp, QERR_INVALID_PARAMETER, "speed");
+        return;
+    }
+    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
+}
+
+static BlockJobType backup_job_type = {
+    .instance_size = sizeof(BackupBlockJob),
+    .before_read = backup_before_read,
+    .before_write = backup_before_write,
+    .job_type = "backup",
+    .set_speed = backup_set_speed,
+};
+
+static void coroutine_fn backup_run(void *opaque)
+{
+    BackupBlockJob *job = opaque;
+    BlockDriverState *bs = job->common.bs;
+    assert(bs);
+
+    int64_t start, end;
+
+    start = 0;
+    end = (bs->total_sectors + BACKUP_BLOCKS_PER_CLUSTER - 1) /
+        BACKUP_BLOCKS_PER_CLUSTER;
+
+    DPRINTF("backup_run start %s %zd %zd\n", bdrv_get_device_name(bs),
+            start, end);
+
+    int ret = 0;
+
+    for (; start < end; start++) {
+        if (block_job_is_cancelled(&job->common)) {
+            ret = -1;
+            break;
+        }
+
+        /* we need to yield so that qemu_aio_flush() returns.
+         * (without, VM does not reboot)
+         * Note: use 1000 instead of 0 (0 priorize this task too much)
+         */
+        if (job->common.speed) {
+            uint64_t delay_ns = ratelimit_calculate_delay(
+                &job->limit, job->sectors_read);
+            job->sectors_read = 0;
+            block_job_sleep_ns(&job->common, rt_clock, delay_ns);
+        } else {
+            block_job_sleep_ns(&job->common, rt_clock, 1000);
+        }
+
+        if (block_job_is_cancelled(&job->common)) {
+            ret = -1;
+            break;
+        }
+
+        if (backup_get_bitmap(job, start)) {
+            continue; /* already copied */
+        }
+
+        DPRINTF("backup_run loop C%zd\n", start);
+
+        /**
+         * This triggers a cluster copy
+         * Note: avoid direct call to brdv_co_backup_cow, because
+         * this does not call tracked_request_begin()
+         */
+        ret = bdrv_co_backup(bs, start*BACKUP_BLOCKS_PER_CLUSTER, 1);
+        if (ret < 0) {
+            break;
+        }
+        /* Publish progress */
+        job->common.offset += BACKUP_CLUSTER_SIZE;
+    }
+
+    while (backup_in_progress_count > 0) {
+        DPRINTF("backup_run backup_in_progress_count != 0 (%d)",
+                backup_in_progress_count);
+        block_job_sleep_ns(&job->common, rt_clock, 10000);
+
+    }
+
+    DPRINTF("backup_run complete %d\n", ret);
+    block_job_completed(&job->common, ret);
+}
+
+static void backup_job_cleanup_cb(void *opaque, int ret)
+{
+    BlockDriverState *bs = opaque;
+    assert(bs);
+    BackupBlockJob *job = (BackupBlockJob *)bs->job;
+    assert(job);
+
+    DPRINTF("backup_job_cleanup_cb start %d\n", ret);
+
+    job->backup_complete_cb(job->opaque, ret);
+
+    DPRINTF("backup_job_cleanup_cb end\n");
+
+    g_free(job->bitmap);
+}
+
+void
+backup_job_start(BlockDriverState *bs, bool cancel)
+{
+    assert(bs);
+    assert(bs->job);
+    assert(bs->job->co == NULL);
+
+    if (cancel) {
+        block_job_cancel(bs->job); /* set cancel flag */
+    }
+
+    bs->job->co = qemu_coroutine_create(backup_run);
+    qemu_coroutine_enter(bs->job->co, bs->job);
+}
+
+int
+backup_job_create(BlockDriverState *bs, BackupDumpFunc *backup_dump_cb,
+                  BlockDriverCompletionFunc *backup_complete_cb,
+                  void *opaque, int64_t speed)
+{
+    assert(bs);
+    assert(backup_dump_cb);
+    assert(backup_complete_cb);
+
+    if (bs->job) {
+        DPRINTF("bdrv_backup_init failed - running job on %s\n",
+                bdrv_get_device_name(bs));
+        return -1;
+    }
+
+    int64_t bitmap_size;
+    const char *devname = bdrv_get_device_name(bs);
+
+    if (!devname || !devname[0]) {
+        return -1;
+    }
+
+    DPRINTF("bdrv_backup_init %s\n", bdrv_get_device_name(bs));
+
+    Error *errp;
+    BackupBlockJob *job = block_job_create(&backup_job_type, bs, speed,
+                                           backup_job_cleanup_cb, bs, &errp);
+
+    job->common.cluster_size = BACKUP_CLUSTER_SIZE;
+
+    bitmap_size = bs->total_sectors +
+        BACKUP_BLOCKS_PER_CLUSTER * BITS_PER_LONG - 1;
+    bitmap_size /= BACKUP_BLOCKS_PER_CLUSTER * BITS_PER_LONG;
+
+    job->backup_dump_cb = backup_dump_cb;
+    job->backup_complete_cb = backup_complete_cb;
+    job->opaque = opaque;
+    job->bitmap_size = bitmap_size;
+    job->bitmap = g_new0(unsigned long, bitmap_size);
+
+    job->common.len = bs->total_sectors*BDRV_SECTOR_SIZE;
+
+    return 0;
+}
diff --git a/backup.h b/backup.h
new file mode 100644
index 0000000..20a9016
--- /dev/null
+++ b/backup.h
@@ -0,0 +1,32 @@
+/*
+ * QEMU backup related definitions
+ *
+ * Copyright (C) Proxmox Server Solutions
+ *
+ * Authors:
+ *  Dietmar Maurer (dietmar@proxmox.com)
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_BACKUP_H
+#define QEMU_BACKUP_H
+
+#include <uuid/uuid.h>
+
+#define BACKUP_CLUSTER_BITS 16
+#define BACKUP_CLUSTER_SIZE (1<<BACKUP_CLUSTER_BITS)
+#define BACKUP_BLOCKS_PER_CLUSTER (BACKUP_CLUSTER_SIZE/BDRV_SECTOR_SIZE)
+
+typedef int BackupDumpFunc(void *opaque, BlockDriverState *bs,
+                           int64_t cluster_num, unsigned char *buf);
+
+void backup_job_start(BlockDriverState *bs, bool cancel);
+
+int backup_job_create(BlockDriverState *bs, BackupDumpFunc *backup_dump_cb,
+                      BlockDriverCompletionFunc *backup_complete_cb,
+                      void *opaque, int64_t speed);
+
+#endif /* QEMU_BACKUP_H */
diff --git a/block.c b/block.c
index c05875f..4de7fbd 100644
--- a/block.c
+++ b/block.c
@@ -54,6 +54,7 @@
 typedef enum {
     BDRV_REQ_COPY_ON_READ = 0x1,
     BDRV_REQ_ZERO_WRITE = 0x2,
+    BDRV_REQ_BACKUP_ONLY = 0x4,
 } BdrvRequestFlags;
 
 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
@@ -1542,7 +1543,7 @@ int bdrv_commit(BlockDriverState *bs)
 
     if (!drv)
         return -ENOMEDIUM;
-
+    
     if (!bs->backing_hd) {
         return -ENOTSUP;
     }
@@ -1679,6 +1680,22 @@ static void round_to_clusters(BlockDriverState *bs,
     }
 }
 
+/**
+ * Round a region to job cluster boundaries
+ */
+static void round_to_job_clusters(BlockDriverState *bs,
+                                  int64_t sector_num, int nb_sectors,
+                                  int job_cluster_size,
+                                  int64_t *cluster_sector_num,
+                                  int *cluster_nb_sectors)
+{
+    int64_t c = job_cluster_size/BDRV_SECTOR_SIZE;
+
+    *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
+    *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
+                                        nb_sectors, c);
+}
+
 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                      int64_t sector_num, int nb_sectors) {
     /* aaaa bbbb */
@@ -1693,7 +1710,9 @@ static bool tracked_request_overlaps(BdrvTrackedRequest *req,
 }
 
 static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
-        int64_t sector_num, int nb_sectors)
+                                                        int64_t sector_num,
+                                                        int nb_sectors,
+                                                        int job_cluster_size)
 {
     BdrvTrackedRequest *req;
     int64_t cluster_sector_num;
@@ -1709,6 +1728,11 @@ static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
     round_to_clusters(bs, sector_num, nb_sectors,
                       &cluster_sector_num, &cluster_nb_sectors);
 
+    if (job_cluster_size) {
+        round_to_job_clusters(bs, sector_num, nb_sectors, job_cluster_size,
+                              &cluster_sector_num, &cluster_nb_sectors);
+    }
+
     do {
         retry = false;
         QLIST_FOREACH(req, &bs->tracked_requests, list) {
@@ -2278,12 +2302,24 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
         bs->copy_on_read_in_flight++;
     }
 
-    if (bs->copy_on_read_in_flight) {
-        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
+    int job_cluster_size = bs->job && bs->job->cluster_size ?
+        bs->job->cluster_size : 0;
+
+    if (bs->copy_on_read_in_flight || job_cluster_size) {
+        wait_for_overlapping_requests(bs, sector_num, nb_sectors,
+                                      job_cluster_size);
     }
 
     tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
 
+    if (bs->job && bs->job->job_type->before_read) {
+        ret = bs->job->job_type->before_read(bs, sector_num, nb_sectors, qiov);
+        if ((ret < 0) || (flags & BDRV_REQ_BACKUP_ONLY)) {
+            /* Note: We do not return any data to the caller */
+            goto out;
+        }
+    }
+
     if (flags & BDRV_REQ_COPY_ON_READ) {
         int pnum;
 
@@ -2327,6 +2363,17 @@ int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
                             BDRV_REQ_COPY_ON_READ);
 }
 
+int coroutine_fn bdrv_co_backup(BlockDriverState *bs,
+                                int64_t sector_num, int nb_sectors)
+{
+    if (!bs->job) {
+        return -ENOTSUP;
+    }
+
+    return bdrv_co_do_readv(bs, sector_num, nb_sectors, NULL,
+                            BDRV_REQ_BACKUP_ONLY);
+}
+
 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
     int64_t sector_num, int nb_sectors)
 {
@@ -2384,12 +2431,23 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
         bdrv_io_limits_intercept(bs, true, nb_sectors);
     }
 
-    if (bs->copy_on_read_in_flight) {
-        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
+    int job_cluster_size = bs->job && bs->job->cluster_size ?
+        bs->job->cluster_size : 0;
+
+    if (bs->copy_on_read_in_flight || job_cluster_size) {
+        wait_for_overlapping_requests(bs, sector_num, nb_sectors,
+                                      job_cluster_size);
     }
 
     tracked_request_begin(&req, bs, sector_num, nb_sectors, true);
 
+    if (bs->job && bs->job->job_type->before_write) {
+        ret = bs->job->job_type->before_write(bs, sector_num, nb_sectors, qiov);
+        if (ret < 0) {
+            goto out;
+        }
+    }
+
     if (flags & BDRV_REQ_ZERO_WRITE) {
         ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
     } else {
@@ -2408,6 +2466,7 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
         bs->wr_highest_sector = sector_num + nb_sectors - 1;
     }
 
+out:
     tracked_request_end(&req);
 
     return ret;
diff --git a/block.h b/block.h
index 722c620..94e5903 100644
--- a/block.h
+++ b/block.h
@@ -172,6 +172,8 @@ int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
     int nb_sectors, QEMUIOVector *qiov);
 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+int coroutine_fn bdrv_co_backup(BlockDriverState *bs,
+    int64_t sector_num, int nb_sectors);
 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
     int nb_sectors, QEMUIOVector *qiov);
 /*
diff --git a/blockjob.h b/blockjob.h
index 3792b73..6621173 100644
--- a/blockjob.h
+++ b/blockjob.h
@@ -50,6 +50,13 @@ typedef struct BlockJobType {
      * manually.
      */
     void (*complete)(BlockJob *job, Error **errp);
+
+    /** tracked requests */
+    int coroutine_fn (*before_read)(BlockDriverState *bs, int64_t sector_num,
+                                    int nb_sectors, QEMUIOVector *qiov);
+    int coroutine_fn (*before_write)(BlockDriverState *bs, int64_t sector_num,
+                                     int nb_sectors, QEMUIOVector *qiov);
+
 } BlockJobType;
 
 /**
@@ -103,6 +110,9 @@ struct BlockJob {
     /** Speed that was set with @block_job_set_speed. */
     int64_t speed;
 
+    /** tracked requests */
+    int cluster_size;
+
     /** The completion function that will be called when the job completes. */
     BlockDriverCompletionFunc *cb;
 
-- 
1.7.2.5

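A quick sanity check of the cluster constants and the bitmap sizing used
above, as a standalone C sketch. BDRV_SECTOR_SIZE is assumed to be 512
(as in QEMU) and the 10 GiB disk is only an example; the remaining values
mirror backup.h and backup_job_create().

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BDRV_SECTOR_SIZE 512                 /* assumed */
    #define BACKUP_CLUSTER_BITS 16
    #define BACKUP_CLUSTER_SIZE (1 << BACKUP_CLUSTER_BITS)   /* 65536 bytes */
    #define BACKUP_BLOCKS_PER_CLUSTER (BACKUP_CLUSTER_SIZE / BDRV_SECTOR_SIZE)
    #define BITS_PER_LONG (sizeof(unsigned long) * 8)

    int main(void)
    {
        assert(BACKUP_CLUSTER_SIZE == 65536);
        assert(BACKUP_BLOCKS_PER_CLUSTER == 128); /* 128 sectors per cluster */

        /* Same rounding as backup_job_create(): one bit per cluster,
         * packed into unsigned longs. Example: a 10 GiB disk. */
        int64_t total_sectors = 10LL * 1024 * 1024 * 1024 / BDRV_SECTOR_SIZE;
        int64_t bitmap_size = total_sectors +
            BACKUP_BLOCKS_PER_CLUSTER * BITS_PER_LONG - 1;
        bitmap_size /= BACKUP_BLOCKS_PER_CLUSTER * BITS_PER_LONG;

        /* 10 GiB -> 163840 clusters -> 2560 longs on a 64-bit host */
        printf("clusters=%lld longs=%lld\n",
               (long long)(total_sectors / BACKUP_BLOCKS_PER_CLUSTER),
               (long long)bitmap_size);
        return 0;
    }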