/*
 * Image streaming
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "trace.h"
#include "block_int.h"

enum {
    /*
     * Size of data buffer for populating the image file.  This should be large
     * enough to process multiple clusters in a single call, so that populating
     * contiguous regions of the image is efficient.
     */
    STREAM_BUFFER_SIZE = 512 * 1024, /* in bytes */
};

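/*
 * For example: with BDRV_SECTOR_SIZE of 512 bytes, one 512 KB buffer covers
 * STREAM_BUFFER_SIZE / BDRV_SECTOR_SIZE = 1024 sectors, so each iteration of
 * the streaming loop below can populate up to 1024 contiguous sectors.
 */
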
#define SLICE_TIME 100000000ULL /* ns */

typedef struct {
    int64_t next_slice_time;
    uint64_t slice_quota;
    uint64_t dispatched;
} RateLimit;

static int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
{
    int64_t now = qemu_get_clock_ns(rt_clock);

    if (limit->next_slice_time < now) {
        /* Start a new slice and reset the amount dispatched so far */
        limit->next_slice_time = now + SLICE_TIME;
        limit->dispatched = 0;
    }
    if (limit->dispatched == 0 || limit->dispatched + n <= limit->slice_quota) {
        /* Within quota: dispatch immediately, no delay needed */
        limit->dispatched += n;
        return 0;
    } else {
        /* Quota exceeded: carry this request over and wait out the slice */
        limit->dispatched = n;
        return limit->next_slice_time - now;
    }
}

static void ratelimit_set_speed(RateLimit *limit, uint64_t speed)
{
    limit->slice_quota = speed / (1000000000ULL / SLICE_TIME);
}

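/*
 * For example: SLICE_TIME is 100 ms, i.e. 10 slices per second, so a speed of
 * 10000 units/s gives a slice_quota of 1000 units per slice.  Once a slice's
 * quota has been used up, ratelimit_calculate_delay() returns the time left in
 * the current slice so the caller can sleep before dispatching more work.
 */
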
typedef struct StreamBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *base;
    char backing_file_id[1024];
} StreamBlockJob;

static int coroutine_fn stream_populate(BlockDriverState *bs,
                                        int64_t sector_num, int nb_sectors,
                                        void *buf)
{
    struct iovec iov = {
        .iov_base = buf,
        .iov_len  = nb_sectors * BDRV_SECTOR_SIZE,
    };
    QEMUIOVector qiov;

    qemu_iovec_init_external(&qiov, &iov, 1);

    /* Copy-on-read the unallocated clusters */
    return bdrv_co_copy_on_readv(bs, sector_num, nb_sectors, &qiov);
}

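/*
 * Note: the data read into 'buf' is not consumed by the caller.  The read is
 * issued through bdrv_co_copy_on_readv() so that the copy-on-read logic pulls
 * the backing-file data into the top image, which is what populates it.
 */
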
static void close_unused_images(BlockDriverState *top, BlockDriverState *base,
                                const char *base_id)
{
    BlockDriverState *intermediate;
    intermediate = top->backing_hd;

    while (intermediate) {
        BlockDriverState *unused;

        /* Stop once we reach the base image */
        if (intermediate == base) {
            break;
        }

        /* Unlink the image from the chain and delete it */
        unused = intermediate;
        intermediate = intermediate->backing_hd;
        unused->backing_hd = NULL;
        bdrv_delete(unused);
    }
    top->backing_hd = base;
}

/*
 * Given an image chain: [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 */
static int coroutine_fn is_allocated_above(BlockDriverState *top,
                                           BlockDriverState *base,
                                           int64_t sector_num,
                                           int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate != base) {
        int pnum_inter;

        ret = bdrv_co_is_allocated(intermediate, sector_num, nb_sectors,
                                   &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but an intermediate
         * image might have [sector_num+x, nb_sectors-x] allocated.  Clamp n
         * so the range reported to the caller stops before that region.
         */
        if (n > pnum_inter) {
            n = pnum_inter;
        }

        intermediate = intermediate->backing_hd;
    }

    *pnum = n;
    return 0;
}

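/*
 * For example, with a chain [base] -> [sn1] -> [top], stream_run() calls
 * is_allocated_above(top->backing_hd, base, ...), i.e. it starts at sn1.  The
 * call returns 1 if sn1 has the sector allocated and 0 if only base (or no
 * image at all) does; *pnum then bounds how many following sectors are known
 * to share that state.
 */
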
static void coroutine_fn stream_run(void *opaque)
{
    StreamBlockJob *s = opaque;
    BlockDriverState *bs = s->common.bs;
    BlockDriverState *base = s->base;
    int64_t sector_num, end;
    int ret = 0;
    int n = 0;
    void *buf;

    s->common.len = bdrv_getlength(bs);
    if (s->common.len < 0) {
        block_job_complete(&s->common, s->common.len);
        return;
    }

    end = s->common.len >> BDRV_SECTOR_BITS;
    buf = qemu_blockalign(bs, STREAM_BUFFER_SIZE);

    /* Turn on copy-on-read for the whole block device so that guest read
     * requests help us make progress.  Only do this when copying the entire
     * backing chain since the copy-on-read operation does not take base into
     * account.
     */
    if (!base) {
        bdrv_enable_copy_on_read(bs);
    }

    for (sector_num = 0; sector_num < end; sector_num += n) {
        uint64_t delay_ns = 0;
        bool copy;

wait:
        /* Note that even when no rate limit is applied we need to yield
         * with no pending I/O here so that qemu_aio_flush() returns.
         */
        block_job_sleep_ns(&s->common, rt_clock, delay_ns);
        if (block_job_is_cancelled(&s->common)) {
            break;
        }

        ret = bdrv_co_is_allocated(bs, sector_num,
                                   STREAM_BUFFER_SIZE / BDRV_SECTOR_SIZE, &n);
        if (ret == 1) {
            /* Allocated in the top, no need to copy.  */
            copy = false;
        } else {
            /* Copy if allocated in the intermediate images.  Limit to the
             * known-unallocated area [sector_num, sector_num+n).  */
            ret = is_allocated_above(bs->backing_hd, base, sector_num, n, &n);
            copy = (ret == 1);
        }

        trace_stream_one_iteration(s, sector_num, n, ret);
        if (ret >= 0 && copy) {
            if (s->common.speed) {
                delay_ns = ratelimit_calculate_delay(&s->limit, n);
                if (delay_ns > 0) {
                    goto wait;
                }
            }
            ret = stream_populate(bs, sector_num, n, buf);
        }
        if (ret < 0) {
            break;
        }
        ret = 0;

        /* Publish progress */
        s->common.offset += n * BDRV_SECTOR_SIZE;
    }

    if (!base) {
        bdrv_disable_copy_on_read(bs);
    }

    if (!block_job_is_cancelled(&s->common) && sector_num == end && ret == 0) {
        const char *base_id = NULL, *base_fmt = NULL;
        if (base) {
            base_id = s->backing_file_id;
            if (base->drv) {
                base_fmt = base->drv->format_name;
            }
        }
        ret = bdrv_change_backing_file(bs, base_id, base_fmt);
        close_unused_images(bs, base, base_id);
    }

    qemu_vfree(buf);
    block_job_complete(&s->common, ret);
}

static void stream_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    StreamBlockJob *s = container_of(job, StreamBlockJob, common);

    if (speed < 0) {
        error_set(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE);
}

static BlockJobType stream_job_type = {
    .instance_size = sizeof(StreamBlockJob),
    .job_type      = "stream",
    .set_speed     = stream_set_speed,
};

void stream_start(BlockDriverState *bs, BlockDriverState *base,
                  const char *base_id, int64_t speed,
                  BlockDriverCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    StreamBlockJob *s;

    s = block_job_create(&stream_job_type, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->base = base;
    if (base_id) {
        pstrcpy(s->backing_file_id, sizeof(s->backing_file_id), base_id);
    }

    s->common.co = qemu_coroutine_create(stream_run);
    trace_stream_start(bs, base, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}
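
/*
 * A minimal usage sketch (hypothetical caller, not part of this file): the
 * monitor's block-stream handling would look up the device's BlockDriverState
 * and then start the job roughly like this, where stream_complete_cb is
 * whatever completion callback the caller wants invoked with the job's return
 * code:
 *
 *     Error *local_err = NULL;
 *
 *     stream_start(bs, NULL, NULL, 0, stream_complete_cb, opaque, &local_err);
 *     if (local_err) {
 *         handle the error, e.g. report it and free it
 *     }
 *
 * Passing base == NULL streams the whole backing chain into 'bs'; a non-NULL
 * base (plus its base_id string) stops streaming at that image so it remains
 * the backing file afterwards.
 */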