/*
 * block_copy API
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "sysemu/block-backend.h"

void block_copy_state_free(BlockCopyState *s)
{
    if (!s) {
        return;
    }

    bdrv_release_dirty_bitmap(blk_bs(s->source), s->copy_bitmap);
    blk_unref(s->source);
    blk_unref(s->target);
    g_free(s);
}

BlockCopyState *block_copy_state_new(
        BlockDriverState *source, BlockDriverState *target,
        int64_t cluster_size, BdrvRequestFlags write_flags,
        ProgressBytesCallbackFunc progress_bytes_callback,
        ProgressResetCallbackFunc progress_reset_callback,
        void *progress_opaque, Error **errp)
{
    BlockCopyState *s;
    int ret;
    uint64_t no_resize = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                         BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD;
    BdrvDirtyBitmap *copy_bitmap;

    copy_bitmap = bdrv_create_dirty_bitmap(source, cluster_size, NULL, errp);
    if (!copy_bitmap) {
        return NULL;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);

    s = g_new(BlockCopyState, 1);
    *s = (BlockCopyState) {
        .source = blk_new(bdrv_get_aio_context(source),
                          BLK_PERM_CONSISTENT_READ, no_resize),
        .target = blk_new(bdrv_get_aio_context(target),
                          BLK_PERM_WRITE, no_resize),
        .copy_bitmap = copy_bitmap,
        .cluster_size = cluster_size,
        .len = bdrv_dirty_bitmap_size(copy_bitmap),
        .write_flags = write_flags,
        .progress_bytes_callback = progress_bytes_callback,
        .progress_reset_callback = progress_reset_callback,
        .progress_opaque = progress_opaque,
    };

    s->copy_range_size = QEMU_ALIGN_DOWN(MIN(blk_get_max_transfer(s->source),
                                             blk_get_max_transfer(s->target)),
                                         s->cluster_size);
    /*
     * Set use_copy_range, considering the following:
     * 1. Compression is not supported for copy_range.
     * 2. copy_range does not respect max_transfer (it's a TODO), so we factor
     *    that in here. If max_transfer is smaller than the cluster_size, we
     *    do not use copy_range (in that case it's zero after aligning down
     *    above).
     */
    s->use_copy_range =
        !(write_flags & BDRV_REQ_WRITE_COMPRESSED) && s->copy_range_size > 0;
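
    /*
     * A worked example of the check above (illustrative numbers, not from
     * any particular device): if blk_get_max_transfer() reports 64 KiB for
     * both ends while cluster_size is 1 MiB, then
     * QEMU_ALIGN_DOWN(64 KiB, 1 MiB) is 0, so use_copy_range ends up false
     * and every cluster goes through the bounce-buffer path instead.
     */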

    /*
     * We simply allow aio context change on our block backends. The
     * block_copy() user (currently only backup) is responsible for keeping
     * source and target in the same aio context.
     */
    blk_set_disable_request_queuing(s->source, true);
    blk_set_allow_aio_context_change(s->source, true);
    blk_set_disable_request_queuing(s->target, true);
    blk_set_allow_aio_context_change(s->target, true);

    ret = blk_insert_bs(s->source, source, errp);
    if (ret < 0) {
        goto fail;
    }

    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }

    return s;

fail:
    block_copy_state_free(s);

    return NULL;
}
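
/*
 * Example: a caller such as the backup job would set up and tear down the
 * state roughly as sketched below. The callback names, opaque pointer and
 * cluster size are hypothetical; the real wiring lives in the caller, not
 * in this file.
 *
 *     BlockCopyState *bcs;
 *
 *     bcs = block_copy_state_new(src_bs, tgt_bs, 1 << 16, 0,
 *                                my_bytes_cb, my_reset_cb, my_opaque, errp);
 *     if (!bcs) {
 *         return;                    // errp is already set
 *     }
 *     ...
 *     block_copy_state_free(bcs);   // also releases copy_bitmap
 */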

/*
 * Copy range to target with a bounce buffer and return the bytes copied. If
 * an error occurred, return a negative error number.
 */
static int coroutine_fn block_copy_with_bounce_buffer(BlockCopyState *s,
                                                      int64_t start,
                                                      int64_t end,
                                                      bool is_write_notifier,
                                                      bool *error_is_read,
                                                      void **bounce_buffer)
{
    int ret;
    int nbytes;
    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;

    assert(QEMU_IS_ALIGNED(start, s->cluster_size));
    bdrv_reset_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
    nbytes = MIN(s->cluster_size, s->len - start);
    if (!*bounce_buffer) {
        /* Lazily allocate one cluster-sized buffer, reused across calls */
        *bounce_buffer = blk_blockalign(s->source, s->cluster_size);
    }

    ret = blk_co_pread(s->source, start, nbytes, *bounce_buffer, read_flags);
    if (ret < 0) {
        trace_block_copy_with_bounce_buffer_read_fail(s, start, ret);
        if (error_is_read) {
            *error_is_read = true;
        }
        goto fail;
    }

    ret = blk_co_pwrite(s->target, start, nbytes, *bounce_buffer,
                        s->write_flags);
    if (ret < 0) {
        trace_block_copy_with_bounce_buffer_write_fail(s, start, ret);
        if (error_is_read) {
            *error_is_read = false;
        }
        goto fail;
    }

    return nbytes;

fail:
    /* Re-mark the cluster dirty so a later pass can retry it */
    bdrv_set_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
    return ret;
}

/*
 * Copy range to target and return the bytes copied. If an error occurred,
 * return a negative error number.
 */
static int coroutine_fn block_copy_with_offload(BlockCopyState *s,
                                                int64_t start,
                                                int64_t end,
                                                bool is_write_notifier)
{
    int ret;
    int nr_clusters;
    int nbytes;
    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;

    assert(QEMU_IS_ALIGNED(s->copy_range_size, s->cluster_size));
    assert(QEMU_IS_ALIGNED(start, s->cluster_size));
    nbytes = MIN(s->copy_range_size, MIN(end, s->len) - start);
    nr_clusters = DIV_ROUND_UP(nbytes, s->cluster_size);
    bdrv_reset_dirty_bitmap(s->copy_bitmap, start,
                            s->cluster_size * nr_clusters);
    ret = blk_co_copy_range(s->source, start, s->target, start, nbytes,
                            read_flags, s->write_flags);
    if (ret < 0) {
        trace_block_copy_with_offload_fail(s, start, ret);
        /* Re-mark the whole range dirty so it can be retried */
        bdrv_set_dirty_bitmap(s->copy_bitmap, start,
                              s->cluster_size * nr_clusters);
        return ret;
    }

    return nbytes;
}

/*
 * Check if the cluster starting at offset is allocated or not.
 * Return via pnum the number of contiguous clusters sharing this allocation.
 */
static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
                                           int64_t *pnum)
{
    BlockDriverState *bs = blk_bs(s->source);
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        ret = bdrv_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: unallocated tail is treated as an entire segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}
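
/*
 * A worked example of the loop above (hypothetical numbers): with a 64 KiB
 * cluster_size, suppose bdrv_is_allocated() reports a 16 KiB unallocated
 * segment followed by a 112 KiB allocated one. After the first call,
 * total_count is 16 KiB, less than one cluster, so we keep scanning. The
 * second call returns allocated, so the partially-covered first cluster
 * rounds toward "allocated" and *pnum becomes
 * DIV_ROUND_UP(128 KiB, 64 KiB) = 2, with a return value of 1.
 */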

/*
 * Reset bits in copy_bitmap starting at offset if they represent unallocated
 * data in the image. May reset subsequent contiguous bits.
 * @return 0 when the cluster at @offset was unallocated,
 *         1 otherwise, and -ret on error.
 */
int64_t block_copy_reset_unallocated(BlockCopyState *s,
                                     int64_t offset, int64_t *count)
{
    int ret;
    int64_t clusters, bytes;

    ret = block_copy_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
        s->progress_reset_callback(s->progress_opaque);
    }

    *count = bytes;
    return ret;
}
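
/*
 * For example (a sketch mirroring the use in block_copy() below): a caller
 * checks the return value and, when it is 0, simply skips ahead by *count
 * bytes, since that region was unallocated and its bitmap bits have already
 * been cleared.
 */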

int coroutine_fn block_copy(BlockCopyState *s,
                            int64_t start, uint64_t bytes,
                            bool *error_is_read,
                            bool is_write_notifier)
{
    int ret = 0;
    int64_t end = bytes + start; /* bytes */
    void *bounce_buffer = NULL;
    int64_t status_bytes;

    /*
     * The block_copy() user is responsible for keeping source and target in
     * the same aio context.
     */
    assert(blk_get_aio_context(s->source) == blk_get_aio_context(s->target));

    assert(QEMU_IS_ALIGNED(start, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end, s->cluster_size));

    while (start < end) {
        int64_t dirty_end;

        if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) {
            trace_block_copy_skip(s, start);
            start += s->cluster_size;
            continue; /* already copied */
        }

        dirty_end = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start,
                                                (end - start));
        if (dirty_end < 0) {
            dirty_end = end;
        }

        if (s->skip_unallocated) {
            ret = block_copy_reset_unallocated(s, start, &status_bytes);
            if (ret == 0) {
                trace_block_copy_skip_range(s, start, status_bytes);
                start += status_bytes;
                continue;
            }
            /* Clamp to known allocated region */
            dirty_end = MIN(dirty_end, start + status_bytes);
        }

        trace_block_copy_process(s, start);

        if (s->use_copy_range) {
            ret = block_copy_with_offload(s, start, dirty_end,
                                          is_write_notifier);
            if (ret < 0) {
                /* Fall back to the bounce buffer for all further clusters */
                s->use_copy_range = false;
            }
        }
        if (!s->use_copy_range) {
            ret = block_copy_with_bounce_buffer(s, start, dirty_end,
                                                is_write_notifier,
                                                error_is_read, &bounce_buffer);
        }
        if (ret < 0) {
            break;
        }

        s->progress_bytes_callback(ret, s->progress_opaque);
        start += ret;
        ret = 0;
    }

    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    return ret;
}
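
/*
 * Example: a minimal sketch of driving a full copy with this API (the
 * surrounding job/coroutine infrastructure is assumed and not shown; the
 * variable names are hypothetical):
 *
 *     int64_t offset;
 *     bool error_is_read;
 *     int ret;
 *
 *     for (offset = 0; offset < s->len; offset += s->cluster_size) {
 *         ret = block_copy(s, offset, s->cluster_size,
 *                          &error_is_read, false);
 *         if (ret < 0) {
 *             break;             // error_is_read says which side failed
 *         }
 *     }
 */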