/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Author:
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
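
/* SLICE_TIME bounds how long the job runs before yielding back to the main
 * loop and is also the accounting window handed to the rate limiter;
 * MAX_IN_FLIGHT caps the number of concurrent copy operations so a slow
 * target cannot absorb an unbounded number of buffers.
 */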
/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;
typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *target;
    BlockDriverState *base;
    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t sector_num;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int sectors_in_flight;
    int ret;
    bool unmap;
} MirrorBlockJob;
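
/* Note on the three bitmaps above: dirty_bitmap tracks guest sectors that
 * still differ between source and target; in_flight_bitmap marks chunks with
 * a copy operation currently outstanding, so new iterations do not touch
 * them; cow_bitmap exists only when the target has no backing file and
 * records which target clusters have been copied at least once, so later
 * writes need not be rounded up to full clusters again.
 */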
typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->common.bs,
                                      s->on_source_error, true, error);
    } else {
        return block_job_error_action(&s->common, s->target,
                                      s->on_target_error, false, error);
    }
}
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = op->nb_sectors / sectors_per_chunk;
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
    }

    qemu_iovec_destroy(&op->qiov);
    g_slice_free(MirrorOp, op);

    /* Enter coroutine when it is not sleeping.  The coroutine sleeps to
     * rate-limit itself.  The coroutine will eventually resume since there is
     * a sleep timeout so don't wake it early.
     */
    if (s->common.busy) {
        qemu_coroutine_enter(s->common.co, NULL);
    }
}
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}
static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
                    mirror_write_complete, op);
}
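
/* A dirty chunk thus flows through a two-stage pipeline: mirror_iteration
 * submits a read from the source, mirror_read_complete chains the write to
 * the target, and mirror_write_complete finally recycles the buffers via
 * mirror_iteration_done.  A failure at either stage re-dirties the sectors
 * so that a later iteration retries them.
 */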
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->common.bs;
    int nb_sectors, sectors_per_chunk, nb_chunks;
    int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
    uint64_t delay_ns = 0;
    MirrorOp *op;
    int pnum;
    int64_t ret;

    s->sector_num = hbitmap_iter_next(&s->hbi);
    if (s->sector_num < 0) {
        bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
        s->sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(s->sector_num >= 0);
    }

    hbitmap_next_sector = s->sector_num;
    sector_num = s->sector_num;
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    end = s->bdev_length / BDRV_SECTOR_SIZE;

    /* Extend the QEMUIOVector to include all adjacent blocks that will
     * be copied in this operation.
     *
     * We have to do this if we have no backing file yet in the destination,
     * and the cluster size is very large.  Then we need to do COW ourselves.
     * The first time a cluster is copied, copy it entirely.  Note that,
     * because both the granularity and the cluster size are powers of two,
     * the number of sectors to copy cannot exceed one cluster.
     *
     * We also want to extend the QEMUIOVector to include more adjacent
     * dirty blocks if possible, to limit the number of I/O operations and
     * run efficiently even with a small granularity.
     */
    nb_chunks = 0;
    nb_sectors = 0;
    next_sector = sector_num;
    next_chunk = sector_num / sectors_per_chunk;

    /* Wait for I/O to this cluster (from a previous iteration) to be done. */
    while (test_bit(next_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        qemu_coroutine_yield();
    }
    do {
        int added_sectors, added_chunks;

        if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) ||
            test_bit(next_chunk, s->in_flight_bitmap)) {
            assert(nb_sectors > 0);
            break;
        }

        added_sectors = sectors_per_chunk;
        if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
            bdrv_round_to_clusters(s->target,
                                   next_sector, added_sectors,
                                   &next_sector, &added_sectors);

            /* On the first iteration, the rounding may make us copy
             * sectors before the first dirty one.
             */
            if (next_sector < sector_num) {
                assert(nb_sectors == 0);
                sector_num = next_sector;
                next_chunk = next_sector / sectors_per_chunk;
            }
        }

        added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
        added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;

        /* When doing COW, it may happen that there is not enough space for
         * a full cluster.  Wait if that is the case.
         */
        while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
            trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
            qemu_coroutine_yield();
        }
        if (s->buf_free_count < nb_chunks + added_chunks) {
            trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
            break;
        }

        /* We have enough free space to copy these sectors.  */
        bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);

        nb_sectors += added_sectors;
        nb_chunks += added_chunks;
        next_sector += added_sectors;
        next_chunk += added_chunks;
        if (!s->synced && s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, added_sectors);
        }
    } while (delay_ns == 0 && next_sector < end);
    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_slice_new(MirrorOp);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    next_sector = sector_num;
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = (nb_sectors * BDRV_SECTOR_SIZE) - op->qiov.size;
        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));

        /* Advance the HBitmapIter in parallel, so that we do not examine
         * the same sector twice.
         */
        if (next_sector > hbitmap_next_sector
            && bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
        }

        next_sector += sectors_per_chunk;
    }

    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num, nb_sectors);

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);
    ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                      nb_sectors, &pnum);
    if (ret < 0 || pnum < nb_sectors ||
            (ret & BDRV_BLOCK_DATA && !(ret & BDRV_BLOCK_ZERO))) {
        bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                       mirror_read_complete, op);
    } else if (ret & BDRV_BLOCK_ZERO) {
        bdrv_aio_write_zeroes(s->target, sector_num, op->nb_sectors,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    } else {
        assert(!(ret & BDRV_BLOCK_DATA));
        bdrv_aio_discard(s->target, sector_num, op->nb_sectors,
                         mirror_write_complete, op);
    }
    return delay_ns;
}
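
/* The block-status query above lets the job avoid reading data it can
 * synthesize: ranges known to read as zero become write-zeroes requests on
 * the target (unmapping if allowed), ranges that are neither data nor zero
 * are discarded, and only genuinely allocated data (or ranges whose status
 * is fragmented, pnum < nb_sectors) takes the read/write path.
 */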
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf += granularity;
        buf_size -= granularity;
    }
}
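
/* For example (illustrative numbers): with granularity = 64 KiB and
 * buf_size = 1 MiB, the loop above carves the single allocation into 16
 * chunks, so at most 16 granularity-sized pieces can be in flight before
 * mirror_iteration has to yield waiting for a free buffer.
 */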
static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        qemu_coroutine_yield();
    }
}
typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = s->common.bs;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }
        if (bdrv_get_flags(s->target) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(s->target, bdrv_get_flags(to_replace), NULL);
        }
        bdrv_swap(s->target, to_replace);
        if (s->common.driver->job_type == BLOCK_JOB_TYPE_COMMIT) {
            /* drop the bs loop chain formed by the swap: break the loop then
             * trigger the unref from the top one */
            BlockDriverState *p = s->base->backing_hd;
            bdrv_set_backing_hd(s->base, NULL);
            bdrv_unref(p);
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(s->target);
    block_job_completed(&s->common, data->ret);
    g_free(data);
}
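
/* On successful completion, bdrv_swap() above atomically exchanges the
 * contents of the target and the replaced node, so users of the old
 * BlockDriverState transparently continue on the mirrored copy; the rest of
 * this function is reference-count and op-blocker cleanup.
 */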
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->common.bs;
    int64_t sector_num, end, sectors_per_chunk, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;
    int n;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    } else if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }
    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(s->target, backing_filename,
                              sizeof(backing_filename));
    if (backing_filename[0] && !s->target->backing_hd) {
        ret = bdrv_get_info(s->target, &bdi);
        if (ret < 0) {
            goto immediate_exit;
        }
        if (s->granularity < bdi.cluster_size) {
            s->buf_size = MAX(s->buf_size, bdi.cluster_size);
            s->cow_bitmap = bitmap_new(length);
        }
    }

    end = s->bdev_length / BDRV_SECTOR_SIZE;
    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    mirror_free_init(s);
    if (!s->is_none_mode) {
        /* First part, loop on the sectors and initialize the dirty bitmap. */
        BlockDriverState *base = s->base;
        for (sector_num = 0; sector_num < end; ) {
            int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
            ret = bdrv_is_allocated_above(bs, base,
                                          sector_num, next - sector_num, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1) {
                bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
                sector_num = next;
            } else {
                sector_num += n;
            }
        }
    }
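
    /* The "next" computation above rounds sector_num up to the following
     * chunk boundary.  For example, with sectors_per_chunk = 128 and
     * sector_num = 300: (300 | 127) + 1 = 384, i.e. the initial scan
     * advances in steps aligned to the dirty bitmap's granularity.
     */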
    bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;
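
        /* For example, if 1 GiB has been copied (s->common.offset), 2048
         * sectors are still dirty and 1024 are in flight, the job length is
         * reported as 1 GiB + 3072 * 512 bytes, so the offset/len pair keeps
         * reflecting the remaining work even as the guest dirties the
         * source.
         */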
        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                qemu_coroutine_yield();
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }
= false;
507 if (s
->in_flight
== 0 && cnt
== 0) {
508 trace_mirror_before_flush(s
);
509 ret
= bdrv_flush(s
->target
);
511 if (mirror_error_action(s
, false, -ret
) ==
512 BLOCK_ERROR_ACTION_REPORT
) {
516 /* We're out of the streaming phase. From now on, if the job
517 * is cancelled we will actually complete all pending I/O and
518 * report completion. This way, block-job-cancel will leave
519 * the target in a consistent state.
522 block_job_event_ready(&s
->common
);
526 should_complete
= s
->should_complete
||
527 block_job_is_cancelled(&s
->common
);
528 cnt
= bdrv_get_dirty_count(s
->dirty_bitmap
);
        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_drain(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }
immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
    bdrv_iostatus_disable(s->target);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}
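
/* Completion is deferred to the main loop because mirror_exit reshuffles
 * the BDS graph (bdrv_swap, dropping backing files); doing that directly
 * from the job coroutine inside an AioContext could race with I/O still
 * being processed there.
 */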
static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}
static void mirror_iostatus_reset(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    bdrv_iostatus_reset(s->target);
}
static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    Error *local_err = NULL;
    int ret;

    ret = bdrv_open_backing_file(s->target, NULL, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return;
    }
    if (!s->synced) {
        error_setg(errp, QERR_BLOCK_JOB_NOT_READY,
                   bdrv_get_device_name(job->bs));
        return;
    }

    /* check the target bs is not blocked and block all operations on it */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = check_to_replace_node(s->replaces, &local_err);
        if (!s->to_replace) {
            error_propagate(errp, local_err);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}
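
/* block-job-complete thus only arms should_complete and wakes the job; the
 * actual switch-over happens in mirror_run once the disks are clean, which
 * keeps the QMP command itself non-blocking.
 */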
static const BlockJobDriver mirror_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_MIRROR,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_COMMIT,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};
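
/* Active commit reuses the whole mirror machinery: the two drivers differ
 * only in job_type.  mirror_exit checks for BLOCK_JOB_TYPE_COMMIT to break
 * the backing-chain loop that bdrv_swap would otherwise create when the
 * "target" is the base of the committed image.
 */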
static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
                             const char *replaces,
                             int64_t speed, uint32_t granularity,
                             int64_t buf_size,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert ((granularity & (granularity - 1)) == 0);

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_setg(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->target = target;
    s->is_none_mode = is_none_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = MAX(buf_size, granularity);
    s->unmap = unmap;

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        return;
    }
    bdrv_set_enable_write_cache(s->target, true);
    bdrv_set_on_error(s->target, on_target_error, on_target_error);
    bdrv_iostatus_enable(s->target);
    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}
void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;
    mirror_start_job(bs, target, replaces,
                     speed, granularity, buf_size,
                     on_source_error, on_target_error, unmap, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}
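
/* The supported sync modes map onto two knobs: 'full' pre-dirties every
 * sector allocated anywhere in the chain (base == NULL), 'top' passes
 * bs->backing_hd as base so only sectors allocated in the top image are
 * pre-dirtied, and 'none' sets is_none_mode so mirror_run skips the initial
 * scan and copies only what the guest dirties from now on.
 */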
void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp)
{
    int64_t length, base_length;
    int orig_base_flags;
    int ret;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Unable to determine length of %s", bs->filename);
        goto error_restore_flags;
    }

    base_length = bdrv_getlength(base);
    if (base_length < 0) {
        error_setg_errno(errp, -base_length,
                         "Unable to determine length of %s", base->filename);
        goto error_restore_flags;
    }

    if (length > base_length) {
        ret = bdrv_truncate(base, length);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                             "Top image %s is larger than base image %s, and "
                             "resize of base image failed",
                             bs->filename, base->filename);
            goto error_restore_flags;
        }
    }

    mirror_start_job(bs, base, NULL, speed, 0, 0,
                     on_error, on_error, false, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}