/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Author:
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define MAX_IO_SECTORS ((1 << 20) >> BDRV_SECTOR_BITS) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE \
    (MAX_IN_FLIGHT * MAX_IO_SECTORS * BDRV_SECTOR_SIZE)
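
/* The default buffer therefore covers MAX_IN_FLIGHT (16) operations of
 * MAX_IO_SECTORS (1 MiB of data) each, i.e. 16 MiB in total. */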
/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;
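
/* The QSIMPLEQ_ENTRY above is overlaid on the first bytes of each free
 * granularity-sized chunk of s->buf (see mirror_free_init() below), so the
 * free list needs no allocations of its own. */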
typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *source;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t sectors_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_sectors;
    int max_iov;
    bool initial_zeroing_ongoing;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = DIV_ROUND_UP(op->nb_sectors, sectors_per_chunk);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
        }
    }
    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co);
    }
}
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}
static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
                        0, mirror_write_complete, op);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}
static inline void mirror_clip_sectors(MirrorBlockJob *s,
                                       int64_t sector_num,
                                       int *nb_sectors)
{
    *nb_sectors = MIN(*nb_sectors,
                      s->bdev_length / BDRV_SECTOR_SIZE - sector_num);
}
/* Round sector_num and/or nb_sectors to target cluster if COW is needed, and
 * return the offset of the adjusted tail sector against original. */
static int mirror_cow_align(MirrorBlockJob *s,
                            int64_t *sector_num,
                            int *nb_sectors)
{
    bool need_cow;
    int ret = 0;
    int chunk_sectors = s->granularity >> BDRV_SECTOR_BITS;
    int64_t align_sector_num = *sector_num;
    int align_nb_sectors = *nb_sectors;
    int max_sectors = chunk_sectors * s->max_iov;

    need_cow = !test_bit(*sector_num / chunk_sectors, s->cow_bitmap);
    need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_sectors_to_clusters(blk_bs(s->target), *sector_num,
                                       *nb_sectors, &align_sector_num,
                                       &align_nb_sectors);
    }

    if (align_nb_sectors > max_sectors) {
        align_nb_sectors = max_sectors;
        if (need_cow) {
            align_nb_sectors = QEMU_ALIGN_DOWN(align_nb_sectors,
                                               s->target_cluster_sectors);
        }
    }
    /* Clipping may result in align_nb_sectors unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    mirror_clip_sectors(s, align_sector_num, &align_nb_sectors);

    ret = align_sector_num + align_nb_sectors - (*sector_num + *nb_sectors);
    *sector_num = align_sector_num;
    *nb_sectors = align_nb_sectors;
    assert(ret >= 0);
    return ret;
}
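
/* Worked example (illustrative only): with 32 KiB chunks and a 64 KiB target
 * cluster, a request for [64 KiB, 96 KiB) whose tail chunk still needs COW is
 * widened to the whole cluster [64 KiB, 128 KiB); the function then returns
 * (128 KiB - 96 KiB) / 512 = 64, the number of extra tail sectors the caller
 * has to account for when advancing its cursor. */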
static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}
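
/* The matching wakeup is in mirror_iteration_done(): when an in-flight
 * operation completes while waiting_for_io is set, it re-enters s->common.co,
 * resuming execution right after the yield above. */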
/* Submit async read while handling COW.
 * Returns: The number of sectors copied after and including sector_num,
 *          excluding any sectors copied prior to sector_num due to alignment.
 *          This will be nb_sectors if no alignment is necessary, or
 *          (new_end - sector_num) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
                          int nb_sectors)
{
    BlockBackend *source = s->common.blk;
    int sectors_per_chunk, nb_chunks;
    int ret;
    MirrorOp *op;
    int max_sectors;

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    max_sectors = sectors_per_chunk * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
    nb_sectors = MIN(max_sectors, nb_sectors);
    assert(nb_sectors);
    ret = nb_sectors;

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &sector_num, &nb_sectors);
    }
    assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
    /* The sector range must meet granularity because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(!(sector_num % sectors_per_chunk));
    nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback. */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster. */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, 0,
                   mirror_read_complete, op);
    return ret;
}
static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t sector_num,
                                      int nb_sectors,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    if (is_discard) {
        blk_aio_pdiscard(s->target, sector_num << BDRV_SECTOR_BITS,
                         op->nb_sectors << BDRV_SECTOR_BITS,
                         mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, sector_num * BDRV_SECTOR_SIZE,
                              op->nb_sectors * BDRV_SECTOR_SIZE,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->source;
    int64_t sector_num, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    int64_t end = s->bdev_length / BDRV_SECTOR_SIZE;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_sectors = MAX((s->buf_size >> BDRV_SECTOR_BITS) / MAX_IN_FLIGHT,
                             MAX_IO_SECTORS);

    sector_num = bdrv_dirty_iter_next(s->dbi);
    if (sector_num < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        sector_num = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(sector_num >= 0);
    }

    first_chunk = sector_num / sectors_per_chunk;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    block_job_pause_point(&s->common);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
        int64_t next_dirty;
        int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk;
        int64_t next_chunk = next_sector / sectors_per_chunk;
        if (next_sector >= end ||
            !bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_sector || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_sector);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_sector);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num,
                            nb_chunks * sectors_per_chunk);
    bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
    while (nb_chunks > 0 && sector_num < end) {
        int64_t ret;
        int io_sectors, io_sectors_acct;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(sector_num % sectors_per_chunk));
        ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        if (ret < 0) {
            io_sectors = MIN(nb_chunks * sectors_per_chunk, max_io_sectors);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_sectors = MIN(io_sectors, max_io_sectors);
        }

        io_sectors -= io_sectors % sectors_per_chunk;
        if (io_sectors < sectors_per_chunk) {
            io_sectors = sectors_per_chunk;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_sector_num;
            int target_nb_sectors;
            bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num,
                                           io_sectors, &target_sector_num,
                                           &target_nb_sectors);
            if (target_sector_num == sector_num &&
                target_nb_sectors == io_sectors) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
            mirror_wait_for_io(s);
        }

        if (s->ret < 0) {
            return 0;
        }

        mirror_clip_sectors(s, sector_num, &io_sectors);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_sectors = mirror_do_read(s, sector_num, io_sectors);
            io_sectors_acct = io_sectors;
            break;
        case MIRROR_METHOD_ZERO:
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, sector_num, io_sectors,
                                      mirror_method == MIRROR_METHOD_DISCARD);
            if (write_zeroes_ok) {
                io_sectors_acct = 0;
            } else {
                io_sectors_acct = io_sectors;
            }
            break;
        default:
            abort();
        }
        assert(io_sectors);
        sector_num += io_sectors;
        nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
        if (s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, io_sectors_acct);
        }
    }
    return delay_ns;
}
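
/* The delay_ns computed above is returned to mirror_run(), which feeds it to
 * block_job_sleep_ns() so that rate limiting is applied between iterations
 * rather than inside the copy loop. */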
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}
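
/* For instance, a 16 MiB buffer split at a 64 KiB granularity yields a free
 * list of 256 chunks (buf_size / granularity). */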
/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}
typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    /* Make sure that the source BDS doesn't go away before we called
     * block_job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions. */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(job);
    bdrv_child_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(job->blk);
    blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(job->blk, mirror_top_bs, &error_abort);

    block_job_completed(&s->common, data->ret);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}
static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > SLICE_TIME) {
        s->last_pause_ns = now;
        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
    } else {
        block_job_pause_point(&s->common);
    }
}
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t sector_num, end;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret, n;

    end = s->bdev_length / BDRV_SECTOR_SIZE;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, end);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (sector_num = 0; sector_num < end; ) {
            int nb_sectors = MIN(end - sector_num,
                QEMU_ALIGN_DOWN(INT_MAX, s->granularity) >> BDRV_SECTOR_BITS);

            mirror_throttle(s);

            if (block_job_is_cancelled(&s->common)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, -1);
                mirror_wait_for_io(s);
                continue;
            }

            mirror_do_zero_or_discard(s, sector_num, nb_sectors, false);
            sector_num += nb_sectors;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap. */
    for (sector_num = 0; sector_num < end; ) {
        /* Just to make sure we are not exceeding int limit. */
        int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                             end - sector_num);

        mirror_throttle(s);

        if (block_job_is_cancelled(&s->common)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);
        if (ret < 0) {
            return ret;
        }

        assert(n > 0);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
        }
        sector_num += n;
    }
    return 0;
}
/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;
    int target_cluster_size = BDRV_SECTOR_SIZE;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        target_cluster_size = bdi.cluster_size;
    }
    if (backing_filename[0] && !target_bs->backing
        && s->granularity < target_cluster_size) {
        s->buf_size = MAX(s->buf_size, target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->target_cluster_sectors = target_cluster_size >> BDRV_SECTOR_BITS;
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || block_job_is_cancelled(&s->common)) {
            goto immediate_exit;
        }
    }

    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap, 0);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        block_job_pause_point(&s->common);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                block_job_event_ready(&s->common);
                s->synced = true;
            }

            should_complete = s->should_complete ||
                block_job_is_cancelled(&s->common);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}
static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}
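
/* Note that the job speed is given in bytes per second, while the rate
 * limiter accounts in sectors, hence the division by BDRV_SECTOR_SIZE
 * above. */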
static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}
static void mirror_pause(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    mirror_wait_for_all_io(s);
}
static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}
static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}
static const BlockJobDriver mirror_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_MIRROR,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_COMMIT,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};
static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}
static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
}
static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    return bdrv_co_flush(bs->backing->bs);
}
static int64_t coroutine_fn bdrv_mirror_top_get_block_status(
    BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
    BlockDriverState **file)
{
    *pnum = nb_sectors;
    *file = bs->backing->bs;
    return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID | BDRV_BLOCK_DATA |
           (sector_num << BDRV_SECTOR_BITS);
}
static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags)
{
    return bdrv_co_pwrite_zeroes(bs->backing, offset, count, flags);
}
static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int count)
{
    return bdrv_co_pdiscard(bs->backing->bs, offset, count);
}
static void bdrv_mirror_top_close(BlockDriverState *bs)
{
}
static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}
/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_co_get_block_status   = bdrv_mirror_top_get_block_status,
    .bdrv_close                 = bdrv_mirror_top_close,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
};
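
/* Every I/O callback above is a plain pass-through to bs->backing; the filter
 * exists for the permission behavior described above (see
 * bdrv_mirror_top_child_perm), not to transform any data. */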
static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name)
{
    MirrorBlockJob *s;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return;
    }
    mirror_top_bs->total_sectors = bs->total_sectors;

    /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
     * it alive until block_job_create() even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    bdrv_unref(mirror_top_bs);
    if (!s) {
        goto fail;
    }
    /* The block job now has a reference to this node */
    s->mirror_top_bs = mirror_top_bs;
    s->source = bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there.*/
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other options would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }
    }

    trace_mirror_start(bs, s, opaque);
    block_job_start(&s->common);
    return;

fail:
    if (s) {
        g_free(s->replaces);
        blk_unref(s->target);
        block_job_unref(&s->common);
    }

    bdrv_child_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
}
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, NULL, NULL, errp,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name);
}
void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         const char *filter_node_name,
                         BlockCompletionFunc *cb, void *opaque, Error **errp,
                         bool auto_complete)
{
    int orig_base_flags;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, true, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}