/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
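/* With these defaults the background copy keeps at most MAX_IN_FLIGHT (16)
 * operations of up to MAX_IO_BYTES (1 MiB) each in flight, so
 * DEFAULT_MIRROR_BUF_SIZE works out to 16 MiB of buffer space. */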
/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;
typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    bool zero_target;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;
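/* Illustrative example of the chunk bookkeeping above (values assumed, not
 * taken from this file): with granularity = 64 KiB, a request covering
 * [96 KiB, 160 KiB) spans chunks 1 and 2, computed as offset / granularity
 * and DIV_ROUND_UP(offset + bytes, granularity); those are the bits that
 * in_flight_bitmap, cow_bitmap and the dirty bitmap track. */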
typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    CoQueue waiting_requests;

    QTAILQ_ENTRY(MirrorOp) next;
};
typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}
static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                qemu_co_queue_wait(&op->waiting_requests, NULL);
                break;
            }
        }
    }
}
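/* Example with assumed values: at granularity 64 KiB, a request for
 * [0, 64 KiB) occupies chunk 0 only, so it does not conflict with an
 * in-flight op on [64 KiB, 192 KiB) (chunks 1-2); ranges_overlap()
 * returns false and no wait is needed. */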
static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}
static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}
static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}
/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}
/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;

    return ret;
}
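/* Worked example (assumed values): granularity 64 KiB, target cluster size
 * 256 KiB, and a 64 KiB request at offset 64 KiB whose chunks are not yet
 * set in cow_bitmap.  bdrv_round_to_clusters() widens the request to
 * [0, 256 KiB), so the returned tail adjustment is
 * (0 + 256 KiB) - (64 KiB + 64 KiB) = 128 KiB, which mirror_co_read()
 * adds to *op->bytes_handled. */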
static inline void coroutine_fn
mirror_wait_for_any_operation(MirrorBlockJob *s, MirrorOp *self, bool active)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        if (self == op) {
            continue;
        }
        /* Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function. Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on. */
        if (!op->is_pseudo_op && op->is_active_write == active) {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}
static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s, MirrorOp *self)
{
    /* Only non-active operations use up in-flight slots */
    mirror_wait_for_any_operation(s, self, false);
}
/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment.  This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s, op);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                         &op->qiov, 0);
    mirror_read_complete(op, ret);
}
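/* The read path above borrows granularity-sized chunks from s->buf_free
 * instead of allocating per request: this bounds memory use at s->buf_size,
 * and an exhausted buffer naturally throttles the job via the wait loop. */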
static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}
static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}
static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s              = s,
        .offset         = offset,
        .bytes          = bytes,
        .bytes_handled  = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_read()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}
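/* Note the handshake with the op coroutines: bytes_handled lives on this
 * function's stack, and every mirror_co_*() stores through op->bytes_handled
 * before its first yield, so the value is guaranteed to be set once
 * qemu_coroutine_enter() returns, even though op itself may already have
 * been freed by mirror_iteration_done(). */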
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    uint64_t delay_ns = 0, ret = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it.  mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset         = offset,
        .bytes          = nb_chunks * s->granularity,
        .is_pseudo_op   = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s, pseudo_op);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }

    ret = delay_ns;
fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);

    return ret;
}
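/* Summary of one mirror_iteration() pass: pick the next dirty chunk from
 * the iterator, extend it with consecutive dirty chunks up to buf_size,
 * clear those dirty bits, claim the area with a pseudo op plus
 * in_flight_bitmap, then issue copy/zero/discard operations for the
 * sub-ranges reported by bdrv_block_status_above(). */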
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}
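/* Example with the defaults above: a 16 MiB buffer at an assumed 64 KiB
 * granularity is carved into 256 MirrorBuffer chunks, so buf_free_count
 * starts at 256. */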
/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s, NULL);
    }
}
/**
 * mirror_exit_common: handle both abort() and prepare() cases.
 * for .prepare, returns 0 on success and -errno on failure.
 * for .abort cases, denoted by abort = true, MUST return 0.
 */
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /*
     * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(bjob->blk);
    blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    return ret;
}
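/* After mirror_exit_common() the graph transition is complete: on a
 * successful completion the target (or s->to_replace) has taken the
 * source's place, and in all cases the mirror_top filter has been dropped
 * from the chain. */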
static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}
static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (s->zero_target) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s, NULL);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap.  */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, false, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}
/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}
static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, false,
                               PREALLOC_MODE_OFF, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        s->synced = true;
        s->actively_synced = true;
        while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        s->common.job.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !target_bs->backing &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        /* Do not start passive operations while there are active
         * writes in progress */
        while (s->in_active_write_counter) {
            mirror_wait_for_any_operation(s, NULL, true);
        }

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s, NULL);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                s->synced = true;
                if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
                    s->actively_synced = true;
                }
            }

            should_complete = s->should_complete ||
                job_is_cancelled(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            s->in_drain = true;
            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.job.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;

        if (s->synced && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        if (job_is_cancelled(&s->common.job) &&
            (!s->synced || s->common.job.force_cancel))
        {
            break;
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) &&
               job_is_cancelled(&s->common.job)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    if (need_drain) {
        s->in_drain = true;
        bdrv_drained_begin(bs);
    }

    return ret;
}
static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    job_enter(job);
}
static void coroutine_fn mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}
static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* If the job isn't paused nor cancelled, we can't be sure that it won't
     * issue more requests. We make an exception if we've reached this point
     * from one of our own drain sections, to avoid a deadlock waiting for
     * ourselves.
     */
    if (!s->common.job.paused && !s->common.job.cancelled && !s->in_drain) {
        return true;
    }

    return !!s->in_flight;
}
static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .drained_poll           = mirror_drained_poll,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .drained_poll           = mirror_drained_poll,
};
static void coroutine_fn
do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                     uint64_t offset, uint64_t bytes,
                     QEMUIOVector *qiov, int flags)
{
    int ret;
    size_t qiov_offset = 0;
    int64_t bitmap_offset, bitmap_end;

    if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
    {
        /*
         * Dirty unaligned padding: ignore it.
         *
         * Reasoning:
         * 1. If we copy it, we can't reset corresponding bit in
         *    dirty_bitmap as there may be some "dirty" bytes still not
         *    copied.
         * 2. It's already dirty, so skipping it we don't diverge mirror
         *    progress.
         *
         * Note, that because of this, guest write may have no contribution
         * into mirror converge, but that's not bad, as we have background
         * process of mirroring. If under some bad circumstances (high guest
         * IO load) background process starve, we will not converge anyway,
         * even if each write will contribute, as guest is not guaranteed to
         * rewrite the whole disk.
         */
        qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
        if (bytes <= qiov_offset) {
            /* nothing to do after shrink */
            return;
        }
        offset += qiov_offset;
        bytes -= qiov_offset;
    }

    if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
    {
        uint64_t tail = (offset + bytes) % job->granularity;

        if (bytes <= tail) {
            /* nothing to do after shrink */
            return;
        }
        bytes -= tail;
    }

    /*
     * Tails are either clean or shrunk, so for bitmap resetting
     * we safely align the range down.
     */
    bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
    bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
    if (bitmap_offset < bitmap_end) {
        bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                                bitmap_end - bitmap_offset);
    }

    job_progress_increase_remaining(&job->common.job, bytes);

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = blk_co_pwritev_part(job->target, offset, bytes,
                                  qiov, qiov_offset, flags);
        break;

    case MIRROR_METHOD_ZERO:
        assert(!qiov);
        ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        assert(!qiov);
        ret = blk_co_pdiscard(job->target, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret >= 0) {
        job_progress_update(&job->common.job, bytes);
    } else {
        BlockErrorAction action;

        /*
         * We failed, so we should mark dirty the whole area, aligned up.
         * Note that we don't care about shrunk tails if any: they were dirty
         * at function start, and they must be still dirty, as we've locked
         * the region for in-flight op.
         */
        bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
        bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
        bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                              bitmap_end - bitmap_offset);
        job->actively_synced = false;

        action = mirror_error_action(job, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT) {
            if (job->ret >= 0) {
                job->ret = ret;
            }
        }
    }
}
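/* Example with assumed values: granularity 64 KiB, guest write
 * [48 KiB, 80 KiB), chunk 0 dirty, chunk 1 clean.  The unaligned head
 * [48 KiB, 64 KiB) is skipped (qiov_offset = 16 KiB) and only
 * [64 KiB, 80 KiB) is written to the target; no dirty bits are reset,
 * because the fully covered aligned range is empty
 * (bitmap_offset == bitmap_end == 64 KiB). */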
static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
                                                   uint64_t offset,
                                                   uint64_t bytes)
{
    MirrorOp *op;
    uint64_t start_chunk = offset / s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s                  = s,
        .offset             = offset,
        .bytes              = bytes,
        .is_active_write    = true,
    };
    qemu_co_queue_init(&op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);

    s->in_active_write_counter++;

    mirror_wait_on_conflicts(op, s, offset, bytes);

    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);

    return op;
}
static void coroutine_fn active_write_settle(MirrorOp *op)
{
    uint64_t start_chunk = op->offset / op->s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
                                      op->s->granularity);

    if (!--op->s->in_active_write_counter && op->s->actively_synced) {
        BdrvChild *source = op->s->mirror_top_bs->backing;

        if (QLIST_FIRST(&source->bs->parents) == source &&
            QLIST_NEXT(source, next_parent) == NULL)
        {
            /* Assert that we are back in sync once all active write
             * operations are settled.
             * Note that we can only assert this if the mirror node
             * is the source node's only parent. */
            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
        }
    }
    bitmap_clear(op->s->in_flight_bitmap, start_chunk,
                 end_chunk - start_chunk);
    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}
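/* active_write_prepare() and active_write_settle() bracket every
 * write-blocking guest request: prepare claims the chunks and waits out
 * conflicting background operations, settle releases the chunks and wakes
 * any waiters. */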
static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}
static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs,
    MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
    int flags)
{
    MirrorOp *op = NULL;
    MirrorBDSOpaque *s = bs->opaque;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        op = active_write_prepare(s->job, offset, bytes);
    }

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
        break;

    case MIRROR_METHOD_ZERO:
        ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret < 0) {
        goto out;
    }

    if (copy_to_target) {
        do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
    }

out:
    if (copy_to_target) {
        active_write_settle(op);
    }
    return ret;
}
static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    MirrorBDSOpaque *s = bs->opaque;
    QEMUIOVector bounce_qiov;
    void *bounce_buf;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        /* The guest might concurrently modify the data to write; but
         * the data on source and destination must match, so we have
         * to use a bounce buffer if we are going to write to the
         * target now. */
        bounce_buf = qemu_blockalign(bs, bytes);
        iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);

        qemu_iovec_init(&bounce_qiov, 1);
        qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
        qiov = &bounce_qiov;
    }

    ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
                                   flags);

    if (copy_to_target) {
        qemu_iovec_destroy(&bounce_qiov);
        qemu_vfree(bounce_buf);
    }

    return ret;
}
static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}
static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes,
                                    NULL, flags);
}
static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
                                    NULL, 0);
}
static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}
static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s->stop) {
        /*
         * If the job is to be stopped, we do not need to forward
         * anything to the real image.
         */
        *nperm = 0;
        *nshared = BLK_PERM_ALL;
        return;
    }

    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}
/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_co_block_status       = bdrv_co_block_status_from_backing,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
};
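/* All I/O callbacks of this filter simply forward to bs->backing (the real
 * source node); writes additionally pass through bdrv_mirror_top_do_write()
 * so that write-blocking mode can mirror them to the target synchronously. */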
static BlockJob *mirror_start_job(
                             const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             bool zero_target,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror, MirrorCopyMode copy_mode,
                             Error **errp)
{
    MirrorBlockJob *s;
    MirrorBDSOpaque *bs_opaque;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return NULL;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    if (bs == target) {
        error_setg(errp, "Can't mirror node into itself");
        return NULL;
    }

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return NULL;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }

    /* So that we can always drop this node */
    mirror_top_bs->never_freeze = true;

    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
                                          BDRV_REQ_NO_FALLBACK;
    bs_opaque = g_new0(MirrorBDSOpaque, 1);
    mirror_top_bs->opaque = bs_opaque;

    /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
     * it alive until block_job_create() succeeds even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return NULL;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    bs_opaque->job = s;

    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there.*/
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(s->common.job.aio_context,
                        BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be a NBD server of target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB.  When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }
    blk_set_allow_aio_context_change(s->target, true);
    blk_set_disable_request_queuing(s->target, true);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->zero_target = zero_target;
    s->copy_mode = copy_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }
    if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
        bdrv_disable_dirty_bitmap(s->dirty_bitmap);
    }

    ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                             BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                             BLK_PERM_CONSISTENT_READ,
                             errp);
    if (ret < 0) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other options would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }

        if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
            goto fail;
        }
    }

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);

    return &s->common;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        bs_opaque->job = NULL;
        if (s->dirty_bitmap) {
            bdrv_release_dirty_bitmap(s->dirty_bitmap);
        }
        job_early_fail(&s->common.job);
    }

    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    bdrv_unref(mirror_top_bs);

    return NULL;
}
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int creation_flags, int64_t speed,
                  uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  bool zero_target,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
        (mode == MIRROR_SYNC_MODE_BITMAP)) {
        error_setg(errp, "Sync mode '%s' not supported",
                   MirrorSyncMode_str(mode));
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, creation_flags, target, replaces,
                     speed, granularity, buf_size, backing_mode, zero_target,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, copy_mode, errp);
}
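/* mirror_start() is the entry point used by the QMP drive-mirror and
 * blockdev-mirror commands; commit_active_start() below reuses the same
 * machinery for active block-commit by mirroring the top image into its
 * base. */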
BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
                              BlockDriverState *base, int creation_flags,
                              int64_t speed, BlockdevOnError on_error,
                              const char *filter_node_name,
                              BlockCompletionFunc *cb, void *opaque,
                              bool auto_complete, Error **errp)
{
    bool base_read_only;
    Error *local_err = NULL;
    BlockJob *ret;

    base_read_only = bdrv_is_read_only(base);

    if (base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
            return NULL;
        }
    }

    ret = mirror_start_job(
                     job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN, false,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
                     &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return ret;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    if (base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    return NULL;
}
);