/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
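
/* With these defaults the mirror buffer is MAX_IN_FLIGHT * MAX_IO_BYTES
 * = 16 * 1 MiB = 16 MiB: one maximum-sized request per in-flight slot. */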
/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;
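
/* The free list nodes are overlaid on the job's data buffer itself: while a
 * granularity-sized chunk is free, its first bytes hold the MirrorBuffer
 * link (see mirror_free_init()), so no separate allocation is needed. */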
typedef struct MirrorOp MirrorOp;
typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *source;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(MirrorOpList, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
} MirrorBlockJob;
typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    CoQueue waiting_requests;

    QTAILQ_ENTRY(MirrorOp) next;
} MirrorOp;
typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}
static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}
static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}
static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        ret = blk_co_pwritev(s->target, op->offset,
                             op->qiov.size, &op->qiov, 0);
        mirror_write_complete(op, ret);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}
/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}
/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}
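
/* Worked example, with assumed values: granularity = 64 KiB,
 * target_cluster_size = 1 MiB, *offset = 1 MiB + 64 KiB, *bytes = 64 KiB,
 * and no surrounding chunk copied yet.  bdrv_round_to_clusters() widens
 * the request to align_offset = 1 MiB, align_bytes = 1 MiB, and the
 * function returns 2 MiB - (1 MiB + 128 KiB) = 896 KiB of extra bytes
 * handled past the original tail. */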
static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    MirrorOp *op;

    op = QTAILQ_FIRST(&s->ops_in_flight);
    assert(op);
    qemu_co_queue_wait(&op->waiting_requests, NULL);
}
/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment.  This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    BlockBackend *source = s->common.blk;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = blk_co_preadv(source, op->offset, op->bytes, &op->qiov, 0);
    mirror_read_complete(op, ret);
}
static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}
static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}
static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s              = s,
        .offset         = offset,
        .bytes          = bytes,
        .bytes_handled  = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_read()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}
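
/* Although the I/O itself completes later in the spawned coroutine,
 * mirror_perform() returns synchronously: bytes_handled lives on this
 * stack frame and is filled in before the coroutine first yields, so a
 * caller may advance immediately, as in mirror_iteration():
 *
 *     io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
 *     offset += io_bytes;
 */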
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->source;
    int64_t offset, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    first_chunk = offset / s->granularity;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_io(s);
        }

        if (s->ret < 0) {
            return 0;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }
    return delay_ns;
}
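
/* One iteration in summary: (1) pick the next dirty chunk from the bitmap
 * iterator, waiting for any overlapping in-flight operation; (2) extend it
 * with consecutive dirty chunks up to buf_size; (3) clear the dirty bits
 * and mark the range in flight; (4) split the range by block status into
 * copy, zero or discard operations, throttled by MAX_IN_FLIGHT and the
 * job's rate limiter. */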
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}
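
/* For example, with the default 16 MiB buffer and an assumed 64 KiB
 * granularity this yields 16 MiB / 64 KiB = 256 free chunks, so
 * buf_free_count starts at 256 and a single read operation can consume at
 * most buf_size / granularity chunks. */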
/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}
typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(Job *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    bdrv_release_dirty_bitmap(src, s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away before we called
     * job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     *
     * Note that blk_unref() alone doesn't necessarily drop permissions because
     * we might be running nested inside mirror_drain(), which takes an extra
     * reference, so use an explicit blk_set_perm() first. */
    blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(bjob);
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(bjob->blk);
    blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);

    job_completed(job, data->ret, NULL);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}
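
/* Sketch of the graph transformation above for a plain mirror that
 * completes successfully (to_replace defaults to the source):
 *
 *   before:  parents --> mirror_top_bs --> source          target_bs
 *   after:   parents --> target_bs
 *
 * On failure only the filter is removed, leaving the source in place. */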
static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_io(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap.  */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}
/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, PREALLOC_MODE_OFF,
                               NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        s->synced = true;
        while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        s->common.job.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !target_bs->backing &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                s->synced = true;
            }

            should_complete = s->should_complete ||
                job_is_cancelled(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.job.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;

        if (s->synced && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        if (job_is_cancelled(&s->common.job) &&
            (!s->synced || s->common.job.force_cancel))
        {
            break;
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) &&
               job_is_cancelled(&s->common.job)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }

    job_defer_to_main_loop(&s->common.job, mirror_exit, data);
}
static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    job_enter(job);
}
static void mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}
static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    return !!s->in_flight;
}
static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}
static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}
static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .start                  = mirror_run,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .drained_poll           = mirror_drained_poll,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};
static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .start                  = mirror_run,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .drained_poll           = mirror_drained_poll,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};
static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}
static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
}
static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}
static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    return bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
}
static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes)
{
    return bdrv_co_pdiscard(bs->backing->bs, offset, bytes);
}
static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    bdrv_refresh_filename(bs->backing->bs);
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}
static void bdrv_mirror_top_close(BlockDriverState *bs)
{
}
static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}
/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_co_block_status       = bdrv_co_block_status_from_backing,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_close                 = bdrv_mirror_top_close,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
};
static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror,
                             Error **errp)
{
    MirrorBlockJob *s;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }
    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED;
    bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));

    /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
     * it alive until block_job_create() succeeds even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be a NBD server of target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB. When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->unmap = unmap;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other options would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }
    }

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);
    return;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        job_early_fail(&s->common.job);
    }

    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    bdrv_unref(mirror_top_bs);
}
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, JOB_DEFAULT, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, errp);
}
void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         const char *filter_node_name,
                         BlockCompletionFunc *cb, void *opaque,
                         bool auto_complete, Error **errp)
{
    int orig_base_flags;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}