#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
+#include "block/dirty-bitmap.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
-#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#include "qemu/memalign.h"
BlockMirrorBackingMode backing_mode;
/* Whether the target image requires explicit zero-initialization */
bool zero_target;
+ /*
+ * To be accessed with atomics. Written only under the BQL (required by the
+ * current implementation of mirror_change()).
+ */
MirrorCopyMode copy_mode;
BlockdevOnError on_source_error, on_target_error;
- /* Set when the target is synced (dirty bitmap is clean, nothing
- * in flight) and the job is running in active mode */
+ /*
+ * To be accessed with atomics.
+ *
+ * Set when the target is synced (dirty bitmap is clean, nothing in flight)
+ * and the job is running in active mode.
+ */
bool actively_synced;
bool should_complete;
int64_t granularity;
int max_iov;
bool initial_zeroing_ongoing;
int in_active_write_counter;
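+ /* Bytes of active writes currently in flight; included in the job's remaining progress */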
+ int64_t active_write_bytes_in_flight;
bool prepared;
bool in_drain;
} MirrorBlockJob;
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
int error)
{
- s->actively_synced = false;
+ qatomic_set(&s->actively_synced, false);
if (read) {
return block_job_error_action(&s->common, s->on_source_error,
true, error);
/* Round offset and/or bytes to target cluster if COW is needed, and
* return the offset of the adjusted tail against the original. */
-static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
- uint64_t *bytes)
+static int coroutine_fn mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
+ uint64_t *bytes)
{
bool need_cow;
int ret = 0;
need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
s->cow_bitmap);
if (need_cow) {
- bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
- &align_offset, &align_bytes);
+ bdrv_round_to_subclusters(blk_bs(s->target), *offset, *bytes,
+ &align_offset, &align_bytes);
}
if (align_bytes > max_bytes) {
}
static inline void coroutine_fn
-mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
+mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
MirrorOp *op;
QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
- /* Do not wait on pseudo ops, because it may in turn wait on
+ /*
+ * Do not wait on pseudo ops, because they may in turn wait on
* some other operation to start, which may in fact be the
* caller of this function. Since there is only one pseudo op
* at any given time, we will always find some real operation
- * to wait on. */
- if (!op->is_pseudo_op && op->is_in_flight &&
- op->is_active_write == active)
- {
+ * to wait on.
+ * Also, do not wait on active operations, because they do not
+ * use up in-flight slots.
+ */
+ if (!op->is_pseudo_op && op->is_in_flight && !op->is_active_write) {
qemu_co_queue_wait(&op->waiting_requests, NULL);
return;
}
abort();
}
-static inline void coroutine_fn
-mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
-{
- /* Only non-active operations use up in-flight slots */
- mirror_wait_for_any_operation(s, false);
-}
-
/* Perform a mirror copy operation.
*
* *op->bytes_handled is set to the number of bytes copied after and
op->is_in_flight = true;
trace_mirror_one_iteration(s, op->offset, op->bytes);
- ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
- &op->qiov, 0);
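+ /* Dereferencing the backing child requires holding the block-graph reader lock */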
+ WITH_GRAPH_RDLOCK_GUARD() {
+ ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
+ &op->qiov, 0);
+ }
mirror_read_complete(op, ret);
}
return bytes_handled;
}
-static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
+static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
BlockDriverState *source = s->mirror_top_bs->backing->bs;
MirrorOp *pseudo_op;
int64_t offset;
- uint64_t delay_ns = 0, ret = 0;
/* At least the first dirty chunk is mirrored in one iteration. */
int nb_chunks = 1;
bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
}
bdrv_dirty_bitmap_unlock(s->dirty_bitmap);
+ /*
+ * Wait for concurrent requests to @offset. The next loop will limit the
+ * copied area based on in_flight_bitmap so we only copy an area that does
+ * not overlap with concurrent in-flight requests. Still, we would like to
+ * copy something, so wait until there are at least no more requests to the
+ * very beginning of the area.
+ */
mirror_wait_on_conflicts(NULL, s, offset, 1);
job_pause_point(&s->common.job);
- /* Find the number of consective dirty chunks following the first dirty
+ /* Find the number of consecutive dirty chunks following the first dirty
* one, and wait for in flight requests in them. */
bdrv_dirty_bitmap_lock(s->dirty_bitmap);
while (nb_chunks * s->granularity < s->buf_size) {
MirrorMethod mirror_method = MIRROR_METHOD_COPY;
assert(!(offset % s->granularity));
- ret = bdrv_block_status_above(source, NULL, offset,
- nb_chunks * s->granularity,
- &io_bytes, NULL, NULL);
+ WITH_GRAPH_RDLOCK_GUARD() {
+ ret = bdrv_co_block_status_above(source, NULL, offset,
+ nb_chunks * s->granularity,
+ &io_bytes, NULL, NULL);
+ }
if (ret < 0) {
io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
} else if (ret & BDRV_BLOCK_DATA) {
} else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
int64_t target_offset;
int64_t target_bytes;
- bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
- &target_offset, &target_bytes);
+ WITH_GRAPH_RDLOCK_GUARD() {
+ bdrv_round_to_subclusters(blk_bs(s->target), offset, io_bytes,
+ &target_offset, &target_bytes);
+ }
if (target_offset == offset &&
target_bytes == io_bytes) {
mirror_method = ret & BDRV_BLOCK_ZERO ?
assert(io_bytes);
offset += io_bytes;
nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
- delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
+ block_job_ratelimit_processed_bytes(&s->common, io_bytes_acct);
}
- ret = delay_ns;
fail:
QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
g_free(pseudo_op);
-
- return ret;
}
static void mirror_free_init(MirrorBlockJob *s)
bool abort = job->ret < 0;
int ret = 0;
+ GLOBAL_STATE_CODE();
+
if (s->prepared) {
return 0;
}
s->prepared = true;
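+ /* Hold the main-context lock for the bdrv_* calls below; released again before returning */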
+ aio_context_acquire(qemu_get_aio_context());
+
mirror_top_bs = s->mirror_top_bs;
bs_opaque = mirror_top_bs->opaque;
src = mirror_top_bs->backing->bs;
* mirror_top_bs from now on, so keep it drained. */
bdrv_drained_begin(mirror_top_bs);
bs_opaque->stop = true;
+
+ bdrv_graph_rdlock_main_loop();
bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
&error_abort);
+ bdrv_graph_rdunlock_main_loop();
+
if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
BlockDriverState *backing = s->is_none_mode ? src : s->base;
BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs);
* Cannot use check_to_replace_node() here, because that would
* check for an op blocker on @to_replace, and we have our own
* there.
+ *
+ * TODO Pull out the writer lock from bdrv_replace_node() to here
*/
+ bdrv_graph_rdlock_main_loop();
if (bdrv_recurse_can_replace(src, to_replace)) {
bdrv_replace_node(to_replace, target_bs, &local_err);
} else {
"would not lead to an abrupt change of visible data",
to_replace->node_name, target_bs->node_name);
}
+ bdrv_graph_rdunlock_main_loop();
bdrv_drained_end(target_bs);
if (local_err) {
error_report_err(local_err);
bdrv_unref(mirror_top_bs);
bdrv_unref(src);
+ aio_context_release(qemu_get_aio_context());
+
return ret;
}
return 0;
}
- ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset, bytes,
- &count);
+ WITH_GRAPH_RDLOCK_GUARD() {
+ ret = bdrv_co_is_allocated_above(bs, s->base_overlay, true, offset,
+ bytes, &count);
+ }
if (ret < 0) {
return ret;
}
/* Called when going out of the streaming phase to flush the bulk of the
* data to the medium, or just before completing.
*/
-static int mirror_flush(MirrorBlockJob *s)
+static int coroutine_fn mirror_flush(MirrorBlockJob *s)
{
- int ret = blk_flush(s->target);
+ int ret = blk_co_flush(s->target);
if (ret < 0) {
if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
s->ret = ret;
{
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
BlockDriverState *bs = s->mirror_top_bs->backing->bs;
+ MirrorBDSOpaque *mirror_top_opaque = s->mirror_top_bs->opaque;
BlockDriverState *target_bs = blk_bs(s->target);
bool need_drain = true;
BlockDeviceIoStatus iostatus;
goto immediate_exit;
}
- s->bdev_length = bdrv_getlength(bs);
+ bdrv_graph_co_rdlock();
+ s->bdev_length = bdrv_co_getlength(bs);
+ bdrv_graph_co_rdunlock();
+
if (s->bdev_length < 0) {
ret = s->bdev_length;
goto immediate_exit;
}
- target_length = blk_getlength(s->target);
+ target_length = blk_co_getlength(s->target);
if (target_length < 0) {
ret = target_length;
goto immediate_exit;
if (s->bdev_length == 0) {
/* Transition to the READY state and wait for complete. */
job_transition_to_ready(&s->common.job);
- s->actively_synced = true;
+ qatomic_set(&s->actively_synced, true);
while (!job_cancel_requested(&s->common.job) && !s->should_complete) {
job_yield(&s->common.job);
}
*/
bdrv_get_backing_filename(target_bs, backing_filename,
sizeof(backing_filename));
- if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
+ bdrv_graph_co_rdlock();
+ if (!bdrv_co_get_info(target_bs, &bdi) && bdi.cluster_size) {
s->target_cluster_size = bdi.cluster_size;
} else {
s->target_cluster_size = BDRV_SECTOR_SIZE;
}
+ bdrv_graph_co_rdunlock();
if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
s->granularity < s->target_cluster_size) {
s->buf_size = MAX(s->buf_size, s->target_cluster_size);
}
}
+ /*
+ * Only now the job is fully initialised and mirror_top_bs should start
+ * accessing it.
+ */
+ mirror_top_opaque->job = s;
+
assert(!s->dbi);
s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
for (;;) {
- uint64_t delay_ns = 0;
int64_t cnt, delta;
bool should_complete;
- /* Do not start passive operations while there are active
- * writes in progress */
- while (s->in_active_write_counter) {
- mirror_wait_for_any_operation(s, true);
- }
-
if (s->ret < 0) {
ret = s->ret;
goto immediate_exit;
/* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
* the number of bytes currently being processed; together those are
* the current remaining operation length */
- job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);
+ job_progress_set_remaining(&s->common.job,
+ s->bytes_in_flight + cnt +
+ s->active_write_bytes_in_flight);
/* Note that even when no rate limit is applied we need to yield
* periodically with no pending I/O so that bdrv_drain_all() returns.
mirror_wait_for_free_in_flight_slot(s);
continue;
} else if (cnt != 0) {
- delay_ns = mirror_iteration(s);
+ mirror_iteration(s);
}
}
* the target in a consistent state.
*/
job_transition_to_ready(&s->common.job);
- if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
- s->actively_synced = true;
- }
+ }
+ if (qatomic_read(&s->copy_mode) != MIRROR_COPY_MODE_BACKGROUND) {
+ qatomic_set(&s->actively_synced, true);
}
should_complete = s->should_complete ||
s->in_drain = true;
bdrv_drained_begin(bs);
+
+ /* Must be zero because we are drained */
+ assert(s->in_active_write_counter == 0);
+
cnt = bdrv_get_dirty_count(s->dirty_bitmap);
if (cnt > 0 || mirror_flush(s) < 0) {
bdrv_drained_end(bs);
}
if (job_is_ready(&s->common.job) && !should_complete) {
- delay_ns = (s->in_flight == 0 &&
- cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
+ if (s->in_flight == 0 && cnt == 0) {
+ trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job),
+ BLOCK_JOB_SLICE_TIME);
+ job_sleep_ns(&s->common.job, BLOCK_JOB_SLICE_TIME);
+ }
+ } else {
+ block_job_ratelimit_sleep(&s->common);
}
- trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job),
- delay_ns);
- job_sleep_ns(&s->common.job, delay_ns);
s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
}
return force || !job_is_ready(job);
}
+static void mirror_change(BlockJob *job, BlockJobChangeOptions *opts,
+ Error **errp)
+{
+ MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
+ BlockJobChangeOptionsMirror *change_opts = &opts->u.mirror;
+ MirrorCopyMode current;
+
+ /*
+ * The implementation relies on the fact that copy_mode is only written
+ * under the BQL. Otherwise, further synchronization would be required.
+ */
+
+ GLOBAL_STATE_CODE();
+
+ if (qatomic_read(&s->copy_mode) == change_opts->copy_mode) {
+ return;
+ }
+
+ if (change_opts->copy_mode != MIRROR_COPY_MODE_WRITE_BLOCKING) {
+ error_setg(errp, "Change to copy mode '%s' is not implemented",
+ MirrorCopyMode_str(change_opts->copy_mode));
+ return;
+ }
+
+ current = qatomic_cmpxchg(&s->copy_mode, MIRROR_COPY_MODE_BACKGROUND,
+ change_opts->copy_mode);
+ if (current != MIRROR_COPY_MODE_BACKGROUND) {
+ error_setg(errp, "Expected current copy mode '%s', got '%s'",
+ MirrorCopyMode_str(MIRROR_COPY_MODE_BACKGROUND),
+ MirrorCopyMode_str(current));
+ }
+}
+
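+/* Fill in the mirror-specific part of the query-block-jobs information */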
+static void mirror_query(BlockJob *job, BlockJobInfo *info)
+{
+ MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
+
+ info->u.mirror = (BlockJobInfoMirror) {
+ .actively_synced = qatomic_read(&s->actively_synced),
+ };
+}
+
static const BlockJobDriver mirror_job_driver = {
.job_driver = {
.instance_size = sizeof(MirrorBlockJob),
.cancel = mirror_cancel,
},
.drained_poll = mirror_drained_poll,
+ .change = mirror_change,
+ .query = mirror_query,
};
static const BlockJobDriver commit_active_job_driver = {
}
job_progress_increase_remaining(&job->common.job, bytes);
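+ /* Account these bytes as in flight so mirror_run() includes them in the job length */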
+ job->active_write_bytes_in_flight += bytes;
switch (method) {
case MIRROR_METHOD_COPY:
abort();
}
+ job->active_write_bytes_in_flight -= bytes;
if (ret >= 0) {
job_progress_update(&job->common.job, bytes);
} else {
bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
bitmap_end - bitmap_offset);
- job->actively_synced = false;
+ qatomic_set(&job->actively_synced, false);
action = mirror_error_action(job, false, -ret);
if (action == BLOCK_ERROR_ACTION_REPORT) {
s->in_active_write_counter++;
+ /*
+ * Wait for concurrent requests affecting the area. If there are already
+ * running requests that are copying off now-to-be stale data in the area,
+ * we must wait for them to finish before we begin writing fresh data to the
+ * target so that the write operations appear in the correct order.
+ * Note that background requests (see mirror_iteration()) in contrast only
+ * wait for conflicting requests at the start of the dirty area, and then
+ * (based on the in_flight_bitmap) truncate the area to copy so it will not
+ * conflict with any requests beyond that. For active writes, however, we
+ * cannot truncate that area. The request from our parent must be blocked
+ * until the area is copied in full. Therefore, we must wait for the whole
+ * area to become free of concurrent requests.
+ */
mirror_wait_on_conflicts(op, s, offset, bytes);
bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
return op;
}
-static void coroutine_fn active_write_settle(MirrorOp *op)
+static void coroutine_fn GRAPH_RDLOCK active_write_settle(MirrorOp *op)
{
uint64_t start_chunk = op->offset / op->s->granularity;
uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
op->s->granularity);
- if (!--op->s->in_active_write_counter && op->s->actively_synced) {
+ if (!--op->s->in_active_write_counter &&
+ qatomic_read(&op->s->actively_synced)) {
BdrvChild *source = op->s->mirror_top_bs->backing;
if (QLIST_FIRST(&source->bs->parents) == source &&
g_free(op);
}
-static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
- int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
+static int coroutine_fn GRAPH_RDLOCK
+bdrv_mirror_top_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, BdrvRequestFlags flags)
{
return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}
-static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs,
- MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
- int flags)
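+/* Whether a write at the mirror_top filter must be mirrored to the target synchronously */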
+static bool should_copy_to_target(MirrorBDSOpaque *s)
+{
+ return s->job && s->job->ret >= 0 &&
+ !job_is_cancelled(&s->job->common.job) &&
+ qatomic_read(&s->job->copy_mode) == MIRROR_COPY_MODE_WRITE_BLOCKING;
+}
+
+static int coroutine_fn GRAPH_RDLOCK
+bdrv_mirror_top_do_write(BlockDriverState *bs, MirrorMethod method,
+ bool copy_to_target, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
MirrorOp *op = NULL;
MirrorBDSOpaque *s = bs->opaque;
int ret = 0;
- bool copy_to_target;
-
- copy_to_target = s->job->ret >= 0 &&
- !job_is_cancelled(&s->job->common.job) &&
- s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;
if (copy_to_target) {
op = active_write_prepare(s->job, offset, bytes);
abort();
}
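+ /*
+ * A write that is not mirrored synchronously leaves the target stale:
+ * mark the area dirty for the background job and clear actively_synced.
+ */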
+ if (!copy_to_target && s->job && s->job->dirty_bitmap) {
+ qatomic_set(&s->job->actively_synced, false);
+ bdrv_set_dirty_bitmap(s->job->dirty_bitmap, offset, bytes);
+ }
+
if (ret < 0) {
goto out;
}
return ret;
}
-static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
- int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
+static int coroutine_fn GRAPH_RDLOCK
+bdrv_mirror_top_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, BdrvRequestFlags flags)
{
- MirrorBDSOpaque *s = bs->opaque;
QEMUIOVector bounce_qiov;
void *bounce_buf;
int ret = 0;
- bool copy_to_target;
-
- copy_to_target = s->job->ret >= 0 &&
- !job_is_cancelled(&s->job->common.job) &&
- s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;
+ bool copy_to_target = should_copy_to_target(bs->opaque);
if (copy_to_target) {
/* The guest might concurrently modify the data to write; but
flags &= ~BDRV_REQ_REGISTERED_BUF;
}
- ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
- flags);
+ ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, copy_to_target,
+ offset, bytes, qiov, flags);
if (copy_to_target) {
qemu_iovec_destroy(&bounce_qiov);
return ret;
}
-static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
+static int coroutine_fn GRAPH_RDLOCK bdrv_mirror_top_flush(BlockDriverState *bs)
{
if (bs->backing == NULL) {
/* we can be here after failed bdrv_append in mirror_start_job */
return bdrv_co_flush(bs->backing->bs);
}
-static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
- int64_t offset, int64_t bytes, BdrvRequestFlags flags)
+static int coroutine_fn GRAPH_RDLOCK
+bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
+ int64_t bytes, BdrvRequestFlags flags)
{
- return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
- flags);
+ bool copy_to_target = should_copy_to_target(bs->opaque);
+ return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, copy_to_target,
+ offset, bytes, NULL, flags);
}
-static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
- int64_t offset, int64_t bytes)
+static int coroutine_fn GRAPH_RDLOCK
+bdrv_mirror_top_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
- return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
- NULL, 0);
+ bool copy_to_target = should_copy_to_target(bs->opaque);
+ return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, copy_to_target,
+ offset, bytes, NULL, 0);
}
static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
uint64_t target_perms, target_shared_perms;
int ret;
+ GLOBAL_STATE_CODE();
+
if (granularity == 0) {
granularity = bdrv_get_default_bitmap_granularity(target);
}
if (!s) {
goto fail;
}
- bs_opaque->job = s;
/* The block job now has a reference to this node */
bdrv_unref(mirror_top_bs);
s->is_none_mode = is_none_mode;
s->backing_mode = backing_mode;
s->zero_target = zero_target;
- s->copy_mode = copy_mode;
+ qatomic_set(&s->copy_mode, copy_mode);
s->base = base;
s->base_overlay = bdrv_find_overlay(bs, base);
s->granularity = granularity;
s->should_complete = true;
}
- s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
+ s->dirty_bitmap = bdrv_create_dirty_bitmap(s->mirror_top_bs, granularity,
+ NULL, errp);
if (!s->dirty_bitmap) {
goto fail;
}
- if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
- bdrv_disable_dirty_bitmap(s->dirty_bitmap);
- }
+
+ /*
+ * The dirty bitmap is set by bdrv_mirror_top_do_write() when not in active
+ * mode.
+ */
+ bdrv_disable_dirty_bitmap(s->dirty_bitmap);
ret = block_job_add_bdrv(&s->common, "source", bs, 0,
BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
}
bs_opaque->stop = true;
+ bdrv_graph_rdlock_main_loop();
bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
&error_abort);
+ bdrv_graph_rdunlock_main_loop();
bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
bdrv_unref(mirror_top_bs);