Error *replace_blocker;
bool is_none_mode;
BlockMirrorBackingMode backing_mode;
+ /* Whether the target image should be explicitly zero-initialized before mirroring */
+ bool zero_target;
MirrorCopyMode copy_mode;
BlockdevOnError on_source_error, on_target_error;
bool synced;
typedef struct MirrorBDSOpaque {
MirrorBlockJob *job;
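+ /* Set when the job is being torn down: bdrv_mirror_top_child_perm() then
+ * stops requiring any permissions on the backing file */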
+ bool stop;
} MirrorBDSOpaque;
struct MirrorOp {
bool is_pseudo_op;
bool is_active_write;
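+ /* Set once the op's coroutine has actually started doing I/O; ops that
+ * are merely queued behind others must not be waited on, as they make no
+ * progress until started */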
+ bool is_in_flight;
CoQueue waiting_requests;
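+ /* The coroutine that runs this operation; stored mainly so that it can
+ * be found from the op (e.g. for debugging) */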
+ Coroutine *co;
QTAILQ_ENTRY(MirrorOp) next;
};
* caller of this function. Since there is only one pseudo op
* at any given time, we will always find some real operation
* to wait on. */
- if (!op->is_pseudo_op && op->is_active_write == active) {
+ if (!op->is_pseudo_op && op->is_in_flight &&
+ op->is_active_write == active)
+ {
qemu_co_queue_wait(&op->waiting_requests, NULL);
return;
}
/* Copy the dirty cluster. */
s->in_flight++;
s->bytes_in_flight += op->bytes;
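+ /* Only now does the op count as in flight:
+ * mirror_wait_for_any_operation() skips ops that have not set this yet */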
+ op->is_in_flight = true;
trace_mirror_one_iteration(s, op->offset, op->bytes);
ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
op->s->in_flight++;
op->s->bytes_in_flight += op->bytes;
*op->bytes_handled = op->bytes;
+ op->is_in_flight = true;
ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
op->s->in_flight++;
op->s->bytes_in_flight += op->bytes;
*op->bytes_handled = op->bytes;
+ op->is_in_flight = true;
ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
mirror_write_complete(op, ret);
default:
abort();
}
+ op->co = co;
QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
qemu_coroutine_enter(co);
int64_t next_offset = offset + nb_chunks * s->granularity;
int64_t next_chunk = next_offset / s->granularity;
if (next_offset >= s->bdev_length ||
- !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_offset)) {
+ !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
break;
}
if (test_bit(next_chunk, s->in_flight_bitmap)) {
{
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
BlockJob *bjob = &s->common;
- MirrorBDSOpaque *bs_opaque = s->mirror_top_bs->opaque;
+ MirrorBDSOpaque *bs_opaque;
AioContext *replace_aio_context = NULL;
- BlockDriverState *src = s->mirror_top_bs->backing->bs;
- BlockDriverState *target_bs = blk_bs(s->target);
- BlockDriverState *mirror_top_bs = s->mirror_top_bs;
+ BlockDriverState *src;
+ BlockDriverState *target_bs;
+ BlockDriverState *mirror_top_bs;
Error *local_err = NULL;
bool abort = job->ret < 0;
int ret = 0;
}
s->prepared = true;
+ mirror_top_bs = s->mirror_top_bs;
+ bs_opaque = mirror_top_bs->opaque;
+ src = mirror_top_bs->backing->bs;
+ target_bs = blk_bs(s->target);
+
if (bdrv_chain_contains(src, target_bs)) {
bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
}
- bdrv_release_dirty_bitmap(src, s->dirty_bitmap);
+ bdrv_release_dirty_bitmap(s->dirty_bitmap);
/* Make sure that the source BDS doesn't go away during bdrv_replace_node,
* before we can call bdrv_drained_end */
bdrv_ref(mirror_top_bs);
bdrv_ref(target_bs);
- /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
+ /*
+ * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
* inserting target_bs at s->to_replace, where we might not be able to get
* these permissions.
- *
- * Note that blk_unref() alone doesn't necessarily drop permissions because
- * we might be running nested inside mirror_drain(), which takes an extra
- * reference, so use an explicit blk_set_perm() first. */
- blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
+ */
blk_unref(s->target);
s->target = NULL;
/* We don't access the source any more. Dropping any WRITE/RESIZE is
- * required before it could become a backing file of target_bs. */
- bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
- &error_abort);
+ * required before it could become a backing file of target_bs. Not having
+ * these permissions any more means that we can't allow any new requests on
+ * mirror_top_bs from now on, so keep it drained. */
+ bdrv_drained_begin(mirror_top_bs);
+ bs_opaque->stop = true;
+ bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
+ &error_abort);
if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
BlockDriverState *backing = s->is_none_mode ? src : s->base;
if (backing_bs(target_bs) != backing) {
bdrv_set_backing_hd(target_bs, backing, &local_err);
if (local_err) {
error_report_err(local_err);
+ local_err = NULL;
ret = -EPERM;
}
}
* drain potential other users of the BDS before changing the graph. */
assert(s->in_drain);
bdrv_drained_begin(target_bs);
- bdrv_replace_node(to_replace, target_bs, &local_err);
+ /*
+ * Cannot use check_to_replace_node() here, because that would
+ * check for an op blocker on @to_replace, and we have our own
+ * there.
+ */
+ if (bdrv_recurse_can_replace(src, to_replace)) {
+ bdrv_replace_node(to_replace, target_bs, &local_err);
+ } else {
+ error_setg(&local_err, "Can no longer replace '%s' by '%s', "
+ "because it can no longer be guaranteed that doing so "
+ "would not lead to an abrupt change of visible data",
+ to_replace->node_name, target_bs->node_name);
+ }
bdrv_drained_end(target_bs);
if (local_err) {
error_report_err(local_err);
g_free(s->replaces);
bdrv_unref(target_bs);
- /* Remove the mirror filter driver from the graph. Before this, get rid of
+ /*
+ * Remove the mirror filter driver from the graph. Before this, get rid of
* the blockers on the intermediate nodes so that the resulting state is
- * valid. Also give up permissions on mirror_top_bs->backing, which might
- * block the removal. */
+ * valid.
+ */
block_job_remove_all_bdrv(bjob);
- bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
- &error_abort);
bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
/* We just changed the BDS the job BB refers to (with either or both of the
bs_opaque->job = NULL;
bdrv_drained_end(src);
+ bdrv_drained_end(mirror_top_bs);
s->in_drain = false;
bdrv_unref(mirror_top_bs);
bdrv_unref(src);
int ret;
int64_t count;
- if (base == NULL && !bdrv_has_zero_init(target_bs)) {
+ if (s->zero_target) {
if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
return 0;
return 0;
}
- ret = bdrv_is_allocated_above(bs, base, offset, bytes, &count);
+ ret = bdrv_is_allocated_above(bs, base, false, offset, bytes, &count);
if (ret < 0) {
return ret;
}
BlockDriverState *target_bs = blk_bs(s->target);
bool need_drain = true;
int64_t length;
+ int64_t target_length;
BlockDriverInfo bdi;
char backing_filename[2]; /* we only need 2 characters because we are only
checking for a NULL string */
goto immediate_exit;
}
+ target_length = blk_getlength(s->target);
+ if (target_length < 0) {
+ ret = target_length;
+ goto immediate_exit;
+ }
+
/* Active commit must resize the base image if its size differs from the
* active layer. */
if (s->base == blk_bs(s->target)) {
- int64_t base_length;
-
- base_length = blk_getlength(s->target);
- if (base_length < 0) {
- ret = base_length;
- goto immediate_exit;
- }
-
- if (s->bdev_length > base_length) {
- ret = blk_truncate(s->target, s->bdev_length, PREALLOC_MODE_OFF,
- NULL);
+ if (s->bdev_length > target_length) {
+ ret = blk_truncate(s->target, s->bdev_length, false,
+ PREALLOC_MODE_OFF, 0, NULL);
if (ret < 0) {
goto immediate_exit;
}
}
+ } else if (s->bdev_length != target_length) {
+ error_setg(errp, "Source and target image have different sizes");
+ ret = -EINVAL;
+ goto immediate_exit;
}
if (s->bdev_length == 0) {
return !!s->in_flight;
}
-static void mirror_drain(BlockJob *job)
-{
- MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
-
- /* Need to keep a reference in case blk_drain triggers execution
- * of mirror_complete...
- */
- if (s->target) {
- BlockBackend *target = s->target;
- blk_ref(target);
- blk_drain(target);
- blk_unref(target);
- }
-}
-
static const BlockJobDriver mirror_job_driver = {
.job_driver = {
.instance_size = sizeof(MirrorBlockJob),
.job_type = JOB_TYPE_MIRROR,
.free = block_job_free,
.user_resume = block_job_user_resume,
- .drain = block_job_drain,
.run = mirror_run,
.prepare = mirror_prepare,
.abort = mirror_abort,
.complete = mirror_complete,
},
.drained_poll = mirror_drained_poll,
- .drain = mirror_drain,
};
static const BlockJobDriver commit_active_job_driver = {
.job_type = JOB_TYPE_COMMIT,
.free = block_job_free,
.user_resume = block_job_user_resume,
- .drain = block_job_drain,
.run = mirror_run,
.prepare = mirror_prepare,
.abort = mirror_abort,
.complete = mirror_complete,
},
.drained_poll = mirror_drained_poll,
- .drain = mirror_drain,
};
static void coroutine_fn
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
{
- QEMUIOVector target_qiov;
- uint64_t dirty_offset = offset;
- uint64_t dirty_bytes;
+ int ret;
+ size_t qiov_offset = 0;
+ int64_t bitmap_offset, bitmap_end;
- if (qiov) {
- qemu_iovec_init(&target_qiov, qiov->niov);
+ if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
+ bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
+ {
+ /*
+ * Dirty unaligned padding: ignore it.
+ *
+ * Reasoning:
+ * 1. If we copy it, we can't reset the corresponding bit in the
+ * dirty_bitmap, as there may be some "dirty" bytes still not
+ * copied.
+ * 2. It's already dirty, so skipping it does not diverge mirror
+ * progress.
+ *
+ * Note that, because of this, a guest write may make no contribution
+ * to mirror convergence; but that's not bad, as we still have the
+ * background mirroring process. If, under some bad circumstances
+ * (high guest I/O load), the background process starves, we will not
+ * converge anyway, even if every write contributed, as the guest is
+ * not guaranteed to rewrite the whole disk.
+ */
+ qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
+ if (bytes <= qiov_offset) {
+ /* nothing to do after shrink */
+ return;
+ }
+ offset += qiov_offset;
+ bytes -= qiov_offset;
}
- while (true) {
- bool valid_area;
- int ret;
+ if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
+ bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
+ {
+ uint64_t tail = (offset + bytes) % job->granularity;
- bdrv_dirty_bitmap_lock(job->dirty_bitmap);
- dirty_bytes = MIN(offset + bytes - dirty_offset, INT_MAX);
- valid_area = bdrv_dirty_bitmap_next_dirty_area(job->dirty_bitmap,
- &dirty_offset,
- &dirty_bytes);
- if (!valid_area) {
- bdrv_dirty_bitmap_unlock(job->dirty_bitmap);
- break;
+ if (bytes <= tail) {
+ /* nothing to do after shrink */
+ return;
}
+ bytes -= tail;
+ }
- bdrv_reset_dirty_bitmap_locked(job->dirty_bitmap,
- dirty_offset, dirty_bytes);
- bdrv_dirty_bitmap_unlock(job->dirty_bitmap);
-
- job_progress_increase_remaining(&job->common.job, dirty_bytes);
+ /*
+ * Tails are either clean or shrunk, so for bitmap resetting
+ * we can safely shrink the range to aligned boundaries.
+ */
+ bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
+ bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
+ if (bitmap_offset < bitmap_end) {
+ bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
+ bitmap_end - bitmap_offset);
+ }
- assert(dirty_offset - offset <= SIZE_MAX);
- if (qiov) {
- qemu_iovec_reset(&target_qiov);
- qemu_iovec_concat(&target_qiov, qiov,
- dirty_offset - offset, dirty_bytes);
- }
+ job_progress_increase_remaining(&job->common.job, bytes);
- switch (method) {
- case MIRROR_METHOD_COPY:
- ret = blk_co_pwritev(job->target, dirty_offset, dirty_bytes,
- qiov ? &target_qiov : NULL, flags);
- break;
-
- case MIRROR_METHOD_ZERO:
- assert(!qiov);
- ret = blk_co_pwrite_zeroes(job->target, dirty_offset, dirty_bytes,
- flags);
- break;
+ switch (method) {
+ case MIRROR_METHOD_COPY:
+ ret = blk_co_pwritev_part(job->target, offset, bytes,
+ qiov, qiov_offset, flags);
+ break;
- case MIRROR_METHOD_DISCARD:
- assert(!qiov);
- ret = blk_co_pdiscard(job->target, dirty_offset, dirty_bytes);
- break;
+ case MIRROR_METHOD_ZERO:
+ assert(!qiov);
+ ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
+ break;
- default:
- abort();
- }
+ case MIRROR_METHOD_DISCARD:
+ assert(!qiov);
+ ret = blk_co_pdiscard(job->target, offset, bytes);
+ break;
- if (ret >= 0) {
- job_progress_update(&job->common.job, dirty_bytes);
- } else {
- BlockErrorAction action;
+ default:
+ abort();
+ }
- bdrv_set_dirty_bitmap(job->dirty_bitmap, dirty_offset, dirty_bytes);
- job->actively_synced = false;
+ if (ret >= 0) {
+ job_progress_update(&job->common.job, bytes);
+ } else {
+ BlockErrorAction action;
- action = mirror_error_action(job, false, -ret);
- if (action == BLOCK_ERROR_ACTION_REPORT) {
- if (!job->ret) {
- job->ret = ret;
- }
- break;
+ /*
+ * We failed, so we should mark the whole area dirty, aligned up.
+ * Note that we don't care about the shrunk tails, if any: they were
+ * dirty at function start, and they must still be dirty, as we've
+ * locked the region for the in-flight op.
+ */
+ bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
+ bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
+ bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
+ bitmap_end - bitmap_offset);
+ job->actively_synced = false;
+
+ action = mirror_error_action(job, false, -ret);
+ if (action == BLOCK_ERROR_ACTION_REPORT) {
+ if (!job->ret) {
+ job->ret = ret;
}
}
-
- dirty_offset += dirty_bytes;
- }
-
- if (qiov) {
- qemu_iovec_destroy(&target_qiov);
}
}
.offset = offset,
.bytes = bytes,
.is_active_write = true,
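+ /* Active writes perform their I/O immediately instead of queuing
+ * first, so they count as in flight from creation */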
+ .is_in_flight = true,
};
qemu_co_queue_init(&op->waiting_requests);
QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
}
static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
- const BdrvChildRole *role,
+ BdrvChildRole role,
BlockReopenQueue *reopen_queue,
uint64_t perm, uint64_t shared,
uint64_t *nperm, uint64_t *nshared)
{
+ MirrorBDSOpaque *s = bs->opaque;
+
+ if (s->stop) {
+ /*
+ * If the job is to be stopped, we do not need to forward
+ * anything to the real image.
+ */
+ *nperm = 0;
+ *nshared = BLK_PERM_ALL;
+ return;
+ }
+
/* Must be able to forward guest writes to the real image */
*nperm = 0;
if (perm & BLK_PERM_WRITE) {
.bdrv_co_block_status = bdrv_co_block_status_from_backing,
.bdrv_refresh_filename = bdrv_mirror_top_refresh_filename,
.bdrv_child_perm = bdrv_mirror_top_child_perm,
+
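+ /* mirror_top is a pass-through filter node over its backing file */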
+ .is_filter = true,
};
-static void mirror_start_job(const char *job_id, BlockDriverState *bs,
+static BlockJob *mirror_start_job(
+ const char *job_id, BlockDriverState *bs,
int creation_flags, BlockDriverState *target,
const char *replaces, int64_t speed,
uint32_t granularity, int64_t buf_size,
BlockMirrorBackingMode backing_mode,
+ bool zero_target,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
bool unmap,
if (buf_size < 0) {
error_setg(errp, "Invalid parameter 'buf-size'");
- return;
+ return NULL;
}
if (buf_size == 0) {
if (bs == target) {
error_setg(errp, "Can't mirror node into itself");
- return;
+ return NULL;
}
/* In the case of active commit, add dummy driver to provide consistent
mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
BDRV_O_RDWR, errp);
if (mirror_top_bs == NULL) {
- return;
+ return NULL;
}
if (!filter_node_name) {
mirror_top_bs->implicit = true;
}
+
+ /* So that we can always drop this node */
+ mirror_top_bs->never_freeze = true;
+
mirror_top_bs->total_sectors = bs->total_sectors;
mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
if (local_err) {
bdrv_unref(mirror_top_bs);
error_propagate(errp, local_err);
- return;
+ return NULL;
}
/* Make sure that the source is not resized while the job is running */
blk_set_force_allow_inactivate(s->target);
}
blk_set_allow_aio_context_change(s->target, true);
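+ /* The job still submits requests to the target while it is drained
+ * (e.g. when completing); letting them be queued there could deadlock
+ * the drained section */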
+ blk_set_disable_request_queuing(s->target, true);
s->replaces = g_strdup(replaces);
s->on_source_error = on_source_error;
s->on_target_error = on_target_error;
s->is_none_mode = is_none_mode;
s->backing_mode = backing_mode;
+ s->zero_target = zero_target;
s->copy_mode = copy_mode;
s->base = base;
s->granularity = granularity;
if (!s->dirty_bitmap) {
goto fail;
}
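+ /*
+ * In write-blocking mode, do_sync_target_write() manages the bitmap
+ * bits itself (resetting them on success and setting them on failure),
+ * so automatic dirtying by guest writes is not wanted here.
+ */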
+ if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
+ bdrv_disable_dirty_bitmap(s->dirty_bitmap);
+ }
ret = block_job_add_bdrv(&s->common, "source", bs, 0,
BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
trace_mirror_start(bs, s, opaque);
job_start(&s->common.job);
- return;
+
+ return &s->common;
fail:
if (s) {
blk_unref(s->target);
bs_opaque->job = NULL;
if (s->dirty_bitmap) {
- bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
+ bdrv_release_dirty_bitmap(s->dirty_bitmap);
}
job_early_fail(&s->common.job);
}
- bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
- &error_abort);
+ bs_opaque->stop = true;
+ bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
+ &error_abort);
bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
bdrv_unref(mirror_top_bs);
+
+ return NULL;
}
void mirror_start(const char *job_id, BlockDriverState *bs,
int creation_flags, int64_t speed,
uint32_t granularity, int64_t buf_size,
MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
+ bool zero_target,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
bool unmap, const char *filter_node_name,
bool is_none_mode;
BlockDriverState *base;
- if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
- error_setg(errp, "Sync mode 'incremental' not supported");
+ if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
+ (mode == MIRROR_SYNC_MODE_BITMAP)) {
+ error_setg(errp, "Sync mode '%s' not supported",
+ MirrorSyncMode_str(mode));
return;
}
is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
mirror_start_job(job_id, bs, creation_flags, target, replaces,
- speed, granularity, buf_size, backing_mode,
+ speed, granularity, buf_size, backing_mode, zero_target,
on_source_error, on_target_error, unmap, NULL, NULL,
&mirror_job_driver, is_none_mode, base, false,
filter_node_name, true, copy_mode, errp);
}
-void commit_active_start(const char *job_id, BlockDriverState *bs,
- BlockDriverState *base, int creation_flags,
- int64_t speed, BlockdevOnError on_error,
- const char *filter_node_name,
- BlockCompletionFunc *cb, void *opaque,
- bool auto_complete, Error **errp)
+BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
+ BlockDriverState *base, int creation_flags,
+ int64_t speed, BlockdevOnError on_error,
+ const char *filter_node_name,
+ BlockCompletionFunc *cb, void *opaque,
+ bool auto_complete, Error **errp)
{
bool base_read_only;
Error *local_err = NULL;
+ BlockJob *ret;
base_read_only = bdrv_is_read_only(base);
if (base_read_only) {
if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
- return;
+ return NULL;
}
}
- mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
- MIRROR_LEAVE_BACKING_CHAIN,
+ ret = mirror_start_job(
+ job_id, bs, creation_flags, base, NULL, speed, 0, 0,
+ MIRROR_LEAVE_BACKING_CHAIN, false,
on_error, on_error, true, cb, opaque,
&commit_active_job_driver, false, base, auto_complete,
filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
goto error_restore_flags;
}
- return;
+ return ret;
error_restore_flags:
/* ignore error and errp for bdrv_reopen, because we want to propagate
if (base_read_only) {
bdrv_reopen_set_read_only(base, true, NULL);
}
- return;
+ return NULL;
}