BlockBackend *target;
BlockDriverState *mirror_top_bs;
BlockDriverState *base;
+ BlockDriverState *base_overlay;
/* The name of the graph node to replace */
char *replaces;
bool is_pseudo_op;
bool is_active_write;
+ bool is_in_flight;
CoQueue waiting_requests;
+ Coroutine *co;
QTAILQ_ENTRY(MirrorOp) next;
};
* caller of this function. Since there is only one pseudo op
* at any given time, we will always find some real operation
* to wait on. */
- if (!op->is_pseudo_op && op->is_active_write == active) {
+ if (!op->is_pseudo_op && op->is_in_flight &&
+ op->is_active_write == active)
+ {
qemu_co_queue_wait(&op->waiting_requests, NULL);
return;
}
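
For illustration, a minimal sketch (not part of the patch) of why waiters must skip operations that are not yet in flight: in mirror_perform() further down, an op becomes visible on the list before its coroutine has done any work, so waiting on its waiting_requests queue could mean waiting on an operation that has not even started:

    /* Sketch: the op is published before it is actually in flight. */
    op->co = co;
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next); /* visible to waiters */
    qemu_coroutine_enter(co); /* op->is_in_flight is set only once the
                               * coroutine starts the actual request */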
/* Copy the dirty cluster. */
s->in_flight++;
s->bytes_in_flight += op->bytes;
+ op->is_in_flight = true;
trace_mirror_one_iteration(s, op->offset, op->bytes);
ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
op->s->in_flight++;
op->s->bytes_in_flight += op->bytes;
*op->bytes_handled = op->bytes;
+ op->is_in_flight = true;
ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
op->s->in_flight++;
op->s->bytes_in_flight += op->bytes;
*op->bytes_handled = op->bytes;
+ op->is_in_flight = true;
ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
mirror_write_complete(op, ret);
default:
abort();
}
+ op->co = co;
QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
qemu_coroutine_enter(co);
{
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
BlockJob *bjob = &s->common;
- MirrorBDSOpaque *bs_opaque = s->mirror_top_bs->opaque;
+ MirrorBDSOpaque *bs_opaque;
AioContext *replace_aio_context = NULL;
- BlockDriverState *src = s->mirror_top_bs->backing->bs;
- BlockDriverState *target_bs = blk_bs(s->target);
- BlockDriverState *mirror_top_bs = s->mirror_top_bs;
+ BlockDriverState *src;
+ BlockDriverState *target_bs;
+ BlockDriverState *mirror_top_bs;
Error *local_err = NULL;
bool abort = job->ret < 0;
int ret = 0;
}
s->prepared = true;
+ mirror_top_bs = s->mirror_top_bs;
+ bs_opaque = mirror_top_bs->opaque;
+ src = mirror_top_bs->backing->bs;
+ target_bs = blk_bs(s->target);
+
if (bdrv_chain_contains(src, target_bs)) {
bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
}
&error_abort);
if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
BlockDriverState *backing = s->is_none_mode ? src : s->base;
- if (backing_bs(target_bs) != backing) {
- bdrv_set_backing_hd(target_bs, backing, &local_err);
+ BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs);
+
+ if (bdrv_cow_bs(unfiltered_target) != backing) {
+ bdrv_set_backing_hd(unfiltered_target, backing, &local_err);
if (local_err) {
error_report_err(local_err);
+ local_err = NULL;
ret = -EPERM;
}
}
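
Skipping filters matters when the target node was opened with a filter (e.g. throttle) on top; a hypothetical chain for illustration:

    /*
     * Hypothetical target chain:
     *
     *   target_bs (throttle filter) --file--> qcow2 node --backing--> old base
     *
     * bdrv_skip_filters(target_bs) returns the qcow2 node, so the new
     * backing file is attached at the layer that actually does COW,
     * and bdrv_cow_bs() inspects that same layer's current backing file.
     */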
* drain potential other users of the BDS before changing the graph. */
assert(s->in_drain);
bdrv_drained_begin(target_bs);
- bdrv_replace_node(to_replace, target_bs, &local_err);
+ /*
+ * Cannot use check_to_replace_node() here, because that would
+ * check for an op blocker on @to_replace, and we have our own
+ * there.
+ */
+ if (bdrv_recurse_can_replace(src, to_replace)) {
+ bdrv_replace_node(to_replace, target_bs, &local_err);
+ } else {
+ error_setg(&local_err, "Can no longer replace '%s' by '%s', "
+ "because it can no longer be guaranteed that doing so "
+ "would not lead to an abrupt change of visible data",
+ to_replace->node_name, target_bs->node_name);
+ }
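
For context, the @replaces mechanism is typically used to swap the mirror target in for a broken quorum child; a hedged example of the QMP command that sets this up (all node names are made up):

    /*
     * Hypothetical QMP usage:
     *
     *   { "execute": "drive-mirror",
     *     "arguments": { "device": "quorum0",
     *                    "target": "/tmp/fixed.qcow2",
     *                    "node-name": "fixed0",
     *                    "replaces": "broken0",
     *                    "sync": "full" } }
     *
     * The bdrv_recurse_can_replace() call above re-checks at completion
     * time that "broken0" can still be substituted without an abrupt
     * change of visible data.
     */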
bdrv_drained_end(target_bs);
if (local_err) {
error_report_err(local_err);
* valid.
*/
block_job_remove_all_bdrv(bjob);
- bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
+ bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
/* We just changed the BDS the job BB refers to (with either or both of the
* bdrv_replace_node() calls), so switch the BB back so the cleanup does
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
int64_t offset;
- BlockDriverState *base = s->base;
BlockDriverState *bs = s->mirror_top_bs->backing->bs;
BlockDriverState *target_bs = blk_bs(s->target);
int ret;
return 0;
}
- ret = bdrv_is_allocated_above(bs, base, false, offset, bytes, &count);
+ ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset, bytes,
+ &count);
if (ret < 0) {
return ret;
}
assert(count);
- if (ret == 1) {
+ if (ret > 0) {
bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
}
offset += count;
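
A sketch of the intended semantics of the new call, under an assumed chain:

    /*
     * Assumed chain:  base <--backing-- base_overlay <--backing-- ... <-- bs
     *
     * bdrv_is_allocated_above(bs, s->base_overlay, true, ...) reports
     * clusters allocated anywhere from bs down to and including
     * base_overlay; data that lives only in base_overlay is still
     * marked dirty and copied to the target.
     */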
BlockDriverState *target_bs = blk_bs(s->target);
bool need_drain = true;
int64_t length;
+ int64_t target_length;
BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for an empty string */
goto immediate_exit;
}
+ target_length = blk_getlength(s->target);
+ if (target_length < 0) {
+ ret = target_length;
+ goto immediate_exit;
+ }
+
/* Active commit must resize the base image if its size differs from the
* active layer. */
if (s->base == blk_bs(s->target)) {
- int64_t base_length;
-
- base_length = blk_getlength(s->target);
- if (base_length < 0) {
- ret = base_length;
- goto immediate_exit;
- }
-
- if (s->bdev_length > base_length) {
- ret = blk_truncate(s->target, s->bdev_length, PREALLOC_MODE_OFF,
- NULL);
+ if (s->bdev_length > target_length) {
+ ret = blk_truncate(s->target, s->bdev_length, false,
+ PREALLOC_MODE_OFF, 0, NULL);
if (ret < 0) {
goto immediate_exit;
}
}
+ } else if (s->bdev_length != target_length) {
+ error_setg(errp, "Source and target image have different sizes");
+ ret = -EINVAL;
+ goto immediate_exit;
}
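
A worked illustration of the two branches above, with assumed sizes:

    /*
     * Hypothetical example: source 10 GiB, pre-existing target 8 GiB.
     *   - active commit (s->base == blk_bs(s->target)): the target is
     *     grown to 10 GiB via blk_truncate();
     *   - plain mirror: the job now fails early with -EINVAL instead
     *     of hitting write errors mid-copy.
     */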
if (s->bdev_length == 0) {
} else {
s->target_cluster_size = BDRV_SECTOR_SIZE;
}
- if (backing_filename[0] && !target_bs->backing &&
+ if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
s->granularity < s->target_cluster_size) {
s->buf_size = MAX(s->buf_size, s->target_cluster_size);
s->cow_bitmap = bitmap_new(length);
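
The switch to bdrv_backing_chain_next() keeps a filter on top of the target from masking its backing chain; a sketch under an assumed chain:

    /*
     * Assumed target chain:
     *
     *   target_bs (filter) --file--> qcow2 --backing--> base
     *
     * target_bs->backing is NULL for the filter, but
     * bdrv_backing_chain_next(target_bs) skips the filter and finds
     * "base", so the target is correctly treated as having a backing
     * chain and no COW bitmap is set up for it.
     */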
if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
int ret;
- assert(!target->backing);
- ret = bdrv_open_backing_file(target, NULL, "backing", errp);
+ assert(!bdrv_backing_chain_next(target));
+ ret = bdrv_open_backing_file(bdrv_skip_filters(target), NULL,
+ "backing", errp);
if (ret < 0) {
return;
}
return !!s->in_flight;
}
+static void mirror_cancel(Job *job)
+{
+ MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
+ BlockDriverState *target = blk_bs(s->target);
+
+ bdrv_cancel_in_flight(target);
+}
+
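
A rationale sketch for the new handler, under an assumed failure scenario:

/*
 * Assumed scenario: the target is an NBD node whose server has stopped
 * responding.  Without this handler, cancelling the job waits for the
 * stuck target requests; bdrv_cancel_in_flight(target) fails them
 * early so that cancellation completes promptly.
 */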
static const BlockJobDriver mirror_job_driver = {
.job_driver = {
.instance_size = sizeof(MirrorBlockJob),
.abort = mirror_abort,
.pause = mirror_pause,
.complete = mirror_complete,
+ .cancel = mirror_cancel,
},
.drained_poll = mirror_drained_poll,
};
uint64_t offset, uint64_t bytes,
QEMUIOVector *qiov, int flags)
{
- QEMUIOVector target_qiov;
- uint64_t dirty_offset = offset;
- uint64_t dirty_bytes;
+ int ret;
+ size_t qiov_offset = 0;
+ int64_t bitmap_offset, bitmap_end;
- if (qiov) {
- qemu_iovec_init(&target_qiov, qiov->niov);
+ if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
+ bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
+ {
+ /*
+ * Dirty unaligned padding: ignore it.
+ *
+ * Reasoning:
+         * 1. If we copy it, we cannot reset the corresponding bit in the
+         *    dirty_bitmap, because some "dirty" bytes in the cluster may
+         *    still be uncopied.
+         * 2. The area is already dirty anyway, so skipping it does not
+         *    set the mirror's progress back.
+         *
+         * Note that, because of this, a guest write may contribute nothing
+         * to the mirror's convergence.  That is acceptable, as the
+         * background mirroring process keeps running.  If, under adverse
+         * circumstances (high guest I/O load), the background process
+         * starves, we will not converge anyway, even if every write did
+         * contribute, because the guest is not guaranteed to rewrite the
+         * whole disk.
+ */
+ qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
+ if (bytes <= qiov_offset) {
+ /* nothing to do after shrink */
+ return;
+ }
+ offset += qiov_offset;
+ bytes -= qiov_offset;
}
- while (true) {
- bool valid_area;
- int ret;
+ if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
+ bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
+ {
+ uint64_t tail = (offset + bytes) % job->granularity;
- bdrv_dirty_bitmap_lock(job->dirty_bitmap);
- dirty_bytes = MIN(offset + bytes - dirty_offset, INT_MAX);
- valid_area = bdrv_dirty_bitmap_next_dirty_area(job->dirty_bitmap,
- &dirty_offset,
- &dirty_bytes);
- if (!valid_area) {
- bdrv_dirty_bitmap_unlock(job->dirty_bitmap);
- break;
+ if (bytes <= tail) {
+ /* nothing to do after shrink */
+ return;
}
+ bytes -= tail;
+ }
- bdrv_reset_dirty_bitmap_locked(job->dirty_bitmap,
- dirty_offset, dirty_bytes);
- bdrv_dirty_bitmap_unlock(job->dirty_bitmap);
-
- job_progress_increase_remaining(&job->common.job, dirty_bytes);
-
- assert(dirty_offset - offset <= SIZE_MAX);
- if (qiov) {
- qemu_iovec_reset(&target_qiov);
- qemu_iovec_concat(&target_qiov, qiov,
- dirty_offset - offset, dirty_bytes);
- }
+ /*
+     * The unaligned head and tail are now either clean or have been
+     * shrunk off, so for resetting the bitmap we can safely shrink the
+     * range to cluster boundaries (offset aligned up, end aligned down).
+ */
+ bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
+ bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
+ if (bitmap_offset < bitmap_end) {
+ bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
+ bitmap_end - bitmap_offset);
+ }
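
A worked example of the head/tail handling above, with assumed numbers:

    /*
     * Assumed granularity: 64 KiB.  A guest write covers
     * [100 KiB, 200 KiB) and both the first and last cluster are dirty:
     *
     *   head: qiov_offset = 128 KiB - 100 KiB = 28 KiB
     *         -> offset = 128 KiB, bytes = 72 KiB
     *   tail: tail = 200 KiB % 64 KiB = 8 KiB
     *         -> bytes = 64 KiB, i.e. the range [128 KiB, 192 KiB)
     *
     * That range is now cluster-aligned, so bitmap_offset = 128 KiB,
     * bitmap_end = 192 KiB, and exactly that one cluster is reset.
     */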
- switch (method) {
- case MIRROR_METHOD_COPY:
- ret = blk_co_pwritev(job->target, dirty_offset, dirty_bytes,
- qiov ? &target_qiov : NULL, flags);
- break;
+ job_progress_increase_remaining(&job->common.job, bytes);
- case MIRROR_METHOD_ZERO:
- assert(!qiov);
- ret = blk_co_pwrite_zeroes(job->target, dirty_offset, dirty_bytes,
- flags);
- break;
+ switch (method) {
+ case MIRROR_METHOD_COPY:
+ ret = blk_co_pwritev_part(job->target, offset, bytes,
+ qiov, qiov_offset, flags);
+ break;
- case MIRROR_METHOD_DISCARD:
- assert(!qiov);
- ret = blk_co_pdiscard(job->target, dirty_offset, dirty_bytes);
- break;
+ case MIRROR_METHOD_ZERO:
+ assert(!qiov);
+ ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
+ break;
- default:
- abort();
- }
+ case MIRROR_METHOD_DISCARD:
+ assert(!qiov);
+ ret = blk_co_pdiscard(job->target, offset, bytes);
+ break;
- if (ret >= 0) {
- job_progress_update(&job->common.job, dirty_bytes);
- } else {
- BlockErrorAction action;
+ default:
+ abort();
+ }
- bdrv_set_dirty_bitmap(job->dirty_bitmap, dirty_offset, dirty_bytes);
- job->actively_synced = false;
+ if (ret >= 0) {
+ job_progress_update(&job->common.job, bytes);
+ } else {
+ BlockErrorAction action;
- action = mirror_error_action(job, false, -ret);
- if (action == BLOCK_ERROR_ACTION_REPORT) {
- if (!job->ret) {
- job->ret = ret;
- }
- break;
+ /*
+         * We failed, so we should mark the whole area dirty, expanded
+         * to cluster boundaries.  Note that we do not care about any
+         * shrunk tails: they were dirty when this function started, and
+         * they must still be dirty, because we have locked the region
+         * for the in-flight operation.
+ */
+ bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
+ bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
+ bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
+ bitmap_end - bitmap_offset);
+ job->actively_synced = false;
+
+ action = mirror_error_action(job, false, -ret);
+ if (action == BLOCK_ERROR_ACTION_REPORT) {
+ if (!job->ret) {
+ job->ret = ret;
}
}
-
- dirty_offset += dirty_bytes;
- }
-
- if (qiov) {
- qemu_iovec_destroy(&target_qiov);
}
}
.offset = offset,
.bytes = bytes,
.is_active_write = true,
+ .is_in_flight = true,
};
qemu_co_queue_init(&op->waiting_requests);
QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
}
static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
- const BdrvChildRole *role,
+ BdrvChildRole role,
BlockReopenQueue *reopen_queue,
uint64_t perm, uint64_t shared,
uint64_t *nperm, uint64_t *nshared)
*nshared = BLK_PERM_ALL;
}
-static void bdrv_mirror_top_refresh_limits(BlockDriverState *bs, Error **errp)
-{
- MirrorBDSOpaque *s = bs->opaque;
-
- if (s && s->job && s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
- bs->bl.request_alignment = s->job->granularity;
- }
-}
-
/* Dummy node that provides consistent read to its users without requiring it
* from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
.bdrv_co_pwrite_zeroes = bdrv_mirror_top_pwrite_zeroes,
.bdrv_co_pdiscard = bdrv_mirror_top_pdiscard,
.bdrv_co_flush = bdrv_mirror_top_flush,
- .bdrv_co_block_status = bdrv_co_block_status_from_backing,
.bdrv_refresh_filename = bdrv_mirror_top_refresh_filename,
.bdrv_child_perm = bdrv_mirror_top_child_perm,
- .bdrv_refresh_limits = bdrv_mirror_top_refresh_limits,
+
+ .is_filter = true,
};
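
With the node marked as a filter, generic block-layer helpers (bdrv_skip_filters() and friends, as used throughout this series) see through mirror_top_bs, which is what replaces the dropped explicit block-status forwarding; a small illustration:

    /* Illustration: generic filter traversal now sees through the node. */
    BlockDriverState *unfiltered = bdrv_skip_filters(mirror_top_bs);
    /* unfiltered is the source below mirror_top_bs (its backing child),
     * so e.g. block-status queries are served by the source node. */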
static BlockJob *mirror_start_job(
MirrorBlockJob *s;
MirrorBDSOpaque *bs_opaque;
BlockDriverState *mirror_top_bs;
- bool target_graph_mod;
bool target_is_backing;
- Error *local_err = NULL;
+ uint64_t target_perms, target_shared_perms;
int ret;
if (granularity == 0) {
buf_size = DEFAULT_MIRROR_BUF_SIZE;
}
- if (bs == target) {
+ if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) {
error_setg(errp, "Can't mirror node into itself");
return NULL;
}
* it alive until block_job_create() succeeds even if bs has no parent. */
bdrv_ref(mirror_top_bs);
bdrv_drained_begin(bs);
- bdrv_append(mirror_top_bs, bs, &local_err);
+ ret = bdrv_append(mirror_top_bs, bs, errp);
bdrv_drained_end(bs);
- if (local_err) {
+ if (ret < 0) {
bdrv_unref(mirror_top_bs);
- error_propagate(errp, local_err);
return NULL;
}
* In the case of active commit, things look a bit different, though,
* because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */
+
+ target_perms = BLK_PERM_WRITE;
+ target_shared_perms = BLK_PERM_WRITE_UNCHANGED;
+
target_is_backing = bdrv_chain_contains(bs, target);
- target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
+ if (target_is_backing) {
+ int64_t bs_size, target_size;
+ bs_size = bdrv_getlength(bs);
+ if (bs_size < 0) {
+ error_setg_errno(errp, -bs_size,
+ "Could not inquire top image size");
+ goto fail;
+ }
+
+ target_size = bdrv_getlength(target);
+ if (target_size < 0) {
+ error_setg_errno(errp, -target_size,
+ "Could not inquire base image size");
+ goto fail;
+ }
+
+ if (target_size < bs_size) {
+ target_perms |= BLK_PERM_RESIZE;
+ }
+
+ target_shared_perms |= BLK_PERM_CONSISTENT_READ
+ | BLK_PERM_WRITE
+ | BLK_PERM_GRAPH_MOD;
+ } else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
+ /*
+ * We may want to allow this in the future, but it would
+ * require taking some extra care.
+ */
+ error_setg(errp, "Cannot mirror to a filter on top of a node in the "
+ "source's backing chain");
+ goto fail;
+ }
+
+ if (backing_mode != MIRROR_LEAVE_BACKING_CHAIN) {
+ target_perms |= BLK_PERM_GRAPH_MOD;
+ }
+
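
In summary, the permission logic above yields:

    /*
     * Resulting target permissions:
     *
     *   perm:   BLK_PERM_WRITE
     *           + BLK_PERM_RESIZE     (active commit, base smaller than top)
     *           + BLK_PERM_GRAPH_MOD  (unless the backing chain is left
     *                                  in place)
     *   shared: BLK_PERM_WRITE_UNCHANGED
     *           + CONSISTENT_READ | WRITE | GRAPH_MOD  (active commit only)
     */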
s->target = blk_new(s->common.job.aio_context,
- BLK_PERM_WRITE | BLK_PERM_RESIZE |
- (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
- BLK_PERM_WRITE_UNCHANGED |
- (target_is_backing ? BLK_PERM_CONSISTENT_READ |
- BLK_PERM_WRITE |
- BLK_PERM_GRAPH_MOD : 0));
+ target_perms, target_shared_perms);
ret = blk_insert_bs(s->target, target, errp);
if (ret < 0) {
goto fail;
s->zero_target = zero_target;
s->copy_mode = copy_mode;
s->base = base;
+ s->base_overlay = bdrv_find_overlay(bs, base);
s->granularity = granularity;
s->buf_size = ROUND_UP(buf_size, granularity);
s->unmap = unmap;
s->should_complete = true;
}
- /*
- * Must be called before we start tracking writes, but after
- *
- * ((MirrorBlockJob *)
- * ((MirrorBDSOpaque *)
- * mirror_top_bs->opaque
- * )->job
- * )->copy_mode
- *
- * has the correct value.
- * (We start tracking writes as of the following
- * bdrv_create_dirty_bitmap() call.)
- */
- bdrv_refresh_limits(mirror_top_bs, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- goto fail;
- }
-
s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
if (!s->dirty_bitmap) {
goto fail;
}
+ if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
+ bdrv_disable_dirty_bitmap(s->dirty_bitmap);
+ }
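
A sketch of why disabling the bitmap is safe here:

    /*
     * In write-blocking mode, every guest write is mirrored
     * synchronously by do_sync_target_write(), which sets and resets
     * the relevant bits itself.  Disabling the bitmap only stops guest
     * writes from additionally being recorded automatically; explicit
     * bdrv_set/reset_dirty_bitmap() calls still work on a disabled
     * bitmap, and the initial state from mirror_dirty_init() is kept.
     */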
ret = block_job_add_bdrv(&s->common, "source", bs, 0,
BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
/* In commit_active_start() all intermediate nodes disappear, so
* any jobs in them must be blocked */
if (target_is_backing) {
- BlockDriverState *iter;
- for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
- /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
- * ourselves at s->base (if writes are blocked for a node, they are
- * also blocked for its backing file). The other options would be a
- * second filter driver above s->base (== target). */
+ BlockDriverState *iter, *filtered_target;
+ uint64_t iter_shared_perms;
+
+ /*
+ * The topmost node with
+ * bdrv_skip_filters(filtered_target) == bdrv_skip_filters(target)
+ */
+ filtered_target = bdrv_cow_bs(bdrv_find_overlay(bs, target));
+
+ assert(bdrv_skip_filters(filtered_target) ==
+ bdrv_skip_filters(target));
+
+ /*
+ * XXX BLK_PERM_WRITE needs to be allowed so we don't block
+ * ourselves at s->base (if writes are blocked for a node, they are
+ * also blocked for its backing file). The other options would be a
+ * second filter driver above s->base (== target).
+ */
+ iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;
+
+ for (iter = bdrv_filter_or_cow_bs(bs); iter != target;
+ iter = bdrv_filter_or_cow_bs(iter))
+ {
+ if (iter == filtered_target) {
+ /*
+ * From here on, all nodes are filters on the base.
+ * This allows us to share BLK_PERM_CONSISTENT_READ.
+ */
+ iter_shared_perms |= BLK_PERM_CONSISTENT_READ;
+ }
+
ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
- BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
- errp);
+ iter_shared_perms, errp);
if (ret < 0) {
goto fail;
}
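
An assumed chain to make the loop above concrete:

    /*
     * Assumed chain:
     *
     *   bs --backing--> overlay --backing--> filter --file--> target (== base)
     *
     * bdrv_find_overlay(bs, target) yields "overlay"; its COW child
     * (bdrv_cow_bs()) is "filter" == filtered_target.  From there down,
     * every node is a filter on top of the base, so those nodes may
     * additionally share BLK_PERM_CONSISTENT_READ.
     */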
bs_opaque->stop = true;
bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
&error_abort);
- bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
+ bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
bdrv_unref(mirror_top_bs);
return;
}
is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
- base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
+ base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
mirror_start_job(job_id, bs, creation_flags, target, replaces,
speed, granularity, buf_size, backing_mode, zero_target,
on_source_error, on_target_error, unmap, NULL, NULL,
bool auto_complete, Error **errp)
{
bool base_read_only;
- Error *local_err = NULL;
- BlockJob *ret;
+ BlockJob *job;
base_read_only = bdrv_is_read_only(base);
}
}
- ret = mirror_start_job(
+ job = mirror_start_job(
job_id, bs, creation_flags, base, NULL, speed, 0, 0,
MIRROR_LEAVE_BACKING_CHAIN, false,
on_error, on_error, true, cb, opaque,
&commit_active_job_driver, false, base, auto_complete,
filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
- &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
+ errp);
+ if (!job) {
goto error_restore_flags;
}
- return ret;
+ return job;
error_restore_flags:
/* ignore error and errp for bdrv_reopen, because we want to propagate