#include "block/coroutines.h"
#include "block/write-threshold.h"
#include "qemu/cutils.h"
+#include "qemu/memalign.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
void bdrv_parent_drained_end_single(BdrvChild *c)
{
int drained_end_counter = 0;
+ IO_OR_GS_CODE();
bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0);
}
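/*
 * Note on the pattern above (and in the other *_drained_end functions):
 * roughly, drained_end_counter counts .drained_end callbacks that are
 * still completing asynchronously, so we poll until it drops to zero
 * before returning to the caller.
 */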
void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
+ IO_OR_GS_CODE();
c->parent_quiesce_counter++;
if (c->klass->drained_begin) {
c->klass->drained_begin(c);
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
+ dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
+ src->pdiscard_alignment);
dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
dst->min_mem_alignment = MAX(dst->min_mem_alignment,
src->min_mem_alignment);
dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
+ dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
}
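/*
 * Merging sketch with hypothetical values: a limit of 0 means "no limit",
 * which is why MIN_NON_ZERO is used. E.g. max_transfer dst=0, src=1 MiB
 * merges to 1 MiB, while opt_transfer dst=64 KiB, src=128 KiB merges to
 * 128 KiB via MAX.
 */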
typedef struct BdrvRefreshLimitsState {
BdrvChild *c;
bool have_limits;
+ GLOBAL_STATE_CODE();
+
if (tran) {
BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
*s = (BdrvRefreshLimitsState) {
QLIST_FOREACH(c, &bs->children, next) {
if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
{
- bdrv_refresh_limits(c->bs, tran, errp);
- if (*errp) {
- return;
- }
bdrv_merge_limits(&bs->bl, &c->bs->bl);
have_limits = true;
}
*/
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
+ IO_CODE();
qatomic_inc(&bs->copy_on_read);
}
void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
int old = qatomic_fetch_dec(&bs->copy_on_read);
+ IO_CODE();
assert(old >= 1);
}
BdrvChild *ignore_parent, bool ignore_bds_parents)
{
BdrvChild *child, *next;
+ IO_OR_GS_CODE();
if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
return true;
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
BdrvChild *parent, bool ignore_bds_parents)
{
+ IO_OR_GS_CODE();
assert(!qemu_in_coroutine());
/* Stop things in parent-to-child order */
void bdrv_drained_begin(BlockDriverState *bs)
{
+ IO_OR_GS_CODE();
bdrv_do_drained_begin(bs, false, NULL, false, true);
}
void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
+ IO_OR_GS_CODE();
bdrv_do_drained_begin(bs, true, NULL, false, true);
}
void bdrv_drained_end(BlockDriverState *bs)
{
int drained_end_counter = 0;
+ IO_OR_GS_CODE();
bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}
void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
+ IO_CODE();
bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}
void bdrv_subtree_drained_end(BlockDriverState *bs)
{
int drained_end_counter = 0;
+ IO_OR_GS_CODE();
bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}
void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
int i;
+ IO_OR_GS_CODE();
for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
bdrv_do_drained_begin(child->bs, true, child, false, true);
{
int drained_end_counter = 0;
int i;
+ IO_OR_GS_CODE();
for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
bdrv_do_drained_end(child->bs, true, child, false,
*/
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
+ IO_OR_GS_CODE();
assert(qemu_in_coroutine());
bdrv_drained_begin(bs);
bdrv_drained_end(bs);
void bdrv_drain(BlockDriverState *bs)
{
+ IO_OR_GS_CODE();
bdrv_drained_begin(bs);
bdrv_drained_end(bs);
}
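/*
 * Rough usage sketch (hypothetical caller): a drained section quiesces
 * all I/O on a node while the caller works on it.
 *
 *     bdrv_drained_begin(bs);   // stop parents, wait for in-flight I/O
 *     ...reconfigure bs or the graph...
 *     bdrv_drained_end(bs);     // parents may issue requests again
 */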
{
BlockDriverState *bs = NULL;
bool result = false;
+ GLOBAL_STATE_CODE();
/* bdrv_drain_poll() can't make changes to the graph and we are holding the
* main AioContext lock, so iterating bdrv_next_all_states() is safe. */
void bdrv_drain_all_begin(void)
{
BlockDriverState *bs = NULL;
+ GLOBAL_STATE_CODE();
if (qemu_in_coroutine()) {
bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
int drained_end_counter = 0;
+ GLOBAL_STATE_CODE();
g_assert(bs->quiesce_counter > 0);
g_assert(!bs->refcnt);
{
BlockDriverState *bs = NULL;
int drained_end_counter = 0;
+ GLOBAL_STATE_CODE();
/*
* bdrv queue is managed by record/replay,
void bdrv_drain_all(void)
{
+ GLOBAL_STATE_CODE();
bdrv_drain_all_begin();
bdrv_drain_all_end();
}
{
BdrvTrackedRequest *req;
Coroutine *self = qemu_coroutine_self();
+ IO_CODE();
QLIST_FOREACH(req, &bs->tracked_requests, list) {
if (req->co == self) {
int64_t *cluster_bytes)
{
BlockDriverInfo bdi;
-
+ IO_CODE();
if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
*cluster_offset = offset;
*cluster_bytes = bytes;
void bdrv_inc_in_flight(BlockDriverState *bs)
{
+ IO_CODE();
qatomic_inc(&bs->in_flight);
}
void bdrv_wakeup(BlockDriverState *bs)
{
+ IO_CODE();
aio_wait_kick();
}
void bdrv_dec_in_flight(BlockDriverState *bs)
{
+ IO_CODE();
qatomic_dec(&bs->in_flight);
bdrv_wakeup(bs);
}
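/*
 * Every request holds the in-flight counter between inc and dec; drain
 * and BDRV_POLL_WHILE() wait for it to reach zero, and bdrv_wakeup()
 * kicks those waiters. Usual pairing (do_request is hypothetical):
 *
 *     bdrv_inc_in_flight(bs);
 *     ret = do_request(bs, ...);
 *     bdrv_dec_in_flight(bs);
 */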
uint64_t align)
{
bool waited;
+ IO_CODE();
qemu_co_mutex_lock(&req->bs->reqs_lock);
return waited;
}
-static int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
- QEMUIOVector *qiov, size_t qiov_offset,
- Error **errp)
+int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset,
+ Error **errp)
{
/*
* Check generic offset/bytes correctness
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
int64_t bytes, BdrvRequestFlags flags)
{
+ IO_CODE();
return bdrv_pwritev(child, offset, bytes, NULL,
BDRV_REQ_ZERO_WRITE | flags);
}
int ret;
int64_t target_size, bytes, offset = 0;
BlockDriverState *bs = child->bs;
+ IO_CODE();
target_size = bdrv_getlength(bs);
if (target_size < 0) {
{
int ret;
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
+ IO_CODE();
if (bytes < 0) {
return -EINVAL;
{
int ret;
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
+ IO_CODE();
if (bytes < 0) {
return -EINVAL;
const void *buf, int64_t count)
{
int ret;
+ IO_CODE();
ret = bdrv_pwrite(child, offset, buf, count);
if (ret < 0) {
static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
int64_t offset, int64_t bytes,
QEMUIOVector *qiov,
- size_t qiov_offset, int flags)
+ size_t qiov_offset,
+ BdrvRequestFlags flags)
{
BlockDriver *drv = bs->drv;
int64_t sector_num;
int64_t offset, int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
+ IO_CODE();
return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}
BdrvTrackedRequest req;
BdrvRequestPadding pad;
int ret;
+ IO_CODE();
trace_bdrv_co_preadv_part(bs, offset, bytes, flags);
ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
NULL);
if (ret < 0) {
- return ret;
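+ /* The in-flight reference taken earlier must still be dropped */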
+ goto fail;
}
tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
bs->bl.request_alignment,
qiov, qiov_offset, flags);
tracked_request_end(&req);
- bdrv_dec_in_flight(bs);
-
bdrv_padding_destroy(&pad);
+fail:
+ bdrv_dec_in_flight(bs);
+
return ret;
}
int head = 0;
int tail = 0;
- int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
+ int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
+ INT64_MAX);
int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
bs->bl.request_alignment);
int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
return -ENOTSUP;
}
+ /* Invalidate the cached block-status data range if this write overlaps */
+ bdrv_bsc_invalidate_range(bs, offset, bytes);
+
assert(alignment % bs->bl.request_alignment == 0);
head = offset % alignment;
tail = (offset + bytes) % alignment;
*/
static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
- int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
+ int64_t align, QEMUIOVector *qiov, size_t qiov_offset,
+ BdrvRequestFlags flags)
{
BlockDriverState *bs = child->bs;
BlockDriver *drv = bs->drv;
padding = bdrv_init_padding(bs, offset, bytes, &pad);
if (padding) {
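+ /* Padding implies waiting for overlapping requests, so NO_WAIT is invalid */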
+ assert(!(flags & BDRV_REQ_NO_WAIT));
bdrv_make_request_serialising(req, align);
bdrv_padding_rmw_read(child, req, &pad, true);
int64_t offset, int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
+ IO_CODE();
return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
}
BdrvRequestPadding pad;
int ret;
bool padded = false;
+ IO_CODE();
trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);
return -ENOMEDIUM;
}
- ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
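+ /*
+ * Zero writes carry no payload, so they are not bound by the 32-bit
+ * size limit that bdrv_check_request32() enforces for qiov-based
+ * requests; only the generic offset/bytes checks apply to them.
+ */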
+ if (flags & BDRV_REQ_ZERO_WRITE) {
+ ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
+ } else {
+ ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
+ }
if (ret < 0) {
return ret;
}
* serialize the request to prevent interactions of the
* widened region with other transactions.
*/
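+ /* Serialising the widened request may block, so NO_WAIT is invalid here */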
+ assert(!(flags & BDRV_REQ_NO_WAIT));
bdrv_make_request_serialising(&req, align);
bdrv_padding_rmw_read(child, &req, &pad, false);
}
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
int64_t bytes, BdrvRequestFlags flags)
{
+ IO_CODE();
trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
BlockDriverState *bs = NULL;
int result = 0;
+ GLOBAL_STATE_CODE();
+
/*
* bdrv queue is managed by record/replay,
* creating new flush request for stopping
aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
if (bs->drv->bdrv_co_block_status) {
- ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
- aligned_bytes, pnum, &local_map,
- &local_file);
+ /*
+ * Use the block-status cache only for protocol nodes: Format
+ * drivers are generally quick to inquire the status, but protocol
+ * drivers often need to get information from outside of qemu, so
+ * we do not have control over the actual implementation. There
+ * have been cases where inquiring the status took an unreasonably
+ * long time, and we can do nothing in qemu to fix it.
+ *
+ * This is especially problematic for images with large data areas,
+ * because finding the few holes in them and giving them special
+ * treatment does not gain much performance. Therefore, we try to
+ * cache the last-identified data region.
+ *
+ * Second, limiting ourselves to protocol nodes allows us to assume
+ * the block status for data regions to be DATA | OFFSET_VALID, and
+ * that the host offset is the same as the guest offset.
+ *
+ * Note that it is possible that external writers zero parts of
+ * the cached regions without the cache being invalidated, and so
+ * we may report zeroes as data. This is not catastrophic,
+ * however, because reporting zeroes as data is fine.
+ */
+ if (QLIST_EMPTY(&bs->children) &&
+ bdrv_bsc_is_data(bs, aligned_offset, pnum))
+ {
+ ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
+ local_file = bs;
+ local_map = aligned_offset;
+ } else {
+ ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
+ aligned_bytes, pnum, &local_map,
+ &local_file);
+
+ /*
+ * Note that checking QLIST_EMPTY(&bs->children) is also done when
+ * the cache is queried above. Technically, we do not need to check
+ * it here; the worst that can happen is that we fill the cache for
+ * non-protocol nodes, and then it is never used. However, filling
+ * the cache requires an RCU update, so double check here to avoid
+ * such an update if possible.
+ *
+ * Check want_zero, because we only want to update the cache when we
+ * have accurate information about what is zero and what is data.
+ */
+ if (want_zero &&
+ ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
+ QLIST_EMPTY(&bs->children))
+ {
+ /*
+ * When a protocol driver reports BLOCK_OFFSET_VALID, the
+ * returned local_map value must be the same as the offset we
+ * have passed (aligned_offset), and local_file must be the node
+ * itself.
+ * Assert this, because we follow this rule when reading from
+ * the cache (see the `local_file = bs` and
+ * `local_map = aligned_offset` assignments above), and the
+ * result the cache delivers must be the same as the driver
+ * would deliver.
+ */
+ assert(local_file == bs);
+ assert(local_map == aligned_offset);
+ bdrv_bsc_fill(bs, aligned_offset, *pnum);
+ }
+ }
} else {
/* Default code for filters */
BlockDriverState *p;
int64_t eof = 0;
int dummy;
+ IO_CODE();
assert(!include_base || base); /* Can't include NULL base */
int64_t offset, int64_t bytes, int64_t *pnum,
int64_t *map, BlockDriverState **file)
{
+ IO_CODE();
return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
pnum, map, file, NULL);
}
int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map, BlockDriverState **file)
{
+ IO_CODE();
return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
offset, bytes, pnum, map, file);
}
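/*
 * Caller sketch (hypothetical, in the style of qemu-img map): walk a
 * range of `size` bytes and classify each chunk.
 *
 *     int64_t pnum;
 *     for (int64_t off = 0; off < size; off += pnum) {
 *         int ret = bdrv_block_status(bs, off, size - off, &pnum,
 *                                     NULL, NULL);
 *         if (ret < 0) {
 *             break;
 *         }
 *         // BDRV_BLOCK_DATA/BDRV_BLOCK_ZERO in ret apply to pnum bytes
 *     }
 */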
{
int ret;
int64_t pnum = bytes;
+ IO_CODE();
if (!bytes) {
return 1;
{
int ret;
int64_t dummy;
+ IO_CODE();
ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
bytes, pnum ? pnum : &dummy, NULL,
int ret = bdrv_common_block_status_above(top, base, include_base, false,
offset, bytes, pnum, NULL, NULL,
&depth);
+ IO_CODE();
if (ret < 0) {
return ret;
}
{
BlockDriver *drv = bs->drv;
BlockDriverState *child_bs = bdrv_primary_bs(bs);
- int ret = -ENOTSUP;
+ int ret;
+ IO_CODE();
+
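+ /* Validate pos/qiov as a request window before involving the driver */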
+ ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
+ if (ret < 0) {
+ return ret;
+ }
if (!drv) {
return -ENOMEDIUM;
ret = drv->bdrv_load_vmstate(bs, qiov, pos);
} else if (child_bs) {
ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
+ } else {
+ ret = -ENOTSUP;
}
bdrv_dec_in_flight(bs);
{
BlockDriver *drv = bs->drv;
BlockDriverState *child_bs = bdrv_primary_bs(bs);
- int ret = -ENOTSUP;
+ int ret;
+ IO_CODE();
+
+ ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
+ if (ret < 0) {
+ return ret;
+ }
if (!drv) {
return -ENOMEDIUM;
ret = drv->bdrv_save_vmstate(bs, qiov, pos);
} else if (child_bs) {
ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
+ } else {
+ ret = -ENOTSUP;
}
bdrv_dec_in_flight(bs);
{
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
int ret = bdrv_writev_vmstate(bs, &qiov, pos);
+ IO_CODE();
return ret < 0 ? ret : size;
}
{
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
int ret = bdrv_readv_vmstate(bs, &qiov, pos);
+ IO_CODE();
return ret < 0 ? ret : size;
}
void bdrv_aio_cancel(BlockAIOCB *acb)
{
+ IO_CODE();
qemu_aio_ref(acb);
bdrv_aio_cancel_async(acb);
while (acb->refcnt > 1) {
* In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
+ IO_CODE();
if (acb->aiocb_info->cancel_async) {
acb->aiocb_info->cancel_async(acb);
}
BdrvChild *child;
int current_gen;
int ret = 0;
+ IO_CODE();
bdrv_inc_in_flight(bs);
int64_t bytes)
{
BdrvTrackedRequest req;
- int max_pdiscard, ret;
+ int ret;
+ int64_t max_pdiscard;
int head, tail, align;
BlockDriverState *bs = child->bs;
+ IO_CODE();
if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
return -ENOMEDIUM;
return 0;
}
+ /* Invalidate the cached block-status data range if this discard overlaps */
+ bdrv_bsc_invalidate_range(bs, offset, bytes);
+
/* Discard is advisory, but some devices track and coalesce
* unaligned requests, so we must pass everything down rather than
* round here. Still, most devices will just silently ignore
goto out;
}
- max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
+ max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX),
align);
assert(max_pdiscard >= bs->bl.request_alignment);
.coroutine = qemu_coroutine_self(),
};
BlockAIOCB *acb;
+ IO_CODE();
bdrv_inc_in_flight(bs);
if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
+ IO_CODE();
return qemu_memalign(bdrv_opt_mem_align(bs), size);
}
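/*
 * Buffers used for O_DIRECT I/O must respect the node's memory alignment;
 * qemu_blockalign() returns such a buffer (free it with qemu_vfree()).
 */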
void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
+ IO_CODE();
return memset(qemu_blockalign(bs, size), 0, size);
}
void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
size_t align = bdrv_opt_mem_align(bs);
+ IO_CODE();
/* Ensure that NULL is never returned on success */
assert(align > 0);
void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
void *mem = qemu_try_blockalign(bs, size);
+ IO_CODE();
if (mem) {
memset(mem, 0, size);
{
int i;
size_t alignment = bdrv_min_mem_align(bs);
+ IO_CODE();
for (i = 0; i < qiov->niov; i++) {
if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
void bdrv_io_plug(BlockDriverState *bs)
{
BdrvChild *child;
+ IO_CODE();
QLIST_FOREACH(child, &bs->children, next) {
bdrv_io_plug(child->bs);
void bdrv_io_unplug(BlockDriverState *bs)
{
BdrvChild *child;
+ IO_CODE();
assert(bs->io_plugged);
if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
{
BdrvChild *child;
+ GLOBAL_STATE_CODE();
if (bs->drv && bs->drv->bdrv_register_buf) {
bs->drv->bdrv_register_buf(bs, host, size);
}
{
BdrvChild *child;
+ GLOBAL_STATE_CODE();
if (bs->drv && bs->drv->bdrv_unregister_buf) {
bs->drv->bdrv_unregister_buf(bs, host);
}
/* TODO We can support BDRV_REQ_NO_FALLBACK here */
assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
+ assert(!(read_flags & BDRV_REQ_NO_WAIT));
+ assert(!(write_flags & BDRV_REQ_NO_WAIT));
if (!dst || !dst->bs || !bdrv_is_inserted(dst->bs)) {
return -ENOMEDIUM;
BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
+ IO_CODE();
trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
read_flags, write_flags);
return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
+ IO_CODE();
trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
read_flags, write_flags);
return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
int64_t bytes, BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
+ IO_CODE();
return bdrv_co_copy_range_from(src, src_offset,
dst, dst_offset,
bytes, read_flags, write_flags);
BdrvTrackedRequest req;
int64_t old_size, new_bytes;
int ret;
-
+ IO_CODE();
/* if bs->drv == NULL, bs is closed, so there's nothing to do here */
if (!drv) {
void bdrv_cancel_in_flight(BlockDriverState *bs)
{
+ GLOBAL_STATE_CODE();
if (!bs || !bs->drv) {
return;
}
bs->drv->bdrv_cancel_in_flight(bs);
}
}
+
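+/*
+ * Snapshot-access wrappers: each forwards to the corresponding driver
+ * callback if implemented, holding an in-flight reference around the
+ * call so that drained sections account for the request.
+ */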
+int coroutine_fn
+bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
+ QEMUIOVector *qiov, size_t qiov_offset)
+{
+ BlockDriverState *bs = child->bs;
+ BlockDriver *drv = bs->drv;
+ int ret;
+ IO_CODE();
+
+ if (!drv) {
+ return -ENOMEDIUM;
+ }
+
+ if (!drv->bdrv_co_preadv_snapshot) {
+ return -ENOTSUP;
+ }
+
+ bdrv_inc_in_flight(bs);
+ ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset);
+ bdrv_dec_in_flight(bs);
+
+ return ret;
+}
+
+int coroutine_fn
+bdrv_co_snapshot_block_status(BlockDriverState *bs,
+ bool want_zero, int64_t offset, int64_t bytes,
+ int64_t *pnum, int64_t *map,
+ BlockDriverState **file)
+{
+ BlockDriver *drv = bs->drv;
+ int ret;
+ IO_CODE();
+
+ if (!drv) {
+ return -ENOMEDIUM;
+ }
+
+ if (!drv->bdrv_co_snapshot_block_status) {
+ return -ENOTSUP;
+ }
+
+ bdrv_inc_in_flight(bs);
+ ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes,
+ pnum, map, file);
+ bdrv_dec_in_flight(bs);
+
+ return ret;
+}
+
+int coroutine_fn
+bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
+{
+ BlockDriver *drv = bs->drv;
+ int ret;
+ IO_CODE();
+
+ if (!drv) {
+ return -ENOMEDIUM;
+ }
+
+ if (!drv->bdrv_co_pdiscard_snapshot) {
+ return -ENOTSUP;
+ }
+
+ bdrv_inc_in_flight(bs);
+ ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes);
+ bdrv_dec_in_flight(bs);
+
+ return ret;
+}