#include <zlib.h>
#include "qemu-common.h"
-#include "block_int.h"
+#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"
l2_table = *table;
- if (old_l2_offset == 0) {
+ if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
/* if there was no old l2 table, clear the new table */
memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
} else {
/* if there was an old l2 table, read it from the disk */
BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
- ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_offset,
+ ret = qcow2_cache_get(bs, s->l2_table_cache,
+ old_l2_offset & L1E_OFFSET_MASK,
(void**) &old_table);
if (ret < 0) {
goto fail;
}
+/*
+ * Checks how many clusters in a given L2 table are contiguous in the image
+ * file. As soon as one of the flags in the bitmask stop_flags changes compared
+ * to the first cluster, the search is stopped and the cluster is not counted
+ * as contiguous. (This allows, for example, stopping at the first
+ * compressed cluster, which may require different handling.)
+ */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
- uint64_t *l2_table, uint64_t start, uint64_t mask)
+ uint64_t *l2_table, uint64_t start, uint64_t stop_flags)
{
int i;
- uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;
+ uint64_t mask = stop_flags | L2E_OFFSET_MASK;
+ uint64_t offset = be64_to_cpu(l2_table[0]) & mask;
if (!offset)
return 0;
- for (i = start; i < start + nb_clusters; i++)
- if (offset + (uint64_t) i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
+ for (i = start; i < start + nb_clusters; i++) {
+ uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
+ if (offset + (uint64_t) i * cluster_size != l2_entry) {
break;
+ }
+ }
return (i - start);
}
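
The stop_flags semantics are easiest to see with concrete numbers. A minimal
sketch, assuming 64k clusters and eliding the big-endian conversion:

/*
 * mask = stop_flags | L2E_OFFSET_MASK keeps the host offset bits plus the
 * stop flags, so QCOW_OFLAG_COPIED is ignored, while any change in a stop
 * flag relative to the first entry ends the run:
 *
 *   l2_table[0] = 0x50000;   // run starts at host offset 0x50000
 *   l2_table[1] = 0x60000;   // contiguous: 0x50000 + 1 * 0x10000
 *   l2_table[2] = 0x80000;   // gap, run ends here
 *
 *   count_contiguous_clusters(3, 0x10000, l2_table, 0,
 *       QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);   // returns 2
 */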
static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
- int i = 0;
+ int i;
- while(nb_clusters-- && l2_table[i] == 0)
- i++;
+ for (i = 0; i < nb_clusters; i++) {
+ int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));
+
+ if (type != QCOW2_CLUSTER_UNALLOCATED) {
+ break;
+ }
+ }
return i;
}
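
count_contiguous_free_clusters() now goes through qcow2_get_cluster_type()
instead of testing for an all-zero entry. For reference, the helper in
block/qcow2.h classifies an L2 entry along these lines (a sketch, not a
verbatim copy):

static inline int qcow2_get_cluster_type(uint64_t l2_entry)
{
    if (l2_entry & QCOW_OFLAG_COMPRESSED) {
        return QCOW2_CLUSTER_COMPRESSED;
    } else if (l2_entry & QCOW_OFLAG_ZERO) {
        /* reads back as zeros, whatever the remaining bits contain */
        return QCOW2_CLUSTER_ZERO;
    } else if (!(l2_entry & L2E_OFFSET_MASK)) {
        return QCOW2_CLUSTER_UNALLOCATED;
    } else {
        return QCOW2_CLUSTER_NORMAL;
    }
}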
*
* on exit, *num is the number of contiguous sectors we can read.
*
- * Return 0, if the offset is found
- * Return -errno, otherwise.
- *
+ * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
+ * cases.
*/
-
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
int *num, uint64_t *cluster_offset)
{
/* seek the l2 offset in the l1 table */
l1_index = offset >> l1_bits;
- if (l1_index >= s->l1_size)
+ if (l1_index >= s->l1_size) {
+ ret = QCOW2_CLUSTER_UNALLOCATED;
goto out;
+ }
- l2_offset = s->l1_table[l1_index];
-
- /* seek the l2 table of the given l2 offset */
-
- if (!l2_offset)
+ l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
+ if (!l2_offset) {
+ ret = QCOW2_CLUSTER_UNALLOCATED;
goto out;
+ }
/* load the l2 table in memory */
- l2_offset &= ~QCOW_OFLAG_COPIED;
ret = l2_load(bs, l2_offset, &l2_table);
if (ret < 0) {
return ret;
*cluster_offset = be64_to_cpu(l2_table[l2_index]);
nb_clusters = size_to_clusters(s, nb_needed << 9);
- if (!*cluster_offset) {
+ ret = qcow2_get_cluster_type(*cluster_offset);
+ switch (ret) {
+ case QCOW2_CLUSTER_COMPRESSED:
+ /* Compressed clusters can only be processed one by one */
+ c = 1;
+ *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
+ break;
+ case QCOW2_CLUSTER_ZERO:
+ c = count_contiguous_clusters(nb_clusters, s->cluster_size,
+ &l2_table[l2_index], 0,
+ QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
+ *cluster_offset = 0;
+ break;
+ case QCOW2_CLUSTER_UNALLOCATED:
/* how many empty clusters ? */
c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
- } else {
+ *cluster_offset = 0;
+ break;
+ case QCOW2_CLUSTER_NORMAL:
/* how many allocated clusters ? */
c = count_contiguous_clusters(nb_clusters, s->cluster_size,
- &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
+ &l2_table[l2_index], 0,
+ QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
+ *cluster_offset &= L2E_OFFSET_MASK;
+ break;
+ default:
+ abort();
}
qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
- nb_available = (c * s->cluster_sectors);
+ nb_available = (c * s->cluster_sectors);
+
out:
if (nb_available > nb_needed)
nb_available = nb_needed;
*num = nb_available - index_in_cluster;
- *cluster_offset &=~QCOW_OFLAG_COPIED;
- return 0;
+ return ret;
}
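
With the new return value, callers can dispatch on the cluster type instead
of testing flag bits themselves. A hypothetical read-path fragment (error
handling and declarations elided):

ret = qcow2_get_cluster_offset(bs, offset, &cur_nr_sectors, &cluster_offset);
if (ret < 0) {
    return ret;
}

switch (ret) {
case QCOW2_CLUSTER_UNALLOCATED:
    /* read from the backing file, or supply zeros */
    break;
case QCOW2_CLUSTER_ZERO:
    /* fill the guest buffer with zeros */
    break;
case QCOW2_CLUSTER_COMPRESSED:
    /* decompress the one compressed cluster */
    break;
case QCOW2_CLUSTER_NORMAL:
    /* read cur_nr_sectors starting at cluster_offset in bs->file */
    break;
}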
/*
return ret;
}
}
- l2_offset = s->l1_table[l1_index];
+
+ l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
/* seek the l2 table of the given l2 offset */
- if (l2_offset & QCOW_OFLAG_COPIED) {
+ if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
/* load the l2 table in memory */
- l2_offset &= ~QCOW_OFLAG_COPIED;
ret = l2_load(bs, l2_offset, &l2_table);
if (ret < 0) {
return ret;
if (l2_offset) {
qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
}
- l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
}
/* find the cluster offset for the given disk offset */
return 0;
}
+ /* Compression can't overwrite anything. Fail if the cluster was already
+ * allocated. */
cluster_offset = be64_to_cpu(l2_table[l2_index]);
- if (cluster_offset & QCOW_OFLAG_COPIED) {
+ if (cluster_offset & L2E_OFFSET_MASK) {
qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
return 0;
}
- if (cluster_offset)
- qcow2_free_any_clusters(bs, cluster_offset, 1);
-
cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
if (cluster_offset < 0) {
qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
return cluster_offset;
}
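
Background on the mask used in the compressed paths above: a compressed L2
entry has no plain offset field; offset and size share the low 62 bits, as
described by the qcow2 format specification:

/*
 * Compressed L2 entry layout, with x = 62 - (cluster_bits - 8):
 *   bits 0..x-1   host offset of the compressed data
 *   bits x..61    number of additional 512-byte sectors used
 *   bit 62        QCOW_OFLAG_COMPRESSED
 *   bit 63        QCOW_OFLAG_COPIED
 * L2E_COMPRESSED_OFFSET_SIZE_MASK covers bits 0..61 and thus strips only
 * the two flag bits.
 */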
-int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
+static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
BDRVQcowState *s = bs->opaque;
- int i, j = 0, l2_index, ret;
- uint64_t *old_cluster, start_sect, *l2_table;
- uint64_t cluster_offset = m->alloc_offset;
- bool cow = false;
-
- trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
+ int ret;
- if (m->nb_clusters == 0)
+ if (r->nb_sectors == 0) {
return 0;
+ }
- old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));
+ qemu_co_mutex_unlock(&s->lock);
+ ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
+ r->offset / BDRV_SECTOR_SIZE,
+ r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
+ qemu_co_mutex_lock(&s->lock);
- /* copy content of unmodified sectors */
- start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
- if (m->n_start) {
- cow = true;
- qemu_co_mutex_unlock(&s->lock);
- ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
- qemu_co_mutex_lock(&s->lock);
- if (ret < 0)
- goto err;
- }
-
- if (m->nb_available & (s->cluster_sectors - 1)) {
- uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
- cow = true;
- qemu_co_mutex_unlock(&s->lock);
- ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
- m->nb_available - end, s->cluster_sectors);
- qemu_co_mutex_lock(&s->lock);
- if (ret < 0)
- goto err;
+ if (ret < 0) {
+ return ret;
}
/*
- * Update L2 table.
- *
* Before we update the L2 table to actually point to the new cluster, we
* need to be sure that the refcounts have been increased and COW was
* handled.
*/
- if (cow) {
- qcow2_cache_depends_on_flush(s->l2_table_cache);
+ qcow2_cache_depends_on_flush(s->l2_table_cache);
+
+ return 0;
+}
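
perform_cow() operates on the two COW regions that the allocating writer
records in its QCowL2Meta. The region type is declared in block/qcow2.h
roughly as follows (sketch for illustration):

typedef struct Qcow2COWRegion {
    /* offset of the region in bytes from the start of the first
     * allocated cluster */
    uint64_t offset;

    /* number of sectors to copy */
    int nb_sectors;
} Qcow2COWRegion;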
+
+int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
+{
+ BDRVQcowState *s = bs->opaque;
+ int i, j = 0, l2_index, ret;
+ uint64_t *old_cluster, *l2_table;
+ uint64_t cluster_offset = m->alloc_offset;
+
+ trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
+ assert(m->nb_clusters > 0);
+
+ old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));
+
+ /* copy content of unmodified sectors */
+ ret = perform_cow(bs, m, &m->cow_start);
+ if (ret < 0) {
+ goto err;
+ }
+
+ ret = perform_cow(bs, m, &m->cow_end);
+ if (ret < 0) {
+ goto err;
+ }
+
+ /* Update L2 table. */
+ if (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS) {
+ qcow2_mark_dirty(bs);
+ }
+ if (qcow2_need_accurate_refcounts(s)) {
+ qcow2_cache_set_dependency(bs, s->l2_table_cache,
+ s->refcount_block_cache);
}
- qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);
ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
if (ret < 0) {
goto err;
*/
if (j != 0) {
for (i = 0; i < j; i++) {
- qcow2_free_any_clusters(bs,
- be64_to_cpu(old_cluster[i]) & ~QCOW_OFLAG_COPIED, 1);
+ qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1);
}
}
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
uint64_t *l2_table, int l2_index)
{
- int i = 0;
- uint64_t cluster_offset;
+ int i;
- while (i < nb_clusters) {
- i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
- &l2_table[l2_index], i, 0);
- if ((i >= nb_clusters) || be64_to_cpu(l2_table[l2_index + i])) {
- break;
- }
- i += count_contiguous_free_clusters(nb_clusters - i,
- &l2_table[l2_index + i]);
- if (i >= nb_clusters) {
+ for (i = 0; i < nb_clusters; i++) {
+ uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
+ int cluster_type = qcow2_get_cluster_type(l2_entry);
+
+ switch (cluster_type) {
+ case QCOW2_CLUSTER_NORMAL:
+ if (l2_entry & QCOW_OFLAG_COPIED) {
+ goto out;
+ }
break;
- }
-
- cluster_offset = be64_to_cpu(l2_table[l2_index + i]);
-
- if ((cluster_offset & QCOW_OFLAG_COPIED) ||
- (cluster_offset & QCOW_OFLAG_COMPRESSED))
+ case QCOW2_CLUSTER_UNALLOCATED:
+ case QCOW2_CLUSTER_COMPRESSED:
+ case QCOW2_CLUSTER_ZERO:
break;
+ default:
+ abort();
+ }
}
+out:
assert(i <= nb_clusters);
return i;
}
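
A worked example of the scan, for a hypothetical L2 window:

/*
 * [NORMAL, ZERO, NORMAL|COPIED, ...]  ->  returns 2
 *
 * The plain NORMAL cluster and the ZERO cluster both need COW; the first
 * COPIED cluster ends the scan because it can be overwritten in place.
 */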
/*
- * Allocates new clusters for the given guest_offset.
- *
- * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
- * contain the number of clusters that have been allocated and are contiguous
- * in the image file.
- *
- * If *host_offset is non-zero, it specifies the offset in the image file at
- * which the new clusters must start. *nb_clusters can be 0 on return in this
- * case if the cluster at host_offset is already in use. If *host_offset is
- * zero, the clusters can be allocated anywhere in the image file.
- *
- * *host_offset is updated to contain the offset into the image file at which
- * the first allocated cluster starts.
- *
- * Return 0 on success and -errno in error cases. -EAGAIN means that the
- * function has been waiting for another request and the allocation must be
- * restarted, but the whole request should not be failed.
+ * Check if there already is an AIO write request in flight which allocates
+ * the same cluster. In this case we need to wait until the previous
+ * request has completed and updated the L2 table accordingly.
*/
-static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
- uint64_t *host_offset, unsigned int *nb_clusters, uint64_t *l2_table)
+static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
+ unsigned int *nb_clusters)
{
BDRVQcowState *s = bs->opaque;
- int64_t cluster_offset;
QCowL2Meta *old_alloc;
- trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
- *host_offset, *nb_clusters);
-
- /*
- * Check if there already is an AIO write request in flight which allocates
- * the same cluster. In this case we need to wait until the previous
- * request has completed and updated the L2 table accordingly.
- */
QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {
uint64_t start = guest_offset >> s->cluster_bits;
abort();
}
+ return 0;
+}
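
Conceptually, the loop compares the request's cluster range against each
allocation in flight; a sketch of the decision logic, not the literal code:

/*
 * for each old_alloc in s->cluster_allocs:
 *   if [start, end) does not intersect [old_start, old_end):
 *       keep scanning
 *   else if start < old_start:
 *       *nb_clusters = old_start - start;  // stop short of the conflict
 *   else:
 *       *nb_clusters = 0;                  // overlap from the start: wait
 *                                          // on old_alloc->dependent_requests
 *                                          // and return -EAGAIN
 */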
+
+/*
+ * Allocates new clusters for the given guest_offset.
+ *
+ * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
+ * contain the number of clusters that have been allocated and are contiguous
+ * in the image file.
+ *
+ * If *host_offset is non-zero, it specifies the offset in the image file at
+ * which the new clusters must start. *nb_clusters can be 0 on return in this
+ * case if the cluster at host_offset is already in use. If *host_offset is
+ * zero, the clusters can be allocated anywhere in the image file.
+ *
+ * *host_offset is updated to contain the offset into the image file at which
+ * the first allocated cluster starts.
+ *
+ * Return 0 on success and -errno in error cases. -EAGAIN means that the
+ * function has been waiting for another request and the allocation must be
+ * restarted, but the whole request should not be failed.
+ */
+static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
+ uint64_t *host_offset, unsigned int *nb_clusters)
+{
+ BDRVQcowState *s = bs->opaque;
+ int ret;
+
+ trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
+ *host_offset, *nb_clusters);
+
+ ret = handle_dependencies(bs, guest_offset, nb_clusters);
+ if (ret < 0) {
+ return ret;
+ }
+
/* Allocate new clusters */
trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
if (*host_offset == 0) {
- cluster_offset = qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
+ int64_t cluster_offset =
+ qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
+ if (cluster_offset < 0) {
+ return cluster_offset;
+ }
+ *host_offset = cluster_offset;
+ return 0;
} else {
- cluster_offset = *host_offset;
- *nb_clusters = qcow2_alloc_clusters_at(bs, cluster_offset, *nb_clusters);
- }
-
- if (cluster_offset < 0) {
- return cluster_offset;
+ ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
+ if (ret < 0) {
+ return ret;
+ }
+ *nb_clusters = ret;
+ return 0;
}
- *host_offset = cluster_offset;
- return 0;
}
/*
* Return 0 on success and -errno in error cases
*/
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
- int n_start, int n_end, int *num, QCowL2Meta *m)
+ int n_start, int n_end, int *num, uint64_t *host_offset, QCowL2Meta **m)
{
BDRVQcowState *s = bs->opaque;
int l2_index, ret, sectors;
n_start, n_end);
/* Find L2 entry for the first involved cluster */
+again:
ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
if (ret < 0) {
return ret;
* Calculate the number of clusters to look for. We stop at L2 table
* boundaries to keep things simple.
*/
-again:
nb_clusters = MIN(size_to_clusters(s, n_end << BDRV_SECTOR_BITS),
s->l2_size - l2_index);
* Check how many clusters are already allocated and don't need COW, and how
* many need a new allocation.
*/
- if (cluster_offset & QCOW_OFLAG_COPIED) {
+ if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
+ && (cluster_offset & QCOW_OFLAG_COPIED))
+ {
/* We keep all QCOW_OFLAG_COPIED clusters */
- keep_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
- &l2_table[l2_index], 0, 0);
+ keep_clusters =
+ count_contiguous_clusters(nb_clusters, s->cluster_size,
+ &l2_table[l2_index], 0,
+ QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
assert(keep_clusters <= nb_clusters);
nb_clusters -= keep_clusters;
} else {
+ keep_clusters = 0;
+ cluster_offset = 0;
+ }
+
+ if (nb_clusters > 0) {
/* For the moment, overwrite compressed clusters one by one */
- if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
+ uint64_t entry = be64_to_cpu(l2_table[l2_index + keep_clusters]);
+ if (entry & QCOW_OFLAG_COMPRESSED) {
nb_clusters = 1;
} else {
- nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
+ nb_clusters = count_cow_clusters(s, nb_clusters, l2_table,
+ l2_index + keep_clusters);
}
-
- keep_clusters = 0;
- cluster_offset = 0;
}
- cluster_offset &= ~QCOW_OFLAG_COPIED;
+ cluster_offset &= L2E_OFFSET_MASK;
- /* If there is something left to allocate, do that now */
- *m = (QCowL2Meta) {
- .cluster_offset = cluster_offset,
- .nb_clusters = 0,
- };
- qemu_co_queue_init(&m->dependent_requests);
+ /*
+ * The L2 table isn't used any more after this. As long as the cache works
+ * synchronously, it's important to release it before calling
+ * do_alloc_cluster_offset, which may yield if we need to wait for another
+ * request to complete. If we still had the reference, we could use up the
+ * whole cache with sleeping requests.
+ */
+ ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
+ if (ret < 0) {
+ return ret;
+ }
+ /* If there is something left to allocate, do that now */
if (nb_clusters > 0) {
uint64_t alloc_offset;
uint64_t alloc_cluster_offset;
/* Allocate, if necessary at a given offset in the image file */
ret = do_alloc_cluster_offset(bs, alloc_offset, &alloc_cluster_offset,
- &nb_clusters, l2_table);
+ &nb_clusters);
if (ret == -EAGAIN) {
goto again;
} else if (ret < 0) {
/* save info needed for meta data update */
if (nb_clusters > 0) {
+ /*
+ * requested_sectors: Number of sectors from the start of the first
+ * newly allocated cluster to the end of the (possibly already
+ * shortened) write request.
+ *
+ * avail_sectors: Number of sectors from the start of the first newly
+ * allocated cluster to the end of the last newly allocated cluster.
+ *
+ * nb_sectors: The number of sectors from the start of the first
+ * newly allocated cluster to the end of the area that the write
+ * request actually writes to (excluding COW at the end)
+ */
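
To make the arithmetic concrete, a worked example with hypothetical numbers:
64k clusters (cluster_sectors = 128), keep_clusters = 1, nb_clusters = 2,
n_end = 200:

/*
 *   requested_sectors = 200 - 1 * 128  = 72
 *   avail_sectors     = 2 << (16 - 9)  = 256
 *   nb_sectors        = MIN(72, 256)   = 72
 *
 * cow_start stays empty (alloc_n_start = 0 because keep_clusters != 0) and
 * cow_end covers sectors 72..255 of the newly allocated area:
 *   .offset = 72 * 512, .nb_sectors = 256 - 72 = 184
 */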
int requested_sectors = n_end - keep_clusters * s->cluster_sectors;
- int avail_sectors = (keep_clusters + nb_clusters)
+ int avail_sectors = nb_clusters
<< (s->cluster_bits - BDRV_SECTOR_BITS);
+ int alloc_n_start = keep_clusters == 0 ? n_start : 0;
+ int nb_sectors = MIN(requested_sectors, avail_sectors);
+
+ if (keep_clusters == 0) {
+ cluster_offset = alloc_cluster_offset;
+ }
- *m = (QCowL2Meta) {
- .cluster_offset = keep_clusters == 0 ?
- alloc_cluster_offset : cluster_offset,
+ *m = g_malloc0(sizeof(**m));
+
+ **m = (QCowL2Meta) {
.alloc_offset = alloc_cluster_offset,
- .offset = alloc_offset,
- .n_start = keep_clusters == 0 ? n_start : 0,
+ .offset = alloc_offset & ~(s->cluster_size - 1),
.nb_clusters = nb_clusters,
- .nb_available = MIN(requested_sectors, avail_sectors),
+ .nb_available = nb_sectors,
+
+ .cow_start = {
+ .offset = 0,
+ .nb_sectors = alloc_n_start,
+ },
+ .cow_end = {
+ .offset = nb_sectors * BDRV_SECTOR_SIZE,
+ .nb_sectors = avail_sectors - nb_sectors,
+ },
};
- qemu_co_queue_init(&m->dependent_requests);
- QLIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight);
+ qemu_co_queue_init(&(*m)->dependent_requests);
+ QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);
}
}
/* Some cleanup work */
- ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
- if (ret < 0) {
- goto fail_put;
- }
-
sectors = (keep_clusters + nb_clusters) << (s->cluster_bits - 9);
if (sectors > n_end) {
sectors = n_end;
assert(sectors > n_start);
*num = sectors - n_start;
+ *host_offset = cluster_offset;
return 0;
fail:
- qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
-fail_put:
- if (nb_clusters > 0) {
- QLIST_REMOVE(m, next_in_flight);
+ if (*m && (*m)->nb_clusters > 0) {
+ QLIST_REMOVE(*m, next_in_flight);
}
return ret;
}
uint64_t old_offset;
old_offset = be64_to_cpu(l2_table[l2_index + i]);
- old_offset &= ~QCOW_OFLAG_COPIED;
-
- if (old_offset == 0) {
+ if ((old_offset & L2E_OFFSET_MASK) == 0) {
continue;
}
return 0;
}
+
+/*
+ * This zeroes as many clusters of nb_clusters as possible at once (i.e.
+ * all clusters in the same L2 table) and returns the number of zeroed
+ * clusters.
+ */
+static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
+ unsigned int nb_clusters)
+{
+ BDRVQcowState *s = bs->opaque;
+ uint64_t *l2_table;
+ int l2_index;
+ int ret;
+ int i;
+
+ ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* Limit nb_clusters to one L2 table */
+ nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
+
+ for (i = 0; i < nb_clusters; i++) {
+ uint64_t old_offset;
+
+ old_offset = be64_to_cpu(l2_table[l2_index + i]);
+
+ /* Update L2 entries */
+ qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
+ if (old_offset & QCOW_OFLAG_COMPRESSED) {
+ l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
+ qcow2_free_any_clusters(bs, old_offset, 1);
+ } else {
+ l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
+ }
+ }
+
+ ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return nb_clusters;
+}
+
+int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
+{
+ BDRVQcowState *s = bs->opaque;
+ unsigned int nb_clusters;
+ int ret;
+
+ /* The zero flag is only supported by version 3 and newer */
+ if (s->qcow_version < 3) {
+ return -ENOTSUP;
+ }
+
+ /* Each L2 table is handled by its own loop iteration */
+ nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);
+
+ while (nb_clusters > 0) {
+ ret = zero_single_l2(bs, offset, nb_clusters);
+ if (ret < 0) {
+ return ret;
+ }
+
+ nb_clusters -= ret;
+ offset += (ret * s->cluster_size);
+ }
+
+ return 0;
+}
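
A minimal sketch of a write_zeroes callback built on top of this, assuming
cluster-aligned requests; the function name and the alignment policy here
are illustrative (misaligned heads and tails would have to be emulated by
the caller some other way):

static coroutine_fn int qcow2_co_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    /* The zero flag covers whole clusters only */
    if (sector_num % s->cluster_sectors || nb_sectors % s->cluster_sectors) {
        return -ENOTSUP;
    }

    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_zero_clusters(bs, sector_num << BDRV_SECTOR_BITS,
                              nb_sectors);
    qemu_co_mutex_unlock(&s->lock);

    return ret;
}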