/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"
int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size, i, ret;

    if (exact_size >= s->l1_size) {
        return 0;
    }

    new_l1_size = exact_size;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
    ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
                                       new_l1_size * sizeof(uint64_t),
                             (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
    for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
        if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
            continue;
        }
        qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
                            s->cluster_size, QCOW2_DISCARD_ALWAYS);
        s->l1_table[i] = 0;
    }
    return 0;

fail:
    /*
     * If the write in the l1_table failed the image may contain a partially
     * overwritten l1_table. In this case it would be better to clear the
     * l1_table in memory to avoid possible image corruption.
     */
    memset(s->l1_table + new_l1_size, 0,
           (s->l1_size - new_l1_size) * sizeof(uint64_t));
    return ret;
}
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
        }
    }

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       align_offset(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, align_offset(new_l1_size2, 512));

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
 fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}
/*
 * l2_load
 *
 * @bs: The BlockDriverState
 * @offset: A guest offset, used to calculate what slice of the L2
 *          table to load.
 * @l2_offset: Offset to the L2 table in the image file.
 * @l2_slice: Location to store the pointer to the L2 slice.
 *
 * Loads a L2 slice into memory (L2 slices are the parts of L2 tables
 * that are loaded by the qcow2 cache). If the slice is in the cache,
 * the cache is used; otherwise the L2 slice is loaded from the image
 * file.
 */
static int l2_load(BlockDriverState *bs, uint64_t offset,
                   uint64_t l2_offset, uint64_t **l2_slice)
{
    BDRVQcow2State *s = bs->opaque;
    int start_of_slice = sizeof(uint64_t) *
        (offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset));

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice,
                           (void **)l2_slice);
}
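
/*
 * Worked example of the slice arithmetic (illustrative numbers; the slice
 * size used here is an assumption, not taken from this file): with 64 KiB
 * clusters and 8-byte entries an L2 table holds 8192 entries.  If the
 * cache operated on slices of 1024 entries, a guest offset whose L2 index
 * is 3000 would have offset_to_l2_slice_index() == 3000 % 1024 == 952, so
 * start_of_slice == 8 * (3000 - 952) == 8 * 2048, i.e. the slice starting
 * at entry 2048 of the table is the one fetched into the cache.
 */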
/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
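
/*
 * Worked example: L1_ENTRIES_PER_SECTOR is 64, so updating l1_index 100
 * rounds l1_start_index down to 64 (100 & ~63) and rewrites the single
 * 512-byte sector covering L1 entries 64..127.  Entries at or beyond
 * s->l1_size stay zero in buf thanks to the "= { 0 }" initializer.
 */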
/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */
static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    /* If we're allocating the table at offset 0 then something is wrong */
    if (l2_offset == 0) {
        qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
                                "allocation of L2 table at offset 0");
        ret = -EIO;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t *old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
                              old_l2_offset & L1E_OFFSET_MASK,
                              (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        qcow2_cache_put(s->l2_table_cache, (void **) &old_table);
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(s->l2_table_cache, (void **) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}
/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require a different handling)
 */
static int count_contiguous_clusters(int nb_clusters, int cluster_size,
                                     uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    QCow2ClusterType first_cluster_type;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset) {
        return 0;
    }

    /* must be allocated */
    first_cluster_type = qcow2_get_cluster_type(first_entry);
    assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
           first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}
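
/*
 * Worked example (illustrative numbers): with 64 KiB (0x10000) clusters,
 * suppose three consecutive L2 entries point to host offsets 0x50000,
 * 0x60000 and 0x90000.  Entry 1 satisfies 0x50000 + 1 * 0x10000 ==
 * 0x60000, but entry 2 does not (0x50000 + 2 * 0x10000 != 0x90000), so
 * the function returns 2.  Because QCOW_OFLAG_COMPRESSED is always part
 * of the mask, a compressed entry can never continue a run that started
 * with a normal cluster.
 */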
/*
 * Checks how many consecutive unallocated clusters in a given L2
 * table have the same cluster type.
 */
static int count_contiguous_clusters_unallocated(int nb_clusters,
                                                 uint64_t *l2_table,
                                                 QCow2ClusterType wanted_type)
{
    int i;

    assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
           wanted_type == QCOW2_CLUSTER_UNALLOCATED);
    for (i = 0; i < nb_clusters; i++) {
        uint64_t entry = be64_to_cpu(l2_table[i]);
        QCow2ClusterType type = qcow2_get_cluster_type(entry);

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}
static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster,
                                  qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
                                                uint64_t src_cluster_offset,
                                                uint64_t cluster_offset,
                                                unsigned offset_in_cluster,
                                                uint8_t *buffer,
                                                unsigned bytes)
{
    if (bytes && bs->encrypted) {
        BDRVQcow2State *s = bs->opaque;
        int64_t offset = (s->crypt_physical_offset ?
                          (cluster_offset + offset_in_cluster) :
                          (src_cluster_offset + offset_in_cluster));
        assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
        assert((bytes & ~BDRV_SECTOR_MASK) == 0);
        assert(s->crypto);
        if (qcrypto_block_encrypt(s->crypto, offset, buffer, bytes, NULL) < 0) {
            return false;
        }
    }
    return true;
}
static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                             uint64_t cluster_offset,
                                             unsigned offset_in_cluster,
                                             QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, qiov->size);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster,
                          qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
/*
 * get_cluster_offset
 *
 * For a given offset of the virtual disk, find the cluster type and offset in
 * the qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * cluster type and (if applicable) are stored contiguously in the image file.
 * Compressed clusters are always returned one by one.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             unsigned int *bytes, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    QCow2ClusterType type;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l1 entry */
    bytes_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1))
                    + offset_in_cluster;

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, offset, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_index(s, offset);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    type = qcow2_get_cluster_type(*cluster_offset);
    if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN ||
                                type == QCOW2_CLUSTER_ZERO_ALLOC)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                " in pre-v3 image (L2 offset: %#" PRIx64
                                ", L2 index: %#x)", l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    switch (type) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO_PLAIN:
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_clusters_unallocated(nb_clusters,
                                                  &l2_table[l2_index], type);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_ZERO_ALLOC:
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_table);

    bytes_available = (int64_t)c * s->cluster_size;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    return type;

fail:
    qcow2_cache_put(s->l2_table_cache, (void **)&l2_table);
    return ret;
}
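
/*
 * Worked example of the *bytes contract (illustrative numbers): for a
 * request of *bytes == 1 MiB starting 4 KiB into a 64 KiB cluster,
 * bytes_needed is 1 MiB + 4 KiB.  If only c == 3 contiguous clusters of
 * the same type are found, bytes_available becomes 192 KiB and the
 * function returns with *bytes == 192 KiB - 4 KiB == 188 KiB; the caller
 * must issue another query for the remainder of the request.
 */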
/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the cluster index in the l2 table is given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, offset, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_index(s, offset);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful,
 * Return 0, otherwise.
 *
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_table);

    return cluster_offset;
}
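
/*
 * Worked example for the nb_csectors math (illustrative numbers): if
 * qcow2_alloc_bytes() returns cluster_offset == 0x11fe0 and
 * compressed_size == 100, the data spans bytes 0x11fe0..0x12043, so
 * ((0x12043 >> 9) - (0x11fe0 >> 9)) == 1.  The stored value 1 means the
 * compressed stream straddles two 512-byte sectors;
 * qcow2_decompress_cluster() adds 1 back when computing the sector count.
 */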
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2COWRegion *start = &m->cow_start;
    Qcow2COWRegion *end = &m->cow_end;
    unsigned buffer_size;
    unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
    bool merge_reads;
    uint8_t *start_buffer, *end_buffer;
    QEMUIOVector qiov;
    int ret;

    assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
    assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
    assert(start->offset + start->nb_bytes <= end->offset);
    assert(!m->data_qiov || m->data_qiov->size == data_bytes);

    if (start->nb_bytes == 0 && end->nb_bytes == 0) {
        return 0;
    }

    /* If we have to read both the start and end COW regions and the
     * middle region is not too large then perform just one read
     * operation */
    merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
    if (merge_reads) {
        buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
    } else {
        /* If we have to do two reads, add some padding in the middle
         * if necessary to make sure that the end region is optimally
         * aligned. */
        size_t align = bdrv_opt_mem_align(bs);
        assert(align > 0 && align <= UINT_MAX);
        assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
               UINT_MAX - end->nb_bytes);
        buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
    }

    /* Reserve a buffer large enough to store all the data that we're
     * going to read */
    start_buffer = qemu_try_blockalign(bs, buffer_size);
    if (start_buffer == NULL) {
        return -ENOMEM;
    }
    /* The part of the buffer where the end region is located */
    end_buffer = start_buffer + buffer_size - end->nb_bytes;

    qemu_iovec_init(&qiov, 2 + (m->data_qiov ? m->data_qiov->niov : 0));

    qemu_co_mutex_unlock(&s->lock);
    /* First we read the existing data from both COW regions. We
     * either read the whole region in one go, or the start and end
     * regions separately. */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        if (!do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    start->offset, start_buffer,
                                    start->nb_bytes) ||
            !do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    end->offset, end_buffer, end->nb_bytes)) {
            ret = -EIO;
            goto fail;
        }
    }

    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, 0, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * one single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);
    return ret;
}
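
/*
 * Buffer layout sketch for the non-merged case (illustrative):
 *
 *   start_buffer                                        end_buffer
 *   |<-- start->nb_bytes -->|<--- padding --->|<-- end->nb_bytes -->|
 *
 * where the padding exists only to align end_buffer to
 * bdrv_opt_mem_align(bs).  In the merged case the middle of the buffer
 * instead holds the (<= 16 KiB) guest data region, which is read together
 * with both COW regions in a single request.
 */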
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* if two concurrent writes happen to the same unallocated cluster
         * each write allocates separate cluster and writes data concurrently.
         * The first one to complete updates l2 table with pointer to its
         * cluster the second one has to do RMW (which is done above by
         * perform_cow()), update l2 table with its cluster pointer and free
         * old cluster. This is what this loop does */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }


    qcow2_cache_put(s->l2_table_cache, (void **) &l2_table);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
 }
/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO_PLAIN:
        case QCOW2_CLUSTER_ZERO_ALLOC:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}
/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
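
/*
 * Worked example (illustrative numbers): a request for guest bytes
 * 0x0..0x30000 while an in-flight allocation covers 0x20000..0x40000 has
 * end > old_start and start < old_start, so the request is shortened to
 * bytes == 0x20000 and the caller handles the overlapping tail in a later
 * iteration.  If instead the request started at 0x28000, bytes would
 * become 0 and the coroutine would block on
 * old_alloc->dependent_requests until the other request finishes.
 */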
/*
 * Checks how many already allocated clusters that don't require a copy on
 * write there are at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to exact byte referenced by guest_offset and
 * therefore isn't cluster aligned as well.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_table);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to exact byte referenced by guest_offset and
 * therefore isn't cluster aligned as well.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;
    bool keep_old_clusters = false;

    uint64_t alloc_cluster_offset = 0;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    if (qcow2_get_cluster_type(entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
        (entry & QCOW_OFLAG_COPIED) &&
        (!*host_offset ||
         start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
    {
        int preallocated_nb_clusters;

        if (offset_into_cluster(s, entry & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Preallocated zero "
                                    "cluster offset %#llx unaligned (guest "
                                    "offset: %#" PRIx64 ")",
                                    entry & L2E_OFFSET_MASK, guest_offset);
            ret = -EIO;
            goto fail;
        }

        /* Try to reuse preallocated zero clusters; contiguous normal clusters
         * would be fine, too, but count_cow_clusters() above has limited
         * nb_clusters already to a range of COW clusters */
        preallocated_nb_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], QCOW_OFLAG_COPIED);
        assert(preallocated_nb_clusters > 0);

        nb_clusters = preallocated_nb_clusters;
        alloc_cluster_offset = entry & L2E_OFFSET_MASK;

        /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
         * should not free them. */
        keep_old_clusters = true;
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_table);

    if (!alloc_cluster_offset) {
        /* Allocate, if necessary at a given offset in the image file */
        alloc_cluster_offset = start_of_cluster(s, *host_offset);
        ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        /* Can't extend contiguous allocation */
        if (nb_clusters == 0) {
            *bytes = 0;
            return 0;
        }

        /* !*host_offset would overwrite the image header and is reserved for
         * "no host offset preferred". If 0 was a valid host offset, it'd
         * trigger the following overlap check; do that now to avoid having an
         * invalid value in *host_offset. */
        if (!alloc_cluster_offset) {
            ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                                nb_clusters * s->cluster_size);
            assert(ret < 0);
            goto fail;
        }
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_bytes: Number of bytes from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_bytes: The number of bytes from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
    int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
    int nb_bytes = MIN(requested_bytes, avail_bytes);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,

        .keep_old_clusters  = keep_old_clusters,

        .cow_start = {
            .offset     = 0,
            .nb_bytes   = offset_into_cluster(s, guest_offset),
        },
        .cow_end = {
            .offset     = nb_bytes,
            .nb_bytes   = avail_bytes - nb_bytes,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}
/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
                               unsigned int *bytes, uint64_t *host_offset,
                               QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);

again:
    start = offset;
    remaining = *bytes;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start           += cur_bytes;
        remaining       -= cur_bytes;
        cluster_offset  += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight allocation,
         *         set cluster_offset to write to the same cluster and set up
         *         the right synchronisation between the in-flight request and
         *         the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *bytes -= remaining;
    assert(*bytes > 0);
    assert(*host_offset != 0);

    return 0;
}
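
/*
 * Illustrative walk-through of the gathering loop above: a 1 MiB write
 * into a region whose first two clusters are already COPIED and whose
 * remainder is unallocated first takes the handle_copied() path (step 2,
 * advancing cur_bytes by two clusters), then continues into
 * handle_alloc() (step 3) for the rest, producing one QCowL2Meta per
 * allocated run, chained through m->next.
 */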
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}
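
/*
 * Note on the zlib parameters (explanatory, based on general zlib
 * semantics): the -12 passed to inflateInit2() selects a raw deflate
 * stream (negative window bits suppress the zlib header) with a 4 KiB
 * (2^12) window.  Z_BUF_ERROR is tolerated above because the input buffer
 * is sized in whole sectors and may contain trailing bytes past the end
 * of the compressed stream; only producing exactly out_buf_size bytes of
 * output counts as success.
 */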
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;

        /* Allocate buffers on first decompress operation, most images are
         * uncompressed and the memory overhead can be avoided.  The buffers
         * are freed in .bdrv_close().
         */
        if (!s->cluster_data) {
            /* one more sector for decompressed data alignment */
            s->cluster_data = qemu_try_blockalign(bs->file->bs,
                    QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size + 512);
            if (!s->cluster_data) {
                return -ENOMEM;
            }
        }
        if (!s->cluster_cache) {
            s->cluster_cache = g_malloc(s->cluster_size);
        }

        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data,
                        nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
                             uint64_t nb_clusters, enum qcow2_discard_type type,
                             bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_table[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_PLAIN:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_ALLOC:
        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        if (!full_discard && s->qcow_version >= 3) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_table[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_table);

    return nb_clusters;
}
int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, enum qcow2_discard_type type,
                          bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        cleared = discard_single_l2(bs, offset, nb_clusters, type,
                                    full_discard);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
                          uint64_t nb_clusters, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;
    bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP);

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;
        QCow2ClusterType cluster_type;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /*
         * Minimize L2 changes if the cluster already reads back as
         * zeroes with correct allocation.
         */
        cluster_type = qcow2_get_cluster_type(old_offset);
        if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN ||
            (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) {
            continue;
        }

        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_table);

    return nb_clusters;
}
int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        cleared = zero_single_l2(bs, offset, nb_clusters, flags);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb,
                                      void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_table = NULL;
    int ret;
    int i, j;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_table = qemu_try_blockalign(bs->file->bs, s->cluster_size);
        if (l2_table == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        bool l2_dirty = false;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        if (is_active_l1) {
            /* get active L2 tables from cache */
            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                                  (void **)&l2_table);
        } else {
            /* load inactive L2 tables from disk */
            ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                            (void *)l2_table, s->cluster_sectors);
        }
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            int64_t offset = l2_entry & L2E_OFFSET_MASK;
            QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);

            if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
                cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
                continue;
            }

            if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                if (!bs->backing) {
                    /* not backed; therefore we can simply deallocate the
                     * cluster */
                    l2_table[j] = 0;
                    l2_dirty = true;
                    continue;
                }

                offset = qcow2_alloc_clusters(bs, s->cluster_size);
                if (offset < 0) {
                    ret = offset;
                    goto fail;
                }

                if (l2_refcount > 1) {
                    /* For shared L2 tables, set the refcount accordingly (it is
                     * already 1 and needs to be l2_refcount) */
                    ret = qcow2_update_cluster_refcount(bs,
                            offset >> s->cluster_bits,
                            refcount_diff(1, l2_refcount), false,
                            QCOW2_DISCARD_OTHER);
                    if (ret < 0) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_OTHER);
                        goto fail;
                    }
                }
            }

            if (offset_into_cluster(s, offset)) {
                qcow2_signal_corruption(bs, true, -1, -1,
                                        "Cluster allocation offset "
                                        "%#" PRIx64 " unaligned (L2 offset: %#"
                                        PRIx64 ", L2 index: %#x)", offset,
                                        l2_offset, j);
                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                ret = -EIO;
                goto fail;
            }

            ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
            if (ret < 0) {
                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            ret = bdrv_pwrite_zeroes(bs->file, offset, s->cluster_size, 0);
            if (ret < 0) {
                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            if (l2_refcount == 1) {
                l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            } else {
                l2_table[j] = cpu_to_be64(offset);
            }
            l2_dirty = true;
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            qcow2_cache_put(s->l2_table_cache, (void **) &l2_table);
        } else {
            if (l2_dirty) {
                ret = qcow2_pre_write_overlap_check(bs,
                        QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
                        s->cluster_size);
                if (ret < 0) {
                    goto fail;
                }

                ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                                 (void *)l2_table, s->cluster_sectors);
                if (ret < 0) {
                    goto fail;
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else {
            qcow2_cache_put(s->l2_table_cache, (void **) &l2_table);
        }
    }
    return ret;
}
/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb, cb_opaque);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = DIV_ROUND_UP(s->snapshots[i].l1_size *
                                      sizeof(uint64_t), BDRV_SECTOR_SIZE);

        uint64_t *new_l1_table =
            g_try_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        if (!new_l1_table) {
            ret = -ENOMEM;
            goto fail;
        }

        l1_table = new_l1_table;

        ret = bdrv_read(bs->file,
                        s->snapshots[i].l1_table_offset / BDRV_SECTOR_SIZE,
                        (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb, cb_opaque);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}