/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

    if (new_l1_size > INT_MAX) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        g_free(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT,
                                        new_l1_table_offset, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set the new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }

    g_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;

fail:
    g_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}
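/*
 * Illustrative numbers (not mandated by the format): with the 3/2 growth
 * factor above, an L1 table growing from one entry passes through 2, 3, 5,
 * 8, 12, 18, ... entries, so repeated small extensions cause only O(log n)
 * actual grows. Note that the 12-byte header update covers the adjacent
 * l1_size (4 bytes) and l1_table_offset (8 bytes) fields of QCowHeader in
 * a single write, so a reader never sees a new size without the matching
 * table offset.
 */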
/*
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, or a negative errno value if the read from the
 * image file failed.
 */
static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
        (void**) l2_table);

    return ret;
}
/*
 * Writes one sector of the L1 table to the disk (we can't update single
 * entries, and we really don't want bdrv_pwrite to perform a
 * read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs,
            QCOW2_OL_DEFAULT & ~QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
        buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
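/*
 * Worked example: L1_ENTRIES_PER_SECTOR is 512 / 8 == 64, so updating
 * l1_index == 70 computes l1_start_index == 70 & ~63 == 64 and rewrites
 * the whole sector covering entries 64..127.
 */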
/*
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table), copy the contents of the old L2 table into the newly allocated
 * one. Otherwise the new table is initialized with zeros.
 */
static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        return l2_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset,
        (void**) table);
    if (ret < 0) {
        return ret;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t *old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
            old_l2_offset & L1E_OFFSET_MASK,
            (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    s->l1_table[l1_index] = old_l2_offset;
    return ret;
}
/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster, which may require different handling.)
 */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK;
    uint64_t offset = be64_to_cpu(l2_table[0]) & mask;

    if (!offset) {
        return 0;
    }

    for (i = start; i < start + nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return (i - start);
}
static int count_contiguous_free_clusters(uint64_t nb_clusters,
    uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}
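/*
 * Example with made-up values: given cluster_size == 0x10000 and L2 entries
 * 0x50000, 0x60000, 0x80000, count_contiguous_clusters() accepts
 * 0x50000 + 1 * 0x10000 == 0x60000 but stops at the third entry because
 * 0x50000 + 2 * 0x10000 != 0x80000, and so returns 2.
 */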
/* The crypt function is compatible with the Linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}
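/*
 * The IV scheme above follows cryptoloop: a 16-byte IV whose first eight
 * bytes are the little-endian sector number and whose remaining eight
 * bytes are zero. A given sector therefore always encrypts with the same
 * IV, independently of its contents.
 */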
static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    /*
     * If this is the last cluster and it is only partially used, we must only
     * copy until the end of the image, or bdrv_check_request will fail for the
     * bdrv_read/write calls below.
     */
    if (start_sect + n_end > bs->total_sectors) {
        n_end = bs->total_sectors - start_sect;
    }

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                iov.iov_base, iov.iov_base, n, 1,
                &s->aes_encrypt_key);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT,
            cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}
/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
            return -EIO;
        }
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0,
                QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0,
                QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed) {
        nb_available = nb_needed;
    }

    *num = nb_available - index_in_cluster;

    return ret;
}
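/*
 * Usage sketch (hypothetical caller):
 *
 *     uint64_t host_offset;
 *     int nb = 16;    // want 16 contiguous sectors starting at offset
 *     int type = qcow2_get_cluster_offset(bs, offset, &nb, &host_offset);
 *
 * A negative return value is -errno; otherwise type is one of the
 * QCOW2_CLUSTER_* constants and nb has been reduced to the number of
 * sectors that actually share that type (and mapping, if allocated).
 */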
/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}
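/*
 * Sizing example: with 64 KiB clusters an L2 table holds
 * 65536 / 8 == 8192 entries (l2_bits == 13), so a single L2 table maps
 * 2^(13 + 16) bytes == 512 MiB of virtual disk; offsets further apart
 * than that always go through different L1 entries.
 */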
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful, or 0 otherwise.
 */
uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}
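/*
 * Descriptor layout sketch: the returned entry keeps the host byte offset
 * in its low bits and stores, starting at csize_shift, the number of
 * 512-byte sectors the compressed data occupies beyond the sector holding
 * its first byte; qcow2_decompress_cluster() adds that one sector back
 * when reading.
 */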
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes hit the same unallocated cluster, each
         * write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the L2 table with its own
         * cluster pointer, and free the old cluster. This is what this loop
         * does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}
/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}
/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
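/*
 * Interval example (hypothetical request): if an in-flight allocation
 * covers [10 KiB, 20 KiB) and a new request asks for [5 KiB, 15 KiB),
 * the ranges intersect with start < old_start, so the request is
 * shortened to bytes == 5 KiB and proceeds without waiting. A request
 * starting inside [10 KiB, 20 KiB) would instead block on
 * dependent_requests and return -EAGAIN.
 */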
/*
 * Checks how many already allocated clusters that don't require a copy on
 * write there are at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    unsigned int nb_clusters;
    unsigned int keep_clusters;
    int ret, pret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);
    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
        == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], 0,
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    pret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (pret < 0) {
        return pret;
    }

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}
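/*
 * Example of the two success modes: if guest_offset hits three contiguous
 * COPIED clusters but *bytes spans five, the function returns 1 with
 * *bytes trimmed to the three-cluster area; if the first cluster is
 * unallocated or lacks QCOW_OFLAG_COPIED, it returns 0 and leaves the
 * area for handle_alloc() to cover.
 */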
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    unsigned int nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_sectors: Number of sectors from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_sectors: The number of sectors from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    int requested_sectors =
        (*bytes + offset_into_cluster(s, guest_offset))
        >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters
                        << (s->cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(s, guest_offset)
                        >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,
        .nb_available   = nb_sectors,

        .cow_start = {
            .offset     = 0,
            .nb_sectors = alloc_n_start,
        },
        .cow_end = {
            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
            .nb_sectors = avail_sectors - nb_sectors,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
                 - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}
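/*
 * Accounting example (made-up numbers, 64 KiB clusters == 128 sectors):
 * writing 8 KiB starting 4 KiB into an unallocated cluster gives
 * requested_sectors == (8K + 4K) / 512 == 24, alloc_n_start == 8 and,
 * with one cluster allocated, avail_sectors == 128. nb_sectors ==
 * MIN(24, 128) == 24, so cow_start covers sectors [0, 8) and cow_end
 * covers sectors [24, 128) of the new cluster.
 */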
/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset,
                                      n_start, n_end);

    assert(n_start * BDRV_SECTOR_SIZE == offset_into_cluster(s, offset));
    offset = start_of_cluster(s, offset);

again:
    start = offset + (n_start << BDRV_SECTOR_BITS);
    remaining = (n_end - n_start) << BDRV_SECTOR_BITS;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start           += cur_bytes;
        remaining       -= cur_bytes;
        cluster_offset  += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *num = (n_end - n_start) - (remaining >> BDRV_SECTOR_BITS);
    assert(*num > 0);
    assert(*host_offset != 0);

    return 0;
}
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK) {
        return -1;
    }
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}
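/*
 * Note: windowBits == -12 requests a raw deflate stream (no zlib header
 * or checksum) with a 4 KiB window, matching the raw stream the qcow2
 * compression path produces; anything else makes inflate() fail.
 */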
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);
        if ((old_offset & L2E_OFFSET_MASK) == 0) {
            continue;
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        l2_table[l2_index + i] = cpu_to_be64(0);

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}
int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset &= ~(s->cluster_size - 1);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
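/*
 * Rounding example (64 KiB clusters): a discard covering bytes
 * [3 KiB, 130 KiB) is narrowed to whole clusters, i.e. [64 KiB, 128 KiB),
 * so exactly one cluster is discarded; the partial head and tail clusters
 * are deliberately left untouched.
 */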
/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}
int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}