/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"

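/*
 * qcow2_grow_l1_table
 *
 * Grows the L1 table so that it holds at least min_size entries. Unless
 * exact_size is set, the new size is overallocated to reduce the number of
 * future resize operations. The new table is written to the image file and
 * activated in the header before the old table's clusters are freed.
 *
 * Returns 0 on success, -errno in error cases.
 */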
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

    if (new_l1_size > INT_MAX) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        g_free(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table,
                           new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    g_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
 fail:
    g_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * Loads a L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success and stores a pointer to the L2 table in *l2_table;
 * returns -errno if the read from the image file failed.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
static int write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 *
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        return l2_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        return ret;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
                              old_l2_offset & L1E_OFFSET_MASK,
                              (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    s->l1_table[l1_index] = old_l2_offset;
    return ret;
}

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster, which may require different handling.)
 */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK;
    uint64_t offset = be64_to_cpu(l2_table[0]) & mask;

    if (!offset) {
        return 0;
    }

    for (i = start; i < start + nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return (i - start);
}

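/*
 * Counts how many consecutive unallocated clusters appear at the start of
 * the given L2 entries (up to nb_clusters).
 */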
static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

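/*
 * Performs the data copy for a copy-on-write operation: reads the guest
 * sectors [start_sect + n_start, start_sect + n_end) through the qcow2 driver
 * (so backing files and decryption are handled), re-encrypts the data if
 * encryption is enabled, and writes it into the newly allocated cluster at
 * cluster_offset in the image file.
 *
 * Returns 0 on success, -errno on failure.
 */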
static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    /*
     * If this is the last cluster and it is only partially used, we must only
     * copy until the end of the image, or bdrv_check_request will fail for the
     * bdrv_read/write calls below.
     */
    if (start_sect + n_end > bs->total_sectors) {
        n_end = bs->total_sectors - start_sect;
    }

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                              iov.iov_base, iov.iov_base, n, 1,
                              &s->aes_encrypt_key);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            return -EIO;
        }
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0,
                QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0,
                QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed) {
        nb_available = nb_needed;
    }

    *num = nb_available - index_in_cluster;

    return ret;
}

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful; return 0 otherwise.
 *
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}

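/*
 * Copies one COW region (described by r, relative to the start of the
 * allocation m) from the old data into the newly allocated clusters of m.
 * The qcow2 lock is dropped around the copy; afterwards the L2 table cache
 * is marked as depending on a flush, so the L2 update only becomes visible
 * once the copied data is safely on disk.
 */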
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}

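/*
 * Links the clusters described by m into the L2 table: performs the COW for
 * the head and tail regions, then writes the new cluster offsets (with
 * QCOW_OFLAG_COPIED) into the L2 entries. If a concurrent write already
 * linked other clusters to the same entries, the now unused clusters are
 * freed again.
 *
 * Returns 0 on success, -errno in error cases.
 */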
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the L2 table with a
         * pointer to its cluster; the second one has to do RMW (which is done
         * above by copy_sectors()), update the L2 table with its cluster
         * pointer and free the old cluster. This is what this loop does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }


    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}

837 | /* |
838 | * Checks how many already allocated clusters that don't require a copy on | |
839 | * write there are at the given guest_offset (up to *bytes). If | |
840 | * *host_offset is not zero, only physically contiguous clusters beginning at | |
841 | * this host offset are counted. | |
842 | * | |
411d62b0 KW |
843 | * Note that guest_offset may not be cluster aligned. In this case, the |
844 | * returned *host_offset points to exact byte referenced by guest_offset and | |
845 | * therefore isn't cluster aligned as well. | |
0af729ec KW |
846 | * |
847 | * Returns: | |
848 | * 0: if no allocated clusters are available at the given offset. | |
849 | * *bytes is normally unchanged. It is set to 0 if the cluster | |
850 | * is allocated and doesn't need COW, but doesn't have the right | |
851 | * physical offset. | |
852 | * | |
853 | * 1: if allocated clusters that don't require a COW are available at | |
854 | * the requested offset. *bytes may have decreased and describes | |
855 | * the length of the area that can be written to. | |
856 | * | |
857 | * -errno: in error cases | |
0af729ec KW |
858 | */ |
859 | static int handle_copied(BlockDriverState *bs, uint64_t guest_offset, | |
c53ede9f | 860 | uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m) |
0af729ec KW |
861 | { |
862 | BDRVQcowState *s = bs->opaque; | |
863 | int l2_index; | |
864 | uint64_t cluster_offset; | |
865 | uint64_t *l2_table; | |
acb0467f | 866 | unsigned int nb_clusters; |
c53ede9f | 867 | unsigned int keep_clusters; |
0af729ec KW |
868 | int ret, pret; |
869 | ||
870 | trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset, | |
871 | *bytes); | |
0af729ec | 872 | |
411d62b0 KW |
873 | assert(*host_offset == 0 || offset_into_cluster(s, guest_offset) |
874 | == offset_into_cluster(s, *host_offset)); | |
875 | ||
acb0467f KW |
876 | /* |
877 | * Calculate the number of clusters to look for. We stop at L2 table | |
878 | * boundaries to keep things simple. | |
879 | */ | |
880 | nb_clusters = | |
881 | size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes); | |
882 | ||
883 | l2_index = offset_to_l2_index(s, guest_offset); | |
884 | nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); | |
885 | ||
0af729ec KW |
886 | /* Find L2 entry for the first involved cluster */ |
887 | ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index); | |
888 | if (ret < 0) { | |
889 | return ret; | |
890 | } | |
891 | ||
892 | cluster_offset = be64_to_cpu(l2_table[l2_index]); | |
893 | ||
894 | /* Check how many clusters are already allocated and don't need COW */ | |
895 | if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL | |
896 | && (cluster_offset & QCOW_OFLAG_COPIED)) | |
897 | { | |
e62daaf6 KW |
898 | /* If a specific host_offset is required, check it */ |
899 | bool offset_matches = | |
900 | (cluster_offset & L2E_OFFSET_MASK) == *host_offset; | |
901 | ||
902 | if (*host_offset != 0 && !offset_matches) { | |
903 | *bytes = 0; | |
904 | ret = 0; | |
905 | goto out; | |
906 | } | |
907 | ||
0af729ec | 908 | /* We keep all QCOW_OFLAG_COPIED clusters */ |
c53ede9f | 909 | keep_clusters = |
acb0467f | 910 | count_contiguous_clusters(nb_clusters, s->cluster_size, |
0af729ec KW |
911 | &l2_table[l2_index], 0, |
912 | QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO); | |
c53ede9f KW |
913 | assert(keep_clusters <= nb_clusters); |
914 | ||
915 | *bytes = MIN(*bytes, | |
916 | keep_clusters * s->cluster_size | |
917 | - offset_into_cluster(s, guest_offset)); | |
0af729ec KW |
918 | |
919 | ret = 1; | |
920 | } else { | |
0af729ec KW |
921 | ret = 0; |
922 | } | |
923 | ||
0af729ec | 924 | /* Cleanup */ |
e62daaf6 | 925 | out: |
0af729ec KW |
926 | pret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); |
927 | if (pret < 0) { | |
928 | return pret; | |
929 | } | |
930 | ||
e62daaf6 KW |
931 | /* Only return a host offset if we actually made progress. Otherwise we |
932 | * would make requirements for handle_alloc() that it can't fulfill */ | |
933 | if (ret) { | |
411d62b0 KW |
934 | *host_offset = (cluster_offset & L2E_OFFSET_MASK) |
935 | + offset_into_cluster(s, guest_offset); | |
e62daaf6 KW |
936 | } |
937 | ||
0af729ec KW |
938 | return ret; |
939 | } | |
940 | ||
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:      if no clusters could be allocated. *bytes is set to 0,
 *           *host_offset is left unchanged.
 *
 *   1:      if new clusters were allocated. *bytes may be decreased if the
 *           new allocation doesn't cover all of the requested area.
 *           *host_offset is updated to contain the host offset of the first
 *           newly allocated cluster.
 *
 *   -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    unsigned int nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_sectors: Number of sectors from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_sectors: The number of sectors from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    int requested_sectors =
        (*bytes + offset_into_cluster(s, guest_offset))
        >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters
                        << (s->cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(s, guest_offset)
                        >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,
        .nb_available   = nb_sectors,

        .cow_start = {
            .offset     = 0,
            .nb_sectors = alloc_n_start,
        },
        .cow_end = {
            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
            .nb_sectors = avail_sectors - nb_sectors,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
                         - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset,
                                      n_start, n_end);

    assert(n_start * BDRV_SECTOR_SIZE == offset_into_cluster(s, offset));
    offset = start_of_cluster(s, offset);

again:
    start = offset + (n_start << BDRV_SECTOR_BITS);
    remaining = (n_end - n_start) << BDRV_SECTOR_BITS;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start          += cur_bytes;
        remaining      -= cur_bytes;
        cluster_offset += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *num = (n_end - n_start) - (remaining >> BDRV_SECTOR_BITS);
    assert(*num > 0);
    assert(*host_offset != 0);

    return 0;
}

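/*
 * Inflates a raw deflate-compressed buffer (no zlib header) of buf_size bytes
 * into out_buf. Returns 0 on success and -1 if decompression fails or does
 * not produce exactly out_buf_size bytes.
 */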
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK) {
        return -1;
    }
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

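/*
 * Reads the compressed cluster described by cluster_offset (an L2 entry with
 * the compressed flag and size fields encoded in it) from the image file and
 * decompresses it into s->cluster_cache, unless that cluster is already
 * cached. Returns 0 on success, -errno on read failure or -EIO if
 * decompression fails.
 */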
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

1315 | * This discards as many clusters of nb_clusters as possible at once (i.e. | |
1316 | * all clusters in the same L2 table) and returns the number of discarded | |
1317 | * clusters. | |
1318 | */ | |
1319 | static int discard_single_l2(BlockDriverState *bs, uint64_t offset, | |
1320 | unsigned int nb_clusters) | |
1321 | { | |
1322 | BDRVQcowState *s = bs->opaque; | |
3948d1d4 | 1323 | uint64_t *l2_table; |
5ea929e3 KW |
1324 | int l2_index; |
1325 | int ret; | |
1326 | int i; | |
1327 | ||
3948d1d4 | 1328 | ret = get_cluster_table(bs, offset, &l2_table, &l2_index); |
5ea929e3 KW |
1329 | if (ret < 0) { |
1330 | return ret; | |
1331 | } | |
1332 | ||
1333 | /* Limit nb_clusters to one L2 table */ | |
1334 | nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); | |
1335 | ||
1336 | for (i = 0; i < nb_clusters; i++) { | |
1337 | uint64_t old_offset; | |
1338 | ||
1339 | old_offset = be64_to_cpu(l2_table[l2_index + i]); | |
8e37f681 | 1340 | if ((old_offset & L2E_OFFSET_MASK) == 0) { |
5ea929e3 KW |
1341 | continue; |
1342 | } | |
1343 | ||
1344 | /* First remove L2 entries */ | |
1345 | qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table); | |
1346 | l2_table[l2_index + i] = cpu_to_be64(0); | |
1347 | ||
1348 | /* Then decrease the refcount */ | |
6cfcb9b8 | 1349 | qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST); |
5ea929e3 KW |
1350 | } |
1351 | ||
1352 | ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); | |
1353 | if (ret < 0) { | |
1354 | return ret; | |
1355 | } | |
1356 | ||
1357 | return nb_clusters; | |
1358 | } | |
1359 | ||
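/*
 * Discards the guest range [offset, offset + nb_sectors * 512). Only whole
 * clusters are affected: the start is rounded up and the end rounded down to
 * cluster boundaries, the covered L2 entries are cleared and the clusters
 * freed. Returns 0 on success, -errno on failure.
 */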
int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset &= ~(s->cluster_size - 1);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

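/*
 * Marks the clusters covering nb_sectors starting at offset as zero clusters
 * by setting QCOW_OFLAG_ZERO in their L2 entries. Only supported for version
 * 3 or newer images (returns -ENOTSUP otherwise). Returns 0 on success,
 * -errno on failure.
 */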
int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}