/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qapi/error.h"
#include "qcow2.h"
#include "qemu/bswap.h"
#include "qemu/memalign.h"
#include "trace.h"

int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size, i, ret;

    if (exact_size >= s->l1_size) {
        return 0;
    }

    new_l1_size = exact_size;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
    ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
                                       new_l1_size * L1E_SIZE,
                             (s->l1_size - new_l1_size) * L1E_SIZE, 0);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
    for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
        if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
            continue;
        }
        qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
                            s->cluster_size, QCOW2_DISCARD_ALWAYS);
        s->l1_table[i] = 0;
    }
    return 0;

fail:
    /*
     * If the write in the l1_table failed the image may contain a partially
     * overwritten l1_table. In this case it would be better to clear the
     * l1_table in memory to avoid possible image corruption.
     */
    memset(s->l1_table + new_l1_size, 0,
           (s->l1_size - new_l1_size) * L1E_SIZE);
    return ret;
}
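
/*
 * Worked example (illustrative note, not upstream code): shrinking an L1
 * table from 8 entries to 5 first zeroes the on-disk range covering entries
 * 5..7 (3 * L1E_SIZE bytes starting at l1_table_offset + 5 * L1E_SIZE) and
 * flushes, and only then frees the L2 tables those entries pointed to.
 * Doing the write and flush first guarantees that a crash cannot leave the
 * L1 table pointing at clusters that have already been reused.
 */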

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / L1E_SIZE) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
        }
    }

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / L1E_SIZE) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = L1E_SIZE * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs, new_l1_size2);
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, new_l1_size2);

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * L1E_SIZE);
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2, false);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for (i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * L1E_SIZE,
                        QCOW2_DISCARD_OTHER);
    return 0;
fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}
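
/*
 * Worked example (illustrative note, not upstream code): with exact_size
 * false, the table grows by roughly 3/2 per step until it covers min_size.
 * Starting from l1_size == 1 with min_size == 100 the loop yields
 *
 *     1 -> 2 -> 3 -> 5 -> 8 -> 12 -> 18 -> 27 -> 41 -> 62 -> 93 -> 140
 *
 * and stops at 140 (the first value >= 100), amortizing the cost of the
 * on-disk table rewrite over many grow requests.
 */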

/*
 * l2_load
 *
 * @bs: The BlockDriverState
 * @offset: A guest offset, used to calculate what slice of the L2
 *          table to load.
 * @l2_offset: Offset to the L2 table in the image file.
 * @l2_slice: Location to store the pointer to the L2 slice.
 *
 * Loads an L2 slice into memory (L2 slices are the parts of L2 tables
 * that are loaded by the qcow2 cache). If the slice is in the cache,
 * the cache is used; otherwise the L2 slice is loaded from the image
 * file.
 */
static int l2_load(BlockDriverState *bs, uint64_t offset,
                   uint64_t l2_offset, uint64_t **l2_slice)
{
    BDRVQcow2State *s = bs->opaque;
    int start_of_slice = l2_entry_size(s) *
        (offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset));

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice,
                           (void **)l2_slice);
}
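
/*
 * Worked example (illustrative, assumed geometry): with 64 KiB clusters and
 * 8-byte L2 entries an L2 table holds 8192 entries; with 4 KiB cache slices
 * l2_slice_size is 512. For an offset with offset_to_l2_index() == 1300,
 * offset_to_l2_slice_index() == 1300 % 512 == 276, so
 *
 *     start_of_slice = 8 * (1300 - 276) = 8192
 *
 * i.e. the cache loads the third 4 KiB slice of that L2 table.
 */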

/*
 * Writes an L1 entry to disk (note that depending on the alignment
 * requirements this function may write more than just one entry in
 * order to prevent bdrv_pwrite from performing a read-modify-write)
 */
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    int l1_start_index;
    int i, ret;
    int bufsize = MAX(L1E_SIZE,
                      MIN(bs->file->bs->bl.request_alignment, s->cluster_size));
    int nentries = bufsize / L1E_SIZE;
    g_autofree uint64_t *buf = g_try_new0(uint64_t, nentries);

    if (buf == NULL) {
        return -ENOMEM;
    }

    l1_start_index = QEMU_ALIGN_DOWN(l1_index, nentries);
    for (i = 0; i < MIN(nentries, s->l1_size - l1_start_index); i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + L1E_SIZE * l1_start_index, bufsize, false);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + L1E_SIZE * l1_start_index,
                           buf, bufsize);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
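
/*
 * Worked example (illustrative, assumed geometry): with an 8-byte L1E_SIZE,
 * a 4 KiB request_alignment and 64 KiB clusters, bufsize is
 * MAX(8, MIN(4096, 65536)) == 4096, so nentries == 512. Writing L1 entry
 * 1000 therefore rewrites the aligned group of entries 512..1023 in one
 * properly aligned write instead of forcing bdrv_pwrite() into a
 * read-modify-write cycle.
 */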

/*
 * l2_allocate
 *
 * Allocate a new L2 table in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 *
 */
static int l2_allocate(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * l2_entry_size(s));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    /* The offset must fit in the offset field of the L1 table entry */
    assert((l2_offset & L1E_OFFSET_MASK) == l2_offset);

    /* If we're allocating the table at offset 0 then something is wrong */
    if (l2_offset == 0) {
        qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
                                "allocation of L2 table at offset 0");
        ret = -EIO;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    slice_size2 = s->l2_slice_size * l2_entry_size(s);
    n_slices = s->cluster_size / slice_size2;

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    for (slice = 0; slice < n_slices; slice++) {
        ret = qcow2_cache_get_empty(bs, s->l2_table_cache,
                                    l2_offset + slice * slice_size2,
                                    (void **) &l2_slice);
        if (ret < 0) {
            goto fail;
        }

        if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
            /* if there was no old l2 table, clear the new slice */
            memset(l2_slice, 0, slice_size2);
        } else {
            uint64_t *old_slice;
            uint64_t old_l2_slice_offset =
                (old_l2_offset & L1E_OFFSET_MASK) + slice * slice_size2;

            /* if there was an old l2 table, read a slice from the disk */
            BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
            ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_slice_offset,
                                  (void **) &old_slice);
            if (ret < 0) {
                goto fail;
            }

            memcpy(l2_slice, old_slice, slice_size2);

            qcow2_cache_put(s->l2_table_cache, (void **) &old_slice);
        }

        /* write the l2 slice to the file */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

        trace_qcow2_l2_allocate_write_l2(bs, l1_index);
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }

    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_slice != NULL) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * l2_entry_size(s),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}

/*
 * For a given L2 entry, count the number of contiguous subclusters of
 * the same type starting from @sc_from. Compressed clusters are
 * treated as if they were divided into subclusters of size
 * s->subcluster_size.
 *
 * Return the number of contiguous subclusters and set @type to the
 * subcluster type.
 *
 * If the L2 entry is invalid return -errno and set @type to
 * QCOW2_SUBCLUSTER_INVALID.
 */
static int qcow2_get_subcluster_range_type(BlockDriverState *bs,
                                           uint64_t l2_entry,
                                           uint64_t l2_bitmap,
                                           unsigned sc_from,
                                           QCow2SubclusterType *type)
{
    BDRVQcow2State *s = bs->opaque;
    uint32_t val;

    *type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, sc_from);

    if (*type == QCOW2_SUBCLUSTER_INVALID) {
        return -EINVAL;
    } else if (!has_subclusters(s) || *type == QCOW2_SUBCLUSTER_COMPRESSED) {
        return s->subclusters_per_cluster - sc_from;
    }

    switch (*type) {
    case QCOW2_SUBCLUSTER_NORMAL:
        val = l2_bitmap | QCOW_OFLAG_SUB_ALLOC_RANGE(0, sc_from);
        return cto32(val) - sc_from;

    case QCOW2_SUBCLUSTER_ZERO_PLAIN:
    case QCOW2_SUBCLUSTER_ZERO_ALLOC:
        val = (l2_bitmap | QCOW_OFLAG_SUB_ZERO_RANGE(0, sc_from)) >> 32;
        return cto32(val) - sc_from;

    case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
    case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
        val = ((l2_bitmap >> 32) | l2_bitmap)
            & ~QCOW_OFLAG_SUB_ALLOC_RANGE(0, sc_from);
        return ctz32(val) - sc_from;

    default:
        g_assert_not_reached();
    }
}
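
/*
 * Worked example (illustrative note, not upstream code): for a NORMAL
 * request the count comes from the allocation half of the bitmap. With
 * subclusters 2..5 allocated (low bits 0b00111100) and sc_from == 2:
 *
 *     val = 0b00111100 | QCOW_OFLAG_SUB_ALLOC_RANGE(0, 2)  ->  0b00111111
 *     cto32(val) - sc_from = 6 - 2 = 4
 *
 * i.e. four contiguous NORMAL subclusters starting at index 2. The ZERO and
 * UNALLOCATED cases work the same way on the zero half of the bitmap and on
 * the inverted union of both halves, respectively.
 */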

/*
 * Return the number of contiguous subclusters of the exact same type
 * in a given L2 slice, starting from cluster @l2_index, subcluster
 * @sc_index. Allocated subclusters are required to be contiguous in
 * the image file.
 * At most @nb_clusters are checked (note that this means clusters,
 * not subclusters).
 * Compressed clusters are always processed one by one but for the
 * purpose of this count they are treated as if they were divided into
 * subclusters of size s->subcluster_size.
 * On failure return -errno and update @l2_index to point to the
 * invalid entry.
 */
static int count_contiguous_subclusters(BlockDriverState *bs, int nb_clusters,
                                        unsigned sc_index, uint64_t *l2_slice,
                                        unsigned *l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    int i, count = 0;
    bool check_offset = false;
    uint64_t expected_offset = 0;
    QCow2SubclusterType expected_type = QCOW2_SUBCLUSTER_NORMAL, type;

    assert(*l2_index + nb_clusters <= s->l2_slice_size);

    for (i = 0; i < nb_clusters; i++) {
        unsigned first_sc = (i == 0) ? sc_index : 0;
        uint64_t l2_entry = get_l2_entry(s, l2_slice, *l2_index + i);
        uint64_t l2_bitmap = get_l2_bitmap(s, l2_slice, *l2_index + i);
        int ret = qcow2_get_subcluster_range_type(bs, l2_entry, l2_bitmap,
                                                  first_sc, &type);
        if (ret < 0) {
            *l2_index += i; /* Point to the invalid entry */
            return -EIO;
        }
        if (i == 0) {
            if (type == QCOW2_SUBCLUSTER_COMPRESSED) {
                /* Compressed clusters are always processed one by one */
                return ret;
            }
            expected_type = type;
            expected_offset = l2_entry & L2E_OFFSET_MASK;
            check_offset = (type == QCOW2_SUBCLUSTER_NORMAL ||
                            type == QCOW2_SUBCLUSTER_ZERO_ALLOC ||
                            type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC);
        } else if (type != expected_type) {
            break;
        } else if (check_offset) {
            expected_offset += s->cluster_size;
            if (expected_offset != (l2_entry & L2E_OFFSET_MASK)) {
                break;
            }
        }
        count += ret;
        /* Stop if there are type changes before the end of the cluster */
        if (first_sc + ret < s->subclusters_per_cluster) {
            break;
        }
    }

    return count;
}
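
/*
 * Worked example (illustrative, assumed offsets): two neighbouring NORMAL
 * entries with host offsets 0x50000 and 0x60000 are counted together when
 * cluster_size is 0x10000, because the second entry matches
 * expected_offset = 0x50000 + 0x10000. If the second entry instead pointed
 * at 0x80000 the loop would stop after the first cluster, since the data
 * is no longer contiguous in the image file.
 */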

static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /*
     * We never deal with requests that don't satisfy
     * bdrv_check_qiov_request(), and aligning requests to clusters never
     * breaks this condition. So, do some assertions before calling
     * bs->drv->bdrv_co_preadv_part() which has int64_t arguments.
     */
    assert(src_cluster_offset <= INT64_MAX);
    assert(src_cluster_offset + offset_in_cluster <= INT64_MAX);
    /* Cast qiov->size to uint64_t to silence a compiler warning on -m32 */
    assert((uint64_t)qiov->size <= INT64_MAX);
    bdrv_check_qiov_request(src_cluster_offset + offset_in_cluster, qiov->size,
                            qiov, 0, &error_abort);
    /*
     * Call .bdrv_co_preadv_part() directly instead of using the public
     * block-layer interface. This avoids double I/O throttling and request
     * tracking, which can lead to deadlock when block layer copy-on-read is
     * enabled.
     */
    ret = bs->drv->bdrv_co_preadv_part(bs,
                                       src_cluster_offset + offset_in_cluster,
                                       qiov->size, qiov, 0, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                             uint64_t cluster_offset,
                                             unsigned offset_in_cluster,
                                             QEMUIOVector *qiov)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, qiov->size, true);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->data_file, cluster_offset + offset_in_cluster,
                          qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * get_host_offset
 *
 * For a given offset of the virtual disk find the equivalent host
 * offset in the qcow2 file and store it in *host_offset. Neither
 * offset needs to be aligned to a cluster boundary.
 *
 * If the cluster is unallocated then *host_offset will be 0.
 * If the cluster is compressed then *host_offset will contain the l2 entry.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * subcluster type and (if applicable) are stored contiguously in the image
 * file. The subcluster type is stored in *subcluster_type.
 * Compressed clusters are always processed one by one.
 *
 * Returns 0 on success, -errno in error cases.
 */
int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
                          unsigned int *bytes, uint64_t *host_offset,
                          QCow2SubclusterType *subcluster_type)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index, sc_index;
    uint64_t l1_index, l2_offset, *l2_slice, l2_entry, l2_bitmap;
    int sc;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    QCow2SubclusterType type;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l2 slice that contains
     * the entry pointing to it */
    bytes_available =
        ((uint64_t) (s->l2_slice_size - offset_to_l2_slice_index(s, offset)))
        << s->cluster_bits;

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *host_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        type = QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        type = QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 slice in memory */

    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);
    sc_index = offset_to_sc_index(s, offset);
    l2_entry = get_l2_entry(s, l2_slice, l2_index);
    l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, sc_index);
    if (s->qcow_version < 3 && (type == QCOW2_SUBCLUSTER_ZERO_PLAIN ||
                                type == QCOW2_SUBCLUSTER_ZERO_ALLOC)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                " in pre-v3 image (L2 offset: %#" PRIx64
                                ", L2 index: %#x)", l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    switch (type) {
    case QCOW2_SUBCLUSTER_INVALID:
        break; /* This is handled by count_contiguous_subclusters() below */
    case QCOW2_SUBCLUSTER_COMPRESSED:
        if (has_data_file(bs)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Compressed cluster "
                                    "entry found in image with external data "
                                    "file (L2 offset: %#" PRIx64 ", L2 index: "
                                    "%#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        *host_offset = l2_entry;
        break;
    case QCOW2_SUBCLUSTER_ZERO_PLAIN:
    case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
        break;
    case QCOW2_SUBCLUSTER_ZERO_ALLOC:
    case QCOW2_SUBCLUSTER_NORMAL:
    case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC: {
        uint64_t host_cluster_offset = l2_entry & L2E_OFFSET_MASK;
        *host_offset = host_cluster_offset + offset_in_cluster;
        if (offset_into_cluster(s, host_cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", host_cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        if (has_data_file(bs) && *host_offset != offset) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "External data file host cluster offset %#"
                                    PRIx64 " does not match guest cluster "
                                    "offset: %#" PRIx64
                                    ", L2 index: %#x)", host_cluster_offset,
                                    offset - offset_in_cluster, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    }
    default:
        abort();
    }

    sc = count_contiguous_subclusters(bs, nb_clusters, sc_index,
                                      l2_slice, &l2_index);
    if (sc < 0) {
        qcow2_signal_corruption(bs, true, -1, -1, "Invalid cluster entry found "
                                " (L2 offset: %#" PRIx64 ", L2 index: %#x)",
                                l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    bytes_available = ((int64_t)sc + sc_index) << s->subcluster_bits;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    *subcluster_type = type;

    return 0;

fail:
    qcow2_cache_put(s->l2_table_cache, (void **)&l2_slice);
    return ret;
}
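
/*
 * Usage sketch (illustrative note, not upstream code): a read path would
 * typically loop over the guest range, letting each call shrink cur_bytes
 * to the largest run with a single subcluster type:
 *
 *     unsigned int cur_bytes = bytes;
 *     QCow2SubclusterType sctype;
 *     uint64_t host_offset;
 *
 *     ret = qcow2_get_host_offset(bs, offset, &cur_bytes, &host_offset,
 *                                 &sctype);
 *
 * and then dispatch on sctype (read zeroes, read from the backing file,
 * decompress, or read cur_bytes from host_offset), advancing offset by
 * cur_bytes before the next iteration.
 */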

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the appropriate slice of its l2 table.
 *
 * the cluster index in the l2 slice is given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_slice,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_slice = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    if (!(s->l1_table[l1_index] & QCOW_OFLAG_COPIED)) {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * l2_entry_size(s),
                                QCOW2_DISCARD_OTHER);
        }

        /* Get the offset of the newly-allocated l2 table */
        l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
        assert(offset_into_cluster(s, l2_offset) == 0);
    }

    /* load the l2 slice in memory */
    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);

    *new_l2_slice = l2_slice;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset on the virtual disk, allocate a new compressed cluster
 * and put the host offset of the cluster into *host_offset. If a cluster is
 * already allocated at the offset, return an error.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                          uint64_t offset,
                                          int compressed_size,
                                          uint64_t *host_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_slice;
    int64_t cluster_offset;
    int nb_csectors;

    if (has_data_file(bs)) {
        return 0;
    }

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = get_l2_entry(s, l2_slice, l2_index);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return -EIO;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return cluster_offset;
    }

    nb_csectors =
        (cluster_offset + compressed_size - 1) / QCOW2_COMPRESSED_SECTOR_SIZE -
        (cluster_offset / QCOW2_COMPRESSED_SECTOR_SIZE);

    /* The offset and size must fit in their fields of the L2 table entry */
    assert((cluster_offset & s->cluster_offset_mask) == cluster_offset);
    assert((nb_csectors & s->csize_mask) == nb_csectors);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
    set_l2_entry(s, l2_slice, l2_index, cluster_offset);
    if (has_subclusters(s)) {
        set_l2_bitmap(s, l2_slice, l2_index, 0);
    }
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    *host_offset = cluster_offset & s->cluster_offset_mask;
    return 0;
}
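
/*
 * Worked example (illustrative, assumed values): with
 * QCOW2_COMPRESSED_SECTOR_SIZE == 512, cluster_offset == 0x11e00 (sector
 * 143) and compressed_size == 1500, the last byte lands in sector
 * (0x11e00 + 1499) / 512 == 145, so nb_csectors == 145 - 143 == 2. The
 * stored value is thus one less than the number of sectors touched; the
 * read side adds the first sector back when computing the compressed size.
 */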

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2COWRegion *start = &m->cow_start;
    Qcow2COWRegion *end = &m->cow_end;
    unsigned buffer_size;
    unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
    bool merge_reads;
    uint8_t *start_buffer, *end_buffer;
    QEMUIOVector qiov;
    int ret;

    assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
    assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
    assert(start->offset + start->nb_bytes <= end->offset);

    if ((start->nb_bytes == 0 && end->nb_bytes == 0) || m->skip_cow) {
        return 0;
    }

    /* If we have to read both the start and end COW regions and the
     * middle region is not too large then perform just one read
     * operation */
    merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
    if (merge_reads) {
        buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
    } else {
        /* If we have to do two reads, add some padding in the middle
         * if necessary to make sure that the end region is optimally
         * aligned. */
        size_t align = bdrv_opt_mem_align(bs);
        assert(align > 0 && align <= UINT_MAX);
        assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
               UINT_MAX - end->nb_bytes);
        buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
    }

    /* Reserve a buffer large enough to store all the data that we're
     * going to read */
    start_buffer = qemu_try_blockalign(bs, buffer_size);
    if (start_buffer == NULL) {
        return -ENOMEM;
    }
    /* The part of the buffer where the end region is located */
    end_buffer = start_buffer + buffer_size - end->nb_bytes;

    qemu_iovec_init(&qiov, 2 + (m->data_qiov ?
                                qemu_iovec_subvec_niov(m->data_qiov,
                                                       m->data_qiov_offset,
                                                       data_bytes)
                                : 0));

    qemu_co_mutex_unlock(&s->lock);
    /* First we read the existing data from both COW regions. We
     * either read the whole region in one go, or the start and end
     * regions separately. */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        ret = qcow2_co_encrypt(bs,
                               m->alloc_offset + start->offset,
                               m->offset + start->offset,
                               start_buffer, start->nb_bytes);
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_co_encrypt(bs,
                               m->alloc_offset + end->offset,
                               m->offset + end->offset,
                               end_buffer, end->nb_bytes);
        if (ret < 0) {
            goto fail;
        }
    }

    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, m->data_qiov_offset, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * one single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);
    return ret;
}
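
/*
 * Buffer layout sketch (illustrative note): for the two-read case the
 * single allocation holds both COW regions, with padding so that the end
 * region starts at an optimally aligned offset:
 *
 *     start_buffer                                      end_buffer
 *     |<-- start->nb_bytes -->|<-- padding -->|<-- end->nb_bytes -->|
 *
 * In the merged case (both regions present, middle <= 16384 bytes) the
 * buffer instead covers the start region, the guest data area and the end
 * region contiguously and is filled by a single read.
 */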

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_slice;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_slice, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);

    assert(l2_index + m->nb_clusters <= s->l2_slice_size);
    assert(m->cow_end.offset + m->cow_end.nb_bytes <=
           m->nb_clusters << s->cluster_bits);
    for (i = 0; i < m->nb_clusters; i++) {
        uint64_t offset = cluster_offset + ((uint64_t)i << s->cluster_bits);
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the l2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by perform_cow()), update the l2 table with its cluster
         * pointer and free the old cluster. This is what this loop does. */
        if (get_l2_entry(s, l2_slice, l2_index + i) != 0) {
            old_cluster[j++] = get_l2_entry(s, l2_slice, l2_index + i);
        }

        /* The offset must fit in the offset field of the L2 table entry */
        assert((offset & L2E_OFFSET_MASK) == offset);

        set_l2_entry(s, l2_slice, l2_index + i, offset | QCOW_OFLAG_COPIED);

        /* Update bitmap with the subclusters that were just written */
        if (has_subclusters(s) && !m->prealloc) {
            uint64_t l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i);
            unsigned written_from = m->cow_start.offset;
            unsigned written_to = m->cow_end.offset + m->cow_end.nb_bytes;
            int first_sc, last_sc;
            /* Narrow written_from and written_to down to the current cluster */
            written_from = MAX(written_from, i << s->cluster_bits);
            written_to = MIN(written_to, (i + 1) << s->cluster_bits);
            assert(written_from < written_to);
            first_sc = offset_to_sc_index(s, written_from);
            last_sc = offset_to_sc_index(s, written_to - 1);
            l2_bitmap |= QCOW_OFLAG_SUB_ALLOC_RANGE(first_sc, last_sc + 1);
            l2_bitmap &= ~QCOW_OFLAG_SUB_ZERO_RANGE(first_sc, last_sc + 1);
            set_l2_bitmap(s, l2_slice, l2_index + i, l2_bitmap);
        }
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_cluster(bs, old_cluster[i], QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/**
 * Frees the allocated clusters because the request failed and they won't
 * actually be linked.
 */
void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    if (!has_data_file(bs) && !m->keep_old_clusters) {
        qcow2_free_clusters(bs, m->alloc_offset,
                            m->nb_clusters << s->cluster_bits,
                            QCOW2_DISCARD_NEVER);
    }
}

/*
 * For a given write request, create a new QCowL2Meta structure, add
 * it to @m and the BDRVQcow2State.cluster_allocs list. If the write
 * request does not need copy-on-write or changes to the L2 metadata
 * then this function does nothing.
 *
 * @host_cluster_offset points to the beginning of the first cluster.
 *
 * @guest_offset and @bytes indicate the offset and length of the
 * request.
 *
 * @l2_slice contains the L2 entries of all clusters involved in this
 * write request.
 *
 * If @keep_old is true it means that the clusters were already
 * allocated and will be overwritten. If false then the clusters are
 * new and we have to decrease the reference count of the old ones.
 *
 * Returns 0 on success, -errno on failure.
 */
static int calculate_l2_meta(BlockDriverState *bs, uint64_t host_cluster_offset,
                             uint64_t guest_offset, unsigned bytes,
                             uint64_t *l2_slice, QCowL2Meta **m, bool keep_old)
{
    BDRVQcow2State *s = bs->opaque;
    int sc_index, l2_index = offset_to_l2_slice_index(s, guest_offset);
    uint64_t l2_entry, l2_bitmap;
    unsigned cow_start_from, cow_end_to;
    unsigned cow_start_to = offset_into_cluster(s, guest_offset);
    unsigned cow_end_from = cow_start_to + bytes;
    unsigned nb_clusters = size_to_clusters(s, cow_end_from);
    QCowL2Meta *old_m = *m;
    QCow2SubclusterType type;
    int i;
    bool skip_cow = keep_old;

    assert(nb_clusters <= s->l2_slice_size - l2_index);

    /* Check the type of all affected subclusters */
    for (i = 0; i < nb_clusters; i++) {
        l2_entry = get_l2_entry(s, l2_slice, l2_index + i);
        l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i);
        if (skip_cow) {
            unsigned write_from = MAX(cow_start_to, i << s->cluster_bits);
            unsigned write_to = MIN(cow_end_from, (i + 1) << s->cluster_bits);
            int first_sc = offset_to_sc_index(s, write_from);
            int last_sc = offset_to_sc_index(s, write_to - 1);
            int cnt = qcow2_get_subcluster_range_type(bs, l2_entry, l2_bitmap,
                                                      first_sc, &type);
            /* Is any of the subclusters of type != QCOW2_SUBCLUSTER_NORMAL ? */
            if (type != QCOW2_SUBCLUSTER_NORMAL || first_sc + cnt <= last_sc) {
                skip_cow = false;
            }
        } else {
            /* If we can't skip the cow we can still look for invalid entries */
            type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, 0);
        }
        if (type == QCOW2_SUBCLUSTER_INVALID) {
            int l1_index = offset_to_l1_index(s, guest_offset);
            uint64_t l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
            qcow2_signal_corruption(bs, true, -1, -1, "Invalid cluster "
                                    "entry found (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)",
                                    l2_offset, l2_index + i);
            return -EIO;
        }
    }

    if (skip_cow) {
        return 0;
    }

    /* Get the L2 entry of the first cluster */
    l2_entry = get_l2_entry(s, l2_slice, l2_index);
    l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index);
    sc_index = offset_to_sc_index(s, guest_offset);
    type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, sc_index);

    if (!keep_old) {
        switch (type) {
        case QCOW2_SUBCLUSTER_COMPRESSED:
            cow_start_from = 0;
            break;
        case QCOW2_SUBCLUSTER_NORMAL:
        case QCOW2_SUBCLUSTER_ZERO_ALLOC:
        case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
            if (has_subclusters(s)) {
                /* Skip all leading zero and unallocated subclusters */
                uint32_t alloc_bitmap = l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC;
                cow_start_from =
                    MIN(sc_index, ctz32(alloc_bitmap)) << s->subcluster_bits;
            } else {
                cow_start_from = 0;
            }
            break;
        case QCOW2_SUBCLUSTER_ZERO_PLAIN:
        case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
            cow_start_from = sc_index << s->subcluster_bits;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        switch (type) {
        case QCOW2_SUBCLUSTER_NORMAL:
            cow_start_from = cow_start_to;
            break;
        case QCOW2_SUBCLUSTER_ZERO_ALLOC:
        case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
            cow_start_from = sc_index << s->subcluster_bits;
            break;
        default:
            g_assert_not_reached();
        }
    }

    /* Get the L2 entry of the last cluster */
    l2_index += nb_clusters - 1;
    l2_entry = get_l2_entry(s, l2_slice, l2_index);
    l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index);
    sc_index = offset_to_sc_index(s, guest_offset + bytes - 1);
    type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, sc_index);

    if (!keep_old) {
        switch (type) {
        case QCOW2_SUBCLUSTER_COMPRESSED:
            cow_end_to = ROUND_UP(cow_end_from, s->cluster_size);
            break;
        case QCOW2_SUBCLUSTER_NORMAL:
        case QCOW2_SUBCLUSTER_ZERO_ALLOC:
        case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
            cow_end_to = ROUND_UP(cow_end_from, s->cluster_size);
            if (has_subclusters(s)) {
                /* Skip all trailing zero and unallocated subclusters */
                uint32_t alloc_bitmap = l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC;
                cow_end_to -=
                    MIN(s->subclusters_per_cluster - sc_index - 1,
                        clz32(alloc_bitmap)) << s->subcluster_bits;
            }
            break;
        case QCOW2_SUBCLUSTER_ZERO_PLAIN:
        case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
            cow_end_to = ROUND_UP(cow_end_from, s->subcluster_size);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        switch (type) {
        case QCOW2_SUBCLUSTER_NORMAL:
            cow_end_to = cow_end_from;
            break;
        case QCOW2_SUBCLUSTER_ZERO_ALLOC:
        case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
            cow_end_to = ROUND_UP(cow_end_from, s->subcluster_size);
            break;
        default:
            g_assert_not_reached();
        }
    }

    *m = g_malloc0(sizeof(**m));
    **m = (QCowL2Meta) {
        .next = old_m,

        .alloc_offset = host_cluster_offset,
        .offset = start_of_cluster(s, guest_offset),
        .nb_clusters = nb_clusters,

        .keep_old_clusters = keep_old,

        .cow_start = {
            .offset = cow_start_from,
            .nb_bytes = cow_start_to - cow_start_from,
        },
        .cow_end = {
            .offset = cow_end_from,
            .nb_bytes = cow_end_to - cow_end_from,
        },
    };

    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    return 0;
}
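
/*
 * Worked example (illustrative, assumed geometry): writing 0x1000 bytes at
 * guest offset 0x3200 into a single unallocated 64 KiB cluster (no
 * subclusters, keep_old == false) yields
 *
 *     cow_start = { .offset = 0,      .nb_bytes = 0x3200 }
 *     cow_end   = { .offset = 0x4200, .nb_bytes = 0x10000 - 0x4200 }
 *
 * i.e. everything before and after the written area must be copied (or
 * zeroed) into the newly allocated cluster, while the middle 0x1000 bytes
 * come from the guest request itself.
 */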

/*
 * Returns true if writing to the cluster pointed to by @l2_entry
 * requires a new allocation (that is, if the cluster is unallocated
 * or has refcount > 1 and therefore cannot be written in-place).
 */
static bool cluster_needs_new_alloc(BlockDriverState *bs, uint64_t l2_entry)
{
    switch (qcow2_get_cluster_type(bs, l2_entry)) {
    case QCOW2_CLUSTER_NORMAL:
    case QCOW2_CLUSTER_ZERO_ALLOC:
        if (l2_entry & QCOW_OFLAG_COPIED) {
            return false;
        }
        /* fallthrough */
    case QCOW2_CLUSTER_UNALLOCATED:
    case QCOW2_CLUSTER_COMPRESSED:
    case QCOW2_CLUSTER_ZERO_PLAIN:
        return true;
    default:
        abort();
    }
}

/*
 * Returns the number of contiguous clusters that can be written to
 * using one single write request, starting from @l2_index.
 * At most @nb_clusters are checked.
 *
 * If @new_alloc is true this counts clusters that are either
 * unallocated, or allocated but with refcount > 1 (so they need to be
 * newly allocated and COWed).
 *
 * If @new_alloc is false this counts clusters that are already
 * allocated and can be overwritten in-place (this includes clusters
 * of type QCOW2_CLUSTER_ZERO_ALLOC).
 */
static int count_single_write_clusters(BlockDriverState *bs, int nb_clusters,
                                       uint64_t *l2_slice, int l2_index,
                                       bool new_alloc)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t l2_entry = get_l2_entry(s, l2_slice, l2_index);
    uint64_t expected_offset = l2_entry & L2E_OFFSET_MASK;
    int i;

    for (i = 0; i < nb_clusters; i++) {
        l2_entry = get_l2_entry(s, l2_slice, l2_index + i);
        if (cluster_needs_new_alloc(bs, l2_entry) != new_alloc) {
            break;
        }
        if (!new_alloc) {
            if (expected_offset != (l2_entry & L2E_OFFSET_MASK)) {
                break;
            }
            expected_offset += s->cluster_size;
        }
    }

    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
                               uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = start_of_cluster(s, l2meta_cow_start(old_alloc));
        uint64_t old_end = ROUND_UP(l2meta_cow_end(old_alloc), s->cluster_size);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
            continue;
        }

        if (old_alloc->keep_old_clusters &&
            (end <= l2meta_cow_start(old_alloc) ||
             start >= l2meta_cow_end(old_alloc)))
        {
            /*
             * Clusters intersect but COW areas don't. And the cluster itself
             * is already allocated. So, there is no actual conflict.
             */
            continue;
        }

        /* Conflict */

        if (start < old_start) {
            /* Stop at the start of a running allocation */
            bytes = old_start - start;
        } else {
            bytes = 0;
        }

        /*
         * Stop if an l2meta already exists. After yielding, it wouldn't
         * be valid any more, so we'd have to clean up the old L2Metas
         * and deal with requests depending on them before starting to
         * gather new ones. Not worth the trouble.
         */
        if (bytes == 0 && *m) {
            *cur_bytes = 0;
            return 0;
        }

        if (bytes == 0) {
            /*
             * Wait for the dependency to complete. We need to recheck
             * the free/allocated clusters when we continue.
             */
            qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
            return -EAGAIN;
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
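
/*
 * Worked example (illustrative, assumed offsets): with 64 KiB clusters, an
 * in-flight allocation whose COW range spans 0x20000..0x30000 and a new
 * request for 0x10000..0x28000: old_start == 0x20000, so the new request is
 * shortened to bytes = 0x20000 - 0x10000 and the caller handles the
 * overlapping tail in its next iteration. A request starting inside the old
 * range (e.g. at 0x24000) gets bytes == 0 and must wait on
 * dependent_requests instead.
 */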

/*
 * Checks how many already allocated clusters that don't require a new
 * allocation there are at the given guest_offset (up to *bytes).
 * If *host_offset is not INV_OFFSET, only physically contiguous clusters
 * beginning at this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and can be overwritten in-place but doesn't have
 *          the right physical offset.
 *
 *   1:     if allocated clusters that can be overwritten in place are
 *          available at the requested offset. *bytes may have decreased
 *          and describes the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
                         uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t l2_entry, cluster_offset;
    uint64_t *l2_slice;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == INV_OFFSET || offset_into_cluster(s, guest_offset)
           == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    /* Limit total byte count to BDRV_REQUEST_MAX_BYTES */
    nb_clusters = MIN(nb_clusters, BDRV_REQUEST_MAX_BYTES >> s->cluster_bits);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    l2_entry = get_l2_entry(s, l2_slice, l2_index);
    cluster_offset = l2_entry & L2E_OFFSET_MASK;

    if (!cluster_needs_new_alloc(bs, l2_entry)) {
        if (offset_into_cluster(s, cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "%s cluster offset "
                                    "%#" PRIx64 " unaligned (guest offset: %#"
                                    PRIx64 ")", l2_entry & QCOW_OFLAG_ZERO ?
                                    "Preallocated zero" : "Data",
                                    cluster_offset, guest_offset);
            ret = -EIO;
            goto out;
        }

        /* If a specific host_offset is required, check it */
        if (*host_offset != INV_OFFSET && cluster_offset != *host_offset) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters = count_single_write_clusters(bs, nb_clusters, l2_slice,
                                                    l2_index, false);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                     keep_clusters * s->cluster_size
                     - offset_into_cluster(s, guest_offset));
        assert(*bytes != 0);

        ret = calculate_l2_meta(bs, cluster_offset, guest_offset,
                                *bytes, l2_slice, m, true);
        if (ret < 0) {
            goto out;
        }

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = cluster_offset + offset_into_cluster(s, guest_offset);
    }

    return ret;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is not INV_OFFSET, it specifies the offset in the image file
 * at which the new clusters must start. *nb_clusters can be 0 on return in
 * this case if the cluster at host_offset is already in use. If *host_offset
 * is INV_OFFSET, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    if (has_data_file(bs)) {
        assert(*host_offset == INV_OFFSET ||
               *host_offset == start_of_cluster(s, guest_offset));
        *host_offset = start_of_cluster(s, guest_offset);
        return 0;
    }

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == INV_OFFSET) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
1632
1633 /*
1634 * Allocates new clusters for an area that is either still unallocated or
1635 * cannot be overwritten in-place. If *host_offset is not INV_OFFSET,
1636 * clusters are only allocated if the new allocation can match the specified
1637 * host offset.
1638 *
1639 * Note that guest_offset may not be cluster aligned. In this case, the
1640 * returned *host_offset points to exact byte referenced by guest_offset and
1641 * therefore isn't cluster aligned as well.
1642 *
1643 * Returns:
1644 * 0: if no clusters could be allocated. *bytes is set to 0,
1645 * *host_offset is left unchanged.
1646 *
1647 * 1: if new clusters were allocated. *bytes may be decreased if the
1648 * new allocation doesn't cover all of the requested area.
1649 * *host_offset is updated to contain the host offset of the first
1650 * newly allocated cluster.
1651 *
1652 * -errno: in error cases
1653 */
1654 static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
1655 uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
1656 {
1657 BDRVQcow2State *s = bs->opaque;
1658 int l2_index;
1659 uint64_t *l2_slice;
1660 uint64_t nb_clusters;
1661 int ret;
1662
1663 uint64_t alloc_cluster_offset;
1664
1665 trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
1666 *bytes);
1667 assert(*bytes > 0);
1668
1669 /*
1670 * Calculate the number of clusters to look for. We stop at L2 slice
1671 * boundaries to keep things simple.
1672 */
1673 nb_clusters =
1674 size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);
1675
1676 l2_index = offset_to_l2_slice_index(s, guest_offset);
1677 nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
1678 /* Limit total allocation byte count to BDRV_REQUEST_MAX_BYTES */
1679 nb_clusters = MIN(nb_clusters, BDRV_REQUEST_MAX_BYTES >> s->cluster_bits);
1680
1681 /* Find L2 entry for the first involved cluster */
1682 ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
1683 if (ret < 0) {
1684 return ret;
1685 }
1686
1687 nb_clusters = count_single_write_clusters(bs, nb_clusters,
1688 l2_slice, l2_index, true);
1689
1690 /* This function is only called when there were no non-COW clusters, so if
1691 * we can't find any unallocated or COW clusters either, something is
1692 * wrong with our code. */
1693 assert(nb_clusters > 0);
1694
1695 /* Allocate at a given offset in the image file */
1696 alloc_cluster_offset = *host_offset == INV_OFFSET ? INV_OFFSET :
1697 start_of_cluster(s, *host_offset);
1698 ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
1699 &nb_clusters);
1700 if (ret < 0) {
1701 goto out;
1702 }
1703
1704 /* Can't extend contiguous allocation */
1705 if (nb_clusters == 0) {
1706 *bytes = 0;
1707 ret = 0;
1708 goto out;
1709 }
1710
1711 assert(alloc_cluster_offset != INV_OFFSET);
1712
1713 /*
1714 * Save info needed for meta data update.
1715 *
1716 * requested_bytes: Number of bytes from the start of the first
1717 * newly allocated cluster to the end of the (possibly shortened
1718 * before) write request.
1719 *
1720 * avail_bytes: Number of bytes from the start of the first
1721 * newly allocated to the end of the last newly allocated cluster.
1722 *
1723 * nb_bytes: The number of bytes from the start of the first
1724 * newly allocated cluster to the end of the area that the write
1725 * request actually writes to (excluding COW at the end)
1726 */
1727 uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
1728 int avail_bytes = nb_clusters << s->cluster_bits;
1729 int nb_bytes = MIN(requested_bytes, avail_bytes);
1730
1731 *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
1732 *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
1733 assert(*bytes != 0);
1734
1735 ret = calculate_l2_meta(bs, alloc_cluster_offset, guest_offset, *bytes,
1736 l2_slice, m, false);
1737 if (ret < 0) {
1738 goto out;
1739 }
1740
1741 ret = 1;
1742
1743 out:
1744 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
1745 return ret;
1746 }
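/*
 * Worked example of the handle_alloc() contract above (illustrative
 * numbers only, assuming 64 KiB clusters): with guest_offset = 0x12345
 * and *bytes = 0x30000, offset_into_cluster() yields 0x2345, so
 * requested_bytes = 0x32345 and four clusters are needed. If
 * do_alloc_cluster_offset() returns four contiguous clusters at host
 * offset 0x500000, then:
 *
 *     avail_bytes  = 4 << 16                        = 0x40000
 *     nb_bytes     = MIN(0x32345, 0x40000)          = 0x32345
 *     *host_offset = 0x500000 + 0x2345              = 0x502345
 *     *bytes       = MIN(0x30000, 0x32345 - 0x2345) = 0x30000 (unchanged)
 *
 * Had the allocation come back shorter (say, two clusters), *bytes would
 * have been reduced accordingly and the caller would loop again for the
 * remainder.
 */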
1747
1748 /*
1749 * For a given area on the virtual disk defined by @offset and @bytes,
1750 * find the corresponding area on the qcow2 image, allocating new
1751 * clusters (or subclusters) if necessary. The result can span a
1752 * combination of allocated and previously unallocated clusters.
1753 *
1754 * Note that offset may not be cluster aligned. In this case, the returned
1755 * *host_offset points to the exact byte referenced by offset and therefore
1756 * isn't cluster aligned either.
1757 *
1758 * On return, @host_offset is set to the beginning of the requested
1759 * area. This area is guaranteed to be contiguous on the qcow2 file
1760 * but it can be smaller than initially requested. In this case @bytes
1761 * is updated with the actual size.
1762 *
1763 * If any clusters or subclusters were allocated then @m contains a
1764 * list with information about all the affected regions. Note that
1765 * this can happen regardless of whether this function succeeds or
1766 * not. The caller is responsible for updating the L2 metadata of the
1767 * allocated clusters (on success) or freeing them (on failure), and
1768 * for clearing the contents of @m afterwards in both cases.
1769 *
1770 * If the request conflicts with another write request in flight, the coroutine
1771 * is queued and will be reentered when the dependency has completed.
1772 *
1773 * Return 0 on success and -errno in error cases
1774 */
1775 int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
1776 unsigned int *bytes, uint64_t *host_offset,
1777 QCowL2Meta **m)
1778 {
1779 BDRVQcow2State *s = bs->opaque;
1780 uint64_t start, remaining;
1781 uint64_t cluster_offset;
1782 uint64_t cur_bytes;
1783 int ret;
1784
1785 trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);
1786
1787 again:
1788 start = offset;
1789 remaining = *bytes;
1790 cluster_offset = INV_OFFSET;
1791 *host_offset = INV_OFFSET;
1792 cur_bytes = 0;
1793 *m = NULL;
1794
1795 while (true) {
1796
1797 if (*host_offset == INV_OFFSET && cluster_offset != INV_OFFSET) {
1798 *host_offset = cluster_offset;
1799 }
1800
1801 assert(remaining >= cur_bytes);
1802
1803 start += cur_bytes;
1804 remaining -= cur_bytes;
1805
1806 if (cluster_offset != INV_OFFSET) {
1807 cluster_offset += cur_bytes;
1808 }
1809
1810 if (remaining == 0) {
1811 break;
1812 }
1813
1814 cur_bytes = remaining;
1815
1816 /*
1817 * Now start gathering as many contiguous clusters as possible:
1818 *
1819 * 1. Check for overlaps with in-flight allocations
1820 *
1821 * a) Overlap not in the first cluster -> shorten this request and
1822 * let the caller handle the rest in its next loop iteration.
1823 *
1824 * b) Real overlaps of two requests. Yield and restart the search
1825 * for contiguous clusters (the situation could have changed
1826 * while we were sleeping)
1827 *
1828 * c) TODO: Request starts in the same cluster as the in-flight
1829 * allocation ends. Shorten the COW of the in-flight allocation,
1830 * set cluster_offset to write to the same cluster and set up
1831 * the right synchronisation between the in-flight request and
1832 * the new one.
1833 */
1834 ret = handle_dependencies(bs, start, &cur_bytes, m);
1835 if (ret == -EAGAIN) {
1836 /* Currently handle_dependencies() doesn't yield if we already had
1837 * an allocation. If it did, we would have to clean up the L2Meta
1838 * structs before starting over. */
1839 assert(*m == NULL);
1840 goto again;
1841 } else if (ret < 0) {
1842 return ret;
1843 } else if (cur_bytes == 0) {
1844 break;
1845 } else {
1846 /* handle_dependencies() may have decreased cur_bytes (shortening
1847 * the allocation steps below) so that the next dependency is
1848 * processed correctly during the next loop iteration. */
1849 }
1850
1851 /*
1852 * 2. Count contiguous COPIED clusters.
1853 */
1854 ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
1855 if (ret < 0) {
1856 return ret;
1857 } else if (ret) {
1858 continue;
1859 } else if (cur_bytes == 0) {
1860 break;
1861 }
1862
1863 /*
1864 * 3. If the request still hasn't completed, allocate new clusters,
1865 * considering any cluster_offset of steps 1c or 2.
1866 */
1867 ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
1868 if (ret < 0) {
1869 return ret;
1870 } else if (ret) {
1871 continue;
1872 } else {
1873 assert(cur_bytes == 0);
1874 break;
1875 }
1876 }
1877
1878 *bytes -= remaining;
1879 assert(*bytes > 0);
1880 assert(*host_offset != INV_OFFSET);
1881 assert(offset_into_cluster(s, *host_offset) ==
1882 offset_into_cluster(s, offset));
1883
1884 return 0;
1885 }
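/*
 * A minimal sketch of the caller side of qcow2_alloc_host_offset()
 * (illustrative only; the real logic lives in qcow2.c's write path, and
 * COW handling, locking, request splitting and error cleanup are elided
 * here). Committing each QCowL2Meta via qcow2_alloc_cluster_link_l2() is
 * what makes the newly allocated clusters visible in the L2 table:
 *
 *     cur_bytes = bytes;
 *     ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes,
 *                                   &host_offset, &l2meta);
 *     if (ret < 0) {
 *         return ret;
 *     }
 *
 *     ret = bdrv_co_pwritev(s->data_file, host_offset, cur_bytes, qiov, 0);
 *
 *     while (l2meta != NULL) {
 *         QCowL2Meta *next = l2meta->next;
 *         if (ret >= 0) {
 *             ret = qcow2_alloc_cluster_link_l2(bs, l2meta);
 *         }
 *         // ...free or roll back l2meta, wake up queued requests...
 *         l2meta = next;
 *     }
 */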
1886
1887 /*
1888 * This discards as many clusters of nb_clusters as possible at once (i.e.
1889 * all clusters in the same L2 slice) and returns the number of discarded
1890 * clusters.
1891 */
1892 static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset,
1893 uint64_t nb_clusters,
1894 enum qcow2_discard_type type, bool full_discard)
1895 {
1896 BDRVQcow2State *s = bs->opaque;
1897 uint64_t *l2_slice;
1898 int l2_index;
1899 int ret;
1900 int i;
1901
1902 ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
1903 if (ret < 0) {
1904 return ret;
1905 }
1906
1907 /* Limit nb_clusters to one L2 slice */
1908 nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
1909 assert(nb_clusters <= INT_MAX);
1910
1911 for (i = 0; i < nb_clusters; i++) {
1912 uint64_t old_l2_entry = get_l2_entry(s, l2_slice, l2_index + i);
1913 uint64_t old_l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i);
1914 uint64_t new_l2_entry = old_l2_entry;
1915 uint64_t new_l2_bitmap = old_l2_bitmap;
1916 QCow2ClusterType cluster_type =
1917 qcow2_get_cluster_type(bs, old_l2_entry);
1918
1919 /*
1920 * If full_discard is true, the cluster should not read back as zeroes,
1921 * but rather fall through to the backing file.
1922 *
1923 * If full_discard is false, make sure that a discarded area reads back
1924 * as zeroes for v3 images (we cannot do it for v2 without actually
1925 * writing a zero-filled buffer). We can skip the operation if the
1926 * cluster is already marked as zero, or if it's unallocated and we
1927 * don't have a backing file.
1928 *
1929 * TODO We might want to use bdrv_block_status(bs) here, but we're
1930 * holding s->lock, so that doesn't work today.
1931 */
1932 if (full_discard) {
1933 new_l2_entry = new_l2_bitmap = 0;
1934 } else if (bs->backing || qcow2_cluster_is_allocated(cluster_type)) {
1935 if (has_subclusters(s)) {
1936 new_l2_entry = 0;
1937 new_l2_bitmap = QCOW_L2_BITMAP_ALL_ZEROES;
1938 } else {
1939 new_l2_entry = s->qcow_version >= 3 ? QCOW_OFLAG_ZERO : 0;
1940 }
1941 }
1942
1943 if (old_l2_entry == new_l2_entry && old_l2_bitmap == new_l2_bitmap) {
1944 continue;
1945 }
1946
1947 /* First remove L2 entries */
1948 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
1949 set_l2_entry(s, l2_slice, l2_index + i, new_l2_entry);
1950 if (has_subclusters(s)) {
1951 set_l2_bitmap(s, l2_slice, l2_index + i, new_l2_bitmap);
1952 }
1953 /* Then decrease the refcount */
1954 qcow2_free_any_cluster(bs, old_l2_entry, type);
1955 }
1956
1957 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
1958
1959 return nb_clusters;
1960 }
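/*
 * Outcome summary for the discard logic above, for the common case of an
 * allocated NORMAL cluster (entries that already have the desired value
 * are skipped):
 *
 *   full_discard            -> entry = 0, bitmap = 0 (reads from backing)
 *   !full_discard, v3,
 *     no subclusters        -> entry = QCOW_OFLAG_ZERO (reads as zeroes)
 *   !full_discard,
 *     subclusters           -> entry = 0, bitmap = ALL_ZEROES
 *   !full_discard, v2       -> entry = 0 (deallocated; with a backing file
 *                              the backing data reads through, since v2
 *                              has no zero flag)
 *
 * In every case where the entry changes, the old cluster's refcount is
 * dropped via qcow2_free_any_cluster().
 */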
1961
1962 int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
1963 uint64_t bytes, enum qcow2_discard_type type,
1964 bool full_discard)
1965 {
1966 BDRVQcow2State *s = bs->opaque;
1967 uint64_t end_offset = offset + bytes;
1968 uint64_t nb_clusters;
1969 int64_t cleared;
1970 int ret;
1971
1972 /* Caller must pass aligned values, except at image end */
1973 assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
1974 assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
1975 end_offset == bs->total_sectors << BDRV_SECTOR_BITS);
1976
1977 nb_clusters = size_to_clusters(s, bytes);
1978
1979 s->cache_discards = true;
1980
1981 /* Each L2 slice is handled by its own loop iteration */
1982 while (nb_clusters > 0) {
1983 cleared = discard_in_l2_slice(bs, offset, nb_clusters, type,
1984 full_discard);
1985 if (cleared < 0) {
1986 ret = cleared;
1987 goto fail;
1988 }
1989
1990 nb_clusters -= cleared;
1991 offset += (cleared * s->cluster_size);
1992 }
1993
1994 ret = 0;
1995 fail:
1996 s->cache_discards = false;
1997 qcow2_process_discards(bs, ret);
1998
1999 return ret;
2000 }
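/*
 * Worked example of the slice loop above (illustrative numbers): with
 * 64 KiB clusters and 8-byte L2 entries, an L2 slice holds up to 8192
 * entries. Discarding 10 clusters starting at an offset whose L2 slice
 * index is 8190 takes two iterations: the first discard_in_l2_slice()
 * call is capped at the slice boundary and returns 2, after which offset
 * advances by two clusters and a second call handles the remaining 8 in
 * the next slice.
 */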
2001
2002 /*
2003 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
2004 * all clusters in the same L2 slice) and returns the number of zeroed
2005 * clusters.
2006 */
2007 static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
2008 uint64_t nb_clusters, int flags)
2009 {
2010 BDRVQcow2State *s = bs->opaque;
2011 uint64_t *l2_slice;
2012 int l2_index;
2013 int ret;
2014 int i;
2015
2016 ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
2017 if (ret < 0) {
2018 return ret;
2019 }
2020
2021 /* Limit nb_clusters to one L2 slice */
2022 nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
2023 assert(nb_clusters <= INT_MAX);
2024
2025 for (i = 0; i < nb_clusters; i++) {
2026 uint64_t old_l2_entry = get_l2_entry(s, l2_slice, l2_index + i);
2027 uint64_t old_l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i);
2028 QCow2ClusterType type = qcow2_get_cluster_type(bs, old_l2_entry);
2029 bool unmap = (type == QCOW2_CLUSTER_COMPRESSED) ||
2030 ((flags & BDRV_REQ_MAY_UNMAP) && qcow2_cluster_is_allocated(type));
2031 uint64_t new_l2_entry = unmap ? 0 : old_l2_entry;
2032 uint64_t new_l2_bitmap = old_l2_bitmap;
2033
2034 if (has_subclusters(s)) {
2035 new_l2_bitmap = QCOW_L2_BITMAP_ALL_ZEROES;
2036 } else {
2037 new_l2_entry |= QCOW_OFLAG_ZERO;
2038 }
2039
2040 if (old_l2_entry == new_l2_entry && old_l2_bitmap == new_l2_bitmap) {
2041 continue;
2042 }
2043
2044 /* First update L2 entries */
2045 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
2046 set_l2_entry(s, l2_slice, l2_index + i, new_l2_entry);
2047 if (has_subclusters(s)) {
2048 set_l2_bitmap(s, l2_slice, l2_index + i, new_l2_bitmap);
2049 }
2050
2051 /* Then decrease the refcount */
2052 if (unmap) {
2053 qcow2_free_any_cluster(bs, old_l2_entry, QCOW2_DISCARD_REQUEST);
2054 }
2055 }
2056
2057 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
2058
2059 return nb_clusters;
2060 }
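/*
 * Outcome summary for the zeroing logic above: compressed clusters are
 * always freed (a compressed entry cannot carry the zero flag), allocated
 * clusters are freed only when the caller passed BDRV_REQ_MAY_UNMAP, and
 * in all other cases the host cluster is kept. Either way the
 * guest-visible result is the same: with subclusters the bitmap becomes
 * ALL_ZEROES, without them the entry gets QCOW_OFLAG_ZERO, so the range
 * reads back as zeroes.
 */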
2061
2062 static int zero_l2_subclusters(BlockDriverState *bs, uint64_t offset,
2063 unsigned nb_subclusters)
2064 {
2065 BDRVQcow2State *s = bs->opaque;
2066 uint64_t *l2_slice;
2067 uint64_t old_l2_bitmap, l2_bitmap;
2068 int l2_index, ret, sc = offset_to_sc_index(s, offset);
2069
2070 /* For full clusters use zero_in_l2_slice() instead */
2071 assert(nb_subclusters > 0 && nb_subclusters < s->subclusters_per_cluster);
2072 assert(sc + nb_subclusters <= s->subclusters_per_cluster);
2073 assert(offset_into_subcluster(s, offset) == 0);
2074
2075 ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
2076 if (ret < 0) {
2077 return ret;
2078 }
2079
2080 switch (qcow2_get_cluster_type(bs, get_l2_entry(s, l2_slice, l2_index))) {
2081 case QCOW2_CLUSTER_COMPRESSED:
2082 ret = -ENOTSUP; /* We cannot partially zeroize compressed clusters */
2083 goto out;
2084 case QCOW2_CLUSTER_NORMAL:
2085 case QCOW2_CLUSTER_UNALLOCATED:
2086 break;
2087 default:
2088 g_assert_not_reached();
2089 }
2090
2091 old_l2_bitmap = l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index);
2092
2093 l2_bitmap |= QCOW_OFLAG_SUB_ZERO_RANGE(sc, sc + nb_subclusters);
2094 l2_bitmap &= ~QCOW_OFLAG_SUB_ALLOC_RANGE(sc, sc + nb_subclusters);
2095
2096 if (old_l2_bitmap != l2_bitmap) {
2097 set_l2_bitmap(s, l2_slice, l2_index, l2_bitmap);
2098 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
2099 }
2100
2101 ret = 0;
2102 out:
2103 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
2104
2105 return ret;
2106 }
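/*
 * Bitmap arithmetic example for zero_l2_subclusters() (illustrative,
 * assuming 32 subclusters per cluster, i.e. the extended L2 layout where
 * the low 32 bitmap bits track allocation and the high 32 bits track the
 * zero status): zeroing subclusters 4..11 (sc = 4, nb_subclusters = 8)
 * corresponds to the range mask 0x00000ff0, so the two lines above become
 *
 *     l2_bitmap |=  0x00000ff0ULL << 32;   // mark them as reading zero
 *     l2_bitmap &= ~0x00000ff0ULL;         // mark them as unallocated
 *
 * which is what QCOW_OFLAG_SUB_ZERO_RANGE() and
 * QCOW_OFLAG_SUB_ALLOC_RANGE() expand to for this range.
 */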
2107
2108 int qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset,
2109 uint64_t bytes, int flags)
2110 {
2111 BDRVQcow2State *s = bs->opaque;
2112 uint64_t end_offset = offset + bytes;
2113 uint64_t nb_clusters;
2114 unsigned head, tail;
2115 int64_t cleared;
2116 int ret;
2117
2118 /* If we have to stay in sync with an external data file, zero out
2119 * s->data_file first. */
2120 if (data_file_is_raw(bs)) {
2121 assert(has_data_file(bs));
2122 ret = bdrv_co_pwrite_zeroes(s->data_file, offset, bytes, flags);
2123 if (ret < 0) {
2124 return ret;
2125 }
2126 }
2127
2128 /* Caller must pass aligned values, except at image end */
2129 assert(offset_into_subcluster(s, offset) == 0);
2130 assert(offset_into_subcluster(s, end_offset) == 0 ||
2131 end_offset >= bs->total_sectors << BDRV_SECTOR_BITS);
2132
2133 /*
2134 * The zero flag is only supported by version 3 and newer. However, if we
2135 * have no backing file, we can resort to discard in version 2.
2136 */
2137 if (s->qcow_version < 3) {
2138 if (!bs->backing) {
2139 return qcow2_cluster_discard(bs, offset, bytes,
2140 QCOW2_DISCARD_REQUEST, false);
2141 }
2142 return -ENOTSUP;
2143 }
2144
2145 head = MIN(end_offset, ROUND_UP(offset, s->cluster_size)) - offset;
2146 offset += head;
2147
2148 tail = (end_offset >= bs->total_sectors << BDRV_SECTOR_BITS) ? 0 :
2149 end_offset - MAX(offset, start_of_cluster(s, end_offset));
2150 end_offset -= tail;
2151
2152 s->cache_discards = true;
2153
2154 if (head) {
2155 ret = zero_l2_subclusters(bs, offset - head,
2156 size_to_subclusters(s, head));
2157 if (ret < 0) {
2158 goto fail;
2159 }
2160 }
2161
2162 /* Each L2 slice is handled by its own loop iteration */
2163 nb_clusters = size_to_clusters(s, end_offset - offset);
2164
2165 while (nb_clusters > 0) {
2166 cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags);
2167 if (cleared < 0) {
2168 ret = cleared;
2169 goto fail;
2170 }
2171
2172 nb_clusters -= cleared;
2173 offset += (cleared * s->cluster_size);
2174 }
2175
2176 if (tail) {
2177 ret = zero_l2_subclusters(bs, end_offset, size_to_subclusters(s, tail));
2178 if (ret < 0) {
2179 goto fail;
2180 }
2181 }
2182
2183 ret = 0;
2184 fail:
2185 s->cache_discards = false;
2186 qcow2_process_discards(bs, ret);
2187
2188 return ret;
2189 }
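/*
 * Worked example of the head/middle/tail split above (illustrative,
 * assuming 64 KiB clusters and 2 KiB subclusters): zeroing offset = 6 KiB,
 * bytes = 130 KiB gives end_offset = 136 KiB and
 *
 *     head = MIN(136K, ROUND_UP(6K, 64K)) - 6K = 58 KiB  (29 subclusters)
 *     tail = 136K - MAX(64K, 128K)             =  8 KiB  ( 4 subclusters)
 *
 * so zero_l2_subclusters() handles [6K, 64K) and [128K, 136K), while the
 * single full cluster in [64K, 128K) goes through zero_in_l2_slice().
 */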
2190
2191 /*
2192 * Expands all zero clusters in a specific L1 table (or deallocates them, for
2193 * non-backed non-pre-allocated zero clusters).
2194 *
2195 * l1_entries and *visited_l1_entries are used to keep track of progress for
2196 * status_cb(). l1_entries contains the total number of L1 entries and
2197 * *visited_l1_entries counts all visited L1 entries.
2198 */
2199 static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
2200 int l1_size, int64_t *visited_l1_entries,
2201 int64_t l1_entries,
2202 BlockDriverAmendStatusCB *status_cb,
2203 void *cb_opaque)
2204 {
2205 BDRVQcow2State *s = bs->opaque;
2206 bool is_active_l1 = (l1_table == s->l1_table);
2207 uint64_t *l2_slice = NULL;
2208 unsigned slice, slice_size2, n_slices;
2209 int ret;
2210 int i, j;
2211
2212 /* qcow2_downgrade() is not allowed in images with subclusters */
2213 assert(!has_subclusters(s));
2214
2215 slice_size2 = s->l2_slice_size * l2_entry_size(s);
2216 n_slices = s->cluster_size / slice_size2;
2217
2218 if (!is_active_l1) {
2219 /* inactive L2 tables require a buffer to store them in when they are
2220 * loaded from disk */
2221 l2_slice = qemu_try_blockalign(bs->file->bs, slice_size2);
2222 if (l2_slice == NULL) {
2223 return -ENOMEM;
2224 }
2225 }
2226
2227 for (i = 0; i < l1_size; i++) {
2228 uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
2229 uint64_t l2_refcount;
2230
2231 if (!l2_offset) {
2232 /* unallocated */
2233 (*visited_l1_entries)++;
2234 if (status_cb) {
2235 status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
2236 }
2237 continue;
2238 }
2239
2240 if (offset_into_cluster(s, l2_offset)) {
2241 qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
2242 PRIx64 " unaligned (L1 index: %#x)",
2243 l2_offset, i);
2244 ret = -EIO;
2245 goto fail;
2246 }
2247
2248 ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
2249 &l2_refcount);
2250 if (ret < 0) {
2251 goto fail;
2252 }
2253
2254 for (slice = 0; slice < n_slices; slice++) {
2255 uint64_t slice_offset = l2_offset + slice * slice_size2;
2256 bool l2_dirty = false;
2257 if (is_active_l1) {
2258 /* get active L2 tables from cache */
2259 ret = qcow2_cache_get(bs, s->l2_table_cache, slice_offset,
2260 (void **)&l2_slice);
2261 } else {
2262 /* load inactive L2 tables from disk */
2263 ret = bdrv_pread(bs->file, slice_offset, l2_slice, slice_size2);
2264 }
2265 if (ret < 0) {
2266 goto fail;
2267 }
2268
2269 for (j = 0; j < s->l2_slice_size; j++) {
2270 uint64_t l2_entry = get_l2_entry(s, l2_slice, j);
2271 int64_t offset = l2_entry & L2E_OFFSET_MASK;
2272 QCow2ClusterType cluster_type =
2273 qcow2_get_cluster_type(bs, l2_entry);
2274
2275 if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
2276 cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
2277 continue;
2278 }
2279
2280 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
2281 if (!bs->backing) {
2282 /*
2283 * not backed; therefore we can simply deallocate the
2284 * cluster. No need to call set_l2_bitmap(), this
2285 * function doesn't support images with subclusters.
2286 */
2287 set_l2_entry(s, l2_slice, j, 0);
2288 l2_dirty = true;
2289 continue;
2290 }
2291
2292 offset = qcow2_alloc_clusters(bs, s->cluster_size);
2293 if (offset < 0) {
2294 ret = offset;
2295 goto fail;
2296 }
2297
2298 /* The offset must fit in the offset field */
2299 assert((offset & L2E_OFFSET_MASK) == offset);
2300
2301 if (l2_refcount > 1) {
2302 /* For shared L2 tables, set the refcount accordingly
2303 * (it is already 1 and needs to be l2_refcount) */
2304 ret = qcow2_update_cluster_refcount(
2305 bs, offset >> s->cluster_bits,
2306 refcount_diff(1, l2_refcount), false,
2307 QCOW2_DISCARD_OTHER);
2308 if (ret < 0) {
2309 qcow2_free_clusters(bs, offset, s->cluster_size,
2310 QCOW2_DISCARD_OTHER);
2311 goto fail;
2312 }
2313 }
2314 }
2315
2316 if (offset_into_cluster(s, offset)) {
2317 int l2_index = slice * s->l2_slice_size + j;
2318 qcow2_signal_corruption(
2319 bs, true, -1, -1,
2320 "Cluster allocation offset "
2321 "%#" PRIx64 " unaligned (L2 offset: %#"
2322 PRIx64 ", L2 index: %#x)", offset,
2323 l2_offset, l2_index);
2324 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
2325 qcow2_free_clusters(bs, offset, s->cluster_size,
2326 QCOW2_DISCARD_ALWAYS);
2327 }
2328 ret = -EIO;
2329 goto fail;
2330 }
2331
2332 ret = qcow2_pre_write_overlap_check(bs, 0, offset,
2333 s->cluster_size, true);
2334 if (ret < 0) {
2335 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
2336 qcow2_free_clusters(bs, offset, s->cluster_size,
2337 QCOW2_DISCARD_ALWAYS);
2338 }
2339 goto fail;
2340 }
2341
2342 ret = bdrv_pwrite_zeroes(s->data_file, offset,
2343 s->cluster_size, 0);
2344 if (ret < 0) {
2345 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
2346 qcow2_free_clusters(bs, offset, s->cluster_size,
2347 QCOW2_DISCARD_ALWAYS);
2348 }
2349 goto fail;
2350 }
2351
2352 if (l2_refcount == 1) {
2353 set_l2_entry(s, l2_slice, j, offset | QCOW_OFLAG_COPIED);
2354 } else {
2355 set_l2_entry(s, l2_slice, j, offset);
2356 }
2357 /*
2358 * No need to call set_l2_bitmap() after set_l2_entry() because
2359 * this function doesn't support images with subclusters.
2360 */
2361 l2_dirty = true;
2362 }
2363
2364 if (is_active_l1) {
2365 if (l2_dirty) {
2366 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
2367 qcow2_cache_depends_on_flush(s->l2_table_cache);
2368 }
2369 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
2370 } else {
2371 if (l2_dirty) {
2372 ret = qcow2_pre_write_overlap_check(
2373 bs, QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2,
2374 slice_offset, slice_size2, false);
2375 if (ret < 0) {
2376 goto fail;
2377 }
2378
2379 ret = bdrv_pwrite(bs->file, slice_offset,
2380 l2_slice, slice_size2);
2381 if (ret < 0) {
2382 goto fail;
2383 }
2384 }
2385 }
2386 }
2387
2388 (*visited_l1_entries)++;
2389 if (status_cb) {
2390 status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
2391 }
2392 }
2393
2394 ret = 0;
2395
2396 fail:
2397 if (l2_slice) {
2398 if (!is_active_l1) {
2399 qemu_vfree(l2_slice);
2400 } else {
2401 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
2402 }
2403 }
2404 return ret;
2405 }
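/*
 * State transitions performed by the loop above, per L2 entry (only the
 * two zero cluster types are touched):
 *
 *   ZERO_PLAIN, no backing file -> entry = 0 (simply deallocated)
 *   ZERO_PLAIN, backing file    -> allocate a cluster, write explicit
 *                                  zeroes to it, point the entry at it
 *   ZERO_ALLOC                  -> write explicit zeroes to the existing
 *                                  allocation, keep its offset
 *
 * In the two allocated cases QCOW_OFLAG_COPIED is set only if the L2
 * table's refcount is 1, matching the usual copied-flag semantics.
 */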
2406
2407 /*
2408 * For backed images, expands all zero clusters on the image. For non-backed
2409 * images, deallocates all non-pre-allocated zero clusters (and claims the
2410 * allocation for pre-allocated ones). This is important for downgrading to a
2411 * qcow2 version which doesn't yet support metadata zero clusters.
2412 */
2413 int qcow2_expand_zero_clusters(BlockDriverState *bs,
2414 BlockDriverAmendStatusCB *status_cb,
2415 void *cb_opaque)
2416 {
2417 BDRVQcow2State *s = bs->opaque;
2418 uint64_t *l1_table = NULL;
2419 int64_t l1_entries = 0, visited_l1_entries = 0;
2420 int ret;
2421 int i, j;
2422
2423 if (status_cb) {
2424 l1_entries = s->l1_size;
2425 for (i = 0; i < s->nb_snapshots; i++) {
2426 l1_entries += s->snapshots[i].l1_size;
2427 }
2428 }
2429
2430 ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
2431 &visited_l1_entries, l1_entries,
2432 status_cb, cb_opaque);
2433 if (ret < 0) {
2434 goto fail;
2435 }
2436
2437 /* Inactive L1 tables may point to active L2 tables - therefore it is
2438 * necessary to flush the L2 table cache before trying to access the L2
2439 * tables pointed to by inactive L1 entries (else we might try to expand
2440 * zero clusters that have already been expanded); furthermore, it is also
2441 * necessary to empty the L2 table cache, since it may contain tables which
2442 * are now going to be modified directly on disk, bypassing the cache.
2443 * qcow2_cache_empty() does both for us. */
2444 ret = qcow2_cache_empty(bs, s->l2_table_cache);
2445 if (ret < 0) {
2446 goto fail;
2447 }
2448
2449 for (i = 0; i < s->nb_snapshots; i++) {
2450 int l1_size2;
2451 uint64_t *new_l1_table;
2452 Error *local_err = NULL;
2453
2454 ret = qcow2_validate_table(bs, s->snapshots[i].l1_table_offset,
2455 s->snapshots[i].l1_size, L1E_SIZE,
2456 QCOW_MAX_L1_SIZE, "Snapshot L1 table",
2457 &local_err);
2458 if (ret < 0) {
2459 error_report_err(local_err);
2460 goto fail;
2461 }
2462
2463 l1_size2 = s->snapshots[i].l1_size * L1E_SIZE;
2464 new_l1_table = g_try_realloc(l1_table, l1_size2);
2465
2466 if (!new_l1_table) {
2467 ret = -ENOMEM;
2468 goto fail;
2469 }
2470
2471 l1_table = new_l1_table;
2472
2473 ret = bdrv_pread(bs->file, s->snapshots[i].l1_table_offset,
2474 l1_table, l1_size2);
2475 if (ret < 0) {
2476 goto fail;
2477 }
2478
2479 for (j = 0; j < s->snapshots[i].l1_size; j++) {
2480 be64_to_cpus(&l1_table[j]);
2481 }
2482
2483 ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
2484 &visited_l1_entries, l1_entries,
2485 status_cb, cb_opaque);
2486 if (ret < 0) {
2487 goto fail;
2488 }
2489 }
2490
2491 ret = 0;
2492
2493 fail:
2494 g_free(l1_table);
2495 return ret;
2496 }
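/*
 * A minimal sketch of a BlockDriverAmendStatusCB suitable for the
 * status_cb parameter above (report_progress is a hypothetical helper,
 * not part of this file; the real callers are the amend/downgrade paths
 * in qcow2.c):
 *
 *     static void report_progress(BlockDriverState *bs, int64_t offset,
 *                                 int64_t total, void *opaque)
 *     {
 *         // offset counts visited L1 entries here, total is l1_entries
 *         fprintf(stderr, "zero cluster expansion: %.0f%%\n",
 *                 total ? 100.0 * offset / total : 100.0);
 *     }
 *
 *     ret = qcow2_expand_zero_clusters(bs, report_progress, NULL);
 */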
2497
2498 void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
2499 uint64_t *coffset, int *csize)
2500 {
2501 BDRVQcow2State *s = bs->opaque;
2502 int nb_csectors;
2503
2504 assert(qcow2_get_cluster_type(bs, l2_entry) == QCOW2_CLUSTER_COMPRESSED);
2505
2506 *coffset = l2_entry & s->cluster_offset_mask;
2507
2508 nb_csectors = ((l2_entry >> s->csize_shift) & s->csize_mask) + 1;
2509 *csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE -
2510 (*coffset & (QCOW2_COMPRESSED_SECTOR_SIZE - 1));
2511 }