/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"

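/*
 * Grows the L1 table so that it can hold at least min_size entries. Unless
 * exact_size is set, the size is bumped in steps of roughly 3/2 to amortise
 * repeated growth. A worked example of the growth loop below (illustrative
 * values only): starting from l1_size == 1 and asking for min_size == 5, the
 * loop yields 1 -> 2 -> 3 -> 5, so five entries end up being allocated even
 * though only one additional entry was strictly needed.
 */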
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

    if (new_l1_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file,
                                       align_offset(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, align_offset(new_l1_size2, 512));

    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success. On failure, a negative errno value is returned and
 * *l2_table is not valid.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
                   uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}
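
/*
 * A successful l2_load() takes a reference on the cache entry; the caller
 * must drop it again with qcow2_cache_put() once it is done with the table.
 * A minimal usage sketch (hypothetical caller, error handling shortened),
 * matching how this file itself uses the function:
 *
 *     uint64_t *l2_table;
 *     ret = l2_load(bs, l2_offset, &l2_table);
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     entry = be64_to_cpu(l2_table[l2_index]);
 *     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
 */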

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
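
/*
 * Example of the alignment performed above: with L1_ENTRIES_PER_SECTOR == 64,
 * updating l1_index 70 rounds l1_start_index down to 64, so entries 64..127
 * are rewritten together in a single 512-byte write.
 */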

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table), copy the contents of the old L2 table into the newly allocated
 * one. Otherwise the new table is initialized with zeros.
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
                              old_l2_offset & L1E_OFFSET_MASK,
                              (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster, which may require different handling.)
 */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset) {
        return 0;
    }

    assert(qcow2_get_cluster_type(first_entry) != QCOW2_CLUSTER_COMPRESSED);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}
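
/*
 * An illustrative example (hypothetical values, stop_flags 0, otherwise
 * identical flag bits): with cluster_size 65536 and L2 entries pointing at
 * host offsets X, X + 65536, X + 131072 and X + 262144, the first three
 * entries form a contiguous run and count_contiguous_clusters() returns 3.
 */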

static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the Linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}
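
/*
 * The IV for each 512-byte sector is simply the virtual sector number,
 * stored little-endian in the low 8 bytes of the 16-byte IV block (the high
 * 8 bytes are zero); this per-sector IV scheme is what makes the format
 * compatible with cryptoloop as noted above.
 */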

static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_try_blockalign(bs, iov.iov_len);
    if (iov.iov_base == NULL) {
        return -ENOMEM;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        ret = -ENOMEDIUM;
        goto out;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                              iov.iov_base, iov.iov_base, n, 1,
                              &s->aes_encrypt_key);
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}


/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * On exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the L2 offset in the L1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
            return -EIO;
        }
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed) {
        nb_available = nb_needed;
    }

    *num = nb_available - index_in_cluster;

    return ret;
}
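
/*
 * A minimal caller sketch (hypothetical; error handling omitted). Note that
 * for QCOW2_CLUSTER_NORMAL the guest byte at offset lives at the returned
 * cluster offset plus the offset within the cluster:
 *
 *     int nb_sectors = 16;
 *     uint64_t cluster_offset;
 *     ret = qcow2_get_cluster_offset(bs, offset, &nb_sectors,
 *                                    &cluster_offset);
 *     if (ret == QCOW2_CLUSTER_NORMAL) {
 *         host = cluster_offset + (offset & (s->cluster_size - 1));
 *     }
 *
 * On return, nb_sectors may have shrunk to the contiguous run actually found.
 */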

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the L2 offset in the L1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful; returns 0 otherwise.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}
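
/*
 * Layout of the resulting L2 entry for a compressed cluster: the low bits
 * hold the host offset of the compressed data, and the field starting at
 * s->csize_shift holds nb_csectors, i.e. the index of the last 512-byte
 * sector touched by the compressed data relative to the first one (the read
 * side in qcow2_decompress_cluster() below adds 1 back to get the sector
 * count). QCOW_OFLAG_COMPRESSED marks the entry so qcow2_get_cluster_type()
 * can tell it apart from a normal cluster.
 */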

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

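    /* copy_sectors() issues guest and image file I/O and may yield; drop
     * s->lock across the call so that other requests can make progress in
     * the meantime. */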
    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the L2 table with its own
         * cluster pointer, and free the old cluster. This is what this loop
         * does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
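
/*
 * Example of the shortening logic above (illustrative offsets): if a request
 * covers bytes [0, 128k) while another allocating request is already in
 * flight for [64k, 96k), *cur_bytes is reduced to 64k so that the
 * non-overlapping head is handled now and the overlapping part is revisited
 * in a later iteration, after the dependency has completed.
 */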

/*
 * Checks how many clusters are already allocated and don't require a copy on
 * write at the given guest_offset (up to *bytes). If *host_offset is not
 * zero, only physically contiguous clusters beginning at this host offset
 * are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *   -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    unsigned int nb_clusters;
    unsigned int keep_clusters;
    int ret, pret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    pret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (pret < 0) {
        return pret;
    }

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}
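
/*
 * Note the calling convention shared by handle_copied() and handle_alloc()
 * below: both report progress through *bytes and the host offset pointer so
 * that qcow2_alloc_cluster_offset() can simply chain them in a loop until
 * the whole request has been mapped.
 */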

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
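
/*
 * In other words: *host_offset == 0 means "allocate anywhere", anything else
 * means "allocate exactly here or not at all". As an illustrative example, a
 * caller passing *host_offset = 3 * cluster_size and *nb_clusters = 2 either
 * gets those clusters (possibly fewer, if only a prefix of the range is
 * free) or comes back with *nb_clusters == 0.
 */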

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *   -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    unsigned int nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /* !*host_offset would overwrite the image header and is reserved for "no
     * host offset preferred". If 0 was a valid host offset, it'd trigger the
     * following overlap check; do that now to avoid having an invalid value in
     * *host_offset. */
    if (!alloc_cluster_offset) {
        ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                            nb_clusters * s->cluster_size);
        assert(ret < 0);
        goto fail;
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the (possibly shortened)
     * write request.
     *
     * avail_sectors: Number of sectors from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_sectors: The number of sectors from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    int requested_sectors =
        (*bytes + offset_into_cluster(s, guest_offset))
        >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters
                        << (s->cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(s, guest_offset)
                        >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,
        .nb_available   = nb_sectors,

        .cow_start = {
            .offset     = 0,
            .nb_sectors = alloc_n_start,
        },
        .cow_end = {
            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
            .nb_sectors = avail_sectors - nb_sectors,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
                         - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *num);

    assert((offset & ~BDRV_SECTOR_MASK) == 0);

again:
    start = offset;
    remaining = *num << BDRV_SECTOR_BITS;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start           += cur_bytes;
        remaining       -= cur_bytes;
        cluster_offset  += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *num -= remaining >> BDRV_SECTOR_BITS;
    assert(*num > 0);
    assert(*host_offset != 0);

    return 0;
}
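
/*
 * Simplified sketch of how the write path is expected to use this function
 * (the real caller is qcow2_co_writev() in block/qcow2.c; the local variable
 * names here are hypothetical): allocate, write the guest data, then link
 * the new clusters into the L2 table for each returned QCowL2Meta:
 *
 *     ret = qcow2_alloc_cluster_offset(bs, offset, &cur_nr_sectors,
 *                                      &cluster_offset, &l2meta);
 *     ...write guest data to cluster_offset...
 *     while (l2meta != NULL) {
 *         QCowL2Meta *next = l2meta->next;
 *         ret = qcow2_alloc_cluster_link_l2(bs, l2meta);
 *         ...wake l2meta->dependent_requests, free l2meta...
 *         l2meta = next;
 *     }
 */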

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK) {
        return -1;
    }
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}
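
/*
 * Passing a negative windowBits value (-12) to inflateInit2() selects raw
 * deflate data without a zlib header or checksum, which matches how
 * compressed clusters are produced on the write side.
 */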

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_table[l2_index + i]);

        /*
         * Make sure that a discarded area reads back as zeroes for v3 images
         * (we cannot do it for v2 without actually writing a zero-filled
         * buffer). We can skip the operation if the cluster is already marked
         * as zero, or if it's unallocated and we don't have a backing file.
         *
         * TODO We might want to use bdrv_get_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         */
        switch (qcow2_get_cluster_type(old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (!bs->backing_hd) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            continue;

        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        if (s->qcow_version >= 3) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_table[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset = start_of_cluster(s, end_offset);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters, type);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
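
/*
 * Because the start is rounded up and the end rounded down, partial clusters
 * at either edge of the range are simply left alone. For example (with
 * illustrative 64k clusters), discarding bytes [100k, 300k) only affects the
 * two clusters covering [128k, 256k); if the rounding makes the range empty,
 * the function returns 0 without touching anything.
 */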

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * expanded_clusters is a bitmap where every bit corresponds to one cluster in
 * the image file; a bit gets set if the corresponding cluster has been used for
 * zero expansion (i.e., has been filled with zeroes and is referenced from an
 * L2 table). nb_clusters contains the total cluster count of the image file,
 * i.e., the number of bits in expanded_clusters.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, uint8_t **expanded_clusters,
                                      uint64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_table = NULL;
    int ret;
    int i, j;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_table = qemu_try_blockalign(bs->file, s->cluster_size);
        if (l2_table == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        bool l2_dirty = false;

        if (!l2_offset) {
            /* unallocated */
            continue;
        }

        if (is_active_l1) {
            /* get active L2 tables from cache */
            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                    (void **)&l2_table);
        } else {
            /* load inactive L2 tables from disk */
            ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                    (void *)l2_table, s->cluster_sectors);
        }
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            int64_t offset = l2_entry & L2E_OFFSET_MASK, cluster_index;
            int cluster_type = qcow2_get_cluster_type(l2_entry);
            bool preallocated = offset != 0;

            if (cluster_type == QCOW2_CLUSTER_NORMAL) {
                cluster_index = offset >> s->cluster_bits;
                assert((cluster_index >= 0) && (cluster_index < *nb_clusters));
                if ((*expanded_clusters)[cluster_index / 8] &
                    (1 << (cluster_index % 8))) {
                    /* Probably a shared L2 table; this cluster was a zero
                     * cluster which has been expanded, so its refcount most
                     * likely requires an update. */
                    ret = qcow2_update_cluster_refcount(bs, cluster_index, 1,
                                                        QCOW2_DISCARD_NEVER);
                    if (ret < 0) {
                        goto fail;
                    }
                    /* Since we just increased the refcount, the COPIED flag may
                     * no longer be set. */
                    l2_table[j] = cpu_to_be64(l2_entry & ~QCOW_OFLAG_COPIED);
                    l2_dirty = true;
                }
                continue;
            } else if (qcow2_get_cluster_type(l2_entry) != QCOW2_CLUSTER_ZERO) {
                continue;
            }

            if (!preallocated) {
                if (!bs->backing_hd) {
                    /* not backed; therefore we can simply deallocate the
                     * cluster */
                    l2_table[j] = 0;
                    l2_dirty = true;
                    continue;
                }

                offset = qcow2_alloc_clusters(bs, s->cluster_size);
                if (offset < 0) {
                    ret = offset;
                    goto fail;
                }
            }

            ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            ret = bdrv_write_zeroes(bs->file, offset / BDRV_SECTOR_SIZE,
                                    s->cluster_sectors, 0);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            l2_dirty = true;

            cluster_index = offset >> s->cluster_bits;

            if (cluster_index >= *nb_clusters) {
                uint64_t old_bitmap_size = (*nb_clusters + 7) / 8;
                uint64_t new_bitmap_size;
                /* The offset may lie beyond the old end of the underlying image
                 * file for growable files only */
                assert(bs->file->growable);
                *nb_clusters = size_to_clusters(s, bs->file->total_sectors *
                                                BDRV_SECTOR_SIZE);
                new_bitmap_size = (*nb_clusters + 7) / 8;
                *expanded_clusters = g_realloc(*expanded_clusters,
                                               new_bitmap_size);
                /* clear the newly allocated space */
                memset(&(*expanded_clusters)[old_bitmap_size], 0,
                       new_bitmap_size - old_bitmap_size);
            }

            assert((cluster_index >= 0) && (cluster_index < *nb_clusters));
            (*expanded_clusters)[cluster_index / 8] |= 1 << (cluster_index % 8);
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            ret = qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
            if (ret < 0) {
                l2_table = NULL;
                goto fail;
            }
        } else {
            if (l2_dirty) {
                ret = qcow2_pre_write_overlap_check(bs,
                        QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
                        s->cluster_size);
                if (ret < 0) {
                    goto fail;
                }

                ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                                 (void *)l2_table, s->cluster_sectors);
                if (ret < 0) {
                    goto fail;
                }
            }
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else {
            if (ret < 0) {
                qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
            } else {
                ret = qcow2_cache_put(bs, s->l2_table_cache,
                                      (void **)&l2_table);
            }
        }
    }
    return ret;
}

/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table = NULL;
    uint64_t nb_clusters;
    uint8_t *expanded_clusters;
    int ret;
    int i, j;

    nb_clusters = size_to_clusters(s, bs->file->total_sectors *
                                   BDRV_SECTOR_SIZE);
    expanded_clusters = g_try_malloc0((nb_clusters + 7) / 8);
    if (expanded_clusters == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &expanded_clusters, &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = (s->snapshots[i].l1_size * sizeof(uint64_t) +
                BDRV_SECTOR_SIZE - 1) / BDRV_SECTOR_SIZE;

        l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        ret = bdrv_read(bs->file, s->snapshots[i].l1_table_offset /
                BDRV_SECTOR_SIZE, (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &expanded_clusters, &nb_clusters);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(expanded_clusters);
    g_free(l1_table);
    return ret;
}