1 /*
2 * Block driver for the QCOW version 2 format
3 *
4 * Copyright (c) 2004-2006 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "qemu/osdep.h"
26
27 #define ZLIB_CONST
28 #include <zlib.h>
29
30 #include "block/block_int.h"
31 #include "block/qdict.h"
32 #include "sysemu/block-backend.h"
33 #include "qemu/module.h"
34 #include "qcow2.h"
35 #include "qemu/error-report.h"
36 #include "qapi/error.h"
37 #include "qapi/qapi-events-block-core.h"
38 #include "qapi/qmp/qdict.h"
39 #include "qapi/qmp/qstring.h"
40 #include "trace.h"
41 #include "qemu/option_int.h"
42 #include "qemu/cutils.h"
43 #include "qemu/bswap.h"
44 #include "qapi/qobject-input-visitor.h"
45 #include "qapi/qapi-visit-block-core.h"
46 #include "crypto.h"
47 #include "block/thread-pool.h"
48
49 /*
50 Differences with QCOW:
51
52 - Support for multiple incremental snapshots.
53 - Memory management by reference counts.
54 - Clusters which have a reference count of one have the bit
55 QCOW_OFLAG_COPIED to optimize write performance.
56 - Size of compressed clusters is stored in sectors to reduce bit usage
57 in the cluster offsets.
58 - Support for storing additional data (such as the VM state) in the
59 snapshots.
60 - If a backing store is used, the cluster size is not constrained
61 (could be backported to QCOW).
62   - L2 tables always have a size of one cluster.
63 */
64
65
66 typedef struct {
67 uint32_t magic;
68 uint32_t len;
69 } QEMU_PACKED QCowExtension;
70
71 #define QCOW2_EXT_MAGIC_END 0
72 #define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA
73 #define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857
74 #define QCOW2_EXT_MAGIC_CRYPTO_HEADER 0x0537be77
75 #define QCOW2_EXT_MAGIC_BITMAPS 0x23852875
76 #define QCOW2_EXT_MAGIC_DATA_FILE 0x44415441
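
/*
 * Header extensions are stored one after another right after the qcow2
 * header (and before the backing file name, if any).  Each extension starts
 * with a QCowExtension descriptor (big-endian magic and length), followed by
 * 'len' bytes of data padded up to a multiple of 8 bytes.  The list is
 * terminated by an extension with magic QCOW2_EXT_MAGIC_END.
 */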
77
78 static int coroutine_fn
79 qcow2_co_preadv_compressed(BlockDriverState *bs,
80 uint64_t file_cluster_offset,
81 uint64_t offset,
82 uint64_t bytes,
83 QEMUIOVector *qiov);
84
85 static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
86 {
87 const QCowHeader *cow_header = (const void *)buf;
88
89 if (buf_size >= sizeof(QCowHeader) &&
90 be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
91 be32_to_cpu(cow_header->version) >= 2)
92 return 100;
93 else
94 return 0;
95 }
96
97
98 static ssize_t qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset,
99 uint8_t *buf, size_t buflen,
100 void *opaque, Error **errp)
101 {
102 BlockDriverState *bs = opaque;
103 BDRVQcow2State *s = bs->opaque;
104 ssize_t ret;
105
106 if ((offset + buflen) > s->crypto_header.length) {
107 error_setg(errp, "Request for data outside of extension header");
108 return -1;
109 }
110
111 ret = bdrv_pread(bs->file,
112 s->crypto_header.offset + offset, buf, buflen);
113 if (ret < 0) {
114 error_setg_errno(errp, -ret, "Could not read encryption header");
115 return -1;
116 }
117 return ret;
118 }
119
120
121 static ssize_t qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen,
122 void *opaque, Error **errp)
123 {
124 BlockDriverState *bs = opaque;
125 BDRVQcow2State *s = bs->opaque;
126 int64_t ret;
127 int64_t clusterlen;
128
129 ret = qcow2_alloc_clusters(bs, headerlen);
130 if (ret < 0) {
131 error_setg_errno(errp, -ret,
132 "Cannot allocate cluster for LUKS header size %zu",
133 headerlen);
134 return -1;
135 }
136
137 s->crypto_header.length = headerlen;
138 s->crypto_header.offset = ret;
139
140 /* Zero fill remaining space in cluster so it has predictable
141 * content in case of future spec changes */
142 clusterlen = size_to_clusters(s, headerlen) * s->cluster_size;
143 assert(qcow2_pre_write_overlap_check(bs, 0, ret, clusterlen) == 0);
144 ret = bdrv_pwrite_zeroes(bs->file,
145 ret + headerlen,
146 clusterlen - headerlen, 0);
147 if (ret < 0) {
148 error_setg_errno(errp, -ret, "Could not zero fill encryption header");
149 return -1;
150 }
151
152 return ret;
153 }
154
155
156 static ssize_t qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset,
157 const uint8_t *buf, size_t buflen,
158 void *opaque, Error **errp)
159 {
160 BlockDriverState *bs = opaque;
161 BDRVQcow2State *s = bs->opaque;
162 ssize_t ret;
163
164 if ((offset + buflen) > s->crypto_header.length) {
165 error_setg(errp, "Request for data outside of extension header");
166 return -1;
167 }
168
169 ret = bdrv_pwrite(bs->file,
170 s->crypto_header.offset + offset, buf, buflen);
171 if (ret < 0) {
172 error_setg_errno(errp, -ret, "Could not read encryption header");
173 return -1;
174 }
175 return ret;
176 }
177
178
179 /*
180  * Read qcow2 extensions and fill bs.
181  * Start reading from start_offset;
182  * finish reading upon a magic of value 0 or when end_offset is reached.
183  * Unknown magics are skipped (future extensions this version knows nothing about).
184  * Return 0 upon success, non-0 otherwise.
185 */
186 static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
187 uint64_t end_offset, void **p_feature_table,
188 int flags, bool *need_update_header,
189 Error **errp)
190 {
191 BDRVQcow2State *s = bs->opaque;
192 QCowExtension ext;
193 uint64_t offset;
194 int ret;
195 Qcow2BitmapHeaderExt bitmaps_ext;
196
197 if (need_update_header != NULL) {
198 *need_update_header = false;
199 }
200
201 #ifdef DEBUG_EXT
202 printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset);
203 #endif
204 offset = start_offset;
205 while (offset < end_offset) {
206
207 #ifdef DEBUG_EXT
208 /* Sanity check */
209 if (offset > s->cluster_size)
210 printf("qcow2_read_extension: suspicious offset %lu\n", offset);
211
212 printf("attempting to read extended header in offset %lu\n", offset);
213 #endif
214
215 ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext));
216 if (ret < 0) {
217 error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: "
218 "pread fail from offset %" PRIu64, offset);
219 return 1;
220 }
221 ext.magic = be32_to_cpu(ext.magic);
222 ext.len = be32_to_cpu(ext.len);
223 offset += sizeof(ext);
224 #ifdef DEBUG_EXT
225 printf("ext.magic = 0x%x\n", ext.magic);
226 #endif
227 if (offset > end_offset || ext.len > end_offset - offset) {
228 error_setg(errp, "Header extension too large");
229 return -EINVAL;
230 }
231
232 switch (ext.magic) {
233 case QCOW2_EXT_MAGIC_END:
234 return 0;
235
236 case QCOW2_EXT_MAGIC_BACKING_FORMAT:
237 if (ext.len >= sizeof(bs->backing_format)) {
238 error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32
239 " too large (>=%zu)", ext.len,
240 sizeof(bs->backing_format));
241 return 2;
242 }
243 ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len);
244 if (ret < 0) {
245 error_setg_errno(errp, -ret, "ERROR: ext_backing_format: "
246 "Could not read format name");
247 return 3;
248 }
249 bs->backing_format[ext.len] = '\0';
250 s->image_backing_format = g_strdup(bs->backing_format);
251 #ifdef DEBUG_EXT
252 printf("Qcow2: Got format extension %s\n", bs->backing_format);
253 #endif
254 break;
255
256 case QCOW2_EXT_MAGIC_FEATURE_TABLE:
257 if (p_feature_table != NULL) {
258 void* feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
259 ret = bdrv_pread(bs->file, offset , feature_table, ext.len);
260 if (ret < 0) {
261 error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
262 "Could not read table");
263 return ret;
264 }
265
266 *p_feature_table = feature_table;
267 }
268 break;
269
270 case QCOW2_EXT_MAGIC_CRYPTO_HEADER: {
271 unsigned int cflags = 0;
272 if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
273 error_setg(errp, "CRYPTO header extension only "
274 "expected with LUKS encryption method");
275 return -EINVAL;
276 }
277 if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) {
278 error_setg(errp, "CRYPTO header extension size %u, "
279 "but expected size %zu", ext.len,
280 sizeof(Qcow2CryptoHeaderExtension));
281 return -EINVAL;
282 }
283
284 ret = bdrv_pread(bs->file, offset, &s->crypto_header, ext.len);
285 if (ret < 0) {
286 error_setg_errno(errp, -ret,
287 "Unable to read CRYPTO header extension");
288 return ret;
289 }
290 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset);
291 s->crypto_header.length = be64_to_cpu(s->crypto_header.length);
292
293 if ((s->crypto_header.offset % s->cluster_size) != 0) {
294 error_setg(errp, "Encryption header offset '%" PRIu64 "' is "
295 "not a multiple of cluster size '%u'",
296 s->crypto_header.offset, s->cluster_size);
297 return -EINVAL;
298 }
299
300 if (flags & BDRV_O_NO_IO) {
301 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
302 }
303 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
304 qcow2_crypto_hdr_read_func,
305 bs, cflags, 1, errp);
306 if (!s->crypto) {
307 return -EINVAL;
308 }
309 } break;
310
311 case QCOW2_EXT_MAGIC_BITMAPS:
312 if (ext.len != sizeof(bitmaps_ext)) {
313 error_setg_errno(errp, -ret, "bitmaps_ext: "
314 "Invalid extension length");
315 return -EINVAL;
316 }
317
318 if (!(s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS)) {
319 if (s->qcow_version < 3) {
320 /* Let's be a bit more specific */
321 warn_report("This qcow2 v2 image contains bitmaps, but "
322 "they may have been modified by a program "
323 "without persistent bitmap support; so now "
324 "they must all be considered inconsistent");
325 } else {
326 warn_report("a program lacking bitmap support "
327 "modified this file, so all bitmaps are now "
328 "considered inconsistent");
329 }
330 error_printf("Some clusters may be leaked, "
331 "run 'qemu-img check -r' on the image "
332 "file to fix.");
333 if (need_update_header != NULL) {
334 /* Updating is needed to drop invalid bitmap extension. */
335 *need_update_header = true;
336 }
337 break;
338 }
339
340 ret = bdrv_pread(bs->file, offset, &bitmaps_ext, ext.len);
341 if (ret < 0) {
342 error_setg_errno(errp, -ret, "bitmaps_ext: "
343 "Could not read ext header");
344 return ret;
345 }
346
347 if (bitmaps_ext.reserved32 != 0) {
348 error_setg_errno(errp, -ret, "bitmaps_ext: "
349 "Reserved field is not zero");
350 return -EINVAL;
351 }
352
353 bitmaps_ext.nb_bitmaps = be32_to_cpu(bitmaps_ext.nb_bitmaps);
354 bitmaps_ext.bitmap_directory_size =
355 be64_to_cpu(bitmaps_ext.bitmap_directory_size);
356 bitmaps_ext.bitmap_directory_offset =
357 be64_to_cpu(bitmaps_ext.bitmap_directory_offset);
358
359 if (bitmaps_ext.nb_bitmaps > QCOW2_MAX_BITMAPS) {
360 error_setg(errp,
361 "bitmaps_ext: Image has %" PRIu32 " bitmaps, "
362 "exceeding the QEMU supported maximum of %d",
363 bitmaps_ext.nb_bitmaps, QCOW2_MAX_BITMAPS);
364 return -EINVAL;
365 }
366
367 if (bitmaps_ext.nb_bitmaps == 0) {
368 error_setg(errp, "found bitmaps extension with zero bitmaps");
369 return -EINVAL;
370 }
371
372 if (bitmaps_ext.bitmap_directory_offset & (s->cluster_size - 1)) {
373 error_setg(errp, "bitmaps_ext: "
374 "invalid bitmap directory offset");
375 return -EINVAL;
376 }
377
378 if (bitmaps_ext.bitmap_directory_size >
379 QCOW2_MAX_BITMAP_DIRECTORY_SIZE) {
380 error_setg(errp, "bitmaps_ext: "
381 "bitmap directory size (%" PRIu64 ") exceeds "
382 "the maximum supported size (%d)",
383 bitmaps_ext.bitmap_directory_size,
384 QCOW2_MAX_BITMAP_DIRECTORY_SIZE);
385 return -EINVAL;
386 }
387
388 s->nb_bitmaps = bitmaps_ext.nb_bitmaps;
389 s->bitmap_directory_offset =
390 bitmaps_ext.bitmap_directory_offset;
391 s->bitmap_directory_size =
392 bitmaps_ext.bitmap_directory_size;
393
394 #ifdef DEBUG_EXT
395 printf("Qcow2: Got bitmaps extension: "
396 "offset=%" PRIu64 " nb_bitmaps=%" PRIu32 "\n",
397 s->bitmap_directory_offset, s->nb_bitmaps);
398 #endif
399 break;
400
401 default:
402 /* unknown magic - save it in case we need to rewrite the header */
403 /* If you add a new feature, make sure to also update the fast
404 * path of qcow2_make_empty() to deal with it. */
405 {
406 Qcow2UnknownHeaderExtension *uext;
407
408 uext = g_malloc0(sizeof(*uext) + ext.len);
409 uext->magic = ext.magic;
410 uext->len = ext.len;
411 QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);
412
413 ret = bdrv_pread(bs->file, offset , uext->data, uext->len);
414 if (ret < 0) {
415 error_setg_errno(errp, -ret, "ERROR: unknown extension: "
416 "Could not read data");
417 return ret;
418 }
419 }
420 break;
421 }
422
423 offset += ((ext.len + 7) & ~7);
424 }
425
426 return 0;
427 }
428
429 static void cleanup_unknown_header_ext(BlockDriverState *bs)
430 {
431 BDRVQcow2State *s = bs->opaque;
432 Qcow2UnknownHeaderExtension *uext, *next;
433
434 QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) {
435 QLIST_REMOVE(uext, next);
436 g_free(uext);
437 }
438 }
439
440 static void report_unsupported_feature(Error **errp, Qcow2Feature *table,
441 uint64_t mask)
442 {
443 char *features = g_strdup("");
444 char *old;
445
446 while (table && table->name[0] != '\0') {
447 if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) {
448 if (mask & (1ULL << table->bit)) {
449 old = features;
450 features = g_strdup_printf("%s%s%.46s", old, *old ? ", " : "",
451 table->name);
452 g_free(old);
453 mask &= ~(1ULL << table->bit);
454 }
455 }
456 table++;
457 }
458
459 if (mask) {
460 old = features;
461 features = g_strdup_printf("%s%sUnknown incompatible feature: %" PRIx64,
462 old, *old ? ", " : "", mask);
463 g_free(old);
464 }
465
466 error_setg(errp, "Unsupported qcow2 feature(s): %s", features);
467 g_free(features);
468 }
469
470 /*
471 * Sets the dirty bit and flushes afterwards if necessary.
472 *
473 * The incompatible_features bit is only set if the image file header was
474 * updated successfully. Therefore it is not required to check the return
475 * value of this function.
476 */
477 int qcow2_mark_dirty(BlockDriverState *bs)
478 {
479 BDRVQcow2State *s = bs->opaque;
480 uint64_t val;
481 int ret;
482
483 assert(s->qcow_version >= 3);
484
485 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
486 return 0; /* already dirty */
487 }
488
489 val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY);
490 ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features),
491 &val, sizeof(val));
492 if (ret < 0) {
493 return ret;
494 }
495 ret = bdrv_flush(bs->file->bs);
496 if (ret < 0) {
497 return ret;
498 }
499
500 /* Only treat image as dirty if the header was updated successfully */
501 s->incompatible_features |= QCOW2_INCOMPAT_DIRTY;
502 return 0;
503 }
504
505 /*
506 * Clears the dirty bit and flushes before if necessary. Only call this
507 * function when there are no pending requests, it does not guard against
508 * concurrent requests dirtying the image.
509 */
510 static int qcow2_mark_clean(BlockDriverState *bs)
511 {
512 BDRVQcow2State *s = bs->opaque;
513
514 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
515 int ret;
516
517 s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY;
518
519 ret = qcow2_flush_caches(bs);
520 if (ret < 0) {
521 return ret;
522 }
523
524 return qcow2_update_header(bs);
525 }
526 return 0;
527 }
528
529 /*
530 * Marks the image as corrupt.
531 */
532 int qcow2_mark_corrupt(BlockDriverState *bs)
533 {
534 BDRVQcow2State *s = bs->opaque;
535
536 s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT;
537 return qcow2_update_header(bs);
538 }
539
540 /*
541 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes
542 * before if necessary.
543 */
544 int qcow2_mark_consistent(BlockDriverState *bs)
545 {
546 BDRVQcow2State *s = bs->opaque;
547
548 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
549 int ret = qcow2_flush_caches(bs);
550 if (ret < 0) {
551 return ret;
552 }
553
554 s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT;
555 return qcow2_update_header(bs);
556 }
557 return 0;
558 }
559
560 static int coroutine_fn qcow2_co_check_locked(BlockDriverState *bs,
561 BdrvCheckResult *result,
562 BdrvCheckMode fix)
563 {
564 int ret = qcow2_check_refcounts(bs, result, fix);
565 if (ret < 0) {
566 return ret;
567 }
568
569 if (fix && result->check_errors == 0 && result->corruptions == 0) {
570 ret = qcow2_mark_clean(bs);
571 if (ret < 0) {
572 return ret;
573 }
574 return qcow2_mark_consistent(bs);
575 }
576 return ret;
577 }
578
579 static int coroutine_fn qcow2_co_check(BlockDriverState *bs,
580 BdrvCheckResult *result,
581 BdrvCheckMode fix)
582 {
583 BDRVQcow2State *s = bs->opaque;
584 int ret;
585
586 qemu_co_mutex_lock(&s->lock);
587 ret = qcow2_co_check_locked(bs, result, fix);
588 qemu_co_mutex_unlock(&s->lock);
589 return ret;
590 }
591
592 int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
593 uint64_t entries, size_t entry_len,
594 int64_t max_size_bytes, const char *table_name,
595 Error **errp)
596 {
597 BDRVQcow2State *s = bs->opaque;
598
599 if (entries > max_size_bytes / entry_len) {
600 error_setg(errp, "%s too large", table_name);
601 return -EFBIG;
602 }
603
604 /* Use signed INT64_MAX as the maximum even for uint64_t header fields,
605 * because values will be passed to qemu functions taking int64_t. */
606 if ((INT64_MAX - entries * entry_len < offset) ||
607 (offset_into_cluster(s, offset) != 0)) {
608 error_setg(errp, "%s offset invalid", table_name);
609 return -EINVAL;
610 }
611
612 return 0;
613 }
614
615 static QemuOptsList qcow2_runtime_opts = {
616 .name = "qcow2",
617 .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head),
618 .desc = {
619 {
620 .name = QCOW2_OPT_LAZY_REFCOUNTS,
621 .type = QEMU_OPT_BOOL,
622 .help = "Postpone refcount updates",
623 },
624 {
625 .name = QCOW2_OPT_DISCARD_REQUEST,
626 .type = QEMU_OPT_BOOL,
627 .help = "Pass guest discard requests to the layer below",
628 },
629 {
630 .name = QCOW2_OPT_DISCARD_SNAPSHOT,
631 .type = QEMU_OPT_BOOL,
632 .help = "Generate discard requests when snapshot related space "
633 "is freed",
634 },
635 {
636 .name = QCOW2_OPT_DISCARD_OTHER,
637 .type = QEMU_OPT_BOOL,
638 .help = "Generate discard requests when other clusters are freed",
639 },
640 {
641 .name = QCOW2_OPT_OVERLAP,
642 .type = QEMU_OPT_STRING,
643 .help = "Selects which overlap checks to perform from a range of "
644 "templates (none, constant, cached, all)",
645 },
646 {
647 .name = QCOW2_OPT_OVERLAP_TEMPLATE,
648 .type = QEMU_OPT_STRING,
649 .help = "Selects which overlap checks to perform from a range of "
650 "templates (none, constant, cached, all)",
651 },
652 {
653 .name = QCOW2_OPT_OVERLAP_MAIN_HEADER,
654 .type = QEMU_OPT_BOOL,
655 .help = "Check for unintended writes into the main qcow2 header",
656 },
657 {
658 .name = QCOW2_OPT_OVERLAP_ACTIVE_L1,
659 .type = QEMU_OPT_BOOL,
660 .help = "Check for unintended writes into the active L1 table",
661 },
662 {
663 .name = QCOW2_OPT_OVERLAP_ACTIVE_L2,
664 .type = QEMU_OPT_BOOL,
665 .help = "Check for unintended writes into an active L2 table",
666 },
667 {
668 .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
669 .type = QEMU_OPT_BOOL,
670 .help = "Check for unintended writes into the refcount table",
671 },
672 {
673 .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
674 .type = QEMU_OPT_BOOL,
675 .help = "Check for unintended writes into a refcount block",
676 },
677 {
678 .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
679 .type = QEMU_OPT_BOOL,
680 .help = "Check for unintended writes into the snapshot table",
681 },
682 {
683 .name = QCOW2_OPT_OVERLAP_INACTIVE_L1,
684 .type = QEMU_OPT_BOOL,
685 .help = "Check for unintended writes into an inactive L1 table",
686 },
687 {
688 .name = QCOW2_OPT_OVERLAP_INACTIVE_L2,
689 .type = QEMU_OPT_BOOL,
690 .help = "Check for unintended writes into an inactive L2 table",
691 },
692 {
693 .name = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
694 .type = QEMU_OPT_BOOL,
695 .help = "Check for unintended writes into the bitmap directory",
696 },
697 {
698 .name = QCOW2_OPT_CACHE_SIZE,
699 .type = QEMU_OPT_SIZE,
700 .help = "Maximum combined metadata (L2 tables and refcount blocks) "
701 "cache size",
702 },
703 {
704 .name = QCOW2_OPT_L2_CACHE_SIZE,
705 .type = QEMU_OPT_SIZE,
706 .help = "Maximum L2 table cache size",
707 },
708 {
709 .name = QCOW2_OPT_L2_CACHE_ENTRY_SIZE,
710 .type = QEMU_OPT_SIZE,
711 .help = "Size of each entry in the L2 cache",
712 },
713 {
714 .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE,
715 .type = QEMU_OPT_SIZE,
716 .help = "Maximum refcount block cache size",
717 },
718 {
719 .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL,
720 .type = QEMU_OPT_NUMBER,
721 .help = "Clean unused cache entries after this time (in seconds)",
722 },
723 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",
724 "ID of secret providing qcow2 AES key or LUKS passphrase"),
725 { /* end of list */ }
726 },
727 };
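
/*
 * The options above are runtime (open-time) options.  An illustrative way of
 * setting some of them from the command line (values chosen arbitrarily):
 *
 *   -drive file=disk.qcow2,format=qcow2,l2-cache-size=4M,cache-clean-interval=900,overlap-check=constant
 */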
728
729 static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = {
730 [QCOW2_OL_MAIN_HEADER_BITNR] = QCOW2_OPT_OVERLAP_MAIN_HEADER,
731 [QCOW2_OL_ACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L1,
732 [QCOW2_OL_ACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L2,
733 [QCOW2_OL_REFCOUNT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
734 [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
735 [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
736 [QCOW2_OL_INACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L1,
737 [QCOW2_OL_INACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L2,
738 [QCOW2_OL_BITMAP_DIRECTORY_BITNR] = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
739 };
740
741 static void cache_clean_timer_cb(void *opaque)
742 {
743 BlockDriverState *bs = opaque;
744 BDRVQcow2State *s = bs->opaque;
745 qcow2_cache_clean_unused(s->l2_table_cache);
746 qcow2_cache_clean_unused(s->refcount_block_cache);
747 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
748 (int64_t) s->cache_clean_interval * 1000);
749 }
750
751 static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context)
752 {
753 BDRVQcow2State *s = bs->opaque;
754 if (s->cache_clean_interval > 0) {
755 s->cache_clean_timer = aio_timer_new(context, QEMU_CLOCK_VIRTUAL,
756 SCALE_MS, cache_clean_timer_cb,
757 bs);
758 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
759 (int64_t) s->cache_clean_interval * 1000);
760 }
761 }
762
763 static void cache_clean_timer_del(BlockDriverState *bs)
764 {
765 BDRVQcow2State *s = bs->opaque;
766 if (s->cache_clean_timer) {
767 timer_del(s->cache_clean_timer);
768 timer_free(s->cache_clean_timer);
769 s->cache_clean_timer = NULL;
770 }
771 }
772
773 static void qcow2_detach_aio_context(BlockDriverState *bs)
774 {
775 cache_clean_timer_del(bs);
776 }
777
778 static void qcow2_attach_aio_context(BlockDriverState *bs,
779 AioContext *new_context)
780 {
781 cache_clean_timer_init(bs, new_context);
782 }
783
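/*
 * Split the total metadata cache budget between the L2 table cache and the
 * refcount block cache.  As a rough, illustrative example: with 64 KiB
 * clusters, covering an 8 GiB disk with the L2 cache requires
 * 8 GiB / (64 KiB / 8) = 1 MiB of L2 tables, so a combined cache-size of
 * 2 MiB would give 1 MiB to the L2 cache and the remainder to the refcount
 * cache.
 */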
784 static void read_cache_sizes(BlockDriverState *bs, QemuOpts *opts,
785 uint64_t *l2_cache_size,
786 uint64_t *l2_cache_entry_size,
787 uint64_t *refcount_cache_size, Error **errp)
788 {
789 BDRVQcow2State *s = bs->opaque;
790 uint64_t combined_cache_size, l2_cache_max_setting;
791 bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set;
792 bool l2_cache_entry_size_set;
793 int min_refcount_cache = MIN_REFCOUNT_CACHE_SIZE * s->cluster_size;
794 uint64_t virtual_disk_size = bs->total_sectors * BDRV_SECTOR_SIZE;
795 uint64_t max_l2_cache = virtual_disk_size / (s->cluster_size / 8);
796
797 combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE);
798 l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE);
799 refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
800 l2_cache_entry_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE);
801
802 combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0);
803 l2_cache_max_setting = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE,
804 DEFAULT_L2_CACHE_MAX_SIZE);
805 *refcount_cache_size = qemu_opt_get_size(opts,
806 QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0);
807
808 *l2_cache_entry_size = qemu_opt_get_size(
809 opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE, s->cluster_size);
810
811 *l2_cache_size = MIN(max_l2_cache, l2_cache_max_setting);
812
813 if (combined_cache_size_set) {
814 if (l2_cache_size_set && refcount_cache_size_set) {
815 error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE
816 " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set "
817 "at the same time");
818 return;
819 } else if (l2_cache_size_set &&
820 (l2_cache_max_setting > combined_cache_size)) {
821 error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed "
822 QCOW2_OPT_CACHE_SIZE);
823 return;
824 } else if (*refcount_cache_size > combined_cache_size) {
825 error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed "
826 QCOW2_OPT_CACHE_SIZE);
827 return;
828 }
829
830 if (l2_cache_size_set) {
831 *refcount_cache_size = combined_cache_size - *l2_cache_size;
832 } else if (refcount_cache_size_set) {
833 *l2_cache_size = combined_cache_size - *refcount_cache_size;
834 } else {
835 /* Assign as much memory as possible to the L2 cache, and
836 * use the remainder for the refcount cache */
837 if (combined_cache_size >= max_l2_cache + min_refcount_cache) {
838 *l2_cache_size = max_l2_cache;
839 *refcount_cache_size = combined_cache_size - *l2_cache_size;
840 } else {
841 *refcount_cache_size =
842 MIN(combined_cache_size, min_refcount_cache);
843 *l2_cache_size = combined_cache_size - *refcount_cache_size;
844 }
845 }
846 }
847
848 /*
849 * If the L2 cache is not enough to cover the whole disk then
850 * default to 4KB entries. Smaller entries reduce the cost of
851 * loads and evictions and increase I/O performance.
852 */
853 if (*l2_cache_size < max_l2_cache && !l2_cache_entry_size_set) {
854 *l2_cache_entry_size = MIN(s->cluster_size, 4096);
855 }
856
857 /* l2_cache_size and refcount_cache_size are ensured to have at least
858 * their minimum values in qcow2_update_options_prepare() */
859
860 if (*l2_cache_entry_size < (1 << MIN_CLUSTER_BITS) ||
861 *l2_cache_entry_size > s->cluster_size ||
862 !is_power_of_2(*l2_cache_entry_size)) {
863 error_setg(errp, "L2 cache entry size must be a power of two "
864 "between %d and the cluster size (%d)",
865 1 << MIN_CLUSTER_BITS, s->cluster_size);
866 return;
867 }
868 }
869
870 typedef struct Qcow2ReopenState {
871 Qcow2Cache *l2_table_cache;
872 Qcow2Cache *refcount_block_cache;
873 int l2_slice_size; /* Number of entries in a slice of the L2 table */
874 bool use_lazy_refcounts;
875 int overlap_check;
876 bool discard_passthrough[QCOW2_DISCARD_MAX];
877 uint64_t cache_clean_interval;
878 QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */
879 } Qcow2ReopenState;
880
881 static int qcow2_update_options_prepare(BlockDriverState *bs,
882 Qcow2ReopenState *r,
883 QDict *options, int flags,
884 Error **errp)
885 {
886 BDRVQcow2State *s = bs->opaque;
887 QemuOpts *opts = NULL;
888 const char *opt_overlap_check, *opt_overlap_check_template;
889 int overlap_check_template = 0;
890 uint64_t l2_cache_size, l2_cache_entry_size, refcount_cache_size;
891 int i;
892 const char *encryptfmt;
893 QDict *encryptopts = NULL;
894 Error *local_err = NULL;
895 int ret;
896
897 qdict_extract_subqdict(options, &encryptopts, "encrypt.");
898 encryptfmt = qdict_get_try_str(encryptopts, "format");
899
900 opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort);
901 qemu_opts_absorb_qdict(opts, options, &local_err);
902 if (local_err) {
903 error_propagate(errp, local_err);
904 ret = -EINVAL;
905 goto fail;
906 }
907
908 /* get L2 table/refcount block cache size from command line options */
909 read_cache_sizes(bs, opts, &l2_cache_size, &l2_cache_entry_size,
910 &refcount_cache_size, &local_err);
911 if (local_err) {
912 error_propagate(errp, local_err);
913 ret = -EINVAL;
914 goto fail;
915 }
916
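    /* The cache sizes were parsed in bytes; convert them to a number of
     * cache entries (L2 table slices resp. refcount blocks), raising them to
     * the supported minimum and rejecting anything above INT_MAX. */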
917 l2_cache_size /= l2_cache_entry_size;
918 if (l2_cache_size < MIN_L2_CACHE_SIZE) {
919 l2_cache_size = MIN_L2_CACHE_SIZE;
920 }
921 if (l2_cache_size > INT_MAX) {
922 error_setg(errp, "L2 cache size too big");
923 ret = -EINVAL;
924 goto fail;
925 }
926
927 refcount_cache_size /= s->cluster_size;
928 if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) {
929 refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE;
930 }
931 if (refcount_cache_size > INT_MAX) {
932 error_setg(errp, "Refcount cache size too big");
933 ret = -EINVAL;
934 goto fail;
935 }
936
937 /* alloc new L2 table/refcount block cache, flush old one */
938 if (s->l2_table_cache) {
939 ret = qcow2_cache_flush(bs, s->l2_table_cache);
940 if (ret) {
941 error_setg_errno(errp, -ret, "Failed to flush the L2 table cache");
942 goto fail;
943 }
944 }
945
946 if (s->refcount_block_cache) {
947 ret = qcow2_cache_flush(bs, s->refcount_block_cache);
948 if (ret) {
949 error_setg_errno(errp, -ret,
950 "Failed to flush the refcount block cache");
951 goto fail;
952 }
953 }
954
955 r->l2_slice_size = l2_cache_entry_size / sizeof(uint64_t);
956 r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size,
957 l2_cache_entry_size);
958 r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size,
959 s->cluster_size);
960 if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) {
961 error_setg(errp, "Could not allocate metadata caches");
962 ret = -ENOMEM;
963 goto fail;
964 }
965
966 /* New interval for cache cleanup timer */
967 r->cache_clean_interval =
968 qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL,
969 DEFAULT_CACHE_CLEAN_INTERVAL);
970 #ifndef CONFIG_LINUX
971 if (r->cache_clean_interval != 0) {
972 error_setg(errp, QCOW2_OPT_CACHE_CLEAN_INTERVAL
973 " not supported on this host");
974 ret = -EINVAL;
975 goto fail;
976 }
977 #endif
978 if (r->cache_clean_interval > UINT_MAX) {
979 error_setg(errp, "Cache clean interval too big");
980 ret = -EINVAL;
981 goto fail;
982 }
983
984 /* lazy-refcounts; flush if going from enabled to disabled */
985 r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,
986 (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));
987 if (r->use_lazy_refcounts && s->qcow_version < 3) {
988 error_setg(errp, "Lazy refcounts require a qcow2 image with at least "
989 "qemu 1.1 compatibility level");
990 ret = -EINVAL;
991 goto fail;
992 }
993
994 if (s->use_lazy_refcounts && !r->use_lazy_refcounts) {
995 ret = qcow2_mark_clean(bs);
996 if (ret < 0) {
997 error_setg_errno(errp, -ret, "Failed to disable lazy refcounts");
998 goto fail;
999 }
1000 }
1001
1002 /* Overlap check options */
1003 opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP);
1004 opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE);
1005 if (opt_overlap_check_template && opt_overlap_check &&
1006 strcmp(opt_overlap_check_template, opt_overlap_check))
1007 {
1008 error_setg(errp, "Conflicting values for qcow2 options '"
1009 QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE
1010 "' ('%s')", opt_overlap_check, opt_overlap_check_template);
1011 ret = -EINVAL;
1012 goto fail;
1013 }
1014 if (!opt_overlap_check) {
1015 opt_overlap_check = opt_overlap_check_template ?: "cached";
1016 }
1017
1018 if (!strcmp(opt_overlap_check, "none")) {
1019 overlap_check_template = 0;
1020 } else if (!strcmp(opt_overlap_check, "constant")) {
1021 overlap_check_template = QCOW2_OL_CONSTANT;
1022 } else if (!strcmp(opt_overlap_check, "cached")) {
1023 overlap_check_template = QCOW2_OL_CACHED;
1024 } else if (!strcmp(opt_overlap_check, "all")) {
1025 overlap_check_template = QCOW2_OL_ALL;
1026 } else {
1027 error_setg(errp, "Unsupported value '%s' for qcow2 option "
1028 "'overlap-check'. Allowed are any of the following: "
1029 "none, constant, cached, all", opt_overlap_check);
1030 ret = -EINVAL;
1031 goto fail;
1032 }
1033
1034 r->overlap_check = 0;
1035 for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) {
1036 /* overlap-check defines a template bitmask, but every flag may be
1037 * overwritten through the associated boolean option */
1038 r->overlap_check |=
1039 qemu_opt_get_bool(opts, overlap_bool_option_names[i],
1040 overlap_check_template & (1 << i)) << i;
1041 }
1042
1043 r->discard_passthrough[QCOW2_DISCARD_NEVER] = false;
1044 r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true;
1045 r->discard_passthrough[QCOW2_DISCARD_REQUEST] =
1046 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST,
1047 flags & BDRV_O_UNMAP);
1048 r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] =
1049 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true);
1050 r->discard_passthrough[QCOW2_DISCARD_OTHER] =
1051 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false);
1052
1053 switch (s->crypt_method_header) {
1054 case QCOW_CRYPT_NONE:
1055 if (encryptfmt) {
1056 error_setg(errp, "No encryption in image header, but options "
1057 "specified format '%s'", encryptfmt);
1058 ret = -EINVAL;
1059 goto fail;
1060 }
1061 break;
1062
1063 case QCOW_CRYPT_AES:
1064 if (encryptfmt && !g_str_equal(encryptfmt, "aes")) {
1065 error_setg(errp,
1066 "Header reported 'aes' encryption format but "
1067 "options specify '%s'", encryptfmt);
1068 ret = -EINVAL;
1069 goto fail;
1070 }
1071 qdict_put_str(encryptopts, "format", "qcow");
1072 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp);
1073 break;
1074
1075 case QCOW_CRYPT_LUKS:
1076 if (encryptfmt && !g_str_equal(encryptfmt, "luks")) {
1077 error_setg(errp,
1078 "Header reported 'luks' encryption format but "
1079 "options specify '%s'", encryptfmt);
1080 ret = -EINVAL;
1081 goto fail;
1082 }
1083 qdict_put_str(encryptopts, "format", "luks");
1084 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp);
1085 break;
1086
1087 default:
1088 error_setg(errp, "Unsupported encryption method %d",
1089 s->crypt_method_header);
1090 break;
1091 }
1092 if (s->crypt_method_header != QCOW_CRYPT_NONE && !r->crypto_opts) {
1093 ret = -EINVAL;
1094 goto fail;
1095 }
1096
1097 ret = 0;
1098 fail:
1099 qobject_unref(encryptopts);
1100 qemu_opts_del(opts);
1101 opts = NULL;
1102 return ret;
1103 }
1104
1105 static void qcow2_update_options_commit(BlockDriverState *bs,
1106 Qcow2ReopenState *r)
1107 {
1108 BDRVQcow2State *s = bs->opaque;
1109 int i;
1110
1111 if (s->l2_table_cache) {
1112 qcow2_cache_destroy(s->l2_table_cache);
1113 }
1114 if (s->refcount_block_cache) {
1115 qcow2_cache_destroy(s->refcount_block_cache);
1116 }
1117 s->l2_table_cache = r->l2_table_cache;
1118 s->refcount_block_cache = r->refcount_block_cache;
1119 s->l2_slice_size = r->l2_slice_size;
1120
1121 s->overlap_check = r->overlap_check;
1122 s->use_lazy_refcounts = r->use_lazy_refcounts;
1123
1124 for (i = 0; i < QCOW2_DISCARD_MAX; i++) {
1125 s->discard_passthrough[i] = r->discard_passthrough[i];
1126 }
1127
1128 if (s->cache_clean_interval != r->cache_clean_interval) {
1129 cache_clean_timer_del(bs);
1130 s->cache_clean_interval = r->cache_clean_interval;
1131 cache_clean_timer_init(bs, bdrv_get_aio_context(bs));
1132 }
1133
1134 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
1135 s->crypto_opts = r->crypto_opts;
1136 }
1137
1138 static void qcow2_update_options_abort(BlockDriverState *bs,
1139 Qcow2ReopenState *r)
1140 {
1141 if (r->l2_table_cache) {
1142 qcow2_cache_destroy(r->l2_table_cache);
1143 }
1144 if (r->refcount_block_cache) {
1145 qcow2_cache_destroy(r->refcount_block_cache);
1146 }
1147 qapi_free_QCryptoBlockOpenOptions(r->crypto_opts);
1148 }
1149
1150 static int qcow2_update_options(BlockDriverState *bs, QDict *options,
1151 int flags, Error **errp)
1152 {
1153 Qcow2ReopenState r = {};
1154 int ret;
1155
1156 ret = qcow2_update_options_prepare(bs, &r, options, flags, errp);
1157 if (ret >= 0) {
1158 qcow2_update_options_commit(bs, &r);
1159 } else {
1160 qcow2_update_options_abort(bs, &r);
1161 }
1162
1163 return ret;
1164 }
1165
1166 /* Called with s->lock held. */
1167 static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options,
1168 int flags, Error **errp)
1169 {
1170 BDRVQcow2State *s = bs->opaque;
1171 unsigned int len, i;
1172 int ret = 0;
1173 QCowHeader header;
1174 Error *local_err = NULL;
1175 uint64_t ext_end;
1176 uint64_t l1_vm_state_index;
1177 bool update_header = false;
1178
1179 ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
1180 if (ret < 0) {
1181 error_setg_errno(errp, -ret, "Could not read qcow2 header");
1182 goto fail;
1183 }
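    /* All multi-byte fields of the qcow2 header are stored big-endian on
     * disk; convert them to host byte order before use. */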
1184 header.magic = be32_to_cpu(header.magic);
1185 header.version = be32_to_cpu(header.version);
1186 header.backing_file_offset = be64_to_cpu(header.backing_file_offset);
1187 header.backing_file_size = be32_to_cpu(header.backing_file_size);
1188 header.size = be64_to_cpu(header.size);
1189 header.cluster_bits = be32_to_cpu(header.cluster_bits);
1190 header.crypt_method = be32_to_cpu(header.crypt_method);
1191 header.l1_table_offset = be64_to_cpu(header.l1_table_offset);
1192 header.l1_size = be32_to_cpu(header.l1_size);
1193 header.refcount_table_offset = be64_to_cpu(header.refcount_table_offset);
1194 header.refcount_table_clusters =
1195 be32_to_cpu(header.refcount_table_clusters);
1196 header.snapshots_offset = be64_to_cpu(header.snapshots_offset);
1197 header.nb_snapshots = be32_to_cpu(header.nb_snapshots);
1198
1199 if (header.magic != QCOW_MAGIC) {
1200 error_setg(errp, "Image is not in qcow2 format");
1201 ret = -EINVAL;
1202 goto fail;
1203 }
1204 if (header.version < 2 || header.version > 3) {
1205 error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version);
1206 ret = -ENOTSUP;
1207 goto fail;
1208 }
1209
1210 s->qcow_version = header.version;
1211
1212 /* Initialise cluster size */
1213 if (header.cluster_bits < MIN_CLUSTER_BITS ||
1214 header.cluster_bits > MAX_CLUSTER_BITS) {
1215 error_setg(errp, "Unsupported cluster size: 2^%" PRIu32,
1216 header.cluster_bits);
1217 ret = -EINVAL;
1218 goto fail;
1219 }
1220
1221 s->cluster_bits = header.cluster_bits;
1222 s->cluster_size = 1 << s->cluster_bits;
1223 s->cluster_sectors = 1 << (s->cluster_bits - BDRV_SECTOR_BITS);
1224
1225 /* Initialise version 3 header fields */
1226 if (header.version == 2) {
1227 header.incompatible_features = 0;
1228 header.compatible_features = 0;
1229 header.autoclear_features = 0;
1230 header.refcount_order = 4;
1231 header.header_length = 72;
1232 } else {
1233 header.incompatible_features =
1234 be64_to_cpu(header.incompatible_features);
1235 header.compatible_features = be64_to_cpu(header.compatible_features);
1236 header.autoclear_features = be64_to_cpu(header.autoclear_features);
1237 header.refcount_order = be32_to_cpu(header.refcount_order);
1238 header.header_length = be32_to_cpu(header.header_length);
1239
1240 if (header.header_length < 104) {
1241 error_setg(errp, "qcow2 header too short");
1242 ret = -EINVAL;
1243 goto fail;
1244 }
1245 }
1246
1247 if (header.header_length > s->cluster_size) {
1248 error_setg(errp, "qcow2 header exceeds cluster size");
1249 ret = -EINVAL;
1250 goto fail;
1251 }
1252
1253 if (header.header_length > sizeof(header)) {
1254 s->unknown_header_fields_size = header.header_length - sizeof(header);
1255 s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
1256 ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields,
1257 s->unknown_header_fields_size);
1258 if (ret < 0) {
1259 error_setg_errno(errp, -ret, "Could not read unknown qcow2 header "
1260 "fields");
1261 goto fail;
1262 }
1263 }
1264
1265 if (header.backing_file_offset > s->cluster_size) {
1266 error_setg(errp, "Invalid backing file offset");
1267 ret = -EINVAL;
1268 goto fail;
1269 }
1270
1271 if (header.backing_file_offset) {
1272 ext_end = header.backing_file_offset;
1273 } else {
1274 ext_end = 1 << header.cluster_bits;
1275 }
1276
1277 /* Handle feature bits */
1278 s->incompatible_features = header.incompatible_features;
1279 s->compatible_features = header.compatible_features;
1280 s->autoclear_features = header.autoclear_features;
1281
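    /* Unknown incompatible feature bits make the image unusable and cause the
     * open to fail below; unknown compatible bits can safely be ignored;
     * unknown autoclear bits are cleared further down if the image is opened
     * read-write. */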
1282 if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
1283 void *feature_table = NULL;
1284 qcow2_read_extensions(bs, header.header_length, ext_end,
1285 &feature_table, flags, NULL, NULL);
1286 report_unsupported_feature(errp, feature_table,
1287 s->incompatible_features &
1288 ~QCOW2_INCOMPAT_MASK);
1289 ret = -ENOTSUP;
1290 g_free(feature_table);
1291 goto fail;
1292 }
1293
1294 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
1295 /* Corrupt images may not be written to unless they are being repaired
1296 */
1297 if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {
1298 error_setg(errp, "qcow2: Image is corrupt; cannot be opened "
1299 "read/write");
1300 ret = -EACCES;
1301 goto fail;
1302 }
1303 }
1304
1305 /* Check support for various header values */
1306 if (header.refcount_order > 6) {
1307 error_setg(errp, "Reference count entry width too large; may not "
1308 "exceed 64 bits");
1309 ret = -EINVAL;
1310 goto fail;
1311 }
1312 s->refcount_order = header.refcount_order;
1313 s->refcount_bits = 1 << s->refcount_order;
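    /* refcount_max = 2^refcount_bits - 1, computed in two steps so that the
     * shift stays below 64 even for the maximum refcount_order of 6
     * (64-bit refcounts). */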
1314 s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1);
1315 s->refcount_max += s->refcount_max - 1;
1316
1317 s->crypt_method_header = header.crypt_method;
1318 if (s->crypt_method_header) {
1319 if (bdrv_uses_whitelist() &&
1320 s->crypt_method_header == QCOW_CRYPT_AES) {
1321 error_setg(errp,
1322 "Use of AES-CBC encrypted qcow2 images is no longer "
1323 "supported in system emulators");
1324 error_append_hint(errp,
1325 "You can use 'qemu-img convert' to convert your "
1326 "image to an alternative supported format, such "
1327 "as unencrypted qcow2, or raw with the LUKS "
1328 "format instead.\n");
1329 ret = -ENOSYS;
1330 goto fail;
1331 }
1332
1333 if (s->crypt_method_header == QCOW_CRYPT_AES) {
1334 s->crypt_physical_offset = false;
1335 } else {
1336 /* Assuming LUKS and any future crypt methods we
1337 * add will all use physical offsets, due to the
1338 * fact that the alternative is insecure... */
1339 s->crypt_physical_offset = true;
1340 }
1341
1342 bs->encrypted = true;
1343 }
1344
1345 s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
1346 s->l2_size = 1 << s->l2_bits;
1347 /* 2^(s->refcount_order - 3) is the refcount width in bytes */
1348 s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3);
1349 s->refcount_block_size = 1 << s->refcount_block_bits;
1350 bs->total_sectors = header.size / BDRV_SECTOR_SIZE;
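    /* Fields used to parse compressed cluster descriptors in L2 entries: the
     * low csize_shift bits hold the host offset of the compressed data, the
     * next (cluster_bits - 8) bits hold the number of additional 512-byte
     * sectors it occupies. */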
1351 s->csize_shift = (62 - (s->cluster_bits - 8));
1352 s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
1353 s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
1354
1355 s->refcount_table_offset = header.refcount_table_offset;
1356 s->refcount_table_size =
1357 header.refcount_table_clusters << (s->cluster_bits - 3);
1358
1359 if (header.refcount_table_clusters == 0 && !(flags & BDRV_O_CHECK)) {
1360 error_setg(errp, "Image does not contain a reference count table");
1361 ret = -EINVAL;
1362 goto fail;
1363 }
1364
1365 ret = qcow2_validate_table(bs, s->refcount_table_offset,
1366 header.refcount_table_clusters,
1367 s->cluster_size, QCOW_MAX_REFTABLE_SIZE,
1368 "Reference count table", errp);
1369 if (ret < 0) {
1370 goto fail;
1371 }
1372
1373 /* The total size in bytes of the snapshot table is checked in
1374 * qcow2_read_snapshots() because the size of each snapshot is
1375 * variable and we don't know it yet.
1376 * Here we only check the offset and number of snapshots. */
1377 ret = qcow2_validate_table(bs, header.snapshots_offset,
1378 header.nb_snapshots,
1379 sizeof(QCowSnapshotHeader),
1380 sizeof(QCowSnapshotHeader) * QCOW_MAX_SNAPSHOTS,
1381 "Snapshot table", errp);
1382 if (ret < 0) {
1383 goto fail;
1384 }
1385
1386 /* read the level 1 table */
1387 ret = qcow2_validate_table(bs, header.l1_table_offset,
1388 header.l1_size, sizeof(uint64_t),
1389 QCOW_MAX_L1_SIZE, "Active L1 table", errp);
1390 if (ret < 0) {
1391 goto fail;
1392 }
1393 s->l1_size = header.l1_size;
1394 s->l1_table_offset = header.l1_table_offset;
1395
1396 l1_vm_state_index = size_to_l1(s, header.size);
1397 if (l1_vm_state_index > INT_MAX) {
1398 error_setg(errp, "Image is too big");
1399 ret = -EFBIG;
1400 goto fail;
1401 }
1402 s->l1_vm_state_index = l1_vm_state_index;
1403
1404     /* the L1 table must contain at least enough entries to map
1405        header.size bytes */
1406 if (s->l1_size < s->l1_vm_state_index) {
1407 error_setg(errp, "L1 table is too small");
1408 ret = -EINVAL;
1409 goto fail;
1410 }
1411
1412 if (s->l1_size > 0) {
1413 s->l1_table = qemu_try_blockalign(bs->file->bs,
1414 ROUND_UP(s->l1_size * sizeof(uint64_t), 512));
1415 if (s->l1_table == NULL) {
1416 error_setg(errp, "Could not allocate L1 table");
1417 ret = -ENOMEM;
1418 goto fail;
1419 }
1420 ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
1421 s->l1_size * sizeof(uint64_t));
1422 if (ret < 0) {
1423 error_setg_errno(errp, -ret, "Could not read L1 table");
1424 goto fail;
1425 }
1426 for(i = 0;i < s->l1_size; i++) {
1427 s->l1_table[i] = be64_to_cpu(s->l1_table[i]);
1428 }
1429 }
1430
1431 /* Parse driver-specific options */
1432 ret = qcow2_update_options(bs, options, flags, errp);
1433 if (ret < 0) {
1434 goto fail;
1435 }
1436
1437 s->flags = flags;
1438
1439 ret = qcow2_refcount_init(bs);
1440 if (ret != 0) {
1441 error_setg_errno(errp, -ret, "Could not initialize refcount handling");
1442 goto fail;
1443 }
1444
1445 QLIST_INIT(&s->cluster_allocs);
1446 QTAILQ_INIT(&s->discards);
1447
1448 /* read qcow2 extensions */
1449 if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL,
1450 flags, &update_header, &local_err)) {
1451 error_propagate(errp, local_err);
1452 ret = -EINVAL;
1453 goto fail;
1454 }
1455
1456 /* TODO Open external data file */
1457 s->data_file = bs->file;
1458
1459     /* qcow2_read_extensions may have set up the crypto context
1460      * if the crypt method needs a header region; some methods
1461      * don't need header extensions, so we must check here
1462      */
1463 if (s->crypt_method_header && !s->crypto) {
1464 if (s->crypt_method_header == QCOW_CRYPT_AES) {
1465 unsigned int cflags = 0;
1466 if (flags & BDRV_O_NO_IO) {
1467 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
1468 }
1469 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
1470 NULL, NULL, cflags, 1, errp);
1471 if (!s->crypto) {
1472 ret = -EINVAL;
1473 goto fail;
1474 }
1475 } else if (!(flags & BDRV_O_NO_IO)) {
1476 error_setg(errp, "Missing CRYPTO header for crypt method %d",
1477 s->crypt_method_header);
1478 ret = -EINVAL;
1479 goto fail;
1480 }
1481 }
1482
1483 /* read the backing file name */
1484 if (header.backing_file_offset != 0) {
1485 len = header.backing_file_size;
1486 if (len > MIN(1023, s->cluster_size - header.backing_file_offset) ||
1487 len >= sizeof(bs->backing_file)) {
1488 error_setg(errp, "Backing file name too long");
1489 ret = -EINVAL;
1490 goto fail;
1491 }
1492 ret = bdrv_pread(bs->file, header.backing_file_offset,
1493 bs->auto_backing_file, len);
1494 if (ret < 0) {
1495 error_setg_errno(errp, -ret, "Could not read backing file name");
1496 goto fail;
1497 }
1498 bs->auto_backing_file[len] = '\0';
1499 pstrcpy(bs->backing_file, sizeof(bs->backing_file),
1500 bs->auto_backing_file);
1501 s->image_backing_file = g_strdup(bs->auto_backing_file);
1502 }
1503
1504 /* Internal snapshots */
1505 s->snapshots_offset = header.snapshots_offset;
1506 s->nb_snapshots = header.nb_snapshots;
1507
1508 ret = qcow2_read_snapshots(bs);
1509 if (ret < 0) {
1510 error_setg_errno(errp, -ret, "Could not read snapshots");
1511 goto fail;
1512 }
1513
1514 /* Clear unknown autoclear feature bits */
1515 update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK;
1516 update_header =
1517 update_header && !bs->read_only && !(flags & BDRV_O_INACTIVE);
1518 if (update_header) {
1519 s->autoclear_features &= QCOW2_AUTOCLEAR_MASK;
1520 }
1521
1522 /* == Handle persistent dirty bitmaps ==
1523 *
1524      * We want to load dirty bitmaps in three cases:
1525 *
1526 * 1. Normal open of the disk in active mode, not related to invalidation
1527 * after migration.
1528 *
1529 * 2. Invalidation of the target vm after pre-copy phase of migration, if
1530 * bitmaps are _not_ migrating through migration channel, i.e.
1531 * 'dirty-bitmaps' capability is disabled.
1532 *
1533 * 3. Invalidation of source vm after failed or canceled migration.
1534 * This is a very interesting case. There are two possible types of
1535 * bitmaps:
1536 *
1537 * A. Stored on inactivation and removed. They should be loaded from the
1538 * image.
1539 *
1540      *    B. Not stored: non-persistent bitmaps and bitmaps migrated through
1541      *       the migration channel (with the dirty-bitmaps capability).
1542 *
1543 * On the other hand, there are two possible sub-cases:
1544 *
1545      *    3.1 The disk was changed by somebody else while we were inactive. In
1546      *        this case all in-RAM dirty bitmaps (both persistent and not) are
1547      *        definitely invalid. And we don't have any method to detect
1548      *        this.
1549 *
1550      *        The simple and safe thing is to just drop all bitmaps of type B on
1551      *        inactivation. But in that case we would lose bitmaps in the valid 3.2 case.
1552      *
1553      *        On the other hand, resuming the source vm after the disk has already
1554      *        been changed is a bad thing anyway: not only the bitmaps but the whole
1555      *        vm state is out of sync with the disk.
1556 *
1557      *        This means that a user or management tool who for some reason
1558      *        decides to resume the source vm after the disk was already changed
1559      *        by the target vm should at least drop all dirty bitmaps by hand.
1560      *
1561      *        So, we can ignore this case for now, but TODO: add a "generation"
1562      *        extension for qcow2 to determine whether the image was changed after
1563      *        the last inactivation. And if it was changed, we will drop (or at
1564      *        least mark as 'invalid') all the bitmaps of type B, both persistent
1565      *        and not.
1566 *
1567      *    3.2 The disk was _not_ changed while we were inactive. Bitmaps may have
1568      *        been saved to disk ('dirty-bitmaps' capability disabled), or not saved
1569      *        ('dirty-bitmaps' capability enabled), but we don't need to care:
1570      *        let's load bitmaps as always: stored bitmaps will be loaded, and
1571      *        bitmaps that were not stored have the IN_USE=1 flag in the image and
1572      *        will be skipped on loading.
1573 *
1574      * One remaining possible case when we don't want to load bitmaps:
1575 *
1576      * 4. Opening the disk in inactive mode in the target vm (bitmaps are migrating
1577      *    or will be loaded on invalidation; no need to try loading them before).
1578 */
1579
1580 if (!(bdrv_get_flags(bs) & BDRV_O_INACTIVE)) {
1581         /* It's case 1, 2 or 3.2. Or 3.1, which is a BUG in the management layer. */
1582 bool header_updated = qcow2_load_dirty_bitmaps(bs, &local_err);
1583
1584 update_header = update_header && !header_updated;
1585 }
1586 if (local_err != NULL) {
1587 error_propagate(errp, local_err);
1588 ret = -EINVAL;
1589 goto fail;
1590 }
1591
1592 if (update_header) {
1593 ret = qcow2_update_header(bs);
1594 if (ret < 0) {
1595 error_setg_errno(errp, -ret, "Could not update qcow2 header");
1596 goto fail;
1597 }
1598 }
1599
1600 bs->supported_zero_flags = header.version >= 3 ? BDRV_REQ_MAY_UNMAP : 0;
1601
1602 /* Repair image if dirty */
1603 if (!(flags & (BDRV_O_CHECK | BDRV_O_INACTIVE)) && !bs->read_only &&
1604 (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) {
1605 BdrvCheckResult result = {0};
1606
1607 ret = qcow2_co_check_locked(bs, &result,
1608 BDRV_FIX_ERRORS | BDRV_FIX_LEAKS);
1609 if (ret < 0 || result.check_errors) {
1610 if (ret >= 0) {
1611 ret = -EIO;
1612 }
1613 error_setg_errno(errp, -ret, "Could not repair dirty image");
1614 goto fail;
1615 }
1616 }
1617
1618 #ifdef DEBUG_ALLOC
1619 {
1620 BdrvCheckResult result = {0};
1621 qcow2_check_refcounts(bs, &result, 0);
1622 }
1623 #endif
1624
1625 qemu_co_queue_init(&s->compress_wait_queue);
1626
1627 return ret;
1628
1629 fail:
1630 g_free(s->unknown_header_fields);
1631 cleanup_unknown_header_ext(bs);
1632 qcow2_free_snapshots(bs);
1633 qcow2_refcount_close(bs);
1634 qemu_vfree(s->l1_table);
1635 /* else pre-write overlap checks in cache_destroy may crash */
1636 s->l1_table = NULL;
1637 cache_clean_timer_del(bs);
1638 if (s->l2_table_cache) {
1639 qcow2_cache_destroy(s->l2_table_cache);
1640 }
1641 if (s->refcount_block_cache) {
1642 qcow2_cache_destroy(s->refcount_block_cache);
1643 }
1644 qcrypto_block_free(s->crypto);
1645 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
1646 return ret;
1647 }
1648
1649 typedef struct QCow2OpenCo {
1650 BlockDriverState *bs;
1651 QDict *options;
1652 int flags;
1653 Error **errp;
1654 int ret;
1655 } QCow2OpenCo;
1656
1657 static void coroutine_fn qcow2_open_entry(void *opaque)
1658 {
1659 QCow2OpenCo *qoc = opaque;
1660 BDRVQcow2State *s = qoc->bs->opaque;
1661
1662 qemu_co_mutex_lock(&s->lock);
1663 qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp);
1664 qemu_co_mutex_unlock(&s->lock);
1665 }
1666
1667 static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
1668 Error **errp)
1669 {
1670 BDRVQcow2State *s = bs->opaque;
1671 QCow2OpenCo qoc = {
1672 .bs = bs,
1673 .options = options,
1674 .flags = flags,
1675 .errp = errp,
1676 .ret = -EINPROGRESS
1677 };
1678
1679 bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
1680 false, errp);
1681 if (!bs->file) {
1682 return -EINVAL;
1683 }
1684
1685 /* Initialise locks */
1686 qemu_co_mutex_init(&s->lock);
1687
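    /* qcow2_do_open() uses coroutine-only block layer functions, so it must
     * run in coroutine context: either directly when we are already in a
     * coroutine (e.g. from bdrv_co_create), or in a new coroutine that is
     * polled until it completes. */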
1688 if (qemu_in_coroutine()) {
1689 /* From bdrv_co_create. */
1690 qcow2_open_entry(&qoc);
1691 } else {
1692 assert(qemu_get_current_aio_context() == qemu_get_aio_context());
1693 qemu_coroutine_enter(qemu_coroutine_create(qcow2_open_entry, &qoc));
1694 BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
1695 }
1696 return qoc.ret;
1697 }
1698
1699 static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp)
1700 {
1701 BDRVQcow2State *s = bs->opaque;
1702
1703 if (bs->encrypted) {
1704 /* Encryption works on a sector granularity */
1705 bs->bl.request_alignment = qcrypto_block_get_sector_size(s->crypto);
1706 }
1707 bs->bl.pwrite_zeroes_alignment = s->cluster_size;
1708 bs->bl.pdiscard_alignment = s->cluster_size;
1709 }
1710
1711 static int qcow2_reopen_prepare(BDRVReopenState *state,
1712 BlockReopenQueue *queue, Error **errp)
1713 {
1714 Qcow2ReopenState *r;
1715 int ret;
1716
1717 r = g_new0(Qcow2ReopenState, 1);
1718 state->opaque = r;
1719
1720 ret = qcow2_update_options_prepare(state->bs, r, state->options,
1721 state->flags, errp);
1722 if (ret < 0) {
1723 goto fail;
1724 }
1725
1726 /* We need to write out any unwritten data if we reopen read-only. */
1727 if ((state->flags & BDRV_O_RDWR) == 0) {
1728 ret = qcow2_reopen_bitmaps_ro(state->bs, errp);
1729 if (ret < 0) {
1730 goto fail;
1731 }
1732
1733 ret = bdrv_flush(state->bs);
1734 if (ret < 0) {
1735 goto fail;
1736 }
1737
1738 ret = qcow2_mark_clean(state->bs);
1739 if (ret < 0) {
1740 goto fail;
1741 }
1742 }
1743
1744 return 0;
1745
1746 fail:
1747 qcow2_update_options_abort(state->bs, r);
1748 g_free(r);
1749 return ret;
1750 }
1751
1752 static void qcow2_reopen_commit(BDRVReopenState *state)
1753 {
1754 qcow2_update_options_commit(state->bs, state->opaque);
1755 g_free(state->opaque);
1756 }
1757
1758 static void qcow2_reopen_abort(BDRVReopenState *state)
1759 {
1760 qcow2_update_options_abort(state->bs, state->opaque);
1761 g_free(state->opaque);
1762 }
1763
1764 static void qcow2_join_options(QDict *options, QDict *old_options)
1765 {
1766 bool has_new_overlap_template =
1767 qdict_haskey(options, QCOW2_OPT_OVERLAP) ||
1768 qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE);
1769 bool has_new_total_cache_size =
1770 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE);
1771 bool has_all_cache_options;
1772
1773 /* New overlap template overrides all old overlap options */
1774 if (has_new_overlap_template) {
1775 qdict_del(old_options, QCOW2_OPT_OVERLAP);
1776 qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE);
1777 qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER);
1778 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1);
1779 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2);
1780 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE);
1781 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK);
1782 qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE);
1783 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1);
1784 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2);
1785 }
1786
1787 /* A new total cache size overrides the old individual cache size options */
1788 if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) {
1789 qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE);
1790 qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
1791 }
1792
1793 qdict_join(options, old_options, false);
1794
1795 /*
1796 * If, after merging, all cache size options are set, an old total size is
1797 * overwritten. However, keep all options if all three are new: the
1798 * resulting error message is what we want to happen.
1799 */
1800 has_all_cache_options =
1801 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) ||
1802 qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) ||
1803 qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
1804
1805 if (has_all_cache_options && !has_new_total_cache_size) {
1806 qdict_del(options, QCOW2_OPT_CACHE_SIZE);
1807 }
1808 }
1809
1810 static int coroutine_fn qcow2_co_block_status(BlockDriverState *bs,
1811 bool want_zero,
1812 int64_t offset, int64_t count,
1813 int64_t *pnum, int64_t *map,
1814 BlockDriverState **file)
1815 {
1816 BDRVQcow2State *s = bs->opaque;
1817 uint64_t cluster_offset;
1818 int index_in_cluster, ret;
1819 unsigned int bytes;
1820 int status = 0;
1821
1822 bytes = MIN(INT_MAX, count);
1823 qemu_co_mutex_lock(&s->lock);
1824 ret = qcow2_get_cluster_offset(bs, offset, &bytes, &cluster_offset);
1825 qemu_co_mutex_unlock(&s->lock);
1826 if (ret < 0) {
1827 return ret;
1828 }
1829
1830 *pnum = bytes;
1831
1832 if (cluster_offset != 0 && ret != QCOW2_CLUSTER_COMPRESSED &&
1833 !s->crypto) {
1834 index_in_cluster = offset & (s->cluster_size - 1);
1835 *map = cluster_offset | index_in_cluster;
1836 *file = bs->file->bs;
1837 status |= BDRV_BLOCK_OFFSET_VALID;
1838 }
1839 if (ret == QCOW2_CLUSTER_ZERO_PLAIN || ret == QCOW2_CLUSTER_ZERO_ALLOC) {
1840 status |= BDRV_BLOCK_ZERO;
1841 } else if (ret != QCOW2_CLUSTER_UNALLOCATED) {
1842 status |= BDRV_BLOCK_DATA;
1843 }
1844 return status;
1845 }
1846
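/*
 * Walk the list of in-flight cluster allocations in *pl2meta: if @link_l2 is
 * true, enter the newly allocated clusters into their L2 tables, otherwise
 * abort the allocations.  Completed entries are removed from the list of
 * in-flight requests, their dependent requests are restarted and the metadata
 * is freed.  On success, *pl2meta is set to NULL.
 */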
1847 static coroutine_fn int qcow2_handle_l2meta(BlockDriverState *bs,
1848 QCowL2Meta **pl2meta,
1849 bool link_l2)
1850 {
1851 int ret = 0;
1852 QCowL2Meta *l2meta = *pl2meta;
1853
1854 while (l2meta != NULL) {
1855 QCowL2Meta *next;
1856
1857 if (link_l2) {
1858 ret = qcow2_alloc_cluster_link_l2(bs, l2meta);
1859 if (ret) {
1860 goto out;
1861 }
1862 } else {
1863 qcow2_alloc_cluster_abort(bs, l2meta);
1864 }
1865
1866 /* Take the request off the list of running requests */
1867 if (l2meta->nb_clusters != 0) {
1868 QLIST_REMOVE(l2meta, next_in_flight);
1869 }
1870
1871 qemu_co_queue_restart_all(&l2meta->dependent_requests);
1872
1873 next = l2meta->next;
1874 g_free(l2meta);
1875 l2meta = next;
1876 }
1877 out:
1878 *pl2meta = l2meta;
1879 return ret;
1880 }
1881
1882 static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
1883 uint64_t bytes, QEMUIOVector *qiov,
1884 int flags)
1885 {
1886 BDRVQcow2State *s = bs->opaque;
1887 int offset_in_cluster;
1888 int ret;
1889 unsigned int cur_bytes; /* number of bytes in current iteration */
1890 uint64_t cluster_offset = 0;
1891 uint64_t bytes_done = 0;
1892 QEMUIOVector hd_qiov;
1893 uint8_t *cluster_data = NULL;
1894
1895 qemu_iovec_init(&hd_qiov, qiov->niov);
1896
1897 qemu_co_mutex_lock(&s->lock);
1898
1899 while (bytes != 0) {
1900
1901 /* prepare next request */
1902 cur_bytes = MIN(bytes, INT_MAX);
1903 if (s->crypto) {
1904 cur_bytes = MIN(cur_bytes,
1905 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
1906 }
1907
1908 ret = qcow2_get_cluster_offset(bs, offset, &cur_bytes, &cluster_offset);
1909 if (ret < 0) {
1910 goto fail;
1911 }
1912
1913 offset_in_cluster = offset_into_cluster(s, offset);
1914
1915 qemu_iovec_reset(&hd_qiov);
1916 qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes);
1917
1918 switch (ret) {
1919 case QCOW2_CLUSTER_UNALLOCATED:
1920
1921 if (bs->backing) {
1922 BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
1923 qemu_co_mutex_unlock(&s->lock);
1924 ret = bdrv_co_preadv(bs->backing, offset, cur_bytes,
1925 &hd_qiov, 0);
1926 qemu_co_mutex_lock(&s->lock);
1927 if (ret < 0) {
1928 goto fail;
1929 }
1930 } else {
1931 /* Note: in this case, no need to wait */
1932 qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes);
1933 }
1934 break;
1935
1936 case QCOW2_CLUSTER_ZERO_PLAIN:
1937 case QCOW2_CLUSTER_ZERO_ALLOC:
1938 qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes);
1939 break;
1940
1941 case QCOW2_CLUSTER_COMPRESSED:
1942 qemu_co_mutex_unlock(&s->lock);
1943 ret = qcow2_co_preadv_compressed(bs, cluster_offset,
1944 offset, cur_bytes,
1945 &hd_qiov);
1946 qemu_co_mutex_lock(&s->lock);
1947 if (ret < 0) {
1948 goto fail;
1949 }
1950
1951 break;
1952
1953 case QCOW2_CLUSTER_NORMAL:
1954 if ((cluster_offset & 511) != 0) {
1955 ret = -EIO;
1956 goto fail;
1957 }
1958
1959 if (bs->encrypted) {
1960 assert(s->crypto);
1961
1962 /*
1963 * For encrypted images, read everything into a temporary
1964 * contiguous buffer on which the AES functions can work.
1965 */
1966 if (!cluster_data) {
1967 cluster_data =
1968 qemu_try_blockalign(bs->file->bs,
1969 QCOW_MAX_CRYPT_CLUSTERS
1970 * s->cluster_size);
1971 if (cluster_data == NULL) {
1972 ret = -ENOMEM;
1973 goto fail;
1974 }
1975 }
1976
1977 assert(cur_bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
1978 qemu_iovec_reset(&hd_qiov);
1979 qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes);
1980 }
1981
1982 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
1983 qemu_co_mutex_unlock(&s->lock);
1984 ret = bdrv_co_preadv(bs->file,
1985 cluster_offset + offset_in_cluster,
1986 cur_bytes, &hd_qiov, 0);
1987 qemu_co_mutex_lock(&s->lock);
1988 if (ret < 0) {
1989 goto fail;
1990 }
1991 if (bs->encrypted) {
1992 assert(s->crypto);
1993 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
1994 assert((cur_bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
1995 if (qcrypto_block_decrypt(s->crypto,
1996 (s->crypt_physical_offset ?
1997 cluster_offset + offset_in_cluster :
1998 offset),
1999 cluster_data,
2000 cur_bytes,
2001 NULL) < 0) {
2002 ret = -EIO;
2003 goto fail;
2004 }
2005 qemu_iovec_from_buf(qiov, bytes_done, cluster_data, cur_bytes);
2006 }
2007 break;
2008
2009 default:
2010 g_assert_not_reached();
2011 ret = -EIO;
2012 goto fail;
2013 }
2014
2015 bytes -= cur_bytes;
2016 offset += cur_bytes;
2017 bytes_done += cur_bytes;
2018 }
2019 ret = 0;
2020
2021 fail:
2022 qemu_co_mutex_unlock(&s->lock);
2023
2024 qemu_iovec_destroy(&hd_qiov);
2025 qemu_vfree(cluster_data);
2026
2027 return ret;
2028 }
2029
2030 /* Check if it's possible to merge a write request with the writing of
2031 * the data from the COW regions */
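/*
 * For an in-flight allocation m, the layout checked here is (illustration):
 *
 *   [ cow_start region ][ guest write: offset, bytes ][ cow_end region ]
 *   ^ l2meta_cow_start(m)                             ^ m->offset + m->cow_end.offset
 *
 * i.e. the guest data must exactly fill the gap between the two COW regions,
 * so that the COW regions and the guest data can be written out together.
 */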
2032 static bool merge_cow(uint64_t offset, unsigned bytes,
2033 QEMUIOVector *hd_qiov, QCowL2Meta *l2meta)
2034 {
2035 QCowL2Meta *m;
2036
2037 for (m = l2meta; m != NULL; m = m->next) {
2038 /* If both COW regions are empty then there's nothing to merge */
2039 if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) {
2040 continue;
2041 }
2042
2043 /* The data (middle) region must be immediately after the
2044 * start region */
2045 if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) {
2046 continue;
2047 }
2048
2049 /* The end region must be immediately after the data (middle)
2050 * region */
2051 if (m->offset + m->cow_end.offset != offset + bytes) {
2052 continue;
2053 }
2054
2055 /* Make sure that adding both COW regions to the QEMUIOVector
2056 * does not exceed IOV_MAX */
2057 if (hd_qiov->niov > IOV_MAX - 2) {
2058 continue;
2059 }
2060
2061 m->data_qiov = hd_qiov;
2062 return true;
2063 }
2064
2065 return false;
2066 }
2067
2068 static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
2069 uint64_t bytes, QEMUIOVector *qiov,
2070 int flags)
2071 {
2072 BDRVQcow2State *s = bs->opaque;
2073 int offset_in_cluster;
2074 int ret;
2075 unsigned int cur_bytes; /* number of bytes in current iteration */
2076 uint64_t cluster_offset;
2077 QEMUIOVector hd_qiov;
2078 uint64_t bytes_done = 0;
2079 uint8_t *cluster_data = NULL;
2080 QCowL2Meta *l2meta = NULL;
2081
2082 trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes);
2083
2084 qemu_iovec_init(&hd_qiov, qiov->niov);
2085
2086 qemu_co_mutex_lock(&s->lock);
2087
2088 while (bytes != 0) {
2089
2090 l2meta = NULL;
2091
2092 trace_qcow2_writev_start_part(qemu_coroutine_self());
2093 offset_in_cluster = offset_into_cluster(s, offset);
2094 cur_bytes = MIN(bytes, INT_MAX);
2095 if (bs->encrypted) {
2096 cur_bytes = MIN(cur_bytes,
2097 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
2098 - offset_in_cluster);
2099 }
2100
2101 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes,
2102 &cluster_offset, &l2meta);
2103 if (ret < 0) {
2104 goto fail;
2105 }
2106
2107 assert((cluster_offset & 511) == 0);
2108
2109 qemu_iovec_reset(&hd_qiov);
2110 qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes);
2111
2112 if (bs->encrypted) {
2113 assert(s->crypto);
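/*
 * For encrypted images, copy the guest data into a temporary contiguous
 * buffer and encrypt it there before it is written to the image file
 * (mirroring the bounce buffer used for decryption in qcow2_co_preadv()).
 */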
2114 if (!cluster_data) {
2115 cluster_data = qemu_try_blockalign(bs->file->bs,
2116 QCOW_MAX_CRYPT_CLUSTERS
2117 * s->cluster_size);
2118 if (cluster_data == NULL) {
2119 ret = -ENOMEM;
2120 goto fail;
2121 }
2122 }
2123
2124 assert(hd_qiov.size <=
2125 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
2126 qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size);
2127
2128 if (qcrypto_block_encrypt(s->crypto,
2129 (s->crypt_physical_offset ?
2130 cluster_offset + offset_in_cluster :
2131 offset),
2132 cluster_data,
2133 cur_bytes, NULL) < 0) {
2134 ret = -EIO;
2135 goto fail;
2136 }
2137
2138 qemu_iovec_reset(&hd_qiov);
2139 qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes);
2140 }
2141
2142 ret = qcow2_pre_write_overlap_check(bs, 0,
2143 cluster_offset + offset_in_cluster, cur_bytes);
2144 if (ret < 0) {
2145 goto fail;
2146 }
2147
2148 /* If we need to do COW, check if it's possible to merge the
2149 * writing of the guest data together with that of the COW regions.
2150 * If it's not possible (or not necessary) then write the
2151 * guest data now. */
2152 if (!merge_cow(offset, cur_bytes, &hd_qiov, l2meta)) {
2153 qemu_co_mutex_unlock(&s->lock);
2154 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
2155 trace_qcow2_writev_data(qemu_coroutine_self(),
2156 cluster_offset + offset_in_cluster);
2157 ret = bdrv_co_pwritev(bs->file,
2158 cluster_offset + offset_in_cluster,
2159 cur_bytes, &hd_qiov, 0);
2160 qemu_co_mutex_lock(&s->lock);
2161 if (ret < 0) {
2162 goto fail;
2163 }
2164 }
2165
2166 ret = qcow2_handle_l2meta(bs, &l2meta, true);
2167 if (ret) {
2168 goto fail;
2169 }
2170
2171 bytes -= cur_bytes;
2172 offset += cur_bytes;
2173 bytes_done += cur_bytes;
2174 trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes);
2175 }
2176 ret = 0;
2177
2178 fail:
2179 qcow2_handle_l2meta(bs, &l2meta, false);
2180
2181 qemu_co_mutex_unlock(&s->lock);
2182
2183 qemu_iovec_destroy(&hd_qiov);
2184 qemu_vfree(cluster_data);
2185 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);
2186
2187 return ret;
2188 }
2189
2190 static int qcow2_inactivate(BlockDriverState *bs)
2191 {
2192 BDRVQcow2State *s = bs->opaque;
2193 int ret, result = 0;
2194 Error *local_err = NULL;
2195
2196 qcow2_store_persistent_dirty_bitmaps(bs, &local_err);
2197 if (local_err != NULL) {
2198 result = -EINVAL;
2199 error_reportf_err(local_err, "Lost persistent bitmaps during "
2200 "inactivation of node '%s': ",
2201 bdrv_get_device_or_node_name(bs));
2202 }
2203
2204 ret = qcow2_cache_flush(bs, s->l2_table_cache);
2205 if (ret) {
2206 result = ret;
2207 error_report("Failed to flush the L2 table cache: %s",
2208 strerror(-ret));
2209 }
2210
2211 ret = qcow2_cache_flush(bs, s->refcount_block_cache);
2212 if (ret) {
2213 result = ret;
2214 error_report("Failed to flush the refcount block cache: %s",
2215 strerror(-ret));
2216 }
2217
2218 if (result == 0) {
2219 qcow2_mark_clean(bs);
2220 }
2221
2222 return result;
2223 }
2224
2225 static void qcow2_close(BlockDriverState *bs)
2226 {
2227 BDRVQcow2State *s = bs->opaque;
2228 qemu_vfree(s->l1_table);
2229 /* Clear the pointer: otherwise the pre-write overlap checks in cache_destroy may crash */
2230 s->l1_table = NULL;
2231
2232 if (!(s->flags & BDRV_O_INACTIVE)) {
2233 qcow2_inactivate(bs);
2234 }
2235
2236 cache_clean_timer_del(bs);
2237 qcow2_cache_destroy(s->l2_table_cache);
2238 qcow2_cache_destroy(s->refcount_block_cache);
2239
2240 qcrypto_block_free(s->crypto);
2241 s->crypto = NULL;
2242
2243 g_free(s->unknown_header_fields);
2244 cleanup_unknown_header_ext(bs);
2245
2246 g_free(s->image_backing_file);
2247 g_free(s->image_backing_format);
2248
2249 qcow2_refcount_close(bs);
2250 qcow2_free_snapshots(bs);
2251 }
2252
2253 static void coroutine_fn qcow2_co_invalidate_cache(BlockDriverState *bs,
2254 Error **errp)
2255 {
2256 BDRVQcow2State *s = bs->opaque;
2257 int flags = s->flags;
2258 QCryptoBlock *crypto = NULL;
2259 QDict *options;
2260 Error *local_err = NULL;
2261 int ret;
2262
2263 /*
2264 * Backing files are read-only, which makes all of their metadata immutable;
2265 * that means we don't have to worry about reopening them here.
2266 */
2267
2268 crypto = s->crypto;
2269 s->crypto = NULL;
2270
2271 qcow2_close(bs);
2272
2273 memset(s, 0, sizeof(BDRVQcow2State));
2274 options = qdict_clone_shallow(bs->options);
2275
2276 flags &= ~BDRV_O_INACTIVE;
2277 qemu_co_mutex_lock(&s->lock);
2278 ret = qcow2_do_open(bs, options, flags, &local_err);
2279 qemu_co_mutex_unlock(&s->lock);
2280 qobject_unref(options);
2281 if (local_err) {
2282 error_propagate_prepend(errp, local_err,
2283 "Could not reopen qcow2 layer: ");
2284 bs->drv = NULL;
2285 return;
2286 } else if (ret < 0) {
2287 error_setg_errno(errp, -ret, "Could not reopen qcow2 layer");
2288 bs->drv = NULL;
2289 return;
2290 }
2291
2292 s->crypto = crypto;
2293 }
2294
2295 static size_t header_ext_add(char *buf, uint32_t magic, const void *s,
2296 size_t len, size_t buflen)
2297 {
2298 QCowExtension *ext_backing_fmt = (QCowExtension*) buf;
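/* A header extension is the 8-byte QCowExtension header followed by the
 * data, padded up to the next multiple of 8 bytes */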
2299 size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7);
2300
2301 if (buflen < ext_len) {
2302 return -ENOSPC;
2303 }
2304
2305 *ext_backing_fmt = (QCowExtension) {
2306 .magic = cpu_to_be32(magic),
2307 .len = cpu_to_be32(len),
2308 };
2309
2310 if (len) {
2311 memcpy(buf + sizeof(QCowExtension), s, len);
2312 }
2313
2314 return ext_len;
2315 }
2316
2317 /*
2318 * Updates the qcow2 header, including the variable length parts of it, i.e.
2319 * the backing file name and all extensions. qcow2 was not designed to allow
2320 * such changes, so if we run out of space (we can only use the first cluster)
2321 * this function may fail.
2322 *
2323 * Returns 0 on success, -errno in error cases.
2324 */
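/*
 * The rewritten first cluster is laid out in the order produced below:
 * the fixed header fields, any preserved unknown header fields, the header
 * extensions (backing format, crypto header, feature table, bitmaps,
 * unknown extensions, end-of-extensions marker) and finally the backing
 * file name.
 */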
2325 int qcow2_update_header(BlockDriverState *bs)
2326 {
2327 BDRVQcow2State *s = bs->opaque;
2328 QCowHeader *header;
2329 char *buf;
2330 size_t buflen = s->cluster_size;
2331 int ret;
2332 uint64_t total_size;
2333 uint32_t refcount_table_clusters;
2334 size_t header_length;
2335 Qcow2UnknownHeaderExtension *uext;
2336
2337 buf = qemu_blockalign(bs, buflen);
2338
2339 /* Header structure */
2340 header = (QCowHeader*) buf;
2341
2342 if (buflen < sizeof(*header)) {
2343 ret = -ENOSPC;
2344 goto fail;
2345 }
2346
2347 header_length = sizeof(*header) + s->unknown_header_fields_size;
2348 total_size = bs->total_sectors * BDRV_SECTOR_SIZE;
2349 refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);
2350
2351 *header = (QCowHeader) {
2352 /* Version 2 fields */
2353 .magic = cpu_to_be32(QCOW_MAGIC),
2354 .version = cpu_to_be32(s->qcow_version),
2355 .backing_file_offset = 0,
2356 .backing_file_size = 0,
2357 .cluster_bits = cpu_to_be32(s->cluster_bits),
2358 .size = cpu_to_be64(total_size),
2359 .crypt_method = cpu_to_be32(s->crypt_method_header),
2360 .l1_size = cpu_to_be32(s->l1_size),
2361 .l1_table_offset = cpu_to_be64(s->l1_table_offset),
2362 .refcount_table_offset = cpu_to_be64(s->refcount_table_offset),
2363 .refcount_table_clusters = cpu_to_be32(refcount_table_clusters),
2364 .nb_snapshots = cpu_to_be32(s->nb_snapshots),
2365 .snapshots_offset = cpu_to_be64(s->snapshots_offset),
2366
2367 /* Version 3 fields */
2368 .incompatible_features = cpu_to_be64(s->incompatible_features),
2369 .compatible_features = cpu_to_be64(s->compatible_features),
2370 .autoclear_features = cpu_to_be64(s->autoclear_features),
2371 .refcount_order = cpu_to_be32(s->refcount_order),
2372 .header_length = cpu_to_be32(header_length),
2373 };
2374
2375 /* For older versions, write a shorter header */
2376 switch (s->qcow_version) {
2377 case 2:
2378 ret = offsetof(QCowHeader, incompatible_features);
2379 break;
2380 case 3:
2381 ret = sizeof(*header);
2382 break;
2383 default:
2384 ret = -EINVAL;
2385 goto fail;
2386 }
2387
2388 buf += ret;
2389 buflen -= ret;
2390 memset(buf, 0, buflen);
2391
2392 /* Preserve any unknown field in the header */
2393 if (s->unknown_header_fields_size) {
2394 if (buflen < s->unknown_header_fields_size) {
2395 ret = -ENOSPC;
2396 goto fail;
2397 }
2398
2399 memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size);
2400 buf += s->unknown_header_fields_size;
2401 buflen -= s->unknown_header_fields_size;
2402 }
2403
2404 /* Backing file format header extension */
2405 if (s->image_backing_format) {
2406 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT,
2407 s->image_backing_format,
2408 strlen(s->image_backing_format),
2409 buflen);
2410 if (ret < 0) {
2411 goto fail;
2412 }
2413
2414 buf += ret;
2415 buflen -= ret;
2416 }
2417
2418 /* Full disk encryption header pointer extension */
2419 if (s->crypto_header.offset != 0) {
2420 s->crypto_header.offset = cpu_to_be64(s->crypto_header.offset);
2421 s->crypto_header.length = cpu_to_be64(s->crypto_header.length);
2422 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_CRYPTO_HEADER,
2423 &s->crypto_header, sizeof(s->crypto_header),
2424 buflen);
2425 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset);
2426 s->crypto_header.length = be64_to_cpu(s->crypto_header.length);
2427 if (ret < 0) {
2428 goto fail;
2429 }
2430 buf += ret;
2431 buflen -= ret;
2432 }
2433
2434 /* Feature table */
2435 if (s->qcow_version >= 3) {
2436 Qcow2Feature features[] = {
2437 {
2438 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
2439 .bit = QCOW2_INCOMPAT_DIRTY_BITNR,
2440 .name = "dirty bit",
2441 },
2442 {
2443 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
2444 .bit = QCOW2_INCOMPAT_CORRUPT_BITNR,
2445 .name = "corrupt bit",
2446 },
2447 {
2448 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
2449 .bit = QCOW2_INCOMPAT_DATA_FILE_BITNR,
2450 .name = "external data file",
2451 },
2452 {
2453 .type = QCOW2_FEAT_TYPE_COMPATIBLE,
2454 .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR,
2455 .name = "lazy refcounts",
2456 },
2457 };
2458
2459 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE,
2460 features, sizeof(features), buflen);
2461 if (ret < 0) {
2462 goto fail;
2463 }
2464 buf += ret;
2465 buflen -= ret;
2466 }
2467
2468 /* Bitmap extension */
2469 if (s->nb_bitmaps > 0) {
2470 Qcow2BitmapHeaderExt bitmaps_header = {
2471 .nb_bitmaps = cpu_to_be32(s->nb_bitmaps),
2472 .bitmap_directory_size =
2473 cpu_to_be64(s->bitmap_directory_size),
2474 .bitmap_directory_offset =
2475 cpu_to_be64(s->bitmap_directory_offset)
2476 };
2477 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BITMAPS,
2478 &bitmaps_header, sizeof(bitmaps_header),
2479 buflen);
2480 if (ret < 0) {
2481 goto fail;
2482 }
2483 buf += ret;
2484 buflen -= ret;
2485 }
2486
2487 /* Keep unknown header extensions */
2488 QLIST_FOREACH(uext, &s->unknown_header_ext, next) {
2489 ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen);
2490 if (ret < 0) {
2491 goto fail;
2492 }
2493
2494 buf += ret;
2495 buflen -= ret;
2496 }
2497
2498 /* End of header extensions */
2499 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen);
2500 if (ret < 0) {
2501 goto fail;
2502 }
2503
2504 buf += ret;
2505 buflen -= ret;
2506
2507 /* Backing file name */
2508 if (s->image_backing_file) {
2509 size_t backing_file_len = strlen(s->image_backing_file);
2510
2511 if (buflen < backing_file_len) {
2512 ret = -ENOSPC;
2513 goto fail;
2514 }
2515
2516 /* Using strncpy is ok here, since the backing file name in buf need not be NUL-terminated (its length is stored in the header). */
2517 strncpy(buf, s->image_backing_file, buflen);
2518
2519 header->backing_file_offset = cpu_to_be64(buf - ((char*) header));
2520 header->backing_file_size = cpu_to_be32(backing_file_len);
2521 }
2522
2523 /* Write the new header */
2524 ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size);
2525 if (ret < 0) {
2526 goto fail;
2527 }
2528
2529 ret = 0;
2530 fail:
2531 qemu_vfree(header);
2532 return ret;
2533 }
2534
2535 static int qcow2_change_backing_file(BlockDriverState *bs,
2536 const char *backing_file, const char *backing_fmt)
2537 {
2538 BDRVQcow2State *s = bs->opaque;
2539
2540 if (backing_file && strlen(backing_file) > 1023) {
2541 return -EINVAL;
2542 }
2543
2544 pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file),
2545 backing_file ?: "");
2546 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
2547 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
2548
2549 g_free(s->image_backing_file);
2550 g_free(s->image_backing_format);
2551
2552 s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL;
2553 s->image_backing_format = backing_fmt ? g_strdup(bs->backing_format) : NULL;
2554
2555 return qcow2_update_header(bs);
2556 }
2557
2558 static int qcow2_crypt_method_from_format(const char *encryptfmt)
2559 {
2560 if (g_str_equal(encryptfmt, "luks")) {
2561 return QCOW_CRYPT_LUKS;
2562 } else if (g_str_equal(encryptfmt, "aes")) {
2563 return QCOW_CRYPT_AES;
2564 } else {
2565 return -EINVAL;
2566 }
2567 }
2568
2569 static int qcow2_set_up_encryption(BlockDriverState *bs,
2570 QCryptoBlockCreateOptions *cryptoopts,
2571 Error **errp)
2572 {
2573 BDRVQcow2State *s = bs->opaque;
2574 QCryptoBlock *crypto = NULL;
2575 int fmt, ret;
2576
2577 switch (cryptoopts->format) {
2578 case Q_CRYPTO_BLOCK_FORMAT_LUKS:
2579 fmt = QCOW_CRYPT_LUKS;
2580 break;
2581 case Q_CRYPTO_BLOCK_FORMAT_QCOW:
2582 fmt = QCOW_CRYPT_AES;
2583 break;
2584 default:
2585 error_setg(errp, "Crypto format not supported in qcow2");
2586 return -EINVAL;
2587 }
2588
2589 s->crypt_method_header = fmt;
2590
2591 crypto = qcrypto_block_create(cryptoopts, "encrypt.",
2592 qcow2_crypto_hdr_init_func,
2593 qcow2_crypto_hdr_write_func,
2594 bs, errp);
2595 if (!crypto) {
2596 return -EINVAL;
2597 }
2598
2599 ret = qcow2_update_header(bs);
2600 if (ret < 0) {
2601 error_setg_errno(errp, -ret, "Could not write encryption header");
2602 goto out;
2603 }
2604
2605 ret = 0;
2606 out:
2607 qcrypto_block_free(crypto);
2608 return ret;
2609 }
2610
2611 /**
2612 * Preallocates metadata structures for data clusters between @offset (in the
2613 * guest disk) and @new_length (which is thus generally the new guest disk
2614 * size).
2615 *
2616 * Returns: 0 on success, -errno on failure.
2617 */
2618 static int coroutine_fn preallocate_co(BlockDriverState *bs, uint64_t offset,
2619 uint64_t new_length)
2620 {
2621 uint64_t bytes;
2622 uint64_t host_offset = 0;
2623 unsigned int cur_bytes;
2624 int ret;
2625 QCowL2Meta *meta;
2626
2627 assert(offset <= new_length);
2628 bytes = new_length - offset;
2629
2630 while (bytes) {
2631 cur_bytes = MIN(bytes, INT_MAX);
2632 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes,
2633 &host_offset, &meta);
2634 if (ret < 0) {
2635 return ret;
2636 }
2637
2638 while (meta) {
2639 QCowL2Meta *next = meta->next;
2640
2641 ret = qcow2_alloc_cluster_link_l2(bs, meta);
2642 if (ret < 0) {
2643 qcow2_free_any_clusters(bs, meta->alloc_offset,
2644 meta->nb_clusters, QCOW2_DISCARD_NEVER);
2645 return ret;
2646 }
2647
2648 /* There are no dependent requests, but we need to remove our
2649 * request from the list of in-flight requests */
2650 QLIST_REMOVE(meta, next_in_flight);
2651
2652 g_free(meta);
2653 meta = next;
2654 }
2655
2656 /* TODO Preallocate data if requested */
2657
2658 bytes -= cur_bytes;
2659 offset += cur_bytes;
2660 }
2661
2662 /*
2663 * It is expected that the image file is large enough to actually contain
2664 * all of the allocated clusters (otherwise we get failing reads after
2665 * EOF). Extend the image to the last allocated sector.
2666 */
2667 if (host_offset != 0) {
2668 uint8_t data = 0;
2669 ret = bdrv_pwrite(bs->file, (host_offset + cur_bytes) - 1,
2670 &data, 1);
2671 if (ret < 0) {
2672 return ret;
2673 }
2674 }
2675
2676 return 0;
2677 }
2678
2679 /* qcow2_refcount_metadata_size:
2680 * @clusters: number of clusters to refcount (including data and L1/L2 tables)
2681 * @cluster_size: size of a cluster, in bytes
2682 * @refcount_order: refcount bits power-of-2 exponent
2683 * @generous_increase: allow for the refcount table to be 1.5x as large as it
2684 * needs to be
2685 *
2686 * Returns: Number of bytes required for refcount blocks and table metadata.
2687 */
2688 int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size,
2689 int refcount_order, bool generous_increase,
2690 uint64_t *refblock_count)
2691 {
2692 /*
2693 * Every host cluster is reference-counted, including metadata (even
2694 * refcount metadata is recursively included).
2695 *
2696 * An accurate formula for the size of the refcount metadata is difficult
2697 * to derive. An easier method of calculation is finding the fixed point
2698 * where no further refcount blocks or table clusters are required to
2699 * reference count every cluster.
2700 */
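/*
 * Rough worked example (for illustration only): with 64 KiB clusters and
 * 16-bit refcounts, refcounts_per_block is 65536 * 8 / 16 = 32768 and
 * blocks_per_table_cluster is 8192.  For clusters = 16384, the first
 * iteration gives blocks = 1 and table = 1, and a second iteration changes
 * nothing, so the fixed point is two clusters (128 KiB) of refcount
 * metadata.
 */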
2701 int64_t blocks_per_table_cluster = cluster_size / sizeof(uint64_t);
2702 int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order);
2703 int64_t table = 0; /* number of refcount table clusters */
2704 int64_t blocks = 0; /* number of refcount block clusters */
2705 int64_t last;
2706 int64_t n = 0;
2707
2708 do {
2709 last = n;
2710 blocks = DIV_ROUND_UP(clusters + table + blocks, refcounts_per_block);
2711 table = DIV_ROUND_UP(blocks, blocks_per_table_cluster);
2712 n = clusters + blocks + table;
2713
2714 if (n == last && generous_increase) {
2715 clusters += DIV_ROUND_UP(table, 2);
2716 n = 0; /* force another loop */
2717 generous_increase = false;
2718 }
2719 } while (n != last);
2720
2721 if (refblock_count) {
2722 *refblock_count = blocks;
2723 }
2724
2725 return (blocks + table) * cluster_size;
2726 }
2727
2728 /**
2729 * qcow2_calc_prealloc_size:
2730 * @total_size: virtual disk size in bytes
2731 * @cluster_size: cluster size in bytes
2732 * @refcount_order: refcount bits power-of-2 exponent
2733 *
2734 * Returns: Total number of bytes required for the fully allocated image
2735 * (including metadata).
2736 */
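/*
 * Rough worked example (for illustration only): for a 1 GiB image with
 * 64 KiB clusters and 16-bit refcounts, this adds up to one header cluster,
 * two clusters of L2 tables (16384 entries, rounded up to 8192-entry
 * tables), one cluster for the L1 table, and two clusters of refcount
 * metadata, i.e. roughly 1 GiB + 384 KiB in total.
 */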
2737 static int64_t qcow2_calc_prealloc_size(int64_t total_size,
2738 size_t cluster_size,
2739 int refcount_order)
2740 {
2741 int64_t meta_size = 0;
2742 uint64_t nl1e, nl2e;
2743 int64_t aligned_total_size = ROUND_UP(total_size, cluster_size);
2744
2745 /* header: 1 cluster */
2746 meta_size += cluster_size;
2747
2748 /* total size of L2 tables */
2749 nl2e = aligned_total_size / cluster_size;
2750 nl2e = ROUND_UP(nl2e, cluster_size / sizeof(uint64_t));
2751 meta_size += nl2e * sizeof(uint64_t);
2752
2753 /* total size of L1 tables */
2754 nl1e = nl2e * sizeof(uint64_t) / cluster_size;
2755 nl1e = ROUND_UP(nl1e, cluster_size / sizeof(uint64_t));
2756 meta_size += nl1e * sizeof(uint64_t);
2757
2758 /* total size of refcount table and blocks */
2759 meta_size += qcow2_refcount_metadata_size(
2760 (meta_size + aligned_total_size) / cluster_size,
2761 cluster_size, refcount_order, false, NULL);
2762
2763 return meta_size + aligned_total_size;
2764 }
2765
2766 static bool validate_cluster_size(size_t cluster_size, Error **errp)
2767 {
2768 int cluster_bits = ctz32(cluster_size);
2769 if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS ||
2770 (1 << cluster_bits) != cluster_size)
2771 {
2772 error_setg(errp, "Cluster size must be a power of two between %d and "
2773 "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10));
2774 return false;
2775 }
2776 return true;
2777 }
2778
2779 static size_t qcow2_opt_get_cluster_size_del(QemuOpts *opts, Error **errp)
2780 {
2781 size_t cluster_size;
2782
2783 cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE,
2784 DEFAULT_CLUSTER_SIZE);
2785 if (!validate_cluster_size(cluster_size, errp)) {
2786 return 0;
2787 }
2788 return cluster_size;
2789 }
2790
2791 static int qcow2_opt_get_version_del(QemuOpts *opts, Error **errp)
2792 {
2793 char *buf;
2794 int ret;
2795
2796 buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL);
2797 if (!buf) {
2798 ret = 3; /* default */
2799 } else if (!strcmp(buf, "0.10")) {
2800 ret = 2;
2801 } else if (!strcmp(buf, "1.1")) {
2802 ret = 3;
2803 } else {
2804 error_setg(errp, "Invalid compatibility level: '%s'", buf);
2805 ret = -EINVAL;
2806 }
2807 g_free(buf);
2808 return ret;
2809 }
2810
2811 static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version,
2812 Error **errp)
2813 {
2814 uint64_t refcount_bits;
2815
2816 refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, 16);
2817 if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) {
2818 error_setg(errp, "Refcount width must be a power of two and may not "
2819 "exceed 64 bits");
2820 return 0;
2821 }
2822
2823 if (version < 3 && refcount_bits != 16) {
2824 error_setg(errp, "Different refcount widths than 16 bits require "
2825 "compatibility level 1.1 or above (use compat=1.1 or "
2826 "greater)");
2827 return 0;
2828 }
2829
2830 return refcount_bits;
2831 }
2832
2833 static int coroutine_fn
2834 qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
2835 {
2836 BlockdevCreateOptionsQcow2 *qcow2_opts;
2837 QDict *options;
2838
2839 /*
2840 * Open the image file and write a minimal qcow2 header.
2841 *
2842 * We keep things simple and start with a zero-sized image. We also
2843 * do without refcount blocks or an L1 table for now. We'll fix the
2844 * inconsistency later.
2845 *
2846 * We do need a refcount table because growing the refcount table means
2847 * allocating two new refcount blocks - the second of which would be at
2848 * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file
2849 * size for any qcow2 image.
2850 */
2851 BlockBackend *blk = NULL;
2852 BlockDriverState *bs = NULL;
2853 QCowHeader *header;
2854 size_t cluster_size;
2855 int version;
2856 int refcount_order;
2857 uint64_t* refcount_table;
2858 Error *local_err = NULL;
2859 int ret;
2860
2861 assert(create_options->driver == BLOCKDEV_DRIVER_QCOW2);
2862 qcow2_opts = &create_options->u.qcow2;
2863
2864 bs = bdrv_open_blockdev_ref(qcow2_opts->file, errp);
2865 if (bs == NULL) {
2866 return -EIO;
2867 }
2868
2869 /* Validate options and set default values */
2870 if (!QEMU_IS_ALIGNED(qcow2_opts->size, BDRV_SECTOR_SIZE)) {
2871 error_setg(errp, "Image size must be a multiple of 512 bytes");
2872 ret = -EINVAL;
2873 goto out;
2874 }
2875
2876 if (qcow2_opts->has_version) {
2877 switch (qcow2_opts->version) {
2878 case BLOCKDEV_QCOW2_VERSION_V2:
2879 version = 2;
2880 break;
2881 case BLOCKDEV_QCOW2_VERSION_V3:
2882 version = 3;
2883 break;
2884 default:
2885 g_assert_not_reached();
2886 }
2887 } else {
2888 version = 3;
2889 }
2890
2891 if (qcow2_opts->has_cluster_size) {
2892 cluster_size = qcow2_opts->cluster_size;
2893 } else {
2894 cluster_size = DEFAULT_CLUSTER_SIZE;
2895 }
2896
2897 if (!validate_cluster_size(cluster_size, errp)) {
2898 ret = -EINVAL;
2899 goto out;
2900 }
2901
2902 if (!qcow2_opts->has_preallocation) {
2903 qcow2_opts->preallocation = PREALLOC_MODE_OFF;
2904 }
2905 if (qcow2_opts->has_backing_file &&
2906 qcow2_opts->preallocation != PREALLOC_MODE_OFF)
2907 {
2908 error_setg(errp, "Backing file and preallocation cannot be used at "
2909 "the same time");
2910 ret = -EINVAL;
2911 goto out;
2912 }
2913 if (qcow2_opts->has_backing_fmt && !qcow2_opts->has_backing_file) {
2914 error_setg(errp, "Backing format cannot be used without backing file");
2915 ret = -EINVAL;
2916 goto out;
2917 }
2918
2919 if (!qcow2_opts->has_lazy_refcounts) {
2920 qcow2_opts->lazy_refcounts = false;
2921 }
2922 if (version < 3 && qcow2_opts->lazy_refcounts) {
2923 error_setg(errp, "Lazy refcounts only supported with compatibility "
2924 "level 1.1 and above (use version=v3 or greater)");
2925 ret = -EINVAL;
2926 goto out;
2927 }
2928
2929 if (!qcow2_opts->has_refcount_bits) {
2930 qcow2_opts->refcount_bits = 16;
2931 }
2932 if (qcow2_opts->refcount_bits > 64 ||
2933 !is_power_of_2(qcow2_opts->refcount_bits))
2934 {
2935 error_setg(errp, "Refcount width must be a power of two and may not "
2936 "exceed 64 bits");
2937 ret = -EINVAL;
2938 goto out;
2939 }
2940 if (version < 3 && qcow2_opts->refcount_bits != 16) {
2941 error_setg(errp, "Different refcount widths than 16 bits require "
2942 "compatibility level 1.1 or above (use version=v3 or "
2943 "greater)");
2944 ret = -EINVAL;
2945 goto out;
2946 }
2947 refcount_order = ctz32(qcow2_opts->refcount_bits);
2948
2949
2950 /* Create BlockBackend to write to the image */
2951 blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
2952 ret = blk_insert_bs(blk, bs, errp);
2953 if (ret < 0) {
2954 goto out;
2955 }
2956 blk_set_allow_write_beyond_eof(blk, true);
2957
2958 /* Truncate the protocol layer to zero so that we start from an empty file */
2959 ret = blk_truncate(blk, 0, PREALLOC_MODE_OFF, errp);
2960 if (ret < 0) {
2961 goto out;
2962 }
2963
2964 /* Write the header */
2965 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header));
2966 header = g_malloc0(cluster_size);
2967 *header = (QCowHeader) {
2968 .magic = cpu_to_be32(QCOW_MAGIC),
2969 .version = cpu_to_be32(version),
2970 .cluster_bits = cpu_to_be32(ctz32(cluster_size)),
2971 .size = cpu_to_be64(0),
2972 .l1_table_offset = cpu_to_be64(0),
2973 .l1_size = cpu_to_be32(0),
2974 .refcount_table_offset = cpu_to_be64(cluster_size),
2975 .refcount_table_clusters = cpu_to_be32(1),
2976 .refcount_order = cpu_to_be32(refcount_order),
2977 .header_length = cpu_to_be32(sizeof(*header)),
2978 };
2979
2980 /* We'll update this to the correct value later */
2981 header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
2982
2983 if (qcow2_opts->lazy_refcounts) {
2984 header->compatible_features |=
2985 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS);
2986 }
2987
2988 ret = blk_pwrite(blk, 0, header, cluster_size, 0);
2989 g_free(header);
2990 if (ret < 0) {
2991 error_setg_errno(errp, -ret, "Could not write qcow2 header");
2992 goto out;
2993 }
2994
2995 /* Write a refcount table with one refcount block */
2996 refcount_table = g_malloc0(2 * cluster_size);
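/* Entry 0 points at host offset 2 * cluster_size: the single refcount
 * block is placed directly after the one-cluster refcount table, and both
 * are written out together from this buffer. */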
2997 refcount_table[0] = cpu_to_be64(2 * cluster_size);
2998 ret = blk_pwrite(blk, cluster_size, refcount_table, 2 * cluster_size, 0);
2999 g_free(refcount_table);
3000
3001 if (ret < 0) {
3002 error_setg_errno(errp, -ret, "Could not write refcount table");
3003 goto out;
3004 }
3005
3006 blk_unref(blk);
3007 blk = NULL;
3008
3009 /*
3010 * And now open the image and make it consistent first (i.e. increase the
3011 * refcount of the cluster that is occupied by the header and the refcount
3012 * table)
3013 */
3014 options = qdict_new();
3015 qdict_put_str(options, "driver", "qcow2");
3016 qdict_put_str(options, "file", bs->node_name);
3017 blk = blk_new_open(NULL, NULL, options,
3018 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_NO_FLUSH,
3019 &local_err);
3020 if (blk == NULL) {
3021 error_propagate(errp, local_err);
3022 ret = -EIO;
3023 goto out;
3024 }
3025
3026 ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size);
3027 if (ret < 0) {
3028 error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 "
3029 "header and refcount table");
3030 goto out;
3031
3032 } else if (ret != 0) {
3033 error_report("Huh, first cluster in empty image is already in use?");
3034 abort();
3035 }
3036
3037 /* Create a full header (including things like feature table) */
3038 ret = qcow2_update_header(blk_bs(blk));
3039 if (ret < 0) {
3040 error_setg_errno(errp, -ret, "Could not update qcow2 header");
3041 goto out;
3042 }
3043
3044 /* Okay, now that we have a valid image, let's give it the right size */
3045 ret = blk_truncate(blk, qcow2_opts->size, qcow2_opts->preallocation, errp);
3046 if (ret < 0) {
3047 error_prepend(errp, "Could not resize image: ");
3048 goto out;
3049 }
3050
3051 /* Want a backing file? There you go. */
3052 if (qcow2_opts->has_backing_file) {
3053 const char *backing_format = NULL;
3054
3055 if (qcow2_opts->has_backing_fmt) {
3056 backing_format = BlockdevDriver_str(qcow2_opts->backing_fmt);
3057 }
3058
3059 ret = bdrv_change_backing_file(blk_bs(blk), qcow2_opts->backing_file,
3060 backing_format);
3061 if (ret < 0) {
3062 error_setg_errno(errp, -ret, "Could not assign backing file '%s' "
3063 "with format '%s'", qcow2_opts->backing_file,
3064 backing_format);
3065 goto out;
3066 }
3067 }
3068
3069 /* Want encryption? There you go. */
3070 if (qcow2_opts->has_encrypt) {
3071 ret = qcow2_set_up_encryption(blk_bs(blk), qcow2_opts->encrypt, errp);
3072 if (ret < 0) {
3073 goto out;
3074 }
3075 }
3076
3077 blk_unref(blk);
3078 blk = NULL;
3079
3080 /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning.
3081 * Use BDRV_O_NO_IO: now that encryption is set up, we don't want to have
3082 * to set up a decryption context. We're not doing any I/O on the top
3083 * level BlockDriverState, only on lower layers, where BDRV_O_NO_IO has
3084 * no effect.
3085 */
3086 options = qdict_new();
3087 qdict_put_str(options, "driver", "qcow2");
3088 qdict_put_str(options, "file", bs->node_name);
3089 blk = blk_new_open(NULL, NULL, options,
3090 BDRV_O_RDWR | BDRV_O_NO_BACKING | BDRV_O_NO_IO,
3091 &local_err);
3092 if (blk == NULL) {
3093 error_propagate(errp, local_err);
3094 ret = -EIO;
3095 goto out;
3096 }
3097
3098 ret = 0;
3099 out:
3100 blk_unref(blk);
3101 bdrv_unref(bs);
3102 return ret;
3103 }
3104
3105 static int coroutine_fn qcow2_co_create_opts(const char *filename, QemuOpts *opts,
3106 Error **errp)
3107 {
3108 BlockdevCreateOptions *create_options = NULL;
3109 QDict *qdict;
3110 Visitor *v;
3111 BlockDriverState *bs = NULL;
3112 Error *local_err = NULL;
3113 const char *val;
3114 int ret;
3115
3116 /* Only the keyval visitor supports the dotted syntax needed for
3117 * encryption, so go through a QDict before getting a QAPI type. Ignore
3118 * options meant for the protocol layer so that the visitor doesn't
3119 * complain. */
3120 qdict = qemu_opts_to_qdict_filtered(opts, NULL, bdrv_qcow2.create_opts,
3121 true);
3122
3123 /* Handle encryption options */
3124 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT);
3125 if (val && !strcmp(val, "on")) {
3126 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT, "qcow");
3127 } else if (val && !strcmp(val, "off")) {
3128 qdict_del(qdict, BLOCK_OPT_ENCRYPT);
3129 }
3130
3131 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT);
3132 if (val && !strcmp(val, "aes")) {
3133 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT, "qcow");
3134 }
3135
3136 /* Convert compat=0.10/1.1 into compat=v2/v3, to be renamed into
3137 * version=v2/v3 below. */
3138 val = qdict_get_try_str(qdict, BLOCK_OPT_COMPAT_LEVEL);
3139 if (val && !strcmp(val, "0.10")) {
3140 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v2");
3141 } else if (val && !strcmp(val, "1.1")) {
3142 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v3");
3143 }
3144
3145 /* Change legacy command line options into QMP ones */
3146 static const QDictRenames opt_renames[] = {
3147 { BLOCK_OPT_BACKING_FILE, "backing-file" },
3148 { BLOCK_OPT_BACKING_FMT, "backing-fmt" },
3149 { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" },
3150 { BLOCK_OPT_LAZY_REFCOUNTS, "lazy-refcounts" },
3151 { BLOCK_OPT_REFCOUNT_BITS, "refcount-bits" },
3152 { BLOCK_OPT_ENCRYPT, BLOCK_OPT_ENCRYPT_FORMAT },
3153 { BLOCK_OPT_COMPAT_LEVEL, "version" },
3154 { NULL, NULL },
3155 };
3156
3157 if (!qdict_rename_keys(qdict, opt_renames, errp)) {
3158 ret = -EINVAL;
3159 goto finish;
3160 }
3161
3162 /* Create and open the file (protocol layer) */
3163 ret = bdrv_create_file(filename, opts, errp);
3164 if (ret < 0) {
3165 goto finish;
3166 }
3167
3168 bs = bdrv_open(filename, NULL, NULL,
3169 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
3170 if (bs == NULL) {
3171 ret = -EIO;
3172 goto finish;
3173 }
3174
3175 /* Set 'driver' and 'node' options */
3176 qdict_put_str(qdict, "driver", "qcow2");
3177 qdict_put_str(qdict, "file", bs->node_name);
3178
3179 /* Now get the QAPI type BlockdevCreateOptions */
3180 v = qobject_input_visitor_new_flat_confused(qdict, errp);
3181 if (!v) {
3182 ret = -EINVAL;
3183 goto finish;
3184 }
3185
3186 visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err);
3187 visit_free(v);
3188
3189 if (local_err) {
3190 error_propagate(errp, local_err);
3191 ret = -EINVAL;
3192 goto finish;
3193 }
3194
3195 /* Silently round up size */
3196 create_options->u.qcow2.size = ROUND_UP(create_options->u.qcow2.size,
3197 BDRV_SECTOR_SIZE);
3198
3199 /* Create the qcow2 image (format layer) */
3200 ret = qcow2_co_create(create_options, errp);
3201 if (ret < 0) {
3202 goto finish;
3203 }
3204
3205 ret = 0;
3206 finish:
3207 qobject_unref(qdict);
3208 bdrv_unref(bs);
3209 qapi_free_BlockdevCreateOptions(create_options);
3210 return ret;
3211 }
3212
3213
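/*
 * Return true if the given byte range of the image (looking through the
 * backing chain, if any) is known to read back as zeroes.
 */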
3214 static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
3215 {
3216 int64_t nr;
3217 int res;
3218
3219 /* Clamp to image length, before checking status of underlying sectors */
3220 if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) {
3221 bytes = bs->total_sectors * BDRV_SECTOR_SIZE - offset;
3222 }
3223
3224 if (!bytes) {
3225 return true;
3226 }
3227 res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL);
3228 return res >= 0 && (res & BDRV_BLOCK_ZERO) && nr == bytes;
3229 }
3230
3231 static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
3232 int64_t offset, int bytes, BdrvRequestFlags flags)
3233 {
3234 int ret;
3235 BDRVQcow2State *s = bs->opaque;
3236
3237 uint32_t head = offset % s->cluster_size;
3238 uint32_t tail = (offset + bytes) % s->cluster_size;
3239
3240 trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes);
3241 if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) {
3242 tail = 0;
3243 }
3244
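/*
 * An unaligned request can only reach this point if it fits into a single
 * cluster: the generic block layer splits zero requests at
 * bl.pwrite_zeroes_alignment boundaries, which qcow2_refresh_limits() sets
 * to the cluster size.  The cluster can only be turned into a zero cluster
 * if the parts of it outside the request already read as zeroes.
 */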
3245 if (head || tail) {
3246 uint64_t off;
3247 unsigned int nr;
3248
3249 assert(head + bytes <= s->cluster_size);
3250
3251 /* check whether remainder of cluster already reads as zero */
3252 if (!(is_zero(bs, offset - head, head) &&
3253 is_zero(bs, offset + bytes,
3254 tail ? s->cluster_size - tail : 0))) {
3255 return -ENOTSUP;
3256 }
3257
3258 qemu_co_mutex_lock(&s->lock);
3259 /* A new write may have come in since the check above */
3260 offset = QEMU_ALIGN_DOWN(offset, s->cluster_size);
3261 bytes = s->cluster_size;
3262 nr = s->cluster_size;
3263 ret = qcow2_get_cluster_offset(bs, offset, &nr, &off);
3264 if (ret != QCOW2_CLUSTER_UNALLOCATED &&
3265 ret != QCOW2_CLUSTER_ZERO_PLAIN &&
3266 ret != QCOW2_CLUSTER_ZERO_ALLOC) {
3267 qemu_co_mutex_unlock(&s->lock);
3268 return -ENOTSUP;
3269 }
3270 } else {
3271 qemu_co_mutex_lock(&s->lock);
3272 }
3273
3274 trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes);
3275
3276 /* Whatever is left can use real zero clusters */
3277 ret = qcow2_cluster_zeroize(bs, offset, bytes, flags);
3278 qemu_co_mutex_unlock(&s->lock);
3279
3280 return ret;
3281 }
3282
3283 static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs,
3284 int64_t offset, int bytes)
3285 {
3286 int ret;
3287 BDRVQcow2State *s = bs->opaque;
3288
3289 if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) {
3290 assert(bytes < s->cluster_size);
3291 /* Ignore partial clusters, except for the special case of a request that
3292 * covers the entire final, partial cluster of an unaligned image */
3293 if (!QEMU_IS_ALIGNED(offset, s->cluster_size) ||
3294 offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) {
3295 return -ENOTSUP;
3296 }
3297 }
3298
3299 qemu_co_mutex_lock(&s->lock);
3300 ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST,
3301 false);
3302 qemu_co_mutex_unlock(&s->lock);
3303 return ret;
3304 }
3305
3306 static int coroutine_fn
3307 qcow2_co_copy_range_from(BlockDriverState *bs,
3308 BdrvChild *src, uint64_t src_offset,
3309 BdrvChild *dst, uint64_t dst_offset,
3310 uint64_t bytes, BdrvRequestFlags read_flags,
3311 BdrvRequestFlags write_flags)
3312 {
3313 BDRVQcow2State *s = bs->opaque;
3314 int ret;
3315 unsigned int cur_bytes; /* number of bytes in current iteration */
3316 BdrvChild *child = NULL;
3317 BdrvRequestFlags cur_write_flags;
3318
3319 assert(!bs->encrypted);
3320 qemu_co_mutex_lock(&s->lock);
3321
3322 while (bytes != 0) {
3323 uint64_t copy_offset = 0;
3324 /* prepare next request */
3325 cur_bytes = MIN(bytes, INT_MAX);
3326 cur_write_flags = write_flags;
3327
3328 ret = qcow2_get_cluster_offset(bs, src_offset, &cur_bytes, &copy_offset);
3329 if (ret < 0) {
3330 goto out;
3331 }
3332
3333 switch (ret) {
3334 case QCOW2_CLUSTER_UNALLOCATED:
3335 if (bs->backing && bs->backing->bs) {
3336 int64_t backing_length = bdrv_getlength(bs->backing->bs);
3337 if (src_offset >= backing_length) {
3338 cur_write_flags |= BDRV_REQ_ZERO_WRITE;
3339 } else {
3340 child = bs->backing;
3341 cur_bytes = MIN(cur_bytes, backing_length - src_offset);
3342 copy_offset = src_offset;
3343 }
3344 } else {
3345 cur_write_flags |= BDRV_REQ_ZERO_WRITE;
3346 }
3347 break;
3348
3349 case QCOW2_CLUSTER_ZERO_PLAIN:
3350 case QCOW2_CLUSTER_ZERO_ALLOC:
3351 cur_write_flags |= BDRV_REQ_ZERO_WRITE;
3352 break;
3353
3354 case QCOW2_CLUSTER_COMPRESSED:
3355 ret = -ENOTSUP;
3356 goto out;
3357
3358 case QCOW2_CLUSTER_NORMAL:
3359 child = bs->file;
3360 copy_offset += offset_into_cluster(s, src_offset);
3361 if ((copy_offset & 511) != 0) {
3362 ret = -EIO;
3363 goto out;
3364 }
3365 break;
3366
3367 default:
3368 abort();
3369 }
3370 qemu_co_mutex_unlock(&s->lock);
3371 ret = bdrv_co_copy_range_from(child,
3372 copy_offset,
3373 dst, dst_offset,
3374 cur_bytes, read_flags, cur_write_flags);
3375 qemu_co_mutex_lock(&s->lock);
3376 if (ret < 0) {
3377 goto out;
3378 }
3379
3380 bytes -= cur_bytes;
3381 src_offset += cur_bytes;
3382 dst_offset += cur_bytes;
3383 }
3384 ret = 0;
3385
3386 out:
3387 qemu_co_mutex_unlock(&s->lock);
3388 return ret;
3389 }
3390
3391 static int coroutine_fn
3392 qcow2_co_copy_range_to(BlockDriverState *bs,
3393 BdrvChild *src, uint64_t src_offset,
3394 BdrvChild *dst, uint64_t dst_offset,
3395 uint64_t bytes, BdrvRequestFlags read_flags,
3396 BdrvRequestFlags write_flags)
3397 {
3398 BDRVQcow2State *s = bs->opaque;
3399 int offset_in_cluster;
3400 int ret;
3401 unsigned int cur_bytes; /* number of bytes in current iteration */
3402 uint64_t cluster_offset;
3403 QCowL2Meta *l2meta = NULL;
3404
3405 assert(!bs->encrypted);
3406
3407 qemu_co_mutex_lock(&s->lock);
3408
3409 while (bytes != 0) {
3410
3411 l2meta = NULL;
3412
3413 offset_in_cluster = offset_into_cluster(s, dst_offset);
3414 cur_bytes = MIN(bytes, INT_MAX);
3415
3416 /* TODO:
3417 * If src->bs == dst->bs, we could simply copy by incrementing
3418 * the refcnt, without copying user data.
3419 * Or if src->bs == dst->bs->backing->bs, we could copy by discarding. */
3420 ret = qcow2_alloc_cluster_offset(bs, dst_offset, &cur_bytes,
3421 &cluster_offset, &l2meta);
3422 if (ret < 0) {
3423 goto fail;
3424 }
3425
3426 assert((cluster_offset & 511) == 0);
3427
3428 ret = qcow2_pre_write_overlap_check(bs, 0,
3429 cluster_offset + offset_in_cluster, cur_bytes);
3430 if (ret < 0) {
3431 goto fail;
3432 }
3433
3434 qemu_co_mutex_unlock(&s->lock);
3435 ret = bdrv_co_copy_range_to(src, src_offset,
3436 bs->file,
3437 cluster_offset + offset_in_cluster,
3438 cur_bytes, read_flags, write_flags);
3439 qemu_co_mutex_lock(&s->lock);
3440 if (ret < 0) {
3441 goto fail;
3442 }
3443
3444 ret = qcow2_handle_l2meta(bs, &l2meta, true);
3445 if (ret) {
3446 goto fail;
3447 }
3448
3449 bytes -= cur_bytes;
3450 src_offset += cur_bytes;
3451 dst_offset += cur_bytes;
3452 }
3453 ret = 0;
3454
3455 fail:
3456 qcow2_handle_l2meta(bs, &l2meta, false);
3457
3458 qemu_co_mutex_unlock(&s->lock);
3459
3460 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);
3461
3462 return ret;
3463 }
3464
3465 static int coroutine_fn qcow2_co_truncate(BlockDriverState *bs, int64_t offset,
3466 PreallocMode prealloc, Error **errp)
3467 {
3468 BDRVQcow2State *s = bs->opaque;
3469 uint64_t old_length;
3470 int64_t new_l1_size;
3471 int ret;
3472 QDict *options;
3473
3474 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_METADATA &&
3475 prealloc != PREALLOC_MODE_FALLOC && prealloc != PREALLOC_MODE_FULL)
3476 {
3477 error_setg(errp, "Unsupported preallocation mode '%s'",
3478 PreallocMode_str(prealloc));
3479 return -ENOTSUP;
3480 }
3481
3482 if (offset & 511) {
3483 error_setg(errp, "The new size must be a multiple of 512");
3484 return -EINVAL;
3485 }
3486
3487 qemu_co_mutex_lock(&s->lock);
3488
3489 /* cannot proceed if image has snapshots */
3490 if (s->nb_snapshots) {
3491 error_setg(errp, "Can't resize an image which has snapshots");
3492 ret = -ENOTSUP;
3493 goto fail;
3494 }
3495
3496 /* cannot proceed if image has bitmaps */
3497 if (s->nb_bitmaps) {
3498 /* TODO: resize bitmaps in the image */
3499 error_setg(errp, "Can't resize an image which has bitmaps");
3500 ret = -ENOTSUP;
3501 goto fail;
3502 }
3503
3504 old_length = bs->total_sectors * BDRV_SECTOR_SIZE;
3505 new_l1_size = size_to_l1(s, offset);
3506
3507 if (offset < old_length) {
3508 int64_t last_cluster, old_file_size;
3509 if (prealloc != PREALLOC_MODE_OFF) {
3510 error_setg(errp,
3511 "Preallocation can't be used for shrinking an image");
3512 ret = -EINVAL;
3513 goto fail;
3514 }
3515
3516 ret = qcow2_cluster_discard(bs, ROUND_UP(offset, s->cluster_size),
3517 old_length - ROUND_UP(offset,
3518 s->cluster_size),
3519 QCOW2_DISCARD_ALWAYS, true);
3520 if (ret < 0) {
3521 error_setg_errno(errp, -ret, "Failed to discard cropped clusters");
3522 goto fail;
3523 }
3524
3525 ret = qcow2_shrink_l1_table(bs, new_l1_size);
3526 if (ret < 0) {
3527 error_setg_errno(errp, -ret,
3528 "Failed to reduce the number of L2 tables");
3529 goto fail;
3530 }
3531
3532 ret = qcow2_shrink_reftable(bs);
3533 if (ret < 0) {
3534 error_setg_errno(errp, -ret,
3535 "Failed to discard unused refblocks");
3536 goto fail;
3537 }
3538
3539 old_file_size = bdrv_getlength(bs->file->bs);
3540 if (old_file_size < 0) {
3541 error_setg_errno(errp, -old_file_size,
3542 "Failed to inquire current file length");
3543 ret = old_file_size;
3544 goto fail;
3545 }
3546 last_cluster = qcow2_get_last_cluster(bs, old_file_size);
3547 if (last_cluster < 0) {
3548 error_setg_errno(errp, -last_cluster,
3549 "Failed to find the last cluster");
3550 ret = last_cluster;
3551 goto fail;
3552 }
3553 if ((last_cluster + 1) * s->cluster_size < old_file_size) {
3554 Error *local_err = NULL;
3555
3556 bdrv_co_truncate(bs->file, (last_cluster + 1) * s->cluster_size,
3557 PREALLOC_MODE_OFF, &local_err);
3558 if (local_err) {
3559 warn_reportf_err(local_err,
3560 "Failed to truncate the tail of the image: ");
3561 }
3562 }
3563 } else {
3564 ret = qcow2_grow_l1_table(bs, new_l1_size, true);
3565 if (ret < 0) {
3566 error_setg_errno(errp, -ret, "Failed to grow the L1 table");
3567 goto fail;
3568 }
3569 }
3570
3571 switch (prealloc) {
3572 case PREALLOC_MODE_OFF:
3573 break;
3574
3575 case PREALLOC_MODE_METADATA:
3576 ret = preallocate_co(bs, old_length, offset);
3577 if (ret < 0) {
3578 error_setg_errno(errp, -ret, "Preallocation failed");
3579 goto fail;
3580 }
3581 break;
3582
3583 case PREALLOC_MODE_FALLOC:
3584 case PREALLOC_MODE_FULL:
3585 {
3586 int64_t allocation_start, host_offset, guest_offset;
3587 int64_t clusters_allocated;
3588 int64_t old_file_size, new_file_size;
3589 uint64_t nb_new_data_clusters, nb_new_l2_tables;
3590
3591 old_file_size = bdrv_getlength(bs->file->bs);
3592 if (old_file_size < 0) {
3593 error_setg_errno(errp, -old_file_size,
3594 "Failed to inquire current file length");
3595 ret = old_file_size;
3596 goto fail;
3597 }
3598 old_file_size = ROUND_UP(old_file_size, s->cluster_size);
3599
3600 nb_new_data_clusters = DIV_ROUND_UP(offset - old_length,
3601 s->cluster_size);
3602
3603 /* This is an overestimation; we will not actually allocate space for
3604 * these in the file but just make sure the new refcount structures are
3605 * able to cover them so we will not have to allocate new refblocks
3606 * while entering the data blocks in the potentially new L2 tables.
3607 * (We do not actually care where the L2 tables are placed. Maybe they
3608 * are already allocated or they can be placed somewhere before
3609 * @old_file_size. It does not matter because they will be fully
3610 * allocated automatically, so they do not need to be covered by the
3611 * preallocation. All that matters is that we will not have to allocate
3612 * new refcount structures for them.) */
3613 nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters,
3614 s->cluster_size / sizeof(uint64_t));
3615 /* The cluster range may not be aligned to L2 boundaries, so add one L2
3616 * table for a potential head/tail */
3617 nb_new_l2_tables++;
3618
3619 allocation_start = qcow2_refcount_area(bs, old_file_size,
3620 nb_new_data_clusters +
3621 nb_new_l2_tables,
3622 true, 0, 0);
3623 if (allocation_start < 0) {
3624 error_setg_errno(errp, -allocation_start,
3625 "Failed to resize refcount structures");
3626 ret = allocation_start;
3627 goto fail;
3628 }
3629
3630 clusters_allocated = qcow2_alloc_clusters_at(bs, allocation_start,
3631 nb_new_data_clusters);
3632 if (clusters_allocated < 0) {
3633 error_setg_errno(errp, -clusters_allocated,
3634 "Failed to allocate data clusters");
3635 ret = clusters_allocated;
3636 goto fail;
3637 }
3638
3639 assert(clusters_allocated == nb_new_data_clusters);
3640
3641 /* Allocate the data area */
3642 new_file_size = allocation_start +
3643 nb_new_data_clusters * s->cluster_size;
3644 ret = bdrv_co_truncate(bs->file, new_file_size, prealloc, errp);
3645 if (ret < 0) {
3646 error_prepend(errp, "Failed to resize underlying file: ");
3647 qcow2_free_clusters(bs, allocation_start,
3648 nb_new_data_clusters * s->cluster_size,
3649 QCOW2_DISCARD_OTHER);
3650 goto fail;
3651 }
3652
3653 /* Create the necessary L2 entries */
3654 host_offset = allocation_start;
3655 guest_offset = old_length;
3656 while (nb_new_data_clusters) {
3657 int64_t nb_clusters = MIN(
3658 nb_new_data_clusters,
3659 s->l2_slice_size - offset_to_l2_slice_index(s, guest_offset));
3660 QCowL2Meta allocation = {
3661 .offset = guest_offset,
3662 .alloc_offset = host_offset,
3663 .nb_clusters = nb_clusters,
3664 };
3665 qemu_co_queue_init(&allocation.dependent_requests);
3666
3667 ret = qcow2_alloc_cluster_link_l2(bs, &allocation);
3668 if (ret < 0) {
3669 error_setg_errno(errp, -ret, "Failed to update L2 tables");
3670 qcow2_free_clusters(bs, host_offset,
3671 nb_new_data_clusters * s->cluster_size,
3672 QCOW2_DISCARD_OTHER);
3673 goto fail;
3674 }
3675
3676 guest_offset += nb_clusters * s->cluster_size;
3677 host_offset += nb_clusters * s->cluster_size;
3678 nb_new_data_clusters -= nb_clusters;
3679 }
3680 break;
3681 }
3682
3683 default:
3684 g_assert_not_reached();
3685 }
3686
3687 if (prealloc != PREALLOC_MODE_OFF) {
3688 /* Flush metadata before actually changing the image size */
3689 ret = qcow2_write_caches(bs);
3690 if (ret < 0) {
3691 error_setg_errno(errp, -ret,
3692 "Failed to flush the preallocated area to disk");
3693 goto fail;
3694 }
3695 }
3696
3697 bs->total_sectors = offset / BDRV_SECTOR_SIZE;
3698
3699 /* write updated header.size */
3700 offset = cpu_to_be64(offset);
3701 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size),
3702 &offset, sizeof(uint64_t));
3703 if (ret < 0) {
3704 error_setg_errno(errp, -ret, "Failed to update the image size");
3705 goto fail;
3706 }
3707
3708 s->l1_vm_state_index = new_l1_size;
3709
3710 /* Update cache sizes */
3711 options = qdict_clone_shallow(bs->options);
3712 ret = qcow2_update_options(bs, options, s->flags, errp);
3713 qobject_unref(options);
3714 if (ret < 0) {
3715 goto fail;
3716 }
3717 ret = 0;
3718 fail:
3719 qemu_co_mutex_unlock(&s->lock);
3720 return ret;
3721 }
3722
3723 /*
3724 * qcow2_compress()
3725 *
3726 * @dest - destination buffer, @dest_size bytes
3727 * @src - source buffer, @src_size bytes
3728 *
3729 * Returns: compressed size on success
3730 * -1 if the destination buffer is too small to hold the compressed data
3731 * -2 on any other error
3732 */
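/*
 * Typical use (a sketch for illustration, not a literal call site in this
 * file): try to compress one cluster into a buffer slightly smaller than
 * the cluster, and fall back to writing the data uncompressed when -1
 * (destination too small, i.e. no space saved) is returned:
 *
 *     ret = qcow2_co_compress(bs, out_buf, s->cluster_size - 1,
 *                             buf, s->cluster_size);
 *     if (ret == -1) {
 *         ... write the cluster uncompressed instead ...
 *     }
 */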
3733 static ssize_t qcow2_compress(void *dest, size_t dest_size,
3734 const void *src, size_t src_size)
3735 {
3736 ssize_t ret;
3737 z_stream strm;
3738
3739 /* default compression level, small (4 KB) window, raw deflate (no zlib header) */
3740 memset(&strm, 0, sizeof(strm));
3741 ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
3742 -12, 9, Z_DEFAULT_STRATEGY);
3743 if (ret != Z_OK) {
3744 return -2;
3745 }
3746
3747 /* strm.next_in is not const in old zlib versions, such as those used on
3748 * OpenBSD/NetBSD, so cast the const away */
3749 strm.avail_in = src_size;
3750 strm.next_in = (void *) src;
3751 strm.avail_out = dest_size;
3752 strm.next_out = dest;
3753
3754 ret = deflate(&strm, Z_FINISH);
3755 if (ret == Z_STREAM_END) {
3756 ret = dest_size - strm.avail_out;
3757 } else {
3758 ret = (ret == Z_OK ? -1 : -2);
3759 }
3760
3761 deflateEnd(&strm);
3762
3763 return ret;
3764 }
3765
3766 /*
3767 * qcow2_decompress()
3768 *
3769 * Decompress some data (not more than @src_size bytes) to produce exactly
3770 * @dest_size bytes.
3771 *
3772 * @dest - destination buffer, @dest_size bytes
3773 * @src - source buffer, @src_size bytes
3774 *
3775 * Returns: 0 on success
3776 * -1 on failure
3777 */
3778 static ssize_t qcow2_decompress(void *dest, size_t dest_size,
3779 const void *src, size_t src_size)
3780 {
3781 int ret = 0;
3782 z_stream strm;
3783
3784 memset(&strm, 0, sizeof(strm));
3785 strm.avail_in = src_size;
3786 strm.next_in = (void *) src;
3787 strm.avail_out = dest_size;
3788 strm.next_out = dest;
3789
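/* windowBits of -12 selects raw deflate with a 4 KB window, matching the
 * parameters used by qcow2_compress() above */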
3790 ret = inflateInit2(&strm, -12);
3791 if (ret != Z_OK) {
3792 return -1;
3793 }
3794
3795 ret = inflate(&strm, Z_FINISH);
3796 if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) || strm.avail_out != 0) {
3797 /* Z_BUF_ERROR is accepted because we only need the @dest buffer to be
3798 * filled completely; the @src buffer may be consumed only partially, since
3799 * in qcow2 the size of the compressed data is known only to sector precision */
3800 ret = -1;
3801 }
3802
3803 inflateEnd(&strm);
3804
3805 return ret;
3806 }
3807
3808 #define MAX_COMPRESS_THREADS 4
3809
3810 typedef ssize_t (*Qcow2CompressFunc)(void *dest, size_t dest_size,
3811 const void *src, size_t src_size);
3812 typedef struct Qcow2CompressData {
3813 void *dest;
3814 size_t dest_size;
3815 const void *src;
3816 size_t src_size;
3817 ssize_t ret;
3818
3819 Qcow2CompressFunc func;
3820 } Qcow2CompressData;
3821
3822 static int qcow2_compress_pool_func(void *opaque)
3823 {
3824 Qcow2CompressData *data = opaque;
3825
3826 data->ret = data->func(data->dest, data->dest_size,
3827 data->src, data->src_size);
3828
3829 return 0;
3830 }
3831
3832 static void qcow2_compress_complete(void *opaque, int ret)
3833 {
3834 qemu_coroutine_enter(opaque);
3835 }
3836
3837 static ssize_t coroutine_fn
3838 qcow2_co_do_compress(BlockDriverState *bs, void *dest, size_t dest_size,
3839 const void *src, size_t src_size, Qcow2CompressFunc func)
3840 {
3841 BDRVQcow2State *s = bs->opaque;
3842 BlockAIOCB *acb;
3843 ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
3844 Qcow2CompressData arg = {
3845 .dest = dest,
3846 .dest_size = dest_size,
3847 .src = src,
3848 .src_size = src_size,
3849 .func = func,
3850 };
3851
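/* Limit the number of concurrent (de)compression jobs to MAX_COMPRESS_THREADS;
 * additional requests wait on compress_wait_queue until a slot frees up */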
3852 while (s->nb_compress_threads >= MAX_COMPRESS_THREADS) {
3853 qemu_co_queue_wait(&s->compress_wait_queue, NULL);
3854 }
3855
3856 s->nb_compress_threads++;
3857 acb = thread_pool_submit_aio(pool, qcow2_compress_pool_func, &arg,
3858 qcow2_compress_complete,
3859 qemu_coroutine_self());
3860
3861 if (!acb) {
3862 s->nb_compress_threads--;
3863 return -EINVAL;
3864 }
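/* Yield until the worker thread finishes and qcow2_compress_complete()
 * re-enters this coroutine */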
3865 qemu_coroutine_yield();
3866 s->nb_compress_threads--;
3867 qemu_co_queue_next(&s->compress_wait_queue);
3868
3869 return arg.ret;
3870 }
3871
3872 static ssize_t coroutine_fn
3873 qcow2_co_compress(BlockDriverState *bs, void *dest, size_t dest_size,
3874 const void *src, size_t src_size)
3875 {
3876 return qcow2_co_do_compress(bs, dest, dest_size, src, src_size,
3877 qcow2_compress);
3878 }
3879
3880 static ssize_t coroutine_fn
3881 qcow2_co_decompress(BlockDriverState *bs, void *dest, size_t dest_size,
3882 const void *src, size_t src_size)
3883 {
3884 return qcow2_co_do_compress(bs, dest, dest_size, src, src_size,
3885 qcow2_decompress);
3886 }
3887
3888 /* XXX: put compressed sectors first, then all the cluster aligned
3889 tables to avoid losing bytes in alignment */
3890 static coroutine_fn int
3891 qcow2_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
3892 uint64_t bytes, QEMUIOVector *qiov)
3893 {
3894 BDRVQcow2State *s = bs->opaque;
3895 QEMUIOVector hd_qiov;
3896 int ret;
3897 size_t out_len;
3898 uint8_t *buf, *out_buf;
3899 int64_t cluster_offset;
3900
3901 if (bytes == 0) {
3902 /* align end of file to a sector boundary to ease reading with
3903 sector based I/Os */
3904 cluster_offset = bdrv_getlength(bs->file->bs);
3905 if (cluster_offset < 0) {
3906 return cluster_offset;
3907 }
3908 return bdrv_co_truncate(bs->file, cluster_offset, PREALLOC_MODE_OFF,
3909 NULL);
3910 }
3911
3912 if (offset_into_cluster(s, offset)) {
3913 return -EINVAL;
3914 }
3915
3916 buf = qemu_blockalign(bs, s->cluster_size);
3917 if (bytes != s->cluster_size) {
3918 if (bytes > s->cluster_size ||
3919 offset + bytes != bs->total_sectors << BDRV_SECTOR_BITS)
3920 {
3921 qemu_vfree(buf);
3922 return -EINVAL;
3923 }
3924 /* Zero-pad last write if image size is not cluster aligned */
3925 memset(buf + bytes, 0, s->cluster_size - bytes);
3926 }
3927 qemu_iovec_to_buf(qiov, 0, buf, bytes);
3928
3929 out_buf = g_malloc(s->cluster_size);
3930
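/* Allow at most cluster_size - 1 bytes of output: if the data does not shrink
 * by at least one byte, compression returns -1 and the cluster is written
 * uncompressed below */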
3931 out_len = qcow2_co_compress(bs, out_buf, s->cluster_size - 1,
3932 buf, s->cluster_size);
3933 if (out_len == -2) {
3934 ret = -EINVAL;
3935 goto fail;
3936 } else if (out_len == -1) {
3937 /* could not compress: write normal cluster */
3938 ret = qcow2_co_pwritev(bs, offset, bytes, qiov, 0);
3939 if (ret < 0) {
3940 goto fail;
3941 }
3942 goto success;
3943 }
3944
3945 qemu_co_mutex_lock(&s->lock);
3946 cluster_offset =
3947 qcow2_alloc_compressed_cluster_offset(bs, offset, out_len);
3948 if (!cluster_offset) {
3949 qemu_co_mutex_unlock(&s->lock);
3950 ret = -EIO;
3951 goto fail;
3952 }
3953 cluster_offset &= s->cluster_offset_mask;
3954
3955 ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len);
3956 qemu_co_mutex_unlock(&s->lock);
3957 if (ret < 0) {
3958 goto fail;
3959 }
3960
3961 qemu_iovec_init_buf(&hd_qiov, out_buf, out_len);
3962
3963 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
3964 ret = bdrv_co_pwritev(bs->file, cluster_offset, out_len, &hd_qiov, 0);
3965 if (ret < 0) {
3966 goto fail;
3967 }
3968 success:
3969 ret = 0;
3970 fail:
3971 qemu_vfree(buf);
3972 g_free(out_buf);
3973 return ret;
3974 }
3975
3976 static int coroutine_fn
3977 qcow2_co_preadv_compressed(BlockDriverState *bs,
3978 uint64_t file_cluster_offset,
3979 uint64_t offset,
3980 uint64_t bytes,
3981 QEMUIOVector *qiov)
3982 {
3983 BDRVQcow2State *s = bs->opaque;
3984 int ret = 0, csize, nb_csectors;
3985 uint64_t coffset;
3986 uint8_t *buf, *out_buf;
3987 QEMUIOVector local_qiov;
3988 int offset_in_cluster = offset_into_cluster(s, offset);
3989
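/* The compressed cluster descriptor encodes the host offset in its lower bits
 * and the number of additional 512-byte sectors in its upper bits; derive the
 * byte offset and size of the compressed data from it */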
3990 coffset = file_cluster_offset & s->cluster_offset_mask;
3991 nb_csectors = ((file_cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
3992 csize = nb_csectors * 512 - (coffset & 511);
3993
3994 buf = g_try_malloc(csize);
3995 if (!buf) {
3996 return -ENOMEM;
3997 }
3998 qemu_iovec_init_buf(&local_qiov, buf, csize);
3999
4000 out_buf = qemu_blockalign(bs, s->cluster_size);
4001
4002 BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
4003 ret = bdrv_co_preadv(bs->file, coffset, csize, &local_qiov, 0);
4004 if (ret < 0) {
4005 goto fail;
4006 }
4007
4008 if (qcow2_co_decompress(bs, out_buf, s->cluster_size, buf, csize) < 0) {
4009 ret = -EIO;
4010 goto fail;
4011 }
4012
4013 qemu_iovec_from_buf(qiov, 0, out_buf + offset_in_cluster, bytes);
4014
4015 fail:
4016 qemu_vfree(out_buf);
4017 g_free(buf);
4018
4019 return ret;
4020 }
4021
4022 static int make_completely_empty(BlockDriverState *bs)
4023 {
4024 BDRVQcow2State *s = bs->opaque;
4025 Error *local_err = NULL;
4026 int ret, l1_clusters;
4027 int64_t offset;
4028 uint64_t *new_reftable = NULL;
4029 uint64_t rt_entry, l1_size2;
4030 struct {
4031 uint64_t l1_offset;
4032 uint64_t reftable_offset;
4033 uint32_t reftable_clusters;
4034 } QEMU_PACKED l1_ofs_rt_ofs_cls;
4035
4036 ret = qcow2_cache_empty(bs, s->l2_table_cache);
4037 if (ret < 0) {
4038 goto fail;
4039 }
4040
4041 ret = qcow2_cache_empty(bs, s->refcount_block_cache);
4042 if (ret < 0) {
4043 goto fail;
4044 }
4045
4046 /* Refcounts will be broken utterly */
4047 ret = qcow2_mark_dirty(bs);
4048 if (ret < 0) {
4049 goto fail;
4050 }
4051
4052 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
4053
4054 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t));
4055 l1_size2 = (uint64_t)s->l1_size * sizeof(uint64_t);
4056
4057 /* After this call, neither the in-memory nor the on-disk refcount
4058 * information accurately describes the actual references */
4059
4060 ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset,
4061 l1_clusters * s->cluster_size, 0);
4062 if (ret < 0) {
4063 goto fail_broken_refcounts;
4064 }
4065 memset(s->l1_table, 0, l1_size2);
4066
4067 BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE);
4068
4069 /* Overwrite enough clusters at the beginning of the image to place
4070 * the refcount table, a refcount block and the L1 table in; this may
4071 * overwrite parts of the existing refcount and L1 table, which is not
4072 * an issue because the dirty flag is set, complete data loss is in fact
4073 * desired and partial data loss is consequently fine as well */
4074 ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size,
4075 (2 + l1_clusters) * s->cluster_size, 0);
4076 /* This call (even if it failed overall) may have overwritten on-disk
4077 * refcount structures; in that case, the in-memory refcount information
4078 * will probably differ from the on-disk information which makes the BDS
4079 * unusable */
4080 if (ret < 0) {
4081 goto fail_broken_refcounts;
4082 }
4083
4084 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
4085 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE);
4086
4087 /* "Create" an empty reftable (one cluster) directly after the image
4088 * header and an empty L1 table three clusters after the image header;
4089 * the cluster between those two will be used as the first refblock */
4090 l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size);
4091 l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size);
4092 l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1);
4093 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset),
4094 &l1_ofs_rt_ofs_cls, sizeof(l1_ofs_rt_ofs_cls));
4095 if (ret < 0) {
4096 goto fail_broken_refcounts;
4097 }
4098
4099 s->l1_table_offset = 3 * s->cluster_size;
4100
4101 new_reftable = g_try_new0(uint64_t, s->cluster_size / sizeof(uint64_t));
4102 if (!new_reftable) {
4103 ret = -ENOMEM;
4104 goto fail_broken_refcounts;
4105 }
4106
4107 s->refcount_table_offset = s->cluster_size;
4108 s->refcount_table_size = s->cluster_size / sizeof(uint64_t);
4109 s->max_refcount_table_index = 0;
4110
4111 g_free(s->refcount_table);
4112 s->refcount_table = new_reftable;
4113 new_reftable = NULL;
4114
4115 /* Now the in-memory refcount information again corresponds to the on-disk
4116 * information (reftable is empty and no refblocks (the refblock cache is
4117 * empty)); however, this means some clusters (e.g. the image header) are
4118 * referenced but not refcounted, whereas the normal qcow2 code assumes that
4119 * the in-memory information is always correct */
4120
4121 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);
4122
4123 /* Enter the first refblock into the reftable */
4124 rt_entry = cpu_to_be64(2 * s->cluster_size);
4125 ret = bdrv_pwrite_sync(bs->file, s->cluster_size,
4126 &rt_entry, sizeof(rt_entry));
4127 if (ret < 0) {
4128 goto fail_broken_refcounts;
4129 }
4130 s->refcount_table[0] = 2 * s->cluster_size;
4131
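/* Allocate refcounts for the clusters just laid out (header, reftable,
 * refblock and L1 table); since nothing else is referenced yet, this
 * allocation must start at offset 0 */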
4132 s->free_cluster_index = 0;
4133 assert(3 + l1_clusters <= s->refcount_block_size);
4134 offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2);
4135 if (offset < 0) {
4136 ret = offset;
4137 goto fail_broken_refcounts;
4138 } else if (offset > 0) {
4139 error_report("First cluster in emptied image is in use");
4140 abort();
4141 }
4142
4143 /* Now finally the in-memory information corresponds to the on-disk
4144 * structures and is correct */
4145 ret = qcow2_mark_clean(bs);
4146 if (ret < 0) {
4147 goto fail;
4148 }
4149
4150 ret = bdrv_truncate(bs->file, (3 + l1_clusters) * s->cluster_size,
4151 PREALLOC_MODE_OFF, &local_err);
4152 if (ret < 0) {
4153 error_report_err(local_err);
4154 goto fail;
4155 }
4156
4157 return 0;
4158
4159 fail_broken_refcounts:
4160 /* The BDS is unusable at this point. If we wanted to make it usable, we
4161 * would have to call qcow2_refcount_close(), qcow2_refcount_init(),
4162 * qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init()
4163 * again. However, because the functions which could have caused this error
4164 * path to be taken are used by those functions as well, it's very likely
4165 * that that sequence will fail as well. Therefore, just eject the BDS. */
4166 bs->drv = NULL;
4167
4168 fail:
4169 g_free(new_reftable);
4170 return ret;
4171 }
4172
4173 static int qcow2_make_empty(BlockDriverState *bs)
4174 {
4175 BDRVQcow2State *s = bs->opaque;
4176 uint64_t offset, end_offset;
4177 int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size);
4178 int l1_clusters, ret = 0;
4179
4180 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t));
4181
4182 if (s->qcow_version >= 3 && !s->snapshots && !s->nb_bitmaps &&
4183 3 + l1_clusters <= s->refcount_block_size &&
4184 s->crypt_method_header != QCOW_CRYPT_LUKS) {
4185 /* The following function only works for qcow2 v3 images (it
4186 * requires the dirty flag) and only as long as there are no
4187 * features that reserve extra clusters (such as snapshots,
4188 * LUKS header, or persistent bitmaps), because it completely
4189 * empties the image. Furthermore, the L1 table and three
4190 * additional clusters (image header, refcount table, one
4191 * refcount block) have to fit inside one refcount block. */
4192 return make_completely_empty(bs);
4193 }
4194
4195 /* This fallback code simply discards every active cluster; this is slow,
4196 * but works in all cases */
4197 end_offset = bs->total_sectors * BDRV_SECTOR_SIZE;
4198 for (offset = 0; offset < end_offset; offset += step) {
4199 /* As this function is generally used after committing an external
4200 * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the
4201 * default action for this kind of discard is to pass the discard,
4202 * which will ideally result in an actually smaller image file, as
4203 * is probably desired. */
4204 ret = qcow2_cluster_discard(bs, offset, MIN(step, end_offset - offset),
4205 QCOW2_DISCARD_SNAPSHOT, true);
4206 if (ret < 0) {
4207 break;
4208 }
4209 }
4210
4211 return ret;
4212 }
4213
4214 static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs)
4215 {
4216 BDRVQcow2State *s = bs->opaque;
4217 int ret;
4218
4219 qemu_co_mutex_lock(&s->lock);
4220 ret = qcow2_write_caches(bs);
4221 qemu_co_mutex_unlock(&s->lock);
4222
4223 return ret;
4224 }
4225
4226 static ssize_t qcow2_measure_crypto_hdr_init_func(QCryptoBlock *block,
4227 size_t headerlen, void *opaque, Error **errp)
4228 {
4229 size_t *headerlenp = opaque;
4230
4231 /* Stash away the payload size */
4232 *headerlenp = headerlen;
4233 return 0;
4234 }
4235
4236 static ssize_t qcow2_measure_crypto_hdr_write_func(QCryptoBlock *block,
4237 size_t offset, const uint8_t *buf, size_t buflen,
4238 void *opaque, Error **errp)
4239 {
4240 /* Discard the bytes, we're not actually writing to an image */
4241 return buflen;
4242 }
4243
4244 /* Determine the number of bytes for the LUKS payload */
4245 static bool qcow2_measure_luks_headerlen(QemuOpts *opts, size_t *len,
4246 Error **errp)
4247 {
4248 QDict *opts_qdict;
4249 QDict *cryptoopts_qdict;
4250 QCryptoBlockCreateOptions *cryptoopts;
4251 QCryptoBlock *crypto;
4252
4253 /* Extract "encrypt." options into a qdict */
4254 opts_qdict = qemu_opts_to_qdict(opts, NULL);
4255 qdict_extract_subqdict(opts_qdict, &cryptoopts_qdict, "encrypt.");
4256 qobject_unref(opts_qdict);
4257
4258 /* Build QCryptoBlockCreateOptions object from qdict */
4259 qdict_put_str(cryptoopts_qdict, "format", "luks");
4260 cryptoopts = block_crypto_create_opts_init(cryptoopts_qdict, errp);
4261 qobject_unref(cryptoopts_qdict);
4262 if (!cryptoopts) {
4263 return false;
4264 }
4265
4266 /* Fake LUKS creation in order to determine the payload size */
4267 crypto = qcrypto_block_create(cryptoopts, "encrypt.",
4268 qcow2_measure_crypto_hdr_init_func,
4269 qcow2_measure_crypto_hdr_write_func,
4270 len, errp);
4271 qapi_free_QCryptoBlockCreateOptions(cryptoopts);
4272 if (!crypto) {
4273 return false;
4274 }
4275
4276 qcrypto_block_free(crypto);
4277 return true;
4278 }
4279
4280 static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs,
4281 Error **errp)
4282 {
4283 Error *local_err = NULL;
4284 BlockMeasureInfo *info;
4285 uint64_t required = 0; /* bytes that contribute to required size */
4286 uint64_t virtual_size; /* disk size as seen by guest */
4287 uint64_t refcount_bits;
4288 uint64_t l2_tables;
4289 uint64_t luks_payload_size = 0;
4290 size_t cluster_size;
4291 int version;
4292 char *optstr;
4293 PreallocMode prealloc;
4294 bool has_backing_file;
4295 bool has_luks;
4296
4297 /* Parse image creation options */
4298 cluster_size = qcow2_opt_get_cluster_size_del(opts, &local_err);
4299 if (local_err) {
4300 goto err;
4301 }
4302
4303 version = qcow2_opt_get_version_del(opts, &local_err);
4304 if (local_err) {
4305 goto err;
4306 }
4307
4308 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err);
4309 if (local_err) {
4310 goto err;
4311 }
4312
4313 optstr = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
4314 prealloc = qapi_enum_parse(&PreallocMode_lookup, optstr,
4315 PREALLOC_MODE_OFF, &local_err);
4316 g_free(optstr);
4317 if (local_err) {
4318 goto err;
4319 }
4320
4321 optstr = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
4322 has_backing_file = !!optstr;
4323 g_free(optstr);
4324
4325 optstr = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT);
4326 has_luks = optstr && strcmp(optstr, "luks") == 0;
4327 g_free(optstr);
4328
4329 if (has_luks) {
4330 size_t headerlen;
4331
4332 if (!qcow2_measure_luks_headerlen(opts, &headerlen, &local_err)) {
4333 goto err;
4334 }
4335
4336 luks_payload_size = ROUND_UP(headerlen, cluster_size);
4337 }
4338
4339 virtual_size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0);
4340 virtual_size = ROUND_UP(virtual_size, cluster_size);
4341
4342 /* Check that virtual disk size is valid */
4343 l2_tables = DIV_ROUND_UP(virtual_size / cluster_size,
4344 cluster_size / sizeof(uint64_t));
4345 if (l2_tables * sizeof(uint64_t) > QCOW_MAX_L1_SIZE) {
4346 error_setg(&local_err, "The image size is too large "
4347 "(try using a larger cluster size)");
4348 goto err;
4349 }
4350
4351 /* Account for input image */
4352 if (in_bs) {
4353 int64_t ssize = bdrv_getlength(in_bs);
4354 if (ssize < 0) {
4355 error_setg_errno(&local_err, -ssize,
4356 "Unable to get image virtual_size");
4357 goto err;
4358 }
4359
4360 virtual_size = ROUND_UP(ssize, cluster_size);
4361
4362 if (has_backing_file) {
4363 /* We don't know how much of the backing chain is shared by the input
4364 * image and the new image file. In the worst case the new image's
4365 * backing file has nothing in common with the input image. Be
4366 * conservative and assume all clusters need to be written.
4367 */
4368 required = virtual_size;
4369 } else {
4370 int64_t offset;
4371 int64_t pnum = 0;
4372
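/* Without a backing file, only clusters of the input image that actually
 * contain data need to be written; zero regions can be skipped because the
 * new image reads as zeroes there anyway */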
4373 for (offset = 0; offset < ssize; offset += pnum) {
4374 int ret;
4375
4376 ret = bdrv_block_status_above(in_bs, NULL, offset,
4377 ssize - offset, &pnum, NULL,
4378 NULL);
4379 if (ret < 0) {
4380 error_setg_errno(&local_err, -ret,
4381 "Unable to get block status");
4382 goto err;
4383 }
4384
4385 if (ret & BDRV_BLOCK_ZERO) {
4386 /* Skip zero regions (safe with no backing file) */
4387 } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) ==
4388 (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) {
4389 /* Extend pnum to end of cluster for next iteration */
4390 pnum = ROUND_UP(offset + pnum, cluster_size) - offset;
4391
4392 /* Count clusters we've seen */
4393 required += offset % cluster_size + pnum;
4394 }
4395 }
4396 }
4397 }
4398
4399 /* Take into account preallocation. Nothing special is needed for
4400 * PREALLOC_MODE_METADATA since metadata is always counted.
4401 */
4402 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) {
4403 required = virtual_size;
4404 }
4405
4406 info = g_new(BlockMeasureInfo, 1);
4407 info->fully_allocated =
4408 qcow2_calc_prealloc_size(virtual_size, cluster_size,
4409 ctz32(refcount_bits)) + luks_payload_size;
4410
4411 /* Remove data clusters that are not required. This overestimates the
4412 * required size because metadata needed for the fully allocated file is
4413 * still counted.
4414 */
4415 info->required = info->fully_allocated - virtual_size + required;
4416 return info;
4417
4418 err:
4419 error_propagate(errp, local_err);
4420 return NULL;
4421 }
4422
4423 static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
4424 {
4425 BDRVQcow2State *s = bs->opaque;
4426 bdi->unallocated_blocks_are_zero = true;
4427 bdi->cluster_size = s->cluster_size;
4428 bdi->vm_state_offset = qcow2_vm_state_offset(s);
4429 return 0;
4430 }
4431
4432 static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs,
4433 Error **errp)
4434 {
4435 BDRVQcow2State *s = bs->opaque;
4436 ImageInfoSpecific *spec_info;
4437 QCryptoBlockInfo *encrypt_info = NULL;
4438 Error *local_err = NULL;
4439
4440 if (s->crypto != NULL) {
4441 encrypt_info = qcrypto_block_get_info(s->crypto, &local_err);
4442 if (local_err) {
4443 error_propagate(errp, local_err);
4444 return NULL;
4445 }
4446 }
4447
4448 spec_info = g_new(ImageInfoSpecific, 1);
4449 *spec_info = (ImageInfoSpecific){
4450 .type = IMAGE_INFO_SPECIFIC_KIND_QCOW2,
4451 .u.qcow2.data = g_new0(ImageInfoSpecificQCow2, 1),
4452 };
4453 if (s->qcow_version == 2) {
4454 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){
4455 .compat = g_strdup("0.10"),
4456 .refcount_bits = s->refcount_bits,
4457 };
4458 } else if (s->qcow_version == 3) {
4459 Qcow2BitmapInfoList *bitmaps;
4460 bitmaps = qcow2_get_bitmap_info_list(bs, &local_err);
4461 if (local_err) {
4462 error_propagate(errp, local_err);
4463 qapi_free_ImageInfoSpecific(spec_info);
4464 return NULL;
4465 }
4466 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){
4467 .compat = g_strdup("1.1"),
4468 .lazy_refcounts = s->compatible_features &
4469 QCOW2_COMPAT_LAZY_REFCOUNTS,
4470 .has_lazy_refcounts = true,
4471 .corrupt = s->incompatible_features &
4472 QCOW2_INCOMPAT_CORRUPT,
4473 .has_corrupt = true,
4474 .refcount_bits = s->refcount_bits,
4475 .has_bitmaps = !!bitmaps,
4476 .bitmaps = bitmaps,
4477 };
4478 } else {
4479 /* if this assertion fails, this probably means a new version was
4480 * added without having it covered here */
4481 assert(false);
4482 }
4483
4484 if (encrypt_info) {
4485 ImageInfoSpecificQCow2Encryption *qencrypt =
4486 g_new(ImageInfoSpecificQCow2Encryption, 1);
4487 switch (encrypt_info->format) {
4488 case Q_CRYPTO_BLOCK_FORMAT_QCOW:
4489 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES;
4490 break;
4491 case Q_CRYPTO_BLOCK_FORMAT_LUKS:
4492 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS;
4493 qencrypt->u.luks = encrypt_info->u.luks;
4494 break;
4495 default:
4496 abort();
4497 }
4498 /* Since we did a shallow copy above, erase any pointers
4499 * in the original info */
4500 memset(&encrypt_info->u, 0, sizeof(encrypt_info->u));
4501 qapi_free_QCryptoBlockInfo(encrypt_info);
4502
4503 spec_info->u.qcow2.data->has_encrypt = true;
4504 spec_info->u.qcow2.data->encrypt = qencrypt;
4505 }
4506
4507 return spec_info;
4508 }
4509
4510 static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
4511 int64_t pos)
4512 {
4513 BDRVQcow2State *s = bs->opaque;
4514
4515 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
4516 return bs->drv->bdrv_co_pwritev(bs, qcow2_vm_state_offset(s) + pos,
4517 qiov->size, qiov, 0);
4518 }
4519
4520 static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
4521 int64_t pos)
4522 {
4523 BDRVQcow2State *s = bs->opaque;
4524
4525 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
4526 return bs->drv->bdrv_co_preadv(bs, qcow2_vm_state_offset(s) + pos,
4527 qiov->size, qiov, 0);
4528 }
4529
4530 /*
4531 * Downgrades an image's version. To achieve this, any incompatible features
4532 * have to be removed.
4533 */
4534 static int qcow2_downgrade(BlockDriverState *bs, int target_version,
4535 BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
4536 Error **errp)
4537 {
4538 BDRVQcow2State *s = bs->opaque;
4539 int current_version = s->qcow_version;
4540 int ret;
4541
4542 /* This is qcow2_downgrade(), not qcow2_upgrade() */
4543 assert(target_version < current_version);
4544
4545 /* There are no other versions (now) that you can downgrade to */
4546 assert(target_version == 2);
4547
4548 if (s->refcount_order != 4) {
4549 error_setg(errp, "compat=0.10 requires refcount_bits=16");
4550 return -ENOTSUP;
4551 }
4552
4553 /* clear incompatible features */
4554 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
4555 ret = qcow2_mark_clean(bs);
4556 if (ret < 0) {
4557 error_setg_errno(errp, -ret, "Failed to make the image clean");
4558 return ret;
4559 }
4560 }
4561
4562 /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in
4563 * the first place; if that happens nonetheless, returning -ENOTSUP is the
4564 * best thing to do anyway */
4565
4566 if (s->incompatible_features) {
4567 error_setg(errp, "Cannot downgrade an image with incompatible features "
4568 "%#" PRIx64 " set", s->incompatible_features);
4569 return -ENOTSUP;
4570 }
4571
4572 /* since we can ignore compatible features, we can set them to 0 as well */
4573 s->compatible_features = 0;
4574 /* if lazy refcounts have been used, they have already been fixed through
4575 * clearing the dirty flag */
4576
4577 /* clearing autoclear features is trivial */
4578 s->autoclear_features = 0;
4579
4580 ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque);
4581 if (ret < 0) {
4582 error_setg_errno(errp, -ret, "Failed to turn zero into data clusters");
4583 return ret;
4584 }
4585
4586 s->qcow_version = target_version;
4587 ret = qcow2_update_header(bs);
4588 if (ret < 0) {
4589 s->qcow_version = current_version;
4590 error_setg_errno(errp, -ret, "Failed to update the image header");
4591 return ret;
4592 }
4593 return 0;
4594 }
4595
4596 typedef enum Qcow2AmendOperation {
4597 /* This is the value Qcow2AmendHelperCBInfo::last_operation will be
4598 * statically initialized to so that the helper CB can discern the first
4599 * invocation from an operation change */
4600 QCOW2_NO_OPERATION = 0,
4601
4602 QCOW2_CHANGING_REFCOUNT_ORDER,
4603 QCOW2_DOWNGRADING,
4604 } Qcow2AmendOperation;
4605
4606 typedef struct Qcow2AmendHelperCBInfo {
4607 /* The code coordinating the amend operations should only modify
4608 * these four fields; the rest will be managed by the CB */
4609 BlockDriverAmendStatusCB *original_status_cb;
4610 void *original_cb_opaque;
4611
4612 Qcow2AmendOperation current_operation;
4613
4614 /* Total number of operations to perform (only set once) */
4615 int total_operations;
4616
4617 /* The following fields are managed by the CB */
4618
4619 /* Number of operations completed */
4620 int operations_completed;
4621
4622 /* Cumulative offset of all completed operations */
4623 int64_t offset_completed;
4624
4625 Qcow2AmendOperation last_operation;
4626 int64_t last_work_size;
4627 } Qcow2AmendHelperCBInfo;
4628
4629 static void qcow2_amend_helper_cb(BlockDriverState *bs,
4630 int64_t operation_offset,
4631 int64_t operation_work_size, void *opaque)
4632 {
4633 Qcow2AmendHelperCBInfo *info = opaque;
4634 int64_t current_work_size;
4635 int64_t projected_work_size;
4636
4637 if (info->current_operation != info->last_operation) {
4638 if (info->last_operation != QCOW2_NO_OPERATION) {
4639 info->offset_completed += info->last_work_size;
4640 info->operations_completed++;
4641 }
4642
4643 info->last_operation = info->current_operation;
4644 }
4645
4646 assert(info->total_operations > 0);
4647 assert(info->operations_completed < info->total_operations);
4648
4649 info->last_work_size = operation_work_size;
4650
4651 current_work_size = info->offset_completed + operation_work_size;
4652
4653 /* current_work_size is the total work size for (operations_completed + 1)
4654 * operations (which includes this one), so multiply it by the number of
4655 * operations not covered and divide it by the number of operations
4656 * covered to get a projection for the operations not covered */
4657 projected_work_size = current_work_size * (info->total_operations -
4658 info->operations_completed - 1)
4659 / (info->operations_completed + 1);
4660
4661 info->original_status_cb(bs, info->offset_completed + operation_offset,
4662 current_work_size + projected_work_size,
4663 info->original_cb_opaque);
4664 }
4665
4666 static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
4667 BlockDriverAmendStatusCB *status_cb,
4668 void *cb_opaque,
4669 Error **errp)
4670 {
4671 BDRVQcow2State *s = bs->opaque;
4672 int old_version = s->qcow_version, new_version = old_version;
4673 uint64_t new_size = 0;
4674 const char *backing_file = NULL, *backing_format = NULL;
4675 bool lazy_refcounts = s->use_lazy_refcounts;
4676 const char *compat = NULL;
4677 uint64_t cluster_size = s->cluster_size;
4678 bool encrypt;
4679 int encformat;
4680 int refcount_bits = s->refcount_bits;
4681 int ret;
4682 QemuOptDesc *desc = opts->list->desc;
4683 Qcow2AmendHelperCBInfo helper_cb_info;
4684
4685 while (desc && desc->name) {
4686 if (!qemu_opt_find(opts, desc->name)) {
4687 /* only change explicitly defined options */
4688 desc++;
4689 continue;
4690 }
4691
4692 if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) {
4693 compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL);
4694 if (!compat) {
4695 /* preserve default */
4696 } else if (!strcmp(compat, "0.10")) {
4697 new_version = 2;
4698 } else if (!strcmp(compat, "1.1")) {
4699 new_version = 3;
4700 } else {
4701 error_setg(errp, "Unknown compatibility level %s", compat);
4702 return -EINVAL;
4703 }
4704 } else if (!strcmp(desc->name, BLOCK_OPT_PREALLOC)) {
4705 error_setg(errp, "Cannot change preallocation mode");
4706 return -ENOTSUP;
4707 } else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) {
4708 new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
4709 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) {
4710 backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
4711 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) {
4712 backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
4713 } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT)) {
4714 encrypt = qemu_opt_get_bool(opts, BLOCK_OPT_ENCRYPT,
4715 !!s->crypto);
4716
4717 if (encrypt != !!s->crypto) {
4718 error_setg(errp,
4719 "Changing the encryption flag is not supported");
4720 return -ENOTSUP;
4721 }
4722 } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT_FORMAT)) {
4723 encformat = qcow2_crypt_method_from_format(
4724 qemu_opt_get(opts, BLOCK_OPT_ENCRYPT_FORMAT));
4725
4726 if (encformat != s->crypt_method_header) {
4727 error_setg(errp,
4728 "Changing the encryption format is not supported");
4729 return -ENOTSUP;
4730 }
4731 } else if (g_str_has_prefix(desc->name, "encrypt.")) {
4732 error_setg(errp,
4733 "Changing the encryption parameters is not supported");
4734 return -ENOTSUP;
4735 } else if (!strcmp(desc->name, BLOCK_OPT_CLUSTER_SIZE)) {
4736 cluster_size = qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE,
4737 cluster_size);
4738 if (cluster_size != s->cluster_size) {
4739 error_setg(errp, "Changing the cluster size is not supported");
4740 return -ENOTSUP;
4741 }
4742 } else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) {
4743 lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS,
4744 lazy_refcounts);
4745 } else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) {
4746 refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS,
4747 refcount_bits);
4748
4749 if (refcount_bits <= 0 || refcount_bits > 64 ||
4750 !is_power_of_2(refcount_bits))
4751 {
4752 error_setg(errp, "Refcount width must be a power of two and "
4753 "may not exceed 64 bits");
4754 return -EINVAL;
4755 }
4756 } else {
4757 /* if this point is reached, this probably means a new option was
4758 * added without having it covered here */
4759 abort();
4760 }
4761
4762 desc++;
4763 }
4764
4765 helper_cb_info = (Qcow2AmendHelperCBInfo){
4766 .original_status_cb = status_cb,
4767 .original_cb_opaque = cb_opaque,
4768 .total_operations = (new_version < old_version)
4769 + (s->refcount_bits != refcount_bits)
4770 };
4771
4772 /* Upgrade first (some features may require compat=1.1) */
4773 if (new_version > old_version) {
4774 s->qcow_version = new_version;
4775 ret = qcow2_update_header(bs);
4776 if (ret < 0) {
4777 s->qcow_version = old_version;
4778 error_setg_errno(errp, -ret, "Failed to update the image header");
4779 return ret;
4780 }
4781 }
4782
4783 if (s->refcount_bits != refcount_bits) {
4784 int refcount_order = ctz32(refcount_bits);
4785
4786 if (new_version < 3 && refcount_bits != 16) {
4787 error_setg(errp, "Refcount widths other than 16 bits require "
4788 "compatibility level 1.1 or above (use compat=1.1 or "
4789 "greater)");
4790 return -EINVAL;
4791 }
4792
4793 helper_cb_info.current_operation = QCOW2_CHANGING_REFCOUNT_ORDER;
4794 ret = qcow2_change_refcount_order(bs, refcount_order,
4795 &qcow2_amend_helper_cb,
4796 &helper_cb_info, errp);
4797 if (ret < 0) {
4798 return ret;
4799 }
4800 }
4801
4802 if (backing_file || backing_format) {
4803 ret = qcow2_change_backing_file(bs,
4804 backing_file ?: s->image_backing_file,
4805 backing_format ?: s->image_backing_format);
4806 if (ret < 0) {
4807 error_setg_errno(errp, -ret, "Failed to change the backing file");
4808 return ret;
4809 }
4810 }
4811
4812 if (s->use_lazy_refcounts != lazy_refcounts) {
4813 if (lazy_refcounts) {
4814 if (new_version < 3) {
4815 error_setg(errp, "Lazy refcounts only supported with "
4816 "compatibility level 1.1 and above (use compat=1.1 "
4817 "or greater)");
4818 return -EINVAL;
4819 }
4820 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
4821 ret = qcow2_update_header(bs);
4822 if (ret < 0) {
4823 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
4824 error_setg_errno(errp, -ret, "Failed to update the image header");
4825 return ret;
4826 }
4827 s->use_lazy_refcounts = true;
4828 } else {
4829 /* make image clean first */
4830 ret = qcow2_mark_clean(bs);
4831 if (ret < 0) {
4832 error_setg_errno(errp, -ret, "Failed to make the image clean");
4833 return ret;
4834 }
4835 /* now disallow lazy refcounts */
4836 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
4837 ret = qcow2_update_header(bs);
4838 if (ret < 0) {
4839 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
4840 error_setg_errno(errp, -ret, "Failed to update the image header");
4841 return ret;
4842 }
4843 s->use_lazy_refcounts = false;
4844 }
4845 }
4846
4847 if (new_size) {
4848 BlockBackend *blk = blk_new(BLK_PERM_RESIZE, BLK_PERM_ALL);
4849 ret = blk_insert_bs(blk, bs, errp);
4850 if (ret < 0) {
4851 blk_unref(blk);
4852 return ret;
4853 }
4854
4855 ret = blk_truncate(blk, new_size, PREALLOC_MODE_OFF, errp);
4856 blk_unref(blk);
4857 if (ret < 0) {
4858 return ret;
4859 }
4860 }
4861
4862 /* Downgrade last (so unsupported features can be removed before) */
4863 if (new_version < old_version) {
4864 helper_cb_info.current_operation = QCOW2_DOWNGRADING;
4865 ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb,
4866 &helper_cb_info, errp);
4867 if (ret < 0) {
4868 return ret;
4869 }
4870 }
4871
4872 return 0;
4873 }
4874
4875 /*
4876 * If offset or size are negative, respectively, they will not be included in
4877 * the BLOCK_IMAGE_CORRUPTED event emitted.
4878 * fatal will be ignored for read-only BDS; corruptions found there will always
4879 * be considered non-fatal.
4880 */
4881 void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
4882 int64_t size, const char *message_format, ...)
4883 {
4884 BDRVQcow2State *s = bs->opaque;
4885 const char *node_name;
4886 char *message;
4887 va_list ap;
4888
4889 fatal = fatal && bdrv_is_writable(bs);
4890
4891 if (s->signaled_corruption &&
4892 (!fatal || (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT)))
4893 {
4894 return;
4895 }
4896
4897 va_start(ap, message_format);
4898 message = g_strdup_vprintf(message_format, ap);
4899 va_end(ap);
4900
4901 if (fatal) {
4902 fprintf(stderr, "qcow2: Marking image as corrupt: %s; further "
4903 "corruption events will be suppressed\n", message);
4904 } else {
4905 fprintf(stderr, "qcow2: Image is corrupt: %s; further non-fatal "
4906 "corruption events will be suppressed\n", message);
4907 }
4908
4909 node_name = bdrv_get_node_name(bs);
4910 qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs),
4911 *node_name != '\0', node_name,
4912 message, offset >= 0, offset,
4913 size >= 0, size,
4914 fatal);
4915 g_free(message);
4916
4917 if (fatal) {
4918 qcow2_mark_corrupt(bs);
4919 bs->drv = NULL; /* make BDS unusable */
4920 }
4921
4922 s->signaled_corruption = true;
4923 }
4924
4925 static QemuOptsList qcow2_create_opts = {
4926 .name = "qcow2-create-opts",
4927 .head = QTAILQ_HEAD_INITIALIZER(qcow2_create_opts.head),
4928 .desc = {
4929 {
4930 .name = BLOCK_OPT_SIZE,
4931 .type = QEMU_OPT_SIZE,
4932 .help = "Virtual disk size"
4933 },
4934 {
4935 .name = BLOCK_OPT_COMPAT_LEVEL,
4936 .type = QEMU_OPT_STRING,
4937 .help = "Compatibility level (0.10 or 1.1)"
4938 },
4939 {
4940 .name = BLOCK_OPT_BACKING_FILE,
4941 .type = QEMU_OPT_STRING,
4942 .help = "File name of a base image"
4943 },
4944 {
4945 .name = BLOCK_OPT_BACKING_FMT,
4946 .type = QEMU_OPT_STRING,
4947 .help = "Image format of the base image"
4948 },
4949 {
4950 .name = BLOCK_OPT_ENCRYPT,
4951 .type = QEMU_OPT_BOOL,
4952 .help = "Encrypt the image with format 'aes'. (Deprecated "
4953 "in favor of " BLOCK_OPT_ENCRYPT_FORMAT "=aes)",
4954 },
4955 {
4956 .name = BLOCK_OPT_ENCRYPT_FORMAT,
4957 .type = QEMU_OPT_STRING,
4958 .help = "Encrypt the image, format choices: 'aes', 'luks'",
4959 },
4960 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",
4961 "ID of secret providing qcow AES key or LUKS passphrase"),
4962 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_ALG("encrypt."),
4963 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_MODE("encrypt."),
4964 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_ALG("encrypt."),
4965 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG("encrypt."),
4966 BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG("encrypt."),
4967 BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."),
4968 {
4969 .name = BLOCK_OPT_CLUSTER_SIZE,
4970 .type = QEMU_OPT_SIZE,
4971 .help = "qcow2 cluster size",
4972 .def_value_str = stringify(DEFAULT_CLUSTER_SIZE)
4973 },
4974 {
4975 .name = BLOCK_OPT_PREALLOC,
4976 .type = QEMU_OPT_STRING,
4977 .help = "Preallocation mode (allowed values: off, metadata, "
4978 "falloc, full)"
4979 },
4980 {
4981 .name = BLOCK_OPT_LAZY_REFCOUNTS,
4982 .type = QEMU_OPT_BOOL,
4983 .help = "Postpone refcount updates",
4984 .def_value_str = "off"
4985 },
4986 {
4987 .name = BLOCK_OPT_REFCOUNT_BITS,
4988 .type = QEMU_OPT_NUMBER,
4989 .help = "Width of a reference count entry in bits",
4990 .def_value_str = "16"
4991 },
4992 { /* end of list */ }
4993 }
4994 };
4995
4996 static const char *const qcow2_strong_runtime_opts[] = {
4997 "encrypt." BLOCK_CRYPTO_OPT_QCOW_KEY_SECRET,
4998
4999 NULL
5000 };
5001
5002 BlockDriver bdrv_qcow2 = {
5003 .format_name = "qcow2",
5004 .instance_size = sizeof(BDRVQcow2State),
5005 .bdrv_probe = qcow2_probe,
5006 .bdrv_open = qcow2_open,
5007 .bdrv_close = qcow2_close,
5008 .bdrv_reopen_prepare = qcow2_reopen_prepare,
5009 .bdrv_reopen_commit = qcow2_reopen_commit,
5010 .bdrv_reopen_abort = qcow2_reopen_abort,
5011 .bdrv_join_options = qcow2_join_options,
5012 .bdrv_child_perm = bdrv_format_default_perms,
5013 .bdrv_co_create_opts = qcow2_co_create_opts,
5014 .bdrv_co_create = qcow2_co_create,
5015 .bdrv_has_zero_init = bdrv_has_zero_init_1,
5016 .bdrv_co_block_status = qcow2_co_block_status,
5017
5018 .bdrv_co_preadv = qcow2_co_preadv,
5019 .bdrv_co_pwritev = qcow2_co_pwritev,
5020 .bdrv_co_flush_to_os = qcow2_co_flush_to_os,
5021
5022 .bdrv_co_pwrite_zeroes = qcow2_co_pwrite_zeroes,
5023 .bdrv_co_pdiscard = qcow2_co_pdiscard,
5024 .bdrv_co_copy_range_from = qcow2_co_copy_range_from,
5025 .bdrv_co_copy_range_to = qcow2_co_copy_range_to,
5026 .bdrv_co_truncate = qcow2_co_truncate,
5027 .bdrv_co_pwritev_compressed = qcow2_co_pwritev_compressed,
5028 .bdrv_make_empty = qcow2_make_empty,
5029
5030 .bdrv_snapshot_create = qcow2_snapshot_create,
5031 .bdrv_snapshot_goto = qcow2_snapshot_goto,
5032 .bdrv_snapshot_delete = qcow2_snapshot_delete,
5033 .bdrv_snapshot_list = qcow2_snapshot_list,
5034 .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp,
5035 .bdrv_measure = qcow2_measure,
5036 .bdrv_get_info = qcow2_get_info,
5037 .bdrv_get_specific_info = qcow2_get_specific_info,
5038
5039 .bdrv_save_vmstate = qcow2_save_vmstate,
5040 .bdrv_load_vmstate = qcow2_load_vmstate,
5041
5042 .supports_backing = true,
5043 .bdrv_change_backing_file = qcow2_change_backing_file,
5044
5045 .bdrv_refresh_limits = qcow2_refresh_limits,
5046 .bdrv_co_invalidate_cache = qcow2_co_invalidate_cache,
5047 .bdrv_inactivate = qcow2_inactivate,
5048
5049 .create_opts = &qcow2_create_opts,
5050 .strong_runtime_opts = qcow2_strong_runtime_opts,
5051 .bdrv_co_check = qcow2_co_check,
5052 .bdrv_amend_options = qcow2_amend_options,
5053
5054 .bdrv_detach_aio_context = qcow2_detach_aio_context,
5055 .bdrv_attach_aio_context = qcow2_attach_aio_context,
5056
5057 .bdrv_reopen_bitmaps_rw = qcow2_reopen_bitmaps_rw,
5058 .bdrv_can_store_new_dirty_bitmap = qcow2_can_store_new_dirty_bitmap,
5059 .bdrv_remove_persistent_dirty_bitmap = qcow2_remove_persistent_dirty_bitmap,
5060 };
5061
5062 static void bdrv_qcow2_init(void)
5063 {
5064 bdrv_register(&bdrv_qcow2);
5065 }
5066
5067 block_init(bdrv_qcow2_init);