1 /*
2 * Block driver for the QCOW version 2 format
3 *
4 * Copyright (c) 2004-2006 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "qemu/osdep.h"
26
27 #include "block/qdict.h"
28 #include "sysemu/block-backend.h"
29 #include "qemu/main-loop.h"
30 #include "qemu/module.h"
31 #include "qcow2.h"
32 #include "qemu/error-report.h"
33 #include "qapi/error.h"
34 #include "qapi/qapi-events-block-core.h"
35 #include "qapi/qmp/qdict.h"
36 #include "qapi/qmp/qstring.h"
37 #include "trace.h"
38 #include "qemu/option_int.h"
39 #include "qemu/cutils.h"
40 #include "qemu/bswap.h"
41 #include "qemu/memalign.h"
42 #include "qapi/qobject-input-visitor.h"
43 #include "qapi/qapi-visit-block-core.h"
44 #include "crypto.h"
45 #include "block/aio_task.h"
46 #include "block/dirty-bitmap.h"
47
48 /*
49 Differences with QCOW:
50
51 - Support for multiple incremental snapshots.
52 - Memory management by reference counts.
53 - Clusters which have a reference count of one have the bit
54 QCOW_OFLAG_COPIED to optimize write performance.
55 - Size of compressed clusters is stored in sectors to reduce bit usage
56 in the cluster offsets.
57 - Support for storing additional data (such as the VM state) in the
58 snapshots.
59 - If a backing store is used, the cluster size is not constrained
60 (could be backported to QCOW).
61     - L2 tables always have a size of one cluster.
62 */
63
64
65 typedef struct {
66 uint32_t magic;
67 uint32_t len;
68 } QEMU_PACKED QCowExtension;
69
70 #define QCOW2_EXT_MAGIC_END 0
71 #define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xe2792aca
72 #define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857
73 #define QCOW2_EXT_MAGIC_CRYPTO_HEADER 0x0537be77
74 #define QCOW2_EXT_MAGIC_BITMAPS 0x23852875
75 #define QCOW2_EXT_MAGIC_DATA_FILE 0x44415441
76
77 static int coroutine_fn
78 qcow2_co_preadv_compressed(BlockDriverState *bs,
79 uint64_t l2_entry,
80 uint64_t offset,
81 uint64_t bytes,
82 QEMUIOVector *qiov,
83 size_t qiov_offset);
84
85 static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
86 {
87 const QCowHeader *cow_header = (const void *)buf;
88
89 if (buf_size >= sizeof(QCowHeader) &&
90 be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
91 be32_to_cpu(cow_header->version) >= 2)
92 return 100;
93 else
94 return 0;
95 }
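/*
 * Illustrative note: qcow2_probe() only looks at the magic and version
 * fields. A buffer of at least sizeof(QCowHeader) bytes starting with
 *
 *   'Q', 'F', 'I', 0xfb,      magic (big-endian QCOW_MAGIC)
 *   0x00, 0x00, 0x00, 0x03,   version 3 (any value >= 2 is accepted)
 *
 * scores 100; any other magic or version scores 0.
 */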
96
97
98 static int qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset,
99 uint8_t *buf, size_t buflen,
100 void *opaque, Error **errp)
101 {
102 BlockDriverState *bs = opaque;
103 BDRVQcow2State *s = bs->opaque;
104 ssize_t ret;
105
106 if ((offset + buflen) > s->crypto_header.length) {
107 error_setg(errp, "Request for data outside of extension header");
108 return -1;
109 }
110
111 ret = bdrv_pread(bs->file, s->crypto_header.offset + offset, buflen, buf,
112 0);
113 if (ret < 0) {
114 error_setg_errno(errp, -ret, "Could not read encryption header");
115 return -1;
116 }
117 return 0;
118 }
119
120
121 static int qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen,
122 void *opaque, Error **errp)
123 {
124 BlockDriverState *bs = opaque;
125 BDRVQcow2State *s = bs->opaque;
126 int64_t ret;
127 int64_t clusterlen;
128
129 ret = qcow2_alloc_clusters(bs, headerlen);
130 if (ret < 0) {
131 error_setg_errno(errp, -ret,
132 "Cannot allocate cluster for LUKS header size %zu",
133 headerlen);
134 return -1;
135 }
136
137 s->crypto_header.length = headerlen;
138 s->crypto_header.offset = ret;
139
140 /*
141 * Zero fill all space in cluster so it has predictable
142 * content, as we may not initialize some regions of the
143 * header (eg only 1 out of 8 key slots will be initialized)
144 */
145 clusterlen = size_to_clusters(s, headerlen) * s->cluster_size;
146 assert(qcow2_pre_write_overlap_check(bs, 0, ret, clusterlen, false) == 0);
147 ret = bdrv_pwrite_zeroes(bs->file,
148 ret,
149 clusterlen, 0);
150 if (ret < 0) {
151 error_setg_errno(errp, -ret, "Could not zero fill encryption header");
152 return -1;
153 }
154
155 return 0;
156 }
157
158
159 static int qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset,
160 const uint8_t *buf, size_t buflen,
161 void *opaque, Error **errp)
162 {
163 BlockDriverState *bs = opaque;
164 BDRVQcow2State *s = bs->opaque;
165 ssize_t ret;
166
167 if ((offset + buflen) > s->crypto_header.length) {
168 error_setg(errp, "Request for data outside of extension header");
169 return -1;
170 }
171
172 ret = bdrv_pwrite(bs->file, s->crypto_header.offset + offset, buflen, buf,
173 0);
174 if (ret < 0) {
175 error_setg_errno(errp, -ret, "Could not write encryption header");
176 return -1;
177 }
178 return 0;
179 }
180
181 static QDict*
182 qcow2_extract_crypto_opts(QemuOpts *opts, const char *fmt, Error **errp)
183 {
184 QDict *cryptoopts_qdict;
185 QDict *opts_qdict;
186
187 /* Extract "encrypt." options into a qdict */
188 opts_qdict = qemu_opts_to_qdict(opts, NULL);
189 qdict_extract_subqdict(opts_qdict, &cryptoopts_qdict, "encrypt.");
190 qobject_unref(opts_qdict);
191 qdict_put_str(cryptoopts_qdict, "format", fmt);
192 return cryptoopts_qdict;
193 }
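/*
 * Illustrative sketch (hypothetical values): called with fmt = "luks" and
 * runtime options containing "encrypt.key-secret=sec0",
 * qemu_opts_to_qdict() yields a flat dict, qdict_extract_subqdict() pulls
 * out the "encrypt." entries with the prefix stripped, and qdict_put_str()
 * adds the format, so the returned qdict is roughly:
 *
 *   { "key-secret": "sec0", "format": "luks" }
 */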
194
195 /*
196 * Read qcow2 extensions and fill bs.
197 * Start reading from start_offset.
198 * Finish reading upon a magic value of 0 or when end_offset is reached.
199 * Unknown magics are skipped (future extensions this version knows nothing about).
200 * Return 0 upon success, non-0 otherwise.
201 */
202 static int coroutine_fn GRAPH_RDLOCK
203 qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
204 uint64_t end_offset, void **p_feature_table,
205 int flags, bool *need_update_header, Error **errp)
206 {
207 BDRVQcow2State *s = bs->opaque;
208 QCowExtension ext;
209 uint64_t offset;
210 int ret;
211 Qcow2BitmapHeaderExt bitmaps_ext;
212
213 if (need_update_header != NULL) {
214 *need_update_header = false;
215 }
216
217 #ifdef DEBUG_EXT
218 printf("qcow2_read_extensions: start=%" PRIu64 " end=%" PRIu64 "\n", start_offset, end_offset);
219 #endif
220 offset = start_offset;
221 while (offset < end_offset) {
222
223 #ifdef DEBUG_EXT
224 /* Sanity check */
225 if (offset > s->cluster_size)
226 printf("qcow2_read_extension: suspicious offset %" PRIu64 "\n", offset);
227
228 printf("attempting to read extended header in offset %" PRIu64 "\n", offset);
229 #endif
230
231 ret = bdrv_co_pread(bs->file, offset, sizeof(ext), &ext, 0);
232 if (ret < 0) {
233 error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: "
234 "pread fail from offset %" PRIu64, offset);
235 return 1;
236 }
237 ext.magic = be32_to_cpu(ext.magic);
238 ext.len = be32_to_cpu(ext.len);
239 offset += sizeof(ext);
240 #ifdef DEBUG_EXT
241 printf("ext.magic = 0x%x\n", ext.magic);
242 #endif
243 if (offset > end_offset || ext.len > end_offset - offset) {
244 error_setg(errp, "Header extension too large");
245 return -EINVAL;
246 }
247
248 switch (ext.magic) {
249 case QCOW2_EXT_MAGIC_END:
250 return 0;
251
252 case QCOW2_EXT_MAGIC_BACKING_FORMAT:
253 if (ext.len >= sizeof(bs->backing_format)) {
254 error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32
255 " too large (>=%zu)", ext.len,
256 sizeof(bs->backing_format));
257 return 2;
258 }
259 ret = bdrv_co_pread(bs->file, offset, ext.len, bs->backing_format, 0);
260 if (ret < 0) {
261 error_setg_errno(errp, -ret, "ERROR: ext_backing_format: "
262 "Could not read format name");
263 return 3;
264 }
265 bs->backing_format[ext.len] = '\0';
266 s->image_backing_format = g_strdup(bs->backing_format);
267 #ifdef DEBUG_EXT
268 printf("Qcow2: Got format extension %s\n", bs->backing_format);
269 #endif
270 break;
271
272 case QCOW2_EXT_MAGIC_FEATURE_TABLE:
273 if (p_feature_table != NULL) {
274 void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
275 ret = bdrv_co_pread(bs->file, offset, ext.len, feature_table, 0);
276 if (ret < 0) {
277 error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
278 "Could not read table");
279 g_free(feature_table);
280 return ret;
281 }
282
283 *p_feature_table = feature_table;
284 }
285 break;
286
287 case QCOW2_EXT_MAGIC_CRYPTO_HEADER: {
288 unsigned int cflags = 0;
289 if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
290 error_setg(errp, "CRYPTO header extension only "
291 "expected with LUKS encryption method");
292 return -EINVAL;
293 }
294 if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) {
295 error_setg(errp, "CRYPTO header extension size %u, "
296 "but expected size %zu", ext.len,
297 sizeof(Qcow2CryptoHeaderExtension));
298 return -EINVAL;
299 }
300
301 ret = bdrv_co_pread(bs->file, offset, ext.len, &s->crypto_header, 0);
302 if (ret < 0) {
303 error_setg_errno(errp, -ret,
304 "Unable to read CRYPTO header extension");
305 return ret;
306 }
307 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset);
308 s->crypto_header.length = be64_to_cpu(s->crypto_header.length);
309
310 if ((s->crypto_header.offset % s->cluster_size) != 0) {
311 error_setg(errp, "Encryption header offset '%" PRIu64 "' is "
312 "not a multiple of cluster size '%u'",
313 s->crypto_header.offset, s->cluster_size);
314 return -EINVAL;
315 }
316
317 if (flags & BDRV_O_NO_IO) {
318 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
319 }
320 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
321 qcow2_crypto_hdr_read_func,
322 bs, cflags, QCOW2_MAX_THREADS, errp);
323 if (!s->crypto) {
324 return -EINVAL;
325 }
326 } break;
327
328 case QCOW2_EXT_MAGIC_BITMAPS:
329 if (ext.len != sizeof(bitmaps_ext)) {
330 error_setg(errp, "bitmaps_ext: "
331 "Invalid extension length");
332 return -EINVAL;
333 }
334
335 if (!(s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS)) {
336 if (s->qcow_version < 3) {
337 /* Let's be a bit more specific */
338 warn_report("This qcow2 v2 image contains bitmaps, but "
339 "they may have been modified by a program "
340 "without persistent bitmap support; so now "
341 "they must all be considered inconsistent");
342 } else {
343 warn_report("a program lacking bitmap support "
344 "modified this file, so all bitmaps are now "
345 "considered inconsistent");
346 }
347 error_printf("Some clusters may be leaked, "
348 "run 'qemu-img check -r' on the image "
349 "file to fix.");
350 if (need_update_header != NULL) {
351 /* Updating is needed to drop invalid bitmap extension. */
352 *need_update_header = true;
353 }
354 break;
355 }
356
357 ret = bdrv_co_pread(bs->file, offset, ext.len, &bitmaps_ext, 0);
358 if (ret < 0) {
359 error_setg_errno(errp, -ret, "bitmaps_ext: "
360 "Could not read ext header");
361 return ret;
362 }
363
364 if (bitmaps_ext.reserved32 != 0) {
365 error_setg(errp, "bitmaps_ext: "
366 "Reserved field is not zero");
367 return -EINVAL;
368 }
369
370 bitmaps_ext.nb_bitmaps = be32_to_cpu(bitmaps_ext.nb_bitmaps);
371 bitmaps_ext.bitmap_directory_size =
372 be64_to_cpu(bitmaps_ext.bitmap_directory_size);
373 bitmaps_ext.bitmap_directory_offset =
374 be64_to_cpu(bitmaps_ext.bitmap_directory_offset);
375
376 if (bitmaps_ext.nb_bitmaps > QCOW2_MAX_BITMAPS) {
377 error_setg(errp,
378 "bitmaps_ext: Image has %" PRIu32 " bitmaps, "
379 "exceeding the QEMU supported maximum of %d",
380 bitmaps_ext.nb_bitmaps, QCOW2_MAX_BITMAPS);
381 return -EINVAL;
382 }
383
384 if (bitmaps_ext.nb_bitmaps == 0) {
385 error_setg(errp, "found bitmaps extension with zero bitmaps");
386 return -EINVAL;
387 }
388
389 if (offset_into_cluster(s, bitmaps_ext.bitmap_directory_offset)) {
390 error_setg(errp, "bitmaps_ext: "
391 "invalid bitmap directory offset");
392 return -EINVAL;
393 }
394
395 if (bitmaps_ext.bitmap_directory_size >
396 QCOW2_MAX_BITMAP_DIRECTORY_SIZE) {
397 error_setg(errp, "bitmaps_ext: "
398 "bitmap directory size (%" PRIu64 ") exceeds "
399 "the maximum supported size (%d)",
400 bitmaps_ext.bitmap_directory_size,
401 QCOW2_MAX_BITMAP_DIRECTORY_SIZE);
402 return -EINVAL;
403 }
404
405 s->nb_bitmaps = bitmaps_ext.nb_bitmaps;
406 s->bitmap_directory_offset =
407 bitmaps_ext.bitmap_directory_offset;
408 s->bitmap_directory_size =
409 bitmaps_ext.bitmap_directory_size;
410
411 #ifdef DEBUG_EXT
412 printf("Qcow2: Got bitmaps extension: "
413 "offset=%" PRIu64 " nb_bitmaps=%" PRIu32 "\n",
414 s->bitmap_directory_offset, s->nb_bitmaps);
415 #endif
416 break;
417
418 case QCOW2_EXT_MAGIC_DATA_FILE:
419 {
420 s->image_data_file = g_malloc0(ext.len + 1);
421 ret = bdrv_co_pread(bs->file, offset, ext.len, s->image_data_file, 0);
422 if (ret < 0) {
423 error_setg_errno(errp, -ret,
424 "ERROR: Could not read data file name");
425 return ret;
426 }
427 #ifdef DEBUG_EXT
428 printf("Qcow2: Got external data file %s\n", s->image_data_file);
429 #endif
430 break;
431 }
432
433 default:
434 /* unknown magic - save it in case we need to rewrite the header */
435 /* If you add a new feature, make sure to also update the fast
436 * path of qcow2_make_empty() to deal with it. */
437 {
438 Qcow2UnknownHeaderExtension *uext;
439
440 uext = g_malloc0(sizeof(*uext) + ext.len);
441 uext->magic = ext.magic;
442 uext->len = ext.len;
443 QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);
444
445 ret = bdrv_co_pread(bs->file, offset, uext->len, uext->data, 0);
446 if (ret < 0) {
447 error_setg_errno(errp, -ret, "ERROR: unknown extension: "
448 "Could not read data");
449 return ret;
450 }
451 }
452 break;
453 }
454
455 offset += ((ext.len + 7) & ~7);
456 }
457
458 return 0;
459 }
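/*
 * Illustrative note: each header extension handled in the loop above is a
 * QCowExtension record followed by its payload, padded to a multiple of 8
 * bytes. For a hypothetical backing-format extension carrying the 3-byte
 * string "raw":
 *
 *   magic   0xe2792aca   (QCOW2_EXT_MAGIC_BACKING_FORMAT, big-endian)
 *   len     3
 *   data    'r' 'a' 'w'  followed by 5 bytes of padding
 *
 * so the cursor advances by sizeof(ext) + ((3 + 7) & ~7) = 8 + 8 = 16 bytes.
 */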
460
461 static void cleanup_unknown_header_ext(BlockDriverState *bs)
462 {
463 BDRVQcow2State *s = bs->opaque;
464 Qcow2UnknownHeaderExtension *uext, *next;
465
466 QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) {
467 QLIST_REMOVE(uext, next);
468 g_free(uext);
469 }
470 }
471
472 static void report_unsupported_feature(Error **errp, Qcow2Feature *table,
473 uint64_t mask)
474 {
475 g_autoptr(GString) features = g_string_sized_new(60);
476
477 while (table && table->name[0] != '\0') {
478 if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) {
479 if (mask & (1ULL << table->bit)) {
480 if (features->len > 0) {
481 g_string_append(features, ", ");
482 }
483 g_string_append_printf(features, "%.46s", table->name);
484 mask &= ~(1ULL << table->bit);
485 }
486 }
487 table++;
488 }
489
490 if (mask) {
491 if (features->len > 0) {
492 g_string_append(features, ", ");
493 }
494 g_string_append_printf(features,
495 "Unknown incompatible feature: %" PRIx64, mask);
496 }
497
498 error_setg(errp, "Unsupported qcow2 feature(s): %s", features->str);
499 }
500
501 /*
502 * Sets the dirty bit and flushes afterwards if necessary.
503 *
504 * The incompatible_features bit is only set if the image file header was
505 * updated successfully. Therefore it is not required to check the return
506 * value of this function.
507 */
508 int qcow2_mark_dirty(BlockDriverState *bs)
509 {
510 BDRVQcow2State *s = bs->opaque;
511 uint64_t val;
512 int ret;
513
514 assert(s->qcow_version >= 3);
515
516 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
517 return 0; /* already dirty */
518 }
519
520 val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY);
521 ret = bdrv_pwrite_sync(bs->file,
522 offsetof(QCowHeader, incompatible_features),
523 sizeof(val), &val, 0);
524 if (ret < 0) {
525 return ret;
526 }
527
528 /* Only treat image as dirty if the header was updated successfully */
529 s->incompatible_features |= QCOW2_INCOMPAT_DIRTY;
530 return 0;
531 }
532
533 /*
534 * Clears the dirty bit and flushes beforehand if necessary. Only call this
535 * function when there are no pending requests; it does not guard against
536 * concurrent requests dirtying the image.
537 */
538 static int qcow2_mark_clean(BlockDriverState *bs)
539 {
540 BDRVQcow2State *s = bs->opaque;
541
542 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
543 int ret;
544
545 s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY;
546
547 ret = qcow2_flush_caches(bs);
548 if (ret < 0) {
549 return ret;
550 }
551
552 return qcow2_update_header(bs);
553 }
554 return 0;
555 }
556
557 /*
558 * Marks the image as corrupt.
559 */
560 int qcow2_mark_corrupt(BlockDriverState *bs)
561 {
562 BDRVQcow2State *s = bs->opaque;
563
564 s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT;
565 return qcow2_update_header(bs);
566 }
567
568 /*
569 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes
570 * beforehand if necessary.
571 */
572 int qcow2_mark_consistent(BlockDriverState *bs)
573 {
574 BDRVQcow2State *s = bs->opaque;
575
576 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
577 int ret = qcow2_flush_caches(bs);
578 if (ret < 0) {
579 return ret;
580 }
581
582 s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT;
583 return qcow2_update_header(bs);
584 }
585 return 0;
586 }
587
588 static void qcow2_add_check_result(BdrvCheckResult *out,
589 const BdrvCheckResult *src,
590 bool set_allocation_info)
591 {
592 out->corruptions += src->corruptions;
593 out->leaks += src->leaks;
594 out->check_errors += src->check_errors;
595 out->corruptions_fixed += src->corruptions_fixed;
596 out->leaks_fixed += src->leaks_fixed;
597
598 if (set_allocation_info) {
599 out->image_end_offset = src->image_end_offset;
600 out->bfi = src->bfi;
601 }
602 }
603
604 static int coroutine_fn GRAPH_RDLOCK
605 qcow2_co_check_locked(BlockDriverState *bs, BdrvCheckResult *result,
606 BdrvCheckMode fix)
607 {
608 BdrvCheckResult snapshot_res = {};
609 BdrvCheckResult refcount_res = {};
610 int ret;
611
612 memset(result, 0, sizeof(*result));
613
614 ret = qcow2_check_read_snapshot_table(bs, &snapshot_res, fix);
615 if (ret < 0) {
616 qcow2_add_check_result(result, &snapshot_res, false);
617 return ret;
618 }
619
620 ret = qcow2_check_refcounts(bs, &refcount_res, fix);
621 qcow2_add_check_result(result, &refcount_res, true);
622 if (ret < 0) {
623 qcow2_add_check_result(result, &snapshot_res, false);
624 return ret;
625 }
626
627 ret = qcow2_check_fix_snapshot_table(bs, &snapshot_res, fix);
628 qcow2_add_check_result(result, &snapshot_res, false);
629 if (ret < 0) {
630 return ret;
631 }
632
633 if (fix && result->check_errors == 0 && result->corruptions == 0) {
634 ret = qcow2_mark_clean(bs);
635 if (ret < 0) {
636 return ret;
637 }
638 return qcow2_mark_consistent(bs);
639 }
640 return ret;
641 }
642
643 static int coroutine_fn GRAPH_RDLOCK
644 qcow2_co_check(BlockDriverState *bs, BdrvCheckResult *result,
645 BdrvCheckMode fix)
646 {
647 BDRVQcow2State *s = bs->opaque;
648 int ret;
649
650 qemu_co_mutex_lock(&s->lock);
651 ret = qcow2_co_check_locked(bs, result, fix);
652 qemu_co_mutex_unlock(&s->lock);
653 return ret;
654 }
655
656 int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
657 uint64_t entries, size_t entry_len,
658 int64_t max_size_bytes, const char *table_name,
659 Error **errp)
660 {
661 BDRVQcow2State *s = bs->opaque;
662
663 if (entries > max_size_bytes / entry_len) {
664 error_setg(errp, "%s too large", table_name);
665 return -EFBIG;
666 }
667
668 /* Use signed INT64_MAX as the maximum even for uint64_t header fields,
669 * because values will be passed to qemu functions taking int64_t. */
670 if ((INT64_MAX - entries * entry_len < offset) ||
671 (offset_into_cluster(s, offset) != 0)) {
672 error_setg(errp, "%s offset invalid", table_name);
673 return -EINVAL;
674 }
675
676 return 0;
677 }
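/*
 * Worked example (hypothetical numbers): validating an L1 table with
 * entries = 4, entry_len = L1E_SIZE (8 bytes) and a 64 KiB cluster size.
 * The first check requires 4 <= max_size_bytes / 8; the second rewrites
 * "offset + 4 * 8 > INT64_MAX" as "INT64_MAX - 4 * 8 < offset" so the
 * addition cannot overflow, and offset_into_cluster() additionally rejects
 * any offset that is not a multiple of 65536.
 */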
678
679 static const char *const mutable_opts[] = {
680 QCOW2_OPT_LAZY_REFCOUNTS,
681 QCOW2_OPT_DISCARD_REQUEST,
682 QCOW2_OPT_DISCARD_SNAPSHOT,
683 QCOW2_OPT_DISCARD_OTHER,
684 QCOW2_OPT_OVERLAP,
685 QCOW2_OPT_OVERLAP_TEMPLATE,
686 QCOW2_OPT_OVERLAP_MAIN_HEADER,
687 QCOW2_OPT_OVERLAP_ACTIVE_L1,
688 QCOW2_OPT_OVERLAP_ACTIVE_L2,
689 QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
690 QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
691 QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
692 QCOW2_OPT_OVERLAP_INACTIVE_L1,
693 QCOW2_OPT_OVERLAP_INACTIVE_L2,
694 QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
695 QCOW2_OPT_CACHE_SIZE,
696 QCOW2_OPT_L2_CACHE_SIZE,
697 QCOW2_OPT_L2_CACHE_ENTRY_SIZE,
698 QCOW2_OPT_REFCOUNT_CACHE_SIZE,
699 QCOW2_OPT_CACHE_CLEAN_INTERVAL,
700 NULL
701 };
702
703 static QemuOptsList qcow2_runtime_opts = {
704 .name = "qcow2",
705 .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head),
706 .desc = {
707 {
708 .name = QCOW2_OPT_LAZY_REFCOUNTS,
709 .type = QEMU_OPT_BOOL,
710 .help = "Postpone refcount updates",
711 },
712 {
713 .name = QCOW2_OPT_DISCARD_REQUEST,
714 .type = QEMU_OPT_BOOL,
715 .help = "Pass guest discard requests to the layer below",
716 },
717 {
718 .name = QCOW2_OPT_DISCARD_SNAPSHOT,
719 .type = QEMU_OPT_BOOL,
720 .help = "Generate discard requests when snapshot related space "
721 "is freed",
722 },
723 {
724 .name = QCOW2_OPT_DISCARD_OTHER,
725 .type = QEMU_OPT_BOOL,
726 .help = "Generate discard requests when other clusters are freed",
727 },
728 {
729 .name = QCOW2_OPT_OVERLAP,
730 .type = QEMU_OPT_STRING,
731 .help = "Selects which overlap checks to perform from a range of "
732 "templates (none, constant, cached, all)",
733 },
734 {
735 .name = QCOW2_OPT_OVERLAP_TEMPLATE,
736 .type = QEMU_OPT_STRING,
737 .help = "Selects which overlap checks to perform from a range of "
738 "templates (none, constant, cached, all)",
739 },
740 {
741 .name = QCOW2_OPT_OVERLAP_MAIN_HEADER,
742 .type = QEMU_OPT_BOOL,
743 .help = "Check for unintended writes into the main qcow2 header",
744 },
745 {
746 .name = QCOW2_OPT_OVERLAP_ACTIVE_L1,
747 .type = QEMU_OPT_BOOL,
748 .help = "Check for unintended writes into the active L1 table",
749 },
750 {
751 .name = QCOW2_OPT_OVERLAP_ACTIVE_L2,
752 .type = QEMU_OPT_BOOL,
753 .help = "Check for unintended writes into an active L2 table",
754 },
755 {
756 .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
757 .type = QEMU_OPT_BOOL,
758 .help = "Check for unintended writes into the refcount table",
759 },
760 {
761 .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
762 .type = QEMU_OPT_BOOL,
763 .help = "Check for unintended writes into a refcount block",
764 },
765 {
766 .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
767 .type = QEMU_OPT_BOOL,
768 .help = "Check for unintended writes into the snapshot table",
769 },
770 {
771 .name = QCOW2_OPT_OVERLAP_INACTIVE_L1,
772 .type = QEMU_OPT_BOOL,
773 .help = "Check for unintended writes into an inactive L1 table",
774 },
775 {
776 .name = QCOW2_OPT_OVERLAP_INACTIVE_L2,
777 .type = QEMU_OPT_BOOL,
778 .help = "Check for unintended writes into an inactive L2 table",
779 },
780 {
781 .name = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
782 .type = QEMU_OPT_BOOL,
783 .help = "Check for unintended writes into the bitmap directory",
784 },
785 {
786 .name = QCOW2_OPT_CACHE_SIZE,
787 .type = QEMU_OPT_SIZE,
788 .help = "Maximum combined metadata (L2 tables and refcount blocks) "
789 "cache size",
790 },
791 {
792 .name = QCOW2_OPT_L2_CACHE_SIZE,
793 .type = QEMU_OPT_SIZE,
794 .help = "Maximum L2 table cache size",
795 },
796 {
797 .name = QCOW2_OPT_L2_CACHE_ENTRY_SIZE,
798 .type = QEMU_OPT_SIZE,
799 .help = "Size of each entry in the L2 cache",
800 },
801 {
802 .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE,
803 .type = QEMU_OPT_SIZE,
804 .help = "Maximum refcount block cache size",
805 },
806 {
807 .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL,
808 .type = QEMU_OPT_NUMBER,
809 .help = "Clean unused cache entries after this time (in seconds)",
810 },
811 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",
812 "ID of secret providing qcow2 AES key or LUKS passphrase"),
813 { /* end of list */ }
814 },
815 };
816
817 static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = {
818 [QCOW2_OL_MAIN_HEADER_BITNR] = QCOW2_OPT_OVERLAP_MAIN_HEADER,
819 [QCOW2_OL_ACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L1,
820 [QCOW2_OL_ACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L2,
821 [QCOW2_OL_REFCOUNT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
822 [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
823 [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
824 [QCOW2_OL_INACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L1,
825 [QCOW2_OL_INACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L2,
826 [QCOW2_OL_BITMAP_DIRECTORY_BITNR] = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
827 };
828
829 static void cache_clean_timer_cb(void *opaque)
830 {
831 BlockDriverState *bs = opaque;
832 BDRVQcow2State *s = bs->opaque;
833 qcow2_cache_clean_unused(s->l2_table_cache);
834 qcow2_cache_clean_unused(s->refcount_block_cache);
835 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
836 (int64_t) s->cache_clean_interval * 1000);
837 }
838
839 static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context)
840 {
841 BDRVQcow2State *s = bs->opaque;
842 if (s->cache_clean_interval > 0) {
843 s->cache_clean_timer =
844 aio_timer_new_with_attrs(context, QEMU_CLOCK_VIRTUAL,
845 SCALE_MS, QEMU_TIMER_ATTR_EXTERNAL,
846 cache_clean_timer_cb, bs);
847 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
848 (int64_t) s->cache_clean_interval * 1000);
849 }
850 }
851
852 static void cache_clean_timer_del(BlockDriverState *bs)
853 {
854 BDRVQcow2State *s = bs->opaque;
855 if (s->cache_clean_timer) {
856 timer_free(s->cache_clean_timer);
857 s->cache_clean_timer = NULL;
858 }
859 }
860
861 static void qcow2_detach_aio_context(BlockDriverState *bs)
862 {
863 cache_clean_timer_del(bs);
864 }
865
866 static void qcow2_attach_aio_context(BlockDriverState *bs,
867 AioContext *new_context)
868 {
869 cache_clean_timer_init(bs, new_context);
870 }
871
872 static bool read_cache_sizes(BlockDriverState *bs, QemuOpts *opts,
873 uint64_t *l2_cache_size,
874 uint64_t *l2_cache_entry_size,
875 uint64_t *refcount_cache_size, Error **errp)
876 {
877 BDRVQcow2State *s = bs->opaque;
878 uint64_t combined_cache_size, l2_cache_max_setting;
879 bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set;
880 bool l2_cache_entry_size_set;
881 int min_refcount_cache = MIN_REFCOUNT_CACHE_SIZE * s->cluster_size;
882 uint64_t virtual_disk_size = bs->total_sectors * BDRV_SECTOR_SIZE;
883 uint64_t max_l2_entries = DIV_ROUND_UP(virtual_disk_size, s->cluster_size);
884 /* An L2 table is always one cluster in size so the max cache size
885 * should be a multiple of the cluster size. */
886 uint64_t max_l2_cache = ROUND_UP(max_l2_entries * l2_entry_size(s),
887 s->cluster_size);
888
889 combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE);
890 l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE);
891 refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
892 l2_cache_entry_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE);
893
894 combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0);
895 l2_cache_max_setting = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE,
896 DEFAULT_L2_CACHE_MAX_SIZE);
897 *refcount_cache_size = qemu_opt_get_size(opts,
898 QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0);
899
900 *l2_cache_entry_size = qemu_opt_get_size(
901 opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE, s->cluster_size);
902
903 *l2_cache_size = MIN(max_l2_cache, l2_cache_max_setting);
904
905 if (combined_cache_size_set) {
906 if (l2_cache_size_set && refcount_cache_size_set) {
907 error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE
908 " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set "
909 "at the same time");
910 return false;
911 } else if (l2_cache_size_set &&
912 (l2_cache_max_setting > combined_cache_size)) {
913 error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed "
914 QCOW2_OPT_CACHE_SIZE);
915 return false;
916 } else if (*refcount_cache_size > combined_cache_size) {
917 error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed "
918 QCOW2_OPT_CACHE_SIZE);
919 return false;
920 }
921
922 if (l2_cache_size_set) {
923 *refcount_cache_size = combined_cache_size - *l2_cache_size;
924 } else if (refcount_cache_size_set) {
925 *l2_cache_size = combined_cache_size - *refcount_cache_size;
926 } else {
927 /* Assign as much memory as possible to the L2 cache, and
928 * use the remainder for the refcount cache */
929 if (combined_cache_size >= max_l2_cache + min_refcount_cache) {
930 *l2_cache_size = max_l2_cache;
931 *refcount_cache_size = combined_cache_size - *l2_cache_size;
932 } else {
933 *refcount_cache_size =
934 MIN(combined_cache_size, min_refcount_cache);
935 *l2_cache_size = combined_cache_size - *refcount_cache_size;
936 }
937 }
938 }
939
940 /*
941 * If the L2 cache is not enough to cover the whole disk then
942 * default to 4KB entries. Smaller entries reduce the cost of
943 * loads and evictions and increase I/O performance.
944 */
945 if (*l2_cache_size < max_l2_cache && !l2_cache_entry_size_set) {
946 *l2_cache_entry_size = MIN(s->cluster_size, 4096);
947 }
948
949 /* l2_cache_size and refcount_cache_size are ensured to have at least
950 * their minimum values in qcow2_update_options_prepare() */
951
952 if (*l2_cache_entry_size < (1 << MIN_CLUSTER_BITS) ||
953 *l2_cache_entry_size > s->cluster_size ||
954 !is_power_of_2(*l2_cache_entry_size)) {
955 error_setg(errp, "L2 cache entry size must be a power of two "
956 "between %d and the cluster size (%d)",
957 1 << MIN_CLUSTER_BITS, s->cluster_size);
958 return false;
959 }
960
961 return true;
962 }
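/*
 * Worked example (hypothetical image): a 100 GiB disk with 64 KiB clusters
 * and 8-byte L2 entries needs max_l2_entries = 100 GiB / 64 KiB = 1638400
 * entries, i.e. max_l2_cache = 1638400 * 8 = 12.5 MiB (already a multiple
 * of the cluster size). If neither cache-size nor l2-cache-size is given,
 * *l2_cache_size becomes MIN(12.5 MiB, DEFAULT_L2_CACHE_MAX_SIZE); if that
 * covers the whole disk the entry size stays at one cluster, otherwise the
 * entry size defaults to 4 KiB as described in the comment above.
 */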
963
964 typedef struct Qcow2ReopenState {
965 Qcow2Cache *l2_table_cache;
966 Qcow2Cache *refcount_block_cache;
967 int l2_slice_size; /* Number of entries in a slice of the L2 table */
968 bool use_lazy_refcounts;
969 int overlap_check;
970 bool discard_passthrough[QCOW2_DISCARD_MAX];
971 uint64_t cache_clean_interval;
972 QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */
973 } Qcow2ReopenState;
974
975 static int qcow2_update_options_prepare(BlockDriverState *bs,
976 Qcow2ReopenState *r,
977 QDict *options, int flags,
978 Error **errp)
979 {
980 BDRVQcow2State *s = bs->opaque;
981 QemuOpts *opts = NULL;
982 const char *opt_overlap_check, *opt_overlap_check_template;
983 int overlap_check_template = 0;
984 uint64_t l2_cache_size, l2_cache_entry_size, refcount_cache_size;
985 int i;
986 const char *encryptfmt;
987 QDict *encryptopts = NULL;
988 int ret;
989
990 qdict_extract_subqdict(options, &encryptopts, "encrypt.");
991 encryptfmt = qdict_get_try_str(encryptopts, "format");
992
993 opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort);
994 if (!qemu_opts_absorb_qdict(opts, options, errp)) {
995 ret = -EINVAL;
996 goto fail;
997 }
998
999 /* get L2 table/refcount block cache size from command line options */
1000 if (!read_cache_sizes(bs, opts, &l2_cache_size, &l2_cache_entry_size,
1001 &refcount_cache_size, errp)) {
1002 ret = -EINVAL;
1003 goto fail;
1004 }
1005
1006 l2_cache_size /= l2_cache_entry_size;
1007 if (l2_cache_size < MIN_L2_CACHE_SIZE) {
1008 l2_cache_size = MIN_L2_CACHE_SIZE;
1009 }
1010 if (l2_cache_size > INT_MAX) {
1011 error_setg(errp, "L2 cache size too big");
1012 ret = -EINVAL;
1013 goto fail;
1014 }
1015
1016 refcount_cache_size /= s->cluster_size;
1017 if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) {
1018 refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE;
1019 }
1020 if (refcount_cache_size > INT_MAX) {
1021 error_setg(errp, "Refcount cache size too big");
1022 ret = -EINVAL;
1023 goto fail;
1024 }
1025
1026 /* alloc new L2 table/refcount block cache, flush old one */
1027 if (s->l2_table_cache) {
1028 ret = qcow2_cache_flush(bs, s->l2_table_cache);
1029 if (ret) {
1030 error_setg_errno(errp, -ret, "Failed to flush the L2 table cache");
1031 goto fail;
1032 }
1033 }
1034
1035 if (s->refcount_block_cache) {
1036 ret = qcow2_cache_flush(bs, s->refcount_block_cache);
1037 if (ret) {
1038 error_setg_errno(errp, -ret,
1039 "Failed to flush the refcount block cache");
1040 goto fail;
1041 }
1042 }
1043
1044 r->l2_slice_size = l2_cache_entry_size / l2_entry_size(s);
1045 r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size,
1046 l2_cache_entry_size);
1047 r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size,
1048 s->cluster_size);
1049 if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) {
1050 error_setg(errp, "Could not allocate metadata caches");
1051 ret = -ENOMEM;
1052 goto fail;
1053 }
1054
1055 /* New interval for cache cleanup timer */
1056 r->cache_clean_interval =
1057 qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL,
1058 DEFAULT_CACHE_CLEAN_INTERVAL);
1059 #ifndef CONFIG_LINUX
1060 if (r->cache_clean_interval != 0) {
1061 error_setg(errp, QCOW2_OPT_CACHE_CLEAN_INTERVAL
1062 " not supported on this host");
1063 ret = -EINVAL;
1064 goto fail;
1065 }
1066 #endif
1067 if (r->cache_clean_interval > UINT_MAX) {
1068 error_setg(errp, "Cache clean interval too big");
1069 ret = -EINVAL;
1070 goto fail;
1071 }
1072
1073 /* lazy-refcounts; flush if going from enabled to disabled */
1074 r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,
1075 (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));
1076 if (r->use_lazy_refcounts && s->qcow_version < 3) {
1077 error_setg(errp, "Lazy refcounts require a qcow2 image with at least "
1078 "qemu 1.1 compatibility level");
1079 ret = -EINVAL;
1080 goto fail;
1081 }
1082
1083 if (s->use_lazy_refcounts && !r->use_lazy_refcounts) {
1084 ret = qcow2_mark_clean(bs);
1085 if (ret < 0) {
1086 error_setg_errno(errp, -ret, "Failed to disable lazy refcounts");
1087 goto fail;
1088 }
1089 }
1090
1091 /* Overlap check options */
1092 opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP);
1093 opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE);
1094 if (opt_overlap_check_template && opt_overlap_check &&
1095 strcmp(opt_overlap_check_template, opt_overlap_check))
1096 {
1097 error_setg(errp, "Conflicting values for qcow2 options '"
1098 QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE
1099 "' ('%s')", opt_overlap_check, opt_overlap_check_template);
1100 ret = -EINVAL;
1101 goto fail;
1102 }
1103 if (!opt_overlap_check) {
1104 opt_overlap_check = opt_overlap_check_template ?: "cached";
1105 }
1106
1107 if (!strcmp(opt_overlap_check, "none")) {
1108 overlap_check_template = 0;
1109 } else if (!strcmp(opt_overlap_check, "constant")) {
1110 overlap_check_template = QCOW2_OL_CONSTANT;
1111 } else if (!strcmp(opt_overlap_check, "cached")) {
1112 overlap_check_template = QCOW2_OL_CACHED;
1113 } else if (!strcmp(opt_overlap_check, "all")) {
1114 overlap_check_template = QCOW2_OL_ALL;
1115 } else {
1116 error_setg(errp, "Unsupported value '%s' for qcow2 option "
1117 "'overlap-check'. Allowed are any of the following: "
1118 "none, constant, cached, all", opt_overlap_check);
1119 ret = -EINVAL;
1120 goto fail;
1121 }
1122
1123 r->overlap_check = 0;
1124 for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) {
1125 /* overlap-check defines a template bitmask, but every flag may be
1126 * overwritten through the associated boolean option */
1127 r->overlap_check |=
1128 qemu_opt_get_bool(opts, overlap_bool_option_names[i],
1129 overlap_check_template & (1 << i)) << i;
1130 }
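/*
 * Illustrative example (hypothetical options): with overlap-check=cached the
 * template starts as QCOW2_OL_CACHED; adding overlap-check.inactive-l2=on
 * then sets the QCOW2_OL_INACTIVE_L2 bit on top of the template, while
 * overlap-check.main-header=off clears the QCOW2_OL_MAIN_HEADER bit even
 * though the template enables it.
 */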
1131
1132 r->discard_passthrough[QCOW2_DISCARD_NEVER] = false;
1133 r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true;
1134 r->discard_passthrough[QCOW2_DISCARD_REQUEST] =
1135 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST,
1136 flags & BDRV_O_UNMAP);
1137 r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] =
1138 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true);
1139 r->discard_passthrough[QCOW2_DISCARD_OTHER] =
1140 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false);
1141
1142 switch (s->crypt_method_header) {
1143 case QCOW_CRYPT_NONE:
1144 if (encryptfmt) {
1145 error_setg(errp, "No encryption in image header, but options "
1146 "specified format '%s'", encryptfmt);
1147 ret = -EINVAL;
1148 goto fail;
1149 }
1150 break;
1151
1152 case QCOW_CRYPT_AES:
1153 if (encryptfmt && !g_str_equal(encryptfmt, "aes")) {
1154 error_setg(errp,
1155 "Header reported 'aes' encryption format but "
1156 "options specify '%s'", encryptfmt);
1157 ret = -EINVAL;
1158 goto fail;
1159 }
1160 qdict_put_str(encryptopts, "format", "qcow");
1161 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp);
1162 if (!r->crypto_opts) {
1163 ret = -EINVAL;
1164 goto fail;
1165 }
1166 break;
1167
1168 case QCOW_CRYPT_LUKS:
1169 if (encryptfmt && !g_str_equal(encryptfmt, "luks")) {
1170 error_setg(errp,
1171 "Header reported 'luks' encryption format but "
1172 "options specify '%s'", encryptfmt);
1173 ret = -EINVAL;
1174 goto fail;
1175 }
1176 qdict_put_str(encryptopts, "format", "luks");
1177 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp);
1178 if (!r->crypto_opts) {
1179 ret = -EINVAL;
1180 goto fail;
1181 }
1182 break;
1183
1184 default:
1185 error_setg(errp, "Unsupported encryption method %d",
1186 s->crypt_method_header);
1187 ret = -EINVAL;
1188 goto fail;
1189 }
1190
1191 ret = 0;
1192 fail:
1193 qobject_unref(encryptopts);
1194 qemu_opts_del(opts);
1195 opts = NULL;
1196 return ret;
1197 }
1198
1199 static void qcow2_update_options_commit(BlockDriverState *bs,
1200 Qcow2ReopenState *r)
1201 {
1202 BDRVQcow2State *s = bs->opaque;
1203 int i;
1204
1205 if (s->l2_table_cache) {
1206 qcow2_cache_destroy(s->l2_table_cache);
1207 }
1208 if (s->refcount_block_cache) {
1209 qcow2_cache_destroy(s->refcount_block_cache);
1210 }
1211 s->l2_table_cache = r->l2_table_cache;
1212 s->refcount_block_cache = r->refcount_block_cache;
1213 s->l2_slice_size = r->l2_slice_size;
1214
1215 s->overlap_check = r->overlap_check;
1216 s->use_lazy_refcounts = r->use_lazy_refcounts;
1217
1218 for (i = 0; i < QCOW2_DISCARD_MAX; i++) {
1219 s->discard_passthrough[i] = r->discard_passthrough[i];
1220 }
1221
1222 if (s->cache_clean_interval != r->cache_clean_interval) {
1223 cache_clean_timer_del(bs);
1224 s->cache_clean_interval = r->cache_clean_interval;
1225 cache_clean_timer_init(bs, bdrv_get_aio_context(bs));
1226 }
1227
1228 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
1229 s->crypto_opts = r->crypto_opts;
1230 }
1231
1232 static void qcow2_update_options_abort(BlockDriverState *bs,
1233 Qcow2ReopenState *r)
1234 {
1235 if (r->l2_table_cache) {
1236 qcow2_cache_destroy(r->l2_table_cache);
1237 }
1238 if (r->refcount_block_cache) {
1239 qcow2_cache_destroy(r->refcount_block_cache);
1240 }
1241 qapi_free_QCryptoBlockOpenOptions(r->crypto_opts);
1242 }
1243
1244 static int coroutine_fn
1245 qcow2_update_options(BlockDriverState *bs, QDict *options, int flags,
1246 Error **errp)
1247 {
1248 Qcow2ReopenState r = {};
1249 int ret;
1250
1251 ret = qcow2_update_options_prepare(bs, &r, options, flags, errp);
1252 if (ret >= 0) {
1253 qcow2_update_options_commit(bs, &r);
1254 } else {
1255 qcow2_update_options_abort(bs, &r);
1256 }
1257
1258 return ret;
1259 }
1260
1261 static int validate_compression_type(BDRVQcow2State *s, Error **errp)
1262 {
1263 switch (s->compression_type) {
1264 case QCOW2_COMPRESSION_TYPE_ZLIB:
1265 #ifdef CONFIG_ZSTD
1266 case QCOW2_COMPRESSION_TYPE_ZSTD:
1267 #endif
1268 break;
1269
1270 default:
1271 error_setg(errp, "qcow2: unknown compression type: %u",
1272 s->compression_type);
1273 return -ENOTSUP;
1274 }
1275
1276 /*
1277 * if the compression type differs from QCOW2_COMPRESSION_TYPE_ZLIB,
1278 * the incompatible feature flag must be set
1279 */
1280 if (s->compression_type == QCOW2_COMPRESSION_TYPE_ZLIB) {
1281 if (s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION) {
1282 error_setg(errp, "qcow2: Compression type incompatible feature "
1283 "bit must not be set");
1284 return -EINVAL;
1285 }
1286 } else {
1287 if (!(s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION)) {
1288 error_setg(errp, "qcow2: Compression type incompatible feature "
1289 "bit must be set");
1290 return -EINVAL;
1291 }
1292 }
1293
1294 return 0;
1295 }
1296
1297 /* Called with s->lock held. */
1298 static int coroutine_fn GRAPH_RDLOCK
1299 qcow2_do_open(BlockDriverState *bs, QDict *options, int flags,
1300 bool open_data_file, Error **errp)
1301 {
1302 ERRP_GUARD();
1303 BDRVQcow2State *s = bs->opaque;
1304 unsigned int len, i;
1305 int ret = 0;
1306 QCowHeader header;
1307 uint64_t ext_end;
1308 uint64_t l1_vm_state_index;
1309 bool update_header = false;
1310
1311 ret = bdrv_co_pread(bs->file, 0, sizeof(header), &header, 0);
1312 if (ret < 0) {
1313 error_setg_errno(errp, -ret, "Could not read qcow2 header");
1314 goto fail;
1315 }
1316 header.magic = be32_to_cpu(header.magic);
1317 header.version = be32_to_cpu(header.version);
1318 header.backing_file_offset = be64_to_cpu(header.backing_file_offset);
1319 header.backing_file_size = be32_to_cpu(header.backing_file_size);
1320 header.size = be64_to_cpu(header.size);
1321 header.cluster_bits = be32_to_cpu(header.cluster_bits);
1322 header.crypt_method = be32_to_cpu(header.crypt_method);
1323 header.l1_table_offset = be64_to_cpu(header.l1_table_offset);
1324 header.l1_size = be32_to_cpu(header.l1_size);
1325 header.refcount_table_offset = be64_to_cpu(header.refcount_table_offset);
1326 header.refcount_table_clusters =
1327 be32_to_cpu(header.refcount_table_clusters);
1328 header.snapshots_offset = be64_to_cpu(header.snapshots_offset);
1329 header.nb_snapshots = be32_to_cpu(header.nb_snapshots);
1330
1331 if (header.magic != QCOW_MAGIC) {
1332 error_setg(errp, "Image is not in qcow2 format");
1333 ret = -EINVAL;
1334 goto fail;
1335 }
1336 if (header.version < 2 || header.version > 3) {
1337 error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version);
1338 ret = -ENOTSUP;
1339 goto fail;
1340 }
1341
1342 s->qcow_version = header.version;
1343
1344 /* Initialise cluster size */
1345 if (header.cluster_bits < MIN_CLUSTER_BITS ||
1346 header.cluster_bits > MAX_CLUSTER_BITS) {
1347 error_setg(errp, "Unsupported cluster size: 2^%" PRIu32,
1348 header.cluster_bits);
1349 ret = -EINVAL;
1350 goto fail;
1351 }
1352
1353 s->cluster_bits = header.cluster_bits;
1354 s->cluster_size = 1 << s->cluster_bits;
1355
1356 /* Initialise version 3 header fields */
1357 if (header.version == 2) {
1358 header.incompatible_features = 0;
1359 header.compatible_features = 0;
1360 header.autoclear_features = 0;
1361 header.refcount_order = 4;
1362 header.header_length = 72;
1363 } else {
1364 header.incompatible_features =
1365 be64_to_cpu(header.incompatible_features);
1366 header.compatible_features = be64_to_cpu(header.compatible_features);
1367 header.autoclear_features = be64_to_cpu(header.autoclear_features);
1368 header.refcount_order = be32_to_cpu(header.refcount_order);
1369 header.header_length = be32_to_cpu(header.header_length);
1370
1371 if (header.header_length < 104) {
1372 error_setg(errp, "qcow2 header too short");
1373 ret = -EINVAL;
1374 goto fail;
1375 }
1376 }
1377
1378 if (header.header_length > s->cluster_size) {
1379 error_setg(errp, "qcow2 header exceeds cluster size");
1380 ret = -EINVAL;
1381 goto fail;
1382 }
1383
1384 if (header.header_length > sizeof(header)) {
1385 s->unknown_header_fields_size = header.header_length - sizeof(header);
1386 s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
1387 ret = bdrv_co_pread(bs->file, sizeof(header),
1388 s->unknown_header_fields_size,
1389 s->unknown_header_fields, 0);
1390 if (ret < 0) {
1391 error_setg_errno(errp, -ret, "Could not read unknown qcow2 header "
1392 "fields");
1393 goto fail;
1394 }
1395 }
1396
1397 if (header.backing_file_offset > s->cluster_size) {
1398 error_setg(errp, "Invalid backing file offset");
1399 ret = -EINVAL;
1400 goto fail;
1401 }
1402
1403 if (header.backing_file_offset) {
1404 ext_end = header.backing_file_offset;
1405 } else {
1406 ext_end = 1 << header.cluster_bits;
1407 }
1408
1409 /* Handle feature bits */
1410 s->incompatible_features = header.incompatible_features;
1411 s->compatible_features = header.compatible_features;
1412 s->autoclear_features = header.autoclear_features;
1413
1414 /*
1415 * Handle compression type
1416 * Older qcow2 images don't contain the compression type header.
1417 * Distinguish them by the header length and use
1418 * the only valid (default) compression type in that case
1419 */
1420 if (header.header_length > offsetof(QCowHeader, compression_type)) {
1421 s->compression_type = header.compression_type;
1422 } else {
1423 s->compression_type = QCOW2_COMPRESSION_TYPE_ZLIB;
1424 }
1425
1426 ret = validate_compression_type(s, errp);
1427 if (ret) {
1428 goto fail;
1429 }
1430
1431 if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
1432 void *feature_table = NULL;
1433 qcow2_read_extensions(bs, header.header_length, ext_end,
1434 &feature_table, flags, NULL, NULL);
1435 report_unsupported_feature(errp, feature_table,
1436 s->incompatible_features &
1437 ~QCOW2_INCOMPAT_MASK);
1438 ret = -ENOTSUP;
1439 g_free(feature_table);
1440 goto fail;
1441 }
1442
1443 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
1444 /* Corrupt images may not be written to unless they are being repaired
1445 */
1446 if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {
1447 error_setg(errp, "qcow2: Image is corrupt; cannot be opened "
1448 "read/write");
1449 ret = -EACCES;
1450 goto fail;
1451 }
1452 }
1453
1454 s->subclusters_per_cluster =
1455 has_subclusters(s) ? QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER : 1;
1456 s->subcluster_size = s->cluster_size / s->subclusters_per_cluster;
1457 s->subcluster_bits = ctz32(s->subcluster_size);
1458
1459 if (s->subcluster_size < (1 << MIN_CLUSTER_BITS)) {
1460 error_setg(errp, "Unsupported subcluster size: %d", s->subcluster_size);
1461 ret = -EINVAL;
1462 goto fail;
1463 }
1464
1465 /* Check support for various header values */
1466 if (header.refcount_order > 6) {
1467 error_setg(errp, "Reference count entry width too large; may not "
1468 "exceed 64 bits");
1469 ret = -EINVAL;
1470 goto fail;
1471 }
1472 s->refcount_order = header.refcount_order;
1473 s->refcount_bits = 1 << s->refcount_order;
1474 s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1);
1475 s->refcount_max += s->refcount_max - 1;
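/*
 * Illustrative note: the two statements above compute 2^refcount_bits - 1
 * without ever shifting by the full 64 bits: first
 * refcount_max = 2^(refcount_bits - 1), then doubling it minus one gives the
 * all-ones value, e.g. refcount_bits = 16 yields 0xffff and
 * refcount_bits = 64 yields UINT64_MAX.
 */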
1476
1477 s->crypt_method_header = header.crypt_method;
1478 if (s->crypt_method_header) {
1479 if (bdrv_uses_whitelist() &&
1480 s->crypt_method_header == QCOW_CRYPT_AES) {
1481 error_setg(errp,
1482 "Use of AES-CBC encrypted qcow2 images is no longer "
1483 "supported in system emulators");
1484 error_append_hint(errp,
1485 "You can use 'qemu-img convert' to convert your "
1486 "image to an alternative supported format, such "
1487 "as unencrypted qcow2, or raw with the LUKS "
1488 "format instead.\n");
1489 ret = -ENOSYS;
1490 goto fail;
1491 }
1492
1493 if (s->crypt_method_header == QCOW_CRYPT_AES) {
1494 s->crypt_physical_offset = false;
1495 } else {
1496 /* Assuming LUKS and any future crypt methods we
1497 * add will all use physical offsets, because the
1498 * alternative is insecure... */
1499 s->crypt_physical_offset = true;
1500 }
1501
1502 bs->encrypted = true;
1503 }
1504
1505 s->l2_bits = s->cluster_bits - ctz32(l2_entry_size(s));
1506 s->l2_size = 1 << s->l2_bits;
1507 /* 2^(s->refcount_order - 3) is the refcount width in bytes */
1508 s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3);
1509 s->refcount_block_size = 1 << s->refcount_block_bits;
1510 bs->total_sectors = header.size / BDRV_SECTOR_SIZE;
1511 s->csize_shift = (62 - (s->cluster_bits - 8));
1512 s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
1513 s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
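/*
 * Illustrative note: for the common 64 KiB cluster size (cluster_bits = 16)
 * the values above become csize_shift = 62 - 8 = 54, csize_mask = 0xff and
 * cluster_offset_mask = (1 << 54) - 1, matching the compressed cluster
 * descriptor layout where the low csize_shift bits hold the host offset of
 * the compressed data and the next cluster_bits - 8 bits encode how many
 * additional 512-byte sectors the compressed data occupies.
 */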
1514
1515 s->refcount_table_offset = header.refcount_table_offset;
1516 s->refcount_table_size =
1517 header.refcount_table_clusters << (s->cluster_bits - 3);
1518
1519 if (header.refcount_table_clusters == 0 && !(flags & BDRV_O_CHECK)) {
1520 error_setg(errp, "Image does not contain a reference count table");
1521 ret = -EINVAL;
1522 goto fail;
1523 }
1524
1525 ret = qcow2_validate_table(bs, s->refcount_table_offset,
1526 header.refcount_table_clusters,
1527 s->cluster_size, QCOW_MAX_REFTABLE_SIZE,
1528 "Reference count table", errp);
1529 if (ret < 0) {
1530 goto fail;
1531 }
1532
1533 if (!(flags & BDRV_O_CHECK)) {
1534 /*
1535 * The total size in bytes of the snapshot table is checked in
1536 * qcow2_read_snapshots() because the size of each snapshot is
1537 * variable and we don't know it yet.
1538 * Here we only check the offset and number of snapshots.
1539 */
1540 ret = qcow2_validate_table(bs, header.snapshots_offset,
1541 header.nb_snapshots,
1542 sizeof(QCowSnapshotHeader),
1543 sizeof(QCowSnapshotHeader) *
1544 QCOW_MAX_SNAPSHOTS,
1545 "Snapshot table", errp);
1546 if (ret < 0) {
1547 goto fail;
1548 }
1549 }
1550
1551 /* read the level 1 table */
1552 ret = qcow2_validate_table(bs, header.l1_table_offset,
1553 header.l1_size, L1E_SIZE,
1554 QCOW_MAX_L1_SIZE, "Active L1 table", errp);
1555 if (ret < 0) {
1556 goto fail;
1557 }
1558 s->l1_size = header.l1_size;
1559 s->l1_table_offset = header.l1_table_offset;
1560
1561 l1_vm_state_index = size_to_l1(s, header.size);
1562 if (l1_vm_state_index > INT_MAX) {
1563 error_setg(errp, "Image is too big");
1564 ret = -EFBIG;
1565 goto fail;
1566 }
1567 s->l1_vm_state_index = l1_vm_state_index;
1568
1569 /* the L1 table must contain at least enough entries to cover
1570 header.size bytes */
1571 if (s->l1_size < s->l1_vm_state_index) {
1572 error_setg(errp, "L1 table is too small");
1573 ret = -EINVAL;
1574 goto fail;
1575 }
1576
1577 if (s->l1_size > 0) {
1578 s->l1_table = qemu_try_blockalign(bs->file->bs, s->l1_size * L1E_SIZE);
1579 if (s->l1_table == NULL) {
1580 error_setg(errp, "Could not allocate L1 table");
1581 ret = -ENOMEM;
1582 goto fail;
1583 }
1584 ret = bdrv_co_pread(bs->file, s->l1_table_offset, s->l1_size * L1E_SIZE,
1585 s->l1_table, 0);
1586 if (ret < 0) {
1587 error_setg_errno(errp, -ret, "Could not read L1 table");
1588 goto fail;
1589 }
1590 for (i = 0; i < s->l1_size; i++) {
1591 s->l1_table[i] = be64_to_cpu(s->l1_table[i]);
1592 }
1593 }
1594
1595 /* Parse driver-specific options */
1596 ret = qcow2_update_options(bs, options, flags, errp);
1597 if (ret < 0) {
1598 goto fail;
1599 }
1600
1601 s->flags = flags;
1602
1603 ret = qcow2_refcount_init(bs);
1604 if (ret != 0) {
1605 error_setg_errno(errp, -ret, "Could not initialize refcount handling");
1606 goto fail;
1607 }
1608
1609 QLIST_INIT(&s->cluster_allocs);
1610 QTAILQ_INIT(&s->discards);
1611
1612 /* read qcow2 extensions */
1613 if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL,
1614 flags, &update_header, errp)) {
1615 ret = -EINVAL;
1616 goto fail;
1617 }
1618
1619 if (open_data_file) {
1620 /* Open external data file */
1621 s->data_file = bdrv_co_open_child(NULL, options, "data-file", bs,
1622 &child_of_bds, BDRV_CHILD_DATA,
1623 true, errp);
1624 if (*errp) {
1625 ret = -EINVAL;
1626 goto fail;
1627 }
1628
1629 if (s->incompatible_features & QCOW2_INCOMPAT_DATA_FILE) {
1630 if (!s->data_file && s->image_data_file) {
1631 s->data_file = bdrv_co_open_child(s->image_data_file, options,
1632 "data-file", bs,
1633 &child_of_bds,
1634 BDRV_CHILD_DATA, false, errp);
1635 if (!s->data_file) {
1636 ret = -EINVAL;
1637 goto fail;
1638 }
1639 }
1640 if (!s->data_file) {
1641 error_setg(errp, "'data-file' is required for this image");
1642 ret = -EINVAL;
1643 goto fail;
1644 }
1645
1646 /* No data here */
1647 bs->file->role &= ~BDRV_CHILD_DATA;
1648
1649 /* Must succeed because we have given up permissions if anything */
1650 bdrv_child_refresh_perms(bs, bs->file, &error_abort);
1651 } else {
1652 if (s->data_file) {
1653 error_setg(errp, "'data-file' can only be set for images with "
1654 "an external data file");
1655 ret = -EINVAL;
1656 goto fail;
1657 }
1658
1659 s->data_file = bs->file;
1660
1661 if (data_file_is_raw(bs)) {
1662 error_setg(errp, "data-file-raw requires a data file");
1663 ret = -EINVAL;
1664 goto fail;
1665 }
1666 }
1667 }
1668
1669 /* qcow2_read_extensions may have set up the crypto context
1670 * if the crypt method needs a header region; some methods
1671 * don't need header extensions, so we must check here
1672 */
1673 if (s->crypt_method_header && !s->crypto) {
1674 if (s->crypt_method_header == QCOW_CRYPT_AES) {
1675 unsigned int cflags = 0;
1676 if (flags & BDRV_O_NO_IO) {
1677 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
1678 }
1679 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
1680 NULL, NULL, cflags,
1681 QCOW2_MAX_THREADS, errp);
1682 if (!s->crypto) {
1683 ret = -EINVAL;
1684 goto fail;
1685 }
1686 } else if (!(flags & BDRV_O_NO_IO)) {
1687 error_setg(errp, "Missing CRYPTO header for crypt method %d",
1688 s->crypt_method_header);
1689 ret = -EINVAL;
1690 goto fail;
1691 }
1692 }
1693
1694 /* read the backing file name */
1695 if (header.backing_file_offset != 0) {
1696 len = header.backing_file_size;
1697 if (len > MIN(1023, s->cluster_size - header.backing_file_offset) ||
1698 len >= sizeof(bs->backing_file)) {
1699 error_setg(errp, "Backing file name too long");
1700 ret = -EINVAL;
1701 goto fail;
1702 }
1703
1704 s->image_backing_file = g_malloc(len + 1);
1705 ret = bdrv_co_pread(bs->file, header.backing_file_offset, len,
1706 s->image_backing_file, 0);
1707 if (ret < 0) {
1708 error_setg_errno(errp, -ret, "Could not read backing file name");
1709 goto fail;
1710 }
1711 s->image_backing_file[len] = '\0';
1712
1713 /*
1714 * Update only when something has changed. This function is called by
1715 * qcow2_co_invalidate_cache(), and we do not want to reset
1716 * auto_backing_file unless necessary.
1717 */
1718 if (!g_str_equal(s->image_backing_file, bs->backing_file)) {
1719 pstrcpy(bs->backing_file, sizeof(bs->backing_file),
1720 s->image_backing_file);
1721 pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file),
1722 s->image_backing_file);
1723 }
1724 }
1725
1726 /*
1727 * Internal snapshots; skip reading them in check mode, because
1728 * we do not need them then, and we do not want to abort because
1729 * of a broken table.
1730 */
1731 if (!(flags & BDRV_O_CHECK)) {
1732 s->snapshots_offset = header.snapshots_offset;
1733 s->nb_snapshots = header.nb_snapshots;
1734
1735 ret = qcow2_read_snapshots(bs, errp);
1736 if (ret < 0) {
1737 goto fail;
1738 }
1739 }
1740
1741 /* Clear unknown autoclear feature bits */
1742 update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK;
1743 update_header = update_header && bdrv_is_writable(bs);
1744 if (update_header) {
1745 s->autoclear_features &= QCOW2_AUTOCLEAR_MASK;
1746 }
1747
1748 /* == Handle persistent dirty bitmaps ==
1749 *
1750 * We want to load dirty bitmaps in three cases:
1751 *
1752 * 1. Normal open of the disk in active mode, not related to invalidation
1753 * after migration.
1754 *
1755 * 2. Invalidation of the target vm after pre-copy phase of migration, if
1756 * bitmaps are _not_ migrating through migration channel, i.e.
1757 * 'dirty-bitmaps' capability is disabled.
1758 *
1759 * 3. Invalidation of source vm after failed or canceled migration.
1760 * This is a very interesting case. There are two possible types of
1761 * bitmaps:
1762 *
1763 * A. Stored on inactivation and removed. They should be loaded from the
1764 * image.
1765 *
1766 * B. Not stored: not-persistent bitmaps and bitmaps, migrated through
1767 * the migration channel (with dirty-bitmaps capability).
1768 *
1769 * On the other hand, there are two possible sub-cases:
1770 *
1771 * 3.1 disk was changed by somebody else while were inactive. In this
1772 * case all in-RAM dirty bitmaps (both persistent and not) are
1773 * definitely invalid. And we don't have any method to determine
1774 * this.
1775 *
1776 * Simple and safe thing is to just drop all the bitmaps of type B on
1777 * inactivation. But in this case we lose bitmaps in valid 4.2 case.
1778 *
1779 * On the other hand, resuming source vm, if disk was already changed
1780 * is a bad thing anyway: not only bitmaps, the whole vm state is
1781 * out of sync with disk.
1782 *
1783 * This means, that user or management tool, who for some reason
1784 * decided to resume source vm, after disk was already changed by
1785 * target vm, should at least drop all dirty bitmaps by hand.
1786 *
1787 * So, we can ignore this case for now, but TODO: add a "generation"
1788 * extension to qcow2 to determine whether the image was changed after
1789 * the last inactivation. If it was changed, we will drop (or at least
1790 * mark as 'invalid') all the bitmaps of type B, both persistent
1791 * and not.
1792 *
1793 * 3.2 The disk was _not_ changed while we were inactive. Bitmaps may have
1794 * been saved to disk ('dirty-bitmaps' capability disabled) or not saved
1795 * ('dirty-bitmaps' capability enabled), but we don't need to care:
1796 * just load bitmaps as always. Stored bitmaps will be loaded, and
1797 * bitmaps that were not stored have the IN_USE=1 flag set in the image
1798 * and will be skipped on loading.
1799 *
1800 * One remaining case in which we don't want to load bitmaps:
1801 *
1802 * 4. Opening the disk in inactive mode in the target vm (bitmaps are
1803 * migrating or will be loaded on invalidation; no need to load them now)
1804 */
1805
1806 if (!(bdrv_get_flags(bs) & BDRV_O_INACTIVE)) {
1807 /* It's case 1, 2 or 3.2. Or 3.1, which is a BUG in the management layer. */
1808 bool header_updated;
1809 if (!qcow2_load_dirty_bitmaps(bs, &header_updated, errp)) {
1810 ret = -EINVAL;
1811 goto fail;
1812 }
1813
1814 update_header = update_header && !header_updated;
1815 }
1816
1817 if (update_header) {
1818 ret = qcow2_update_header(bs);
1819 if (ret < 0) {
1820 error_setg_errno(errp, -ret, "Could not update qcow2 header");
1821 goto fail;
1822 }
1823 }
1824
1825 bs->supported_zero_flags = header.version >= 3 ?
1826 BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK : 0;
1827 bs->supported_truncate_flags = BDRV_REQ_ZERO_WRITE;
1828
1829 /* Repair image if dirty */
1830 if (!(flags & BDRV_O_CHECK) && bdrv_is_writable(bs) &&
1831 (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) {
1832 BdrvCheckResult result = {0};
1833
1834 ret = qcow2_co_check_locked(bs, &result,
1835 BDRV_FIX_ERRORS | BDRV_FIX_LEAKS);
1836 if (ret < 0 || result.check_errors) {
1837 if (ret >= 0) {
1838 ret = -EIO;
1839 }
1840 error_setg_errno(errp, -ret, "Could not repair dirty image");
1841 goto fail;
1842 }
1843 }
1844
1845 #ifdef DEBUG_ALLOC
1846 {
1847 BdrvCheckResult result = {0};
1848 qcow2_check_refcounts(bs, &result, 0);
1849 }
1850 #endif
1851
1852 qemu_co_queue_init(&s->thread_task_queue);
1853
1854 return ret;
1855
1856 fail:
1857 g_free(s->image_data_file);
1858 if (open_data_file && has_data_file(bs)) {
1859 bdrv_unref_child(bs, s->data_file);
1860 s->data_file = NULL;
1861 }
1862 g_free(s->unknown_header_fields);
1863 cleanup_unknown_header_ext(bs);
1864 qcow2_free_snapshots(bs);
1865 qcow2_refcount_close(bs);
1866 qemu_vfree(s->l1_table);
1867 /* else pre-write overlap checks in cache_destroy may crash */
1868 s->l1_table = NULL;
1869 cache_clean_timer_del(bs);
1870 if (s->l2_table_cache) {
1871 qcow2_cache_destroy(s->l2_table_cache);
1872 }
1873 if (s->refcount_block_cache) {
1874 qcow2_cache_destroy(s->refcount_block_cache);
1875 }
1876 qcrypto_block_free(s->crypto);
1877 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
1878 return ret;
1879 }
1880
1881 typedef struct QCow2OpenCo {
1882 BlockDriverState *bs;
1883 QDict *options;
1884 int flags;
1885 Error **errp;
1886 int ret;
1887 } QCow2OpenCo;
1888
1889 static void coroutine_fn qcow2_open_entry(void *opaque)
1890 {
1891 QCow2OpenCo *qoc = opaque;
1892 BDRVQcow2State *s = qoc->bs->opaque;
1893
1894 assume_graph_lock(); /* FIXME */
1895
1896 qemu_co_mutex_lock(&s->lock);
1897 qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, true,
1898 qoc->errp);
1899 qemu_co_mutex_unlock(&s->lock);
1900 }
1901
1902 static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
1903 Error **errp)
1904 {
1905 BDRVQcow2State *s = bs->opaque;
1906 QCow2OpenCo qoc = {
1907 .bs = bs,
1908 .options = options,
1909 .flags = flags,
1910 .errp = errp,
1911 .ret = -EINPROGRESS
1912 };
1913 int ret;
1914
1915 ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
1916 if (ret < 0) {
1917 return ret;
1918 }
1919
1920 /* Initialise locks */
1921 qemu_co_mutex_init(&s->lock);
1922
1923 if (qemu_in_coroutine()) {
1924 /* From bdrv_co_create. */
1925 qcow2_open_entry(&qoc);
1926 } else {
1927 assert(qemu_get_current_aio_context() == qemu_get_aio_context());
1928 qemu_coroutine_enter(qemu_coroutine_create(qcow2_open_entry, &qoc));
1929 BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
1930 }
1931 return qoc.ret;
1932 }
1933
1934 static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp)
1935 {
1936 BDRVQcow2State *s = bs->opaque;
1937
1938 if (bs->encrypted) {
1939 /* Encryption works on a sector granularity */
1940 bs->bl.request_alignment = qcrypto_block_get_sector_size(s->crypto);
1941 }
1942 bs->bl.pwrite_zeroes_alignment = s->subcluster_size;
1943 bs->bl.pdiscard_alignment = s->cluster_size;
1944 }
1945
1946 static int qcow2_reopen_prepare(BDRVReopenState *state,
1947 BlockReopenQueue *queue, Error **errp)
1948 {
1949 BDRVQcow2State *s = state->bs->opaque;
1950 Qcow2ReopenState *r;
1951 int ret;
1952
1953 r = g_new0(Qcow2ReopenState, 1);
1954 state->opaque = r;
1955
1956 ret = qcow2_update_options_prepare(state->bs, r, state->options,
1957 state->flags, errp);
1958 if (ret < 0) {
1959 goto fail;
1960 }
1961
1962 /* We need to write out any unwritten data if we reopen read-only. */
1963 if ((state->flags & BDRV_O_RDWR) == 0) {
1964 ret = qcow2_reopen_bitmaps_ro(state->bs, errp);
1965 if (ret < 0) {
1966 goto fail;
1967 }
1968
1969 ret = bdrv_flush(state->bs);
1970 if (ret < 0) {
1971 goto fail;
1972 }
1973
1974 ret = qcow2_mark_clean(state->bs);
1975 if (ret < 0) {
1976 goto fail;
1977 }
1978 }
1979
1980 /*
1981 * Without an external data file, s->data_file points to the same BdrvChild
1982 * as bs->file. It needs to be resynced after reopen because bs->file may
1983 * be changed. We can't use it in the meantime.
1984 */
1985 if (!has_data_file(state->bs)) {
1986 assert(s->data_file == state->bs->file);
1987 s->data_file = NULL;
1988 }
1989
1990 return 0;
1991
1992 fail:
1993 qcow2_update_options_abort(state->bs, r);
1994 g_free(r);
1995 return ret;
1996 }
1997
1998 static void qcow2_reopen_commit(BDRVReopenState *state)
1999 {
2000 BDRVQcow2State *s = state->bs->opaque;
2001
2002 qcow2_update_options_commit(state->bs, state->opaque);
2003 if (!s->data_file) {
2004 /*
2005 * If we don't have an external data file, s->data_file was cleared by
2006 * qcow2_reopen_prepare() and needs to be updated.
2007 */
2008 s->data_file = state->bs->file;
2009 }
2010 g_free(state->opaque);
2011 }
2012
2013 static void qcow2_reopen_commit_post(BDRVReopenState *state)
2014 {
2015 if (state->flags & BDRV_O_RDWR) {
2016 Error *local_err = NULL;
2017
2018 if (qcow2_reopen_bitmaps_rw(state->bs, &local_err) < 0) {
2019 /*
2020 * This is not fatal: the bitmaps are just left read-only, so all
2021 * following writes will fail. The user can remove the read-only bitmaps
2022 * to unblock writes, or retry the reopen.
2023 */
2024 error_reportf_err(local_err,
2025 "%s: Failed to make dirty bitmaps writable: ",
2026 bdrv_get_node_name(state->bs));
2027 }
2028 }
2029 }
2030
2031 static void qcow2_reopen_abort(BDRVReopenState *state)
2032 {
2033 BDRVQcow2State *s = state->bs->opaque;
2034
2035 if (!s->data_file) {
2036 /*
2037 * If we don't have an external data file, s->data_file was cleared by
2038 * qcow2_reopen_prepare() and needs to be restored.
2039 */
2040 s->data_file = state->bs->file;
2041 }
2042 qcow2_update_options_abort(state->bs, state->opaque);
2043 g_free(state->opaque);
2044 }
2045
2046 static void qcow2_join_options(QDict *options, QDict *old_options)
2047 {
2048 bool has_new_overlap_template =
2049 qdict_haskey(options, QCOW2_OPT_OVERLAP) ||
2050 qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE);
2051 bool has_new_total_cache_size =
2052 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE);
2053 bool has_all_cache_options;
2054
2055 /* New overlap template overrides all old overlap options */
2056 if (has_new_overlap_template) {
2057 qdict_del(old_options, QCOW2_OPT_OVERLAP);
2058 qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE);
2059 qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER);
2060 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1);
2061 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2);
2062 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE);
2063 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK);
2064 qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE);
2065 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1);
2066 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2);
2067 }
2068
2069 /* A new total cache size overrides all old individual cache size options */
2070 if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) {
2071 qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE);
2072 qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
2073 }
2074
2075 qdict_join(options, old_options, false);
2076
2077 /*
2078 * If, after merging, all cache size options are set but the total size is
2079 * not a new option, the (old) total size is dropped. Do keep all options
2080 * if all three are new: the resulting error message is what we want.
2081 */
2082 has_all_cache_options =
2083 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) ||
2084 qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) ||
2085 qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
2086
2087 if (has_all_cache_options && !has_new_total_cache_size) {
2088 qdict_del(options, QCOW2_OPT_CACHE_SIZE);
2089 }
2090 }
2091
2092 static int coroutine_fn qcow2_co_block_status(BlockDriverState *bs,
2093 bool want_zero,
2094 int64_t offset, int64_t count,
2095 int64_t *pnum, int64_t *map,
2096 BlockDriverState **file)
2097 {
2098 BDRVQcow2State *s = bs->opaque;
2099 uint64_t host_offset;
2100 unsigned int bytes;
2101 QCow2SubclusterType type;
2102 int ret, status = 0;
2103
2104 qemu_co_mutex_lock(&s->lock);
2105
2106 if (!s->metadata_preallocation_checked) {
2107 ret = qcow2_detect_metadata_preallocation(bs);
2108 s->metadata_preallocation = (ret == 1);
2109 s->metadata_preallocation_checked = true;
2110 }
2111
2112 bytes = MIN(INT_MAX, count);
2113 ret = qcow2_get_host_offset(bs, offset, &bytes, &host_offset, &type);
2114 qemu_co_mutex_unlock(&s->lock);
2115 if (ret < 0) {
2116 return ret;
2117 }
2118
2119 *pnum = bytes;
2120
2121 if ((type == QCOW2_SUBCLUSTER_NORMAL ||
2122 type == QCOW2_SUBCLUSTER_ZERO_ALLOC ||
2123 type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC) && !s->crypto) {
2124 *map = host_offset;
2125 *file = s->data_file->bs;
2126 status |= BDRV_BLOCK_OFFSET_VALID;
2127 }
2128 if (type == QCOW2_SUBCLUSTER_ZERO_PLAIN ||
2129 type == QCOW2_SUBCLUSTER_ZERO_ALLOC) {
2130 status |= BDRV_BLOCK_ZERO;
2131 } else if (type != QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN &&
2132 type != QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC) {
2133 status |= BDRV_BLOCK_DATA;
2134 }
2135 if (s->metadata_preallocation && (status & BDRV_BLOCK_DATA) &&
2136 (status & BDRV_BLOCK_OFFSET_VALID))
2137 {
2138 status |= BDRV_BLOCK_RECURSE;
2139 }
2140 return status;
2141 }
2142
2143 static int coroutine_fn GRAPH_RDLOCK
2144 qcow2_handle_l2meta(BlockDriverState *bs, QCowL2Meta **pl2meta, bool link_l2)
2145 {
2146 int ret = 0;
2147 QCowL2Meta *l2meta = *pl2meta;
2148
2149 while (l2meta != NULL) {
2150 QCowL2Meta *next;
2151
2152 if (link_l2) {
2153 ret = qcow2_alloc_cluster_link_l2(bs, l2meta);
2154 if (ret) {
2155 goto out;
2156 }
2157 } else {
2158 qcow2_alloc_cluster_abort(bs, l2meta);
2159 }
2160
2161 /* Take the request off the list of running requests */
2162 QLIST_REMOVE(l2meta, next_in_flight);
2163
2164 qemu_co_queue_restart_all(&l2meta->dependent_requests);
2165
2166 next = l2meta->next;
2167 g_free(l2meta);
2168 l2meta = next;
2169 }
2170 out:
2171 *pl2meta = l2meta;
2172 return ret;
2173 }
2174
2175 static int coroutine_fn GRAPH_RDLOCK
2176 qcow2_co_preadv_encrypted(BlockDriverState *bs,
2177 uint64_t host_offset,
2178 uint64_t offset,
2179 uint64_t bytes,
2180 QEMUIOVector *qiov,
2181 uint64_t qiov_offset)
2182 {
2183 int ret;
2184 BDRVQcow2State *s = bs->opaque;
2185 uint8_t *buf;
2186
2187 assert(bs->encrypted && s->crypto);
2188 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
2189
2190 /*
2191 * For encrypted images, read everything into a temporary
2192 * contiguous buffer on which the AES functions can work.
2193 * Also, decryption in a separate buffer is better as it
2194 * prevents the guest from learning information about the
2195 * encrypted nature of the virtual disk.
2196 */
2197
2198 buf = qemu_try_blockalign(s->data_file->bs, bytes);
2199 if (buf == NULL) {
2200 return -ENOMEM;
2201 }
2202
2203 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
2204 ret = bdrv_co_pread(s->data_file, host_offset, bytes, buf, 0);
2205 if (ret < 0) {
2206 goto fail;
2207 }
2208
2209 if (qcow2_co_decrypt(bs, host_offset, offset, buf, bytes) < 0)
2210 {
2211 ret = -EIO;
2212 goto fail;
2213 }
2214 qemu_iovec_from_buf(qiov, qiov_offset, buf, bytes);
2215
2216 fail:
2217 qemu_vfree(buf);
2218
2219 return ret;
2220 }
2221
2222 typedef struct Qcow2AioTask {
2223 AioTask task;
2224
2225 BlockDriverState *bs;
2226 QCow2SubclusterType subcluster_type; /* only for read */
2227 uint64_t host_offset; /* or l2_entry for compressed read */
2228 uint64_t offset;
2229 uint64_t bytes;
2230 QEMUIOVector *qiov;
2231 uint64_t qiov_offset;
2232 QCowL2Meta *l2meta; /* only for write */
2233 } Qcow2AioTask;
2234
2235 static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task);
2236 static coroutine_fn int qcow2_add_task(BlockDriverState *bs,
2237 AioTaskPool *pool,
2238 AioTaskFunc func,
2239 QCow2SubclusterType subcluster_type,
2240 uint64_t host_offset,
2241 uint64_t offset,
2242 uint64_t bytes,
2243 QEMUIOVector *qiov,
2244 size_t qiov_offset,
2245 QCowL2Meta *l2meta)
2246 {
2247 Qcow2AioTask local_task;
2248 Qcow2AioTask *task = pool ? g_new(Qcow2AioTask, 1) : &local_task;
2249
2250 *task = (Qcow2AioTask) {
2251 .task.func = func,
2252 .bs = bs,
2253 .subcluster_type = subcluster_type,
2254 .qiov = qiov,
2255 .host_offset = host_offset,
2256 .offset = offset,
2257 .bytes = bytes,
2258 .qiov_offset = qiov_offset,
2259 .l2meta = l2meta,
2260 };
2261
2262 trace_qcow2_add_task(qemu_coroutine_self(), bs, pool,
2263 func == qcow2_co_preadv_task_entry ? "read" : "write",
2264 subcluster_type, host_offset, offset, bytes,
2265 qiov, qiov_offset);
2266
2267 if (!pool) {
2268 return func(&task->task);
2269 }
2270
2271 aio_task_pool_start_task(pool, &task->task);
2272
2273 return 0;
2274 }
2275
2276 static int coroutine_fn GRAPH_RDLOCK
2277 qcow2_co_preadv_task(BlockDriverState *bs, QCow2SubclusterType subc_type,
2278 uint64_t host_offset, uint64_t offset, uint64_t bytes,
2279 QEMUIOVector *qiov, size_t qiov_offset)
2280 {
2281 BDRVQcow2State *s = bs->opaque;
2282
2283 switch (subc_type) {
2284 case QCOW2_SUBCLUSTER_ZERO_PLAIN:
2285 case QCOW2_SUBCLUSTER_ZERO_ALLOC:
2286 /* Both zero types are handled in qcow2_co_preadv_part */
2287 g_assert_not_reached();
2288
2289 case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
2290 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
2291 assert(bs->backing); /* otherwise handled in qcow2_co_preadv_part */
2292
2293 BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
2294 return bdrv_co_preadv_part(bs->backing, offset, bytes,
2295 qiov, qiov_offset, 0);
2296
2297 case QCOW2_SUBCLUSTER_COMPRESSED:
2298 return qcow2_co_preadv_compressed(bs, host_offset,
2299 offset, bytes, qiov, qiov_offset);
2300
2301 case QCOW2_SUBCLUSTER_NORMAL:
2302 if (bs->encrypted) {
2303 return qcow2_co_preadv_encrypted(bs, host_offset,
2304 offset, bytes, qiov, qiov_offset);
2305 }
2306
2307 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
2308 return bdrv_co_preadv_part(s->data_file, host_offset,
2309 bytes, qiov, qiov_offset, 0);
2310
2311 default:
2312 g_assert_not_reached();
2313 }
2314
2315 g_assert_not_reached();
2316 }
2317
2318 /*
2319 * This function can count as GRAPH_RDLOCK because qcow2_co_preadv_part() holds
2320 * the graph lock and keeps it until this coroutine has terminated.
2321 */
2322 static int coroutine_fn GRAPH_RDLOCK qcow2_co_preadv_task_entry(AioTask *task)
2323 {
2324 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task);
2325
2326 assert(!t->l2meta);
2327
2328 return qcow2_co_preadv_task(t->bs, t->subcluster_type,
2329 t->host_offset, t->offset, t->bytes,
2330 t->qiov, t->qiov_offset);
2331 }
2332
2333 static int coroutine_fn GRAPH_RDLOCK
2334 qcow2_co_preadv_part(BlockDriverState *bs, int64_t offset, int64_t bytes,
2335 QEMUIOVector *qiov, size_t qiov_offset,
2336 BdrvRequestFlags flags)
2337 {
2338 BDRVQcow2State *s = bs->opaque;
2339 int ret = 0;
2340 unsigned int cur_bytes; /* number of bytes in current iteration */
2341 uint64_t host_offset = 0;
2342 QCow2SubclusterType type;
2343 AioTaskPool *aio = NULL;
2344
2345 while (bytes != 0 && aio_task_pool_status(aio) == 0) {
2346 /* prepare next request */
2347 cur_bytes = MIN(bytes, INT_MAX);
2348 if (s->crypto) {
2349 cur_bytes = MIN(cur_bytes,
2350 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
2351 }
2352
2353 qemu_co_mutex_lock(&s->lock);
2354 ret = qcow2_get_host_offset(bs, offset, &cur_bytes,
2355 &host_offset, &type);
2356 qemu_co_mutex_unlock(&s->lock);
2357 if (ret < 0) {
2358 goto out;
2359 }
2360
2361 if (type == QCOW2_SUBCLUSTER_ZERO_PLAIN ||
2362 type == QCOW2_SUBCLUSTER_ZERO_ALLOC ||
2363 (type == QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN && !bs->backing) ||
2364 (type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC && !bs->backing))
2365 {
2366 qemu_iovec_memset(qiov, qiov_offset, 0, cur_bytes);
2367 } else {
2368 if (!aio && cur_bytes != bytes) {
2369 aio = aio_task_pool_new(QCOW2_MAX_WORKERS);
2370 }
2371 ret = qcow2_add_task(bs, aio, qcow2_co_preadv_task_entry, type,
2372 host_offset, offset, cur_bytes,
2373 qiov, qiov_offset, NULL);
2374 if (ret < 0) {
2375 goto out;
2376 }
2377 }
2378
2379 bytes -= cur_bytes;
2380 offset += cur_bytes;
2381 qiov_offset += cur_bytes;
2382 }
2383
2384 out:
2385 if (aio) {
2386 aio_task_pool_wait_all(aio);
2387 if (ret == 0) {
2388 ret = aio_task_pool_status(aio);
2389 }
2390 g_free(aio);
2391 }
2392
2393 return ret;
2394 }
2395
2396 /* Check if it's possible to merge a write request with the writing of
2397 * the data from the COW regions */
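/*
 * Rough picture of a single allocation (one QCowL2Meta entry), as an
 * illustration of what "merging" means here:
 *
 *   | cow_start region | guest data being written | cow_end region |
 *
 * When the guest data lines up exactly between the two COW regions, and
 * adding both regions to the QEMUIOVector still stays within IOV_MAX, all
 * three pieces can be submitted as one vectored write.
 */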
2398 static bool merge_cow(uint64_t offset, unsigned bytes,
2399 QEMUIOVector *qiov, size_t qiov_offset,
2400 QCowL2Meta *l2meta)
2401 {
2402 QCowL2Meta *m;
2403
2404 for (m = l2meta; m != NULL; m = m->next) {
2405 /* If both COW regions are empty then there's nothing to merge */
2406 if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) {
2407 continue;
2408 }
2409
2410 /* If COW regions are handled already, skip this too */
2411 if (m->skip_cow) {
2412 continue;
2413 }
2414
2415 /*
2416 * The write request should start immediately after the first
2417 * COW region. This does not always happen because the area
2418 * touched by the request can be larger than the one defined
2419 * by @m (a single request can span an area consisting of a
2420 * mix of previously unallocated and allocated clusters, that
2421 * is why @l2meta is a list).
2422 */
2423 if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) {
2424 /* In this case the request starts before this region */
2425 assert(offset < l2meta_cow_start(m));
2426 assert(m->cow_start.nb_bytes == 0);
2427 continue;
2428 }
2429
2430 /* The write request should end immediately before the second
2431 * COW region (see above for why it does not always happen) */
2432 if (m->offset + m->cow_end.offset != offset + bytes) {
2433 assert(offset + bytes > m->offset + m->cow_end.offset);
2434 assert(m->cow_end.nb_bytes == 0);
2435 continue;
2436 }
2437
2438 /* Make sure that adding both COW regions to the QEMUIOVector
2439 * does not exceed IOV_MAX */
2440 if (qemu_iovec_subvec_niov(qiov, qiov_offset, bytes) > IOV_MAX - 2) {
2441 continue;
2442 }
2443
2444 m->data_qiov = qiov;
2445 m->data_qiov_offset = qiov_offset;
2446 return true;
2447 }
2448
2449 return false;
2450 }
2451
2452 /*
2453 * Return 1 if the COW regions read as zeroes, 0 if not, < 0 on error.
2454 * Note that returning 0 does not guarantee non-zero data.
2455 */
2456 static int coroutine_fn GRAPH_RDLOCK
2457 is_zero_cow(BlockDriverState *bs, QCowL2Meta *m)
2458 {
2459 /*
2460 * This check is an optimization shortcut, so it must be
2461 * efficient.
2462 * Instead of is_zero(), use bdrv_co_is_zero_fast() as it is
2463 * faster (but not as accurate: it can return false negatives).
2464 */
2465 int ret = bdrv_co_is_zero_fast(bs, m->offset + m->cow_start.offset,
2466 m->cow_start.nb_bytes);
2467 if (ret <= 0) {
2468 return ret;
2469 }
2470
2471 return bdrv_co_is_zero_fast(bs, m->offset + m->cow_end.offset,
2472 m->cow_end.nb_bytes);
2473 }
2474
2475 static int coroutine_fn GRAPH_RDLOCK
2476 handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta)
2477 {
2478 BDRVQcow2State *s = bs->opaque;
2479 QCowL2Meta *m;
2480
2481 if (!(s->data_file->bs->supported_zero_flags & BDRV_REQ_NO_FALLBACK)) {
2482 return 0;
2483 }
2484
2485 if (bs->encrypted) {
2486 return 0;
2487 }
2488
2489 for (m = l2meta; m != NULL; m = m->next) {
2490 int ret;
2491 uint64_t start_offset = m->alloc_offset + m->cow_start.offset;
2492 unsigned nb_bytes = m->cow_end.offset + m->cow_end.nb_bytes -
2493 m->cow_start.offset;
2494
2495 if (!m->cow_start.nb_bytes && !m->cow_end.nb_bytes) {
2496 continue;
2497 }
2498
2499 ret = is_zero_cow(bs, m);
2500 if (ret < 0) {
2501 return ret;
2502 } else if (ret == 0) {
2503 continue;
2504 }
2505
2506 /*
2507 * instead of writing zero COW buffers,
2508 * efficiently zero out the whole clusters
2509 */
2510
2511 ret = qcow2_pre_write_overlap_check(bs, 0, start_offset, nb_bytes,
2512 true);
2513 if (ret < 0) {
2514 return ret;
2515 }
2516
2517 BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_SPACE);
2518 ret = bdrv_co_pwrite_zeroes(s->data_file, start_offset, nb_bytes,
2519 BDRV_REQ_NO_FALLBACK);
2520 if (ret < 0) {
2521 if (ret != -ENOTSUP && ret != -EAGAIN) {
2522 return ret;
2523 }
2524 continue;
2525 }
2526
2527 trace_qcow2_skip_cow(qemu_coroutine_self(), m->offset, m->nb_clusters);
2528 m->skip_cow = true;
2529 }
2530 return 0;
2531 }
2532
2533 /*
2534 * qcow2_co_pwritev_task
2535 * Called with s->lock unlocked
2536 * l2meta - if not NULL, qcow2_co_pwritev_task() will consume it. The caller
2537 * must not use it in any way after the qcow2_co_pwritev_task() call.
2538 */
2539 static coroutine_fn GRAPH_RDLOCK
2540 int qcow2_co_pwritev_task(BlockDriverState *bs, uint64_t host_offset,
2541 uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
2542 uint64_t qiov_offset, QCowL2Meta *l2meta)
2543 {
2544 int ret;
2545 BDRVQcow2State *s = bs->opaque;
2546 void *crypt_buf = NULL;
2547 QEMUIOVector encrypted_qiov;
2548
2549 if (bs->encrypted) {
2550 assert(s->crypto);
2551 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
2552 crypt_buf = qemu_try_blockalign(bs->file->bs, bytes);
2553 if (crypt_buf == NULL) {
2554 ret = -ENOMEM;
2555 goto out_unlocked;
2556 }
2557 qemu_iovec_to_buf(qiov, qiov_offset, crypt_buf, bytes);
2558
2559 if (qcow2_co_encrypt(bs, host_offset, offset, crypt_buf, bytes) < 0) {
2560 ret = -EIO;
2561 goto out_unlocked;
2562 }
2563
2564 qemu_iovec_init_buf(&encrypted_qiov, crypt_buf, bytes);
2565 qiov = &encrypted_qiov;
2566 qiov_offset = 0;
2567 }
2568
2569 /* Try to efficiently initialize the physical space with zeroes */
2570 ret = handle_alloc_space(bs, l2meta);
2571 if (ret < 0) {
2572 goto out_unlocked;
2573 }
2574
2575 /*
2576 * If we need to do COW, check if it's possible to merge the
2577 * writing of the guest data together with that of the COW regions.
2578 * If it's not possible (or not necessary) then write the
2579 * guest data now.
2580 */
2581 if (!merge_cow(offset, bytes, qiov, qiov_offset, l2meta)) {
2582 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
2583 trace_qcow2_writev_data(qemu_coroutine_self(), host_offset);
2584 ret = bdrv_co_pwritev_part(s->data_file, host_offset,
2585 bytes, qiov, qiov_offset, 0);
2586 if (ret < 0) {
2587 goto out_unlocked;
2588 }
2589 }
2590
2591 qemu_co_mutex_lock(&s->lock);
2592
2593 ret = qcow2_handle_l2meta(bs, &l2meta, true);
2594 goto out_locked;
2595
2596 out_unlocked:
2597 qemu_co_mutex_lock(&s->lock);
2598
2599 out_locked:
2600 qcow2_handle_l2meta(bs, &l2meta, false);
2601 qemu_co_mutex_unlock(&s->lock);
2602
2603 qemu_vfree(crypt_buf);
2604
2605 return ret;
2606 }
2607
2608 /*
2609 * This function can count as GRAPH_RDLOCK because qcow2_co_pwritev_part() holds
2610 * the graph lock and keeps it until this coroutine has terminated.
2611 */
2612 static coroutine_fn GRAPH_RDLOCK int qcow2_co_pwritev_task_entry(AioTask *task)
2613 {
2614 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task);
2615
2616 assert(!t->subcluster_type);
2617
2618 return qcow2_co_pwritev_task(t->bs, t->host_offset,
2619 t->offset, t->bytes, t->qiov, t->qiov_offset,
2620 t->l2meta);
2621 }
2622
2623 static int coroutine_fn GRAPH_RDLOCK
2624 qcow2_co_pwritev_part(BlockDriverState *bs, int64_t offset, int64_t bytes,
2625 QEMUIOVector *qiov, size_t qiov_offset,
2626 BdrvRequestFlags flags)
2627 {
2628 BDRVQcow2State *s = bs->opaque;
2629 int offset_in_cluster;
2630 int ret;
2631 unsigned int cur_bytes; /* number of bytes in current iteration */
2632 uint64_t host_offset;
2633 QCowL2Meta *l2meta = NULL;
2634 AioTaskPool *aio = NULL;
2635
2636 trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes);
2637
2638 while (bytes != 0 && aio_task_pool_status(aio) == 0) {
2639
2640 l2meta = NULL;
2641
2642 trace_qcow2_writev_start_part(qemu_coroutine_self());
2643 offset_in_cluster = offset_into_cluster(s, offset);
2644 cur_bytes = MIN(bytes, INT_MAX);
2645 if (bs->encrypted) {
2646 cur_bytes = MIN(cur_bytes,
2647 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
2648 - offset_in_cluster);
2649 }
2650
2651 qemu_co_mutex_lock(&s->lock);
2652
2653 ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes,
2654 &host_offset, &l2meta);
2655 if (ret < 0) {
2656 goto out_locked;
2657 }
2658
2659 ret = qcow2_pre_write_overlap_check(bs, 0, host_offset,
2660 cur_bytes, true);
2661 if (ret < 0) {
2662 goto out_locked;
2663 }
2664
2665 qemu_co_mutex_unlock(&s->lock);
2666
2667 if (!aio && cur_bytes != bytes) {
2668 aio = aio_task_pool_new(QCOW2_MAX_WORKERS);
2669 }
2670 ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_task_entry, 0,
2671 host_offset, offset,
2672 cur_bytes, qiov, qiov_offset, l2meta);
2673 l2meta = NULL; /* l2meta is consumed by qcow2_co_pwritev_task() */
2674 if (ret < 0) {
2675 goto fail_nometa;
2676 }
2677
2678 bytes -= cur_bytes;
2679 offset += cur_bytes;
2680 qiov_offset += cur_bytes;
2681 trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes);
2682 }
2683 ret = 0;
2684
2685 qemu_co_mutex_lock(&s->lock);
2686
2687 out_locked:
2688 qcow2_handle_l2meta(bs, &l2meta, false);
2689
2690 qemu_co_mutex_unlock(&s->lock);
2691
2692 fail_nometa:
2693 if (aio) {
2694 aio_task_pool_wait_all(aio);
2695 if (ret == 0) {
2696 ret = aio_task_pool_status(aio);
2697 }
2698 g_free(aio);
2699 }
2700
2701 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);
2702
2703 return ret;
2704 }
2705
2706 static int qcow2_inactivate(BlockDriverState *bs)
2707 {
2708 BDRVQcow2State *s = bs->opaque;
2709 int ret, result = 0;
2710 Error *local_err = NULL;
2711
2712 qcow2_store_persistent_dirty_bitmaps(bs, true, &local_err);
2713 if (local_err != NULL) {
2714 result = -EINVAL;
2715 error_reportf_err(local_err, "Lost persistent bitmaps during "
2716 "inactivation of node '%s': ",
2717 bdrv_get_device_or_node_name(bs));
2718 }
2719
2720 ret = qcow2_cache_flush(bs, s->l2_table_cache);
2721 if (ret) {
2722 result = ret;
2723 error_report("Failed to flush the L2 table cache: %s",
2724 strerror(-ret));
2725 }
2726
2727 ret = qcow2_cache_flush(bs, s->refcount_block_cache);
2728 if (ret) {
2729 result = ret;
2730 error_report("Failed to flush the refcount block cache: %s",
2731 strerror(-ret));
2732 }
2733
2734 if (result == 0) {
2735 qcow2_mark_clean(bs);
2736 }
2737
2738 return result;
2739 }
2740
2741 static void qcow2_do_close(BlockDriverState *bs, bool close_data_file)
2742 {
2743 BDRVQcow2State *s = bs->opaque;
2744 qemu_vfree(s->l1_table);
2745 /* else pre-write overlap checks in cache_destroy may crash */
2746 s->l1_table = NULL;
2747
2748 if (!(s->flags & BDRV_O_INACTIVE)) {
2749 qcow2_inactivate(bs);
2750 }
2751
2752 cache_clean_timer_del(bs);
2753 qcow2_cache_destroy(s->l2_table_cache);
2754 qcow2_cache_destroy(s->refcount_block_cache);
2755
2756 qcrypto_block_free(s->crypto);
2757 s->crypto = NULL;
2758 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
2759
2760 g_free(s->unknown_header_fields);
2761 cleanup_unknown_header_ext(bs);
2762
2763 g_free(s->image_data_file);
2764 g_free(s->image_backing_file);
2765 g_free(s->image_backing_format);
2766
2767 if (close_data_file && has_data_file(bs)) {
2768 bdrv_unref_child(bs, s->data_file);
2769 s->data_file = NULL;
2770 }
2771
2772 qcow2_refcount_close(bs);
2773 qcow2_free_snapshots(bs);
2774 }
2775
2776 static void qcow2_close(BlockDriverState *bs)
2777 {
2778 qcow2_do_close(bs, true);
2779 }
2780
2781 static void coroutine_fn GRAPH_RDLOCK
2782 qcow2_co_invalidate_cache(BlockDriverState *bs, Error **errp)
2783 {
2784 ERRP_GUARD();
2785 BDRVQcow2State *s = bs->opaque;
2786 BdrvChild *data_file;
2787 int flags = s->flags;
2788 QCryptoBlock *crypto = NULL;
2789 QDict *options;
2790 int ret;
2791
2792 /*
2793 * Backing files are read-only, which makes all of their metadata immutable;
2794 * that means we don't have to worry about reopening them here.
2795 */
2796
2797 crypto = s->crypto;
2798 s->crypto = NULL;
2799
2800 /*
2801 * Do not reopen s->data_file (i.e., have qcow2_do_close() not close it,
2802 * and then prevent qcow2_do_open() from opening it), because this function
2803 * runs in the I/O path and as such we must not invoke global-state
2804 * functions like bdrv_unref_child() and bdrv_open_child().
2805 */
2806
2807 qcow2_do_close(bs, false);
2808
2809 data_file = s->data_file;
2810 memset(s, 0, sizeof(BDRVQcow2State));
2811 s->data_file = data_file;
2812
2813 options = qdict_clone_shallow(bs->options);
2814
2815 flags &= ~BDRV_O_INACTIVE;
2816 qemu_co_mutex_lock(&s->lock);
2817 ret = qcow2_do_open(bs, options, flags, false, errp);
2818 qemu_co_mutex_unlock(&s->lock);
2819 qobject_unref(options);
2820 if (ret < 0) {
2821 error_prepend(errp, "Could not reopen qcow2 layer: ");
2822 bs->drv = NULL;
2823 return;
2824 }
2825
2826 s->crypto = crypto;
2827 }
2828
2829 static size_t header_ext_add(char *buf, uint32_t magic, const void *s,
2830 size_t len, size_t buflen)
2831 {
2832 QCowExtension *ext_backing_fmt = (QCowExtension*) buf;
2833 size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7);
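/*
 * The payload is padded to a multiple of 8 bytes; for example, a 13-byte
 * backing format string (a hypothetical value, just for illustration)
 * occupies 16 padded bytes plus the 8-byte QCowExtension header, giving
 * ext_len = 24.
 */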
2834
2835 if (buflen < ext_len) {
2836 return -ENOSPC;
2837 }
2838
2839 *ext_backing_fmt = (QCowExtension) {
2840 .magic = cpu_to_be32(magic),
2841 .len = cpu_to_be32(len),
2842 };
2843
2844 if (len) {
2845 memcpy(buf + sizeof(QCowExtension), s, len);
2846 }
2847
2848 return ext_len;
2849 }
2850
2851 /*
2852 * Updates the qcow2 header, including the variable-length parts of it, i.e.
2853 * the backing file name and all extensions. qcow2 was not designed to allow
2854 * such changes, so if we run out of space (we can only use the first cluster)
2855 * this function may fail.
2856 *
2857 * Returns 0 on success, -errno in error cases.
2858 */
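/*
 * Roughly, the layout written into the first cluster below is (each
 * extension present only when applicable):
 *
 *   fixed header | unknown header fields | backing format extension |
 *   external data file extension | crypto header extension |
 *   feature table | bitmaps extension | unknown extensions |
 *   end-of-extensions marker | backing file name
 *
 * The backing file name is not NUL-terminated; its offset and length are
 * recorded in the fixed header instead.
 */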
2859 int qcow2_update_header(BlockDriverState *bs)
2860 {
2861 BDRVQcow2State *s = bs->opaque;
2862 QCowHeader *header;
2863 char *buf;
2864 size_t buflen = s->cluster_size;
2865 int ret;
2866 uint64_t total_size;
2867 uint32_t refcount_table_clusters;
2868 size_t header_length;
2869 Qcow2UnknownHeaderExtension *uext;
2870
2871 buf = qemu_blockalign(bs, buflen);
2872
2873 /* Header structure */
2874 header = (QCowHeader*) buf;
2875
2876 if (buflen < sizeof(*header)) {
2877 ret = -ENOSPC;
2878 goto fail;
2879 }
2880
2881 header_length = sizeof(*header) + s->unknown_header_fields_size;
2882 total_size = bs->total_sectors * BDRV_SECTOR_SIZE;
2883 refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);
2884
2885 ret = validate_compression_type(s, NULL);
2886 if (ret) {
2887 goto fail;
2888 }
2889
2890 *header = (QCowHeader) {
2891 /* Version 2 fields */
2892 .magic = cpu_to_be32(QCOW_MAGIC),
2893 .version = cpu_to_be32(s->qcow_version),
2894 .backing_file_offset = 0,
2895 .backing_file_size = 0,
2896 .cluster_bits = cpu_to_be32(s->cluster_bits),
2897 .size = cpu_to_be64(total_size),
2898 .crypt_method = cpu_to_be32(s->crypt_method_header),
2899 .l1_size = cpu_to_be32(s->l1_size),
2900 .l1_table_offset = cpu_to_be64(s->l1_table_offset),
2901 .refcount_table_offset = cpu_to_be64(s->refcount_table_offset),
2902 .refcount_table_clusters = cpu_to_be32(refcount_table_clusters),
2903 .nb_snapshots = cpu_to_be32(s->nb_snapshots),
2904 .snapshots_offset = cpu_to_be64(s->snapshots_offset),
2905
2906 /* Version 3 fields */
2907 .incompatible_features = cpu_to_be64(s->incompatible_features),
2908 .compatible_features = cpu_to_be64(s->compatible_features),
2909 .autoclear_features = cpu_to_be64(s->autoclear_features),
2910 .refcount_order = cpu_to_be32(s->refcount_order),
2911 .header_length = cpu_to_be32(header_length),
2912 .compression_type = s->compression_type,
2913 };
2914
2915 /* For older versions, write a shorter header */
2916 switch (s->qcow_version) {
2917 case 2:
2918 ret = offsetof(QCowHeader, incompatible_features);
2919 break;
2920 case 3:
2921 ret = sizeof(*header);
2922 break;
2923 default:
2924 ret = -EINVAL;
2925 goto fail;
2926 }
2927
2928 buf += ret;
2929 buflen -= ret;
2930 memset(buf, 0, buflen);
2931
2932 /* Preserve any unknown field in the header */
2933 if (s->unknown_header_fields_size) {
2934 if (buflen < s->unknown_header_fields_size) {
2935 ret = -ENOSPC;
2936 goto fail;
2937 }
2938
2939 memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size);
2940 buf += s->unknown_header_fields_size;
2941 buflen -= s->unknown_header_fields_size;
2942 }
2943
2944 /* Backing file format header extension */
2945 if (s->image_backing_format) {
2946 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT,
2947 s->image_backing_format,
2948 strlen(s->image_backing_format),
2949 buflen);
2950 if (ret < 0) {
2951 goto fail;
2952 }
2953
2954 buf += ret;
2955 buflen -= ret;
2956 }
2957
2958 /* External data file header extension */
2959 if (has_data_file(bs) && s->image_data_file) {
2960 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_DATA_FILE,
2961 s->image_data_file, strlen(s->image_data_file),
2962 buflen);
2963 if (ret < 0) {
2964 goto fail;
2965 }
2966
2967 buf += ret;
2968 buflen -= ret;
2969 }
2970
2971 /* Full disk encryption header pointer extension */
2972 if (s->crypto_header.offset != 0) {
2973 s->crypto_header.offset = cpu_to_be64(s->crypto_header.offset);
2974 s->crypto_header.length = cpu_to_be64(s->crypto_header.length);
2975 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_CRYPTO_HEADER,
2976 &s->crypto_header, sizeof(s->crypto_header),
2977 buflen);
2978 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset);
2979 s->crypto_header.length = be64_to_cpu(s->crypto_header.length);
2980 if (ret < 0) {
2981 goto fail;
2982 }
2983 buf += ret;
2984 buflen -= ret;
2985 }
2986
2987 /*
2988 * Feature table. A mere 8 feature names occupy 392 bytes, and
2989 * when coupled with the v3 minimum header of 104 bytes plus the
2990 * 8-byte end-of-extension marker, that would leave only 8 bytes
2991 * for a backing file name in an image with 512-byte clusters.
2992 * Thus, we choose to omit this header for cluster sizes 4k and
2993 * smaller.
2994 */
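/*
 * For reference, the arithmetic above assumes 48 bytes per feature table
 * entry: 8 entries * 48 bytes + the 8-byte extension header = 392 bytes,
 * and 512 - 104 - 392 - 8 = 8 bytes left for the backing file name.
 */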
2995 if (s->qcow_version >= 3 && s->cluster_size > 4096) {
2996 static const Qcow2Feature features[] = {
2997 {
2998 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
2999 .bit = QCOW2_INCOMPAT_DIRTY_BITNR,
3000 .name = "dirty bit",
3001 },
3002 {
3003 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
3004 .bit = QCOW2_INCOMPAT_CORRUPT_BITNR,
3005 .name = "corrupt bit",
3006 },
3007 {
3008 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
3009 .bit = QCOW2_INCOMPAT_DATA_FILE_BITNR,
3010 .name = "external data file",
3011 },
3012 {
3013 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
3014 .bit = QCOW2_INCOMPAT_COMPRESSION_BITNR,
3015 .name = "compression type",
3016 },
3017 {
3018 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
3019 .bit = QCOW2_INCOMPAT_EXTL2_BITNR,
3020 .name = "extended L2 entries",
3021 },
3022 {
3023 .type = QCOW2_FEAT_TYPE_COMPATIBLE,
3024 .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR,
3025 .name = "lazy refcounts",
3026 },
3027 {
3028 .type = QCOW2_FEAT_TYPE_AUTOCLEAR,
3029 .bit = QCOW2_AUTOCLEAR_BITMAPS_BITNR,
3030 .name = "bitmaps",
3031 },
3032 {
3033 .type = QCOW2_FEAT_TYPE_AUTOCLEAR,
3034 .bit = QCOW2_AUTOCLEAR_DATA_FILE_RAW_BITNR,
3035 .name = "raw external data",
3036 },
3037 };
3038
3039 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE,
3040 features, sizeof(features), buflen);
3041 if (ret < 0) {
3042 goto fail;
3043 }
3044 buf += ret;
3045 buflen -= ret;
3046 }
3047
3048 /* Bitmap extension */
3049 if (s->nb_bitmaps > 0) {
3050 Qcow2BitmapHeaderExt bitmaps_header = {
3051 .nb_bitmaps = cpu_to_be32(s->nb_bitmaps),
3052 .bitmap_directory_size =
3053 cpu_to_be64(s->bitmap_directory_size),
3054 .bitmap_directory_offset =
3055 cpu_to_be64(s->bitmap_directory_offset)
3056 };
3057 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BITMAPS,
3058 &bitmaps_header, sizeof(bitmaps_header),
3059 buflen);
3060 if (ret < 0) {
3061 goto fail;
3062 }
3063 buf += ret;
3064 buflen -= ret;
3065 }
3066
3067 /* Keep unknown header extensions */
3068 QLIST_FOREACH(uext, &s->unknown_header_ext, next) {
3069 ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen);
3070 if (ret < 0) {
3071 goto fail;
3072 }
3073
3074 buf += ret;
3075 buflen -= ret;
3076 }
3077
3078 /* End of header extensions */
3079 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen);
3080 if (ret < 0) {
3081 goto fail;
3082 }
3083
3084 buf += ret;
3085 buflen -= ret;
3086
3087 /* Backing file name */
3088 if (s->image_backing_file) {
3089 size_t backing_file_len = strlen(s->image_backing_file);
3090
3091 if (buflen < backing_file_len) {
3092 ret = -ENOSPC;
3093 goto fail;
3094 }
3095
3096 /* Using strncpy is ok here, since buf is not NUL-terminated. */
3097 strncpy(buf, s->image_backing_file, buflen);
3098
3099 header->backing_file_offset = cpu_to_be64(buf - ((char*) header));
3100 header->backing_file_size = cpu_to_be32(backing_file_len);
3101 }
3102
3103 /* Write the new header */
3104 ret = bdrv_pwrite(bs->file, 0, s->cluster_size, header, 0);
3105 if (ret < 0) {
3106 goto fail;
3107 }
3108
3109 ret = 0;
3110 fail:
3111 qemu_vfree(header);
3112 return ret;
3113 }
3114
3115 static int qcow2_change_backing_file(BlockDriverState *bs,
3116 const char *backing_file, const char *backing_fmt)
3117 {
3118 BDRVQcow2State *s = bs->opaque;
3119
3120 /* Adding a backing file means that the external data file alone won't be
3121 * enough to make sense of the content */
3122 if (backing_file && data_file_is_raw(bs)) {
3123 return -EINVAL;
3124 }
3125
3126 if (backing_file && strlen(backing_file) > 1023) {
3127 return -EINVAL;
3128 }
3129
3130 pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file),
3131 backing_file ?: "");
3132 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
3133 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
3134
3135 g_free(s->image_backing_file);
3136 g_free(s->image_backing_format);
3137
3138 s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL;
3139 s->image_backing_format = backing_fmt ? g_strdup(bs->backing_format) : NULL;
3140
3141 return qcow2_update_header(bs);
3142 }
3143
3144 static int qcow2_set_up_encryption(BlockDriverState *bs,
3145 QCryptoBlockCreateOptions *cryptoopts,
3146 Error **errp)
3147 {
3148 BDRVQcow2State *s = bs->opaque;
3149 QCryptoBlock *crypto = NULL;
3150 int fmt, ret;
3151
3152 switch (cryptoopts->format) {
3153 case Q_CRYPTO_BLOCK_FORMAT_LUKS:
3154 fmt = QCOW_CRYPT_LUKS;
3155 break;
3156 case Q_CRYPTO_BLOCK_FORMAT_QCOW:
3157 fmt = QCOW_CRYPT_AES;
3158 break;
3159 default:
3160 error_setg(errp, "Crypto format not supported in qcow2");
3161 return -EINVAL;
3162 }
3163
3164 s->crypt_method_header = fmt;
3165
3166 crypto = qcrypto_block_create(cryptoopts, "encrypt.",
3167 qcow2_crypto_hdr_init_func,
3168 qcow2_crypto_hdr_write_func,
3169 bs, errp);
3170 if (!crypto) {
3171 return -EINVAL;
3172 }
3173
3174 ret = qcow2_update_header(bs);
3175 if (ret < 0) {
3176 error_setg_errno(errp, -ret, "Could not write encryption header");
3177 goto out;
3178 }
3179
3180 ret = 0;
3181 out:
3182 qcrypto_block_free(crypto);
3183 return ret;
3184 }
3185
3186 /**
3187 * Preallocates metadata structures for data clusters between @offset (in the
3188 * guest disk) and @new_length (which is thus generally the new guest disk
3189 * size).
3190 *
3191 * Returns: 0 on success, -errno on failure.
3192 */
3193 static int coroutine_fn GRAPH_RDLOCK
3194 preallocate_co(BlockDriverState *bs, uint64_t offset, uint64_t new_length,
3195 PreallocMode mode, Error **errp)
3196 {
3197 BDRVQcow2State *s = bs->opaque;
3198 uint64_t bytes;
3199 uint64_t host_offset = 0;
3200 int64_t file_length;
3201 unsigned int cur_bytes;
3202 int ret;
3203 QCowL2Meta *meta = NULL, *m;
3204
3205 assert(offset <= new_length);
3206 bytes = new_length - offset;
3207
3208 while (bytes) {
3209 cur_bytes = MIN(bytes, QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size));
3210 ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes,
3211 &host_offset, &meta);
3212 if (ret < 0) {
3213 error_setg_errno(errp, -ret, "Allocating clusters failed");
3214 goto out;
3215 }
3216
3217 for (m = meta; m != NULL; m = m->next) {
3218 m->prealloc = true;
3219 }
3220
3221 ret = qcow2_handle_l2meta(bs, &meta, true);
3222 if (ret < 0) {
3223 error_setg_errno(errp, -ret, "Mapping clusters failed");
3224 goto out;
3225 }
3226
3227 /* TODO Preallocate data if requested */
3228
3229 bytes -= cur_bytes;
3230 offset += cur_bytes;
3231 }
3232
3233 /*
3234 * It is expected that the image file is large enough to actually contain
3235 * all of the allocated clusters (otherwise we get failing reads after
3236 * EOF). Extend the image to the last allocated sector.
3237 */
3238 file_length = bdrv_getlength(s->data_file->bs);
3239 if (file_length < 0) {
3240 error_setg_errno(errp, -file_length, "Could not get file size");
3241 ret = file_length;
3242 goto out;
3243 }
3244
3245 if (host_offset + cur_bytes > file_length) {
3246 if (mode == PREALLOC_MODE_METADATA) {
3247 mode = PREALLOC_MODE_OFF;
3248 }
3249 ret = bdrv_co_truncate(s->data_file, host_offset + cur_bytes, false,
3250 mode, 0, errp);
3251 if (ret < 0) {
3252 goto out;
3253 }
3254 }
3255
3256 ret = 0;
3257
3258 out:
3259 qcow2_handle_l2meta(bs, &meta, false);
3260 return ret;
3261 }
3262
3263 /* qcow2_refcount_metadata_size:
3264 * @clusters: number of clusters to refcount (including data and L1/L2 tables)
3265 * @cluster_size: size of a cluster, in bytes
3266 * @refcount_order: refcount bits power-of-2 exponent
3267 * @generous_increase: allow for the refcount table to be 1.5x as large as it
3268 * needs to be
3269 *
3270 * Returns: Number of bytes required for refcount blocks and table metadata.
3271 */
3272 int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size,
3273 int refcount_order, bool generous_increase,
3274 uint64_t *refblock_count)
3275 {
3276 /*
3277 * Every host cluster is reference-counted, including metadata (even
3278 * refcount metadata is recursively included).
3279 *
3280 * An accurate formula for the size of refcount metadata is difficult
3281 * to derive. An easier method of calculation is finding the fixed point
3282 * where no further refcount blocks or table clusters are required to
3283 * reference count every cluster.
3284 */
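/*
 * A worked example of the iteration below (illustrative only, assuming the
 * usual 8-byte reftable entries): with 64 KiB clusters and the default
 * 16-bit refcounts, refcounts_per_block = 65536 * 8 / 16 = 32768 and
 * blocks_per_table_cluster = 65536 / 8 = 8192. For 16384 data clusters
 * (1 GiB), the loop converges at blocks = 1 and table = 1, i.e. 2 clusters
 * (128 KiB) of refcount metadata.
 */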
3285 int64_t blocks_per_table_cluster = cluster_size / REFTABLE_ENTRY_SIZE;
3286 int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order);
3287 int64_t table = 0; /* number of refcount table clusters */
3288 int64_t blocks = 0; /* number of refcount block clusters */
3289 int64_t last;
3290 int64_t n = 0;
3291
3292 do {
3293 last = n;
3294 blocks = DIV_ROUND_UP(clusters + table + blocks, refcounts_per_block);
3295 table = DIV_ROUND_UP(blocks, blocks_per_table_cluster);
3296 n = clusters + blocks + table;
3297
3298 if (n == last && generous_increase) {
3299 clusters += DIV_ROUND_UP(table, 2);
3300 n = 0; /* force another loop */
3301 generous_increase = false;
3302 }
3303 } while (n != last);
3304
3305 if (refblock_count) {
3306 *refblock_count = blocks;
3307 }
3308
3309 return (blocks + table) * cluster_size;
3310 }
3311
3312 /**
3313 * qcow2_calc_prealloc_size:
3314 * @total_size: virtual disk size in bytes
3315 * @cluster_size: cluster size in bytes
3316 * @refcount_order: refcount bits power-of-2 exponent
3317 * @extended_l2: true if the image has extended L2 entries
3318 *
3319 * Returns: Total number of bytes required for the fully allocated image
3320 * (including metadata).
3321 */
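/*
 * A worked example (illustrative only): for a 1 GiB image with 64 KiB
 * clusters, 16-bit refcounts and normal (8-byte) L2 entries, this adds up
 * to one header cluster (64 KiB), two L2 clusters (128 KiB), one L1
 * cluster (64 KiB) and 128 KiB of refcount metadata, i.e. roughly
 * 1 GiB + 384 KiB in total.
 */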
3322 static int64_t qcow2_calc_prealloc_size(int64_t total_size,
3323 size_t cluster_size,
3324 int refcount_order,
3325 bool extended_l2)
3326 {
3327 int64_t meta_size = 0;
3328 uint64_t nl1e, nl2e;
3329 int64_t aligned_total_size = ROUND_UP(total_size, cluster_size);
3330 size_t l2e_size = extended_l2 ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL;
3331
3332 /* header: 1 cluster */
3333 meta_size += cluster_size;
3334
3335 /* total size of L2 tables */
3336 nl2e = aligned_total_size / cluster_size;
3337 nl2e = ROUND_UP(nl2e, cluster_size / l2e_size);
3338 meta_size += nl2e * l2e_size;
3339
3340 /* total size of L1 tables */
3341 nl1e = nl2e * l2e_size / cluster_size;
3342 nl1e = ROUND_UP(nl1e, cluster_size / L1E_SIZE);
3343 meta_size += nl1e * L1E_SIZE;
3344
3345 /* total size of refcount table and blocks */
3346 meta_size += qcow2_refcount_metadata_size(
3347 (meta_size + aligned_total_size) / cluster_size,
3348 cluster_size, refcount_order, false, NULL);
3349
3350 return meta_size + aligned_total_size;
3351 }
3352
3353 static bool validate_cluster_size(size_t cluster_size, bool extended_l2,
3354 Error **errp)
3355 {
3356 int cluster_bits = ctz32(cluster_size);
3357 if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS ||
3358 (1 << cluster_bits) != cluster_size)
3359 {
3360 error_setg(errp, "Cluster size must be a power of two between %d and "
3361 "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10));
3362 return false;
3363 }
3364
3365 if (extended_l2) {
3366 unsigned min_cluster_size =
3367 (1 << MIN_CLUSTER_BITS) * QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER;
3368 if (cluster_size < min_cluster_size) {
3369 error_setg(errp, "Extended L2 entries are only supported with "
3370 "cluster sizes of at least %u bytes", min_cluster_size);
3371 return false;
3372 }
3373 }
3374
3375 return true;
3376 }
3377
3378 static size_t qcow2_opt_get_cluster_size_del(QemuOpts *opts, bool extended_l2,
3379 Error **errp)
3380 {
3381 size_t cluster_size;
3382
3383 cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE,
3384 DEFAULT_CLUSTER_SIZE);
3385 if (!validate_cluster_size(cluster_size, extended_l2, errp)) {
3386 return 0;
3387 }
3388 return cluster_size;
3389 }
3390
3391 static int qcow2_opt_get_version_del(QemuOpts *opts, Error **errp)
3392 {
3393 char *buf;
3394 int ret;
3395
3396 buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL);
3397 if (!buf) {
3398 ret = 3; /* default */
3399 } else if (!strcmp(buf, "0.10")) {
3400 ret = 2;
3401 } else if (!strcmp(buf, "1.1")) {
3402 ret = 3;
3403 } else {
3404 error_setg(errp, "Invalid compatibility level: '%s'", buf);
3405 ret = -EINVAL;
3406 }
3407 g_free(buf);
3408 return ret;
3409 }
3410
3411 static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version,
3412 Error **errp)
3413 {
3414 uint64_t refcount_bits;
3415
3416 refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, 16);
3417 if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) {
3418 error_setg(errp, "Refcount width must be a power of two and may not "
3419 "exceed 64 bits");
3420 return 0;
3421 }
3422
3423 if (version < 3 && refcount_bits != 16) {
3424 error_setg(errp, "Different refcount widths than 16 bits require "
3425 "compatibility level 1.1 or above (use compat=1.1 or "
3426 "greater)");
3427 return 0;
3428 }
3429
3430 return refcount_bits;
3431 }
3432
3433 static int coroutine_fn
3434 qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
3435 {
3436 BlockdevCreateOptionsQcow2 *qcow2_opts;
3437 QDict *options;
3438
3439 /*
3440 * Open the image file and write a minimal qcow2 header.
3441 *
3442 * We keep things simple and start with a zero-sized image. We also
3443 * do without refcount blocks or an L1 table for now. We'll fix the
3444 * inconsistency later.
3445 *
3446 * We do need a refcount table because growing the refcount table means
3447 * allocating two new refcount blocks - the second of which would be at
3448 * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file
3449 * size for any qcow2 image.
3450 */
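/*
 * In outline, a summary of the steps below (for orientation only):
 *  1. Write the minimal header plus a refcount table pointing at one
 *     (still empty) refcount block.
 *  2. Open the result with the qcow2 driver.
 *  3. Allocate the first three clusters so that header, refcount table and
 *     refcount block become properly refcounted.
 *  4. Write the full header (feature table, extensions) and resize the
 *     image to the requested size with the requested preallocation.
 *  5. Set up the backing file and/or encryption if requested, then reopen
 *     once more to flush everything out.
 */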
3451 BlockBackend *blk = NULL;
3452 BlockDriverState *bs = NULL;
3453 BlockDriverState *data_bs = NULL;
3454 QCowHeader *header;
3455 size_t cluster_size;
3456 int version;
3457 int refcount_order;
3458 uint64_t *refcount_table;
3459 int ret;
3460 uint8_t compression_type = QCOW2_COMPRESSION_TYPE_ZLIB;
3461
3462 assert(create_options->driver == BLOCKDEV_DRIVER_QCOW2);
3463 qcow2_opts = &create_options->u.qcow2;
3464
3465 bs = bdrv_co_open_blockdev_ref(qcow2_opts->file, errp);
3466 if (bs == NULL) {
3467 return -EIO;
3468 }
3469
3470 /* Validate options and set default values */
3471 if (!QEMU_IS_ALIGNED(qcow2_opts->size, BDRV_SECTOR_SIZE)) {
3472 error_setg(errp, "Image size must be a multiple of %u bytes",
3473 (unsigned) BDRV_SECTOR_SIZE);
3474 ret = -EINVAL;
3475 goto out;
3476 }
3477
3478 if (qcow2_opts->has_version) {
3479 switch (qcow2_opts->version) {
3480 case BLOCKDEV_QCOW2_VERSION_V2:
3481 version = 2;
3482 break;
3483 case BLOCKDEV_QCOW2_VERSION_V3:
3484 version = 3;
3485 break;
3486 default:
3487 g_assert_not_reached();
3488 }
3489 } else {
3490 version = 3;
3491 }
3492
3493 if (qcow2_opts->has_cluster_size) {
3494 cluster_size = qcow2_opts->cluster_size;
3495 } else {
3496 cluster_size = DEFAULT_CLUSTER_SIZE;
3497 }
3498
3499 if (!qcow2_opts->has_extended_l2) {
3500 qcow2_opts->extended_l2 = false;
3501 }
3502 if (qcow2_opts->extended_l2) {
3503 if (version < 3) {
3504 error_setg(errp, "Extended L2 entries are only supported with "
3505 "compatibility level 1.1 and above (use version=v3 or "
3506 "greater)");
3507 ret = -EINVAL;
3508 goto out;
3509 }
3510 }
3511
3512 if (!validate_cluster_size(cluster_size, qcow2_opts->extended_l2, errp)) {
3513 ret = -EINVAL;
3514 goto out;
3515 }
3516
3517 if (!qcow2_opts->has_preallocation) {
3518 qcow2_opts->preallocation = PREALLOC_MODE_OFF;
3519 }
3520 if (qcow2_opts->backing_file &&
3521 qcow2_opts->preallocation != PREALLOC_MODE_OFF &&
3522 !qcow2_opts->extended_l2)
3523 {
3524 error_setg(errp, "Backing file and preallocation can only be used at "
3525 "the same time if extended_l2 is on");
3526 ret = -EINVAL;
3527 goto out;
3528 }
3529 if (qcow2_opts->has_backing_fmt && !qcow2_opts->backing_file) {
3530 error_setg(errp, "Backing format cannot be used without backing file");
3531 ret = -EINVAL;
3532 goto out;
3533 }
3534
3535 if (!qcow2_opts->has_lazy_refcounts) {
3536 qcow2_opts->lazy_refcounts = false;
3537 }
3538 if (version < 3 && qcow2_opts->lazy_refcounts) {
3539 error_setg(errp, "Lazy refcounts only supported with compatibility "
3540 "level 1.1 and above (use version=v3 or greater)");
3541 ret = -EINVAL;
3542 goto out;
3543 }
3544
3545 if (!qcow2_opts->has_refcount_bits) {
3546 qcow2_opts->refcount_bits = 16;
3547 }
3548 if (qcow2_opts->refcount_bits > 64 ||
3549 !is_power_of_2(qcow2_opts->refcount_bits))
3550 {
3551 error_setg(errp, "Refcount width must be a power of two and may not "
3552 "exceed 64 bits");
3553 ret = -EINVAL;
3554 goto out;
3555 }
3556 if (version < 3 && qcow2_opts->refcount_bits != 16) {
3557 error_setg(errp, "Different refcount widths than 16 bits require "
3558 "compatibility level 1.1 or above (use version=v3 or "
3559 "greater)");
3560 ret = -EINVAL;
3561 goto out;
3562 }
3563 refcount_order = ctz32(qcow2_opts->refcount_bits);
3564
3565 if (qcow2_opts->data_file_raw && !qcow2_opts->data_file) {
3566 error_setg(errp, "data-file-raw requires data-file");
3567 ret = -EINVAL;
3568 goto out;
3569 }
3570 if (qcow2_opts->data_file_raw && qcow2_opts->backing_file) {
3571 error_setg(errp, "Backing file and data-file-raw cannot be used at "
3572 "the same time");
3573 ret = -EINVAL;
3574 goto out;
3575 }
3576 if (qcow2_opts->data_file_raw &&
3577 qcow2_opts->preallocation == PREALLOC_MODE_OFF)
3578 {
3579 /*
3580 * data-file-raw means that "the external data file can be
3581 * read as a consistent standalone raw image without looking
3582 * at the qcow2 metadata." It does not say that the metadata
3583 * must be ignored, though (and the qcow2 driver in fact does
3584 * not ignore it), so the L1/L2 tables must be present and
3585 * give a 1:1 mapping, so you get the same result regardless
3586 * of whether you look at the metadata or whether you ignore
3587 * it.
3588 */
3589 qcow2_opts->preallocation = PREALLOC_MODE_METADATA;
3590
3591 /*
3592 * Cannot use preallocation with backing files, but giving a
3593 * backing file when specifying data_file_raw is an error
3594 * anyway.
3595 */
3596 assert(!qcow2_opts->backing_file);
3597 }
3598
3599 if (qcow2_opts->data_file) {
3600 if (version < 3) {
3601 error_setg(errp, "External data files are only supported with "
3602 "compatibility level 1.1 and above (use version=v3 or "
3603 "greater)");
3604 ret = -EINVAL;
3605 goto out;
3606 }
3607 data_bs = bdrv_co_open_blockdev_ref(qcow2_opts->data_file, errp);
3608 if (data_bs == NULL) {
3609 ret = -EIO;
3610 goto out;
3611 }
3612 }
3613
3614 if (qcow2_opts->has_compression_type &&
3615 qcow2_opts->compression_type != QCOW2_COMPRESSION_TYPE_ZLIB) {
3616
3617 ret = -EINVAL;
3618
3619 if (version < 3) {
3620 error_setg(errp, "Non-zlib compression type is only supported with "
3621 "compatibility level 1.1 and above (use version=v3 or "
3622 "greater)");
3623 goto out;
3624 }
3625
3626 switch (qcow2_opts->compression_type) {
3627 #ifdef CONFIG_ZSTD
3628 case QCOW2_COMPRESSION_TYPE_ZSTD:
3629 break;
3630 #endif
3631 default:
3632 error_setg(errp, "Unknown compression type");
3633 goto out;
3634 }
3635
3636 compression_type = qcow2_opts->compression_type;
3637 }
3638
3639 /* Create BlockBackend to write to the image */
3640 blk = blk_co_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL,
3641 errp);
3642 if (!blk) {
3643 ret = -EPERM;
3644 goto out;
3645 }
3646 blk_set_allow_write_beyond_eof(blk, true);
3647
3648 /* Write the header */
3649 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header));
3650 header = g_malloc0(cluster_size);
3651 *header = (QCowHeader) {
3652 .magic = cpu_to_be32(QCOW_MAGIC),
3653 .version = cpu_to_be32(version),
3654 .cluster_bits = cpu_to_be32(ctz32(cluster_size)),
3655 .size = cpu_to_be64(0),
3656 .l1_table_offset = cpu_to_be64(0),
3657 .l1_size = cpu_to_be32(0),
3658 .refcount_table_offset = cpu_to_be64(cluster_size),
3659 .refcount_table_clusters = cpu_to_be32(1),
3660 .refcount_order = cpu_to_be32(refcount_order),
3661 /* don't deal with endianness since compression_type is 1 byte long */
3662 .compression_type = compression_type,
3663 .header_length = cpu_to_be32(sizeof(*header)),
3664 };
3665
3666 /* We'll update this to the correct value later */
3667 header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
3668
3669 if (qcow2_opts->lazy_refcounts) {
3670 header->compatible_features |=
3671 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS);
3672 }
3673 if (data_bs) {
3674 header->incompatible_features |=
3675 cpu_to_be64(QCOW2_INCOMPAT_DATA_FILE);
3676 }
3677 if (qcow2_opts->data_file_raw) {
3678 header->autoclear_features |=
3679 cpu_to_be64(QCOW2_AUTOCLEAR_DATA_FILE_RAW);
3680 }
3681 if (compression_type != QCOW2_COMPRESSION_TYPE_ZLIB) {
3682 header->incompatible_features |=
3683 cpu_to_be64(QCOW2_INCOMPAT_COMPRESSION);
3684 }
3685
3686 if (qcow2_opts->extended_l2) {
3687 header->incompatible_features |=
3688 cpu_to_be64(QCOW2_INCOMPAT_EXTL2);
3689 }
3690
3691 ret = blk_co_pwrite(blk, 0, cluster_size, header, 0);
3692 g_free(header);
3693 if (ret < 0) {
3694 error_setg_errno(errp, -ret, "Could not write qcow2 header");
3695 goto out;
3696 }
3697
3698 /* Write a refcount table with one refcount block */
3699 refcount_table = g_malloc0(2 * cluster_size);
3700 refcount_table[0] = cpu_to_be64(2 * cluster_size);
3701 ret = blk_co_pwrite(blk, cluster_size, 2 * cluster_size, refcount_table, 0);
3702 g_free(refcount_table);
3703
3704 if (ret < 0) {
3705 error_setg_errno(errp, -ret, "Could not write refcount table");
3706 goto out;
3707 }
3708
3709 blk_unref(blk);
3710 blk = NULL;
3711
3712 /*
3713 * And now open the image and make it consistent first (i.e. increase the
3714 * refcounts of the clusters that are occupied by the header and the refcount
3715 * table)
3716 */
3717 options = qdict_new();
3718 qdict_put_str(options, "driver", "qcow2");
3719 qdict_put_str(options, "file", bs->node_name);
3720 if (data_bs) {
3721 qdict_put_str(options, "data-file", data_bs->node_name);
3722 }
3723 blk = blk_co_new_open(NULL, NULL, options,
3724 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_NO_FLUSH,
3725 errp);
3726 if (blk == NULL) {
3727 ret = -EIO;
3728 goto out;
3729 }
3730
3731 ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size);
3732 if (ret < 0) {
3733 error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 "
3734 "header and refcount table");
3735 goto out;
3736
3737 } else if (ret != 0) {
3738 error_report("Huh, first cluster in empty image is already in use?");
3739 abort();
3740 }
3741
3742 /* Set the external data file if necessary */
3743 if (data_bs) {
3744 BDRVQcow2State *s = blk_bs(blk)->opaque;
3745 s->image_data_file = g_strdup(data_bs->filename);
3746 }
3747
3748 /* Create a full header (including things like feature table) */
3749 ret = qcow2_update_header(blk_bs(blk));
3750 if (ret < 0) {
3751 error_setg_errno(errp, -ret, "Could not update qcow2 header");
3752 goto out;
3753 }
3754
3755 /* Okay, now that we have a valid image, let's give it the right size */
3756 ret = blk_co_truncate(blk, qcow2_opts->size, false,
3757 qcow2_opts->preallocation, 0, errp);
3758 if (ret < 0) {
3759 error_prepend(errp, "Could not resize image: ");
3760 goto out;
3761 }
3762
3763 /* Want a backing file? There you go. */
3764 if (qcow2_opts->backing_file) {
3765 const char *backing_format = NULL;
3766
3767 if (qcow2_opts->has_backing_fmt) {
3768 backing_format = BlockdevDriver_str(qcow2_opts->backing_fmt);
3769 }
3770
3771 ret = bdrv_change_backing_file(blk_bs(blk), qcow2_opts->backing_file,
3772 backing_format, false);
3773 if (ret < 0) {
3774 error_setg_errno(errp, -ret, "Could not assign backing file '%s' "
3775 "with format '%s'", qcow2_opts->backing_file,
3776 backing_format);
3777 goto out;
3778 }
3779 }
3780
3781 /* Want encryption? There you go. */
3782 if (qcow2_opts->encrypt) {
3783 ret = qcow2_set_up_encryption(blk_bs(blk), qcow2_opts->encrypt, errp);
3784 if (ret < 0) {
3785 goto out;
3786 }
3787 }
3788
3789 blk_unref(blk);
3790 blk = NULL;
3791
3792 /* Reopen the image without BDRV_O_NO_FLUSH so that it is flushed before
3793 * returning. Use BDRV_O_NO_IO: now that encryption is set up, we do not
3794 * want to have to set up a decryption context. We are not doing any I/O
3795 * on the top-level BlockDriverState, only on lower layers, where
3796 * BDRV_O_NO_IO has no effect.
3797 */
3798 options = qdict_new();
3799 qdict_put_str(options, "driver", "qcow2");
3800 qdict_put_str(options, "file", bs->node_name);
3801 if (data_bs) {
3802 qdict_put_str(options, "data-file", data_bs->node_name);
3803 }
3804 blk = blk_co_new_open(NULL, NULL, options,
3805 BDRV_O_RDWR | BDRV_O_NO_BACKING | BDRV_O_NO_IO,
3806 errp);
3807 if (blk == NULL) {
3808 ret = -EIO;
3809 goto out;
3810 }
3811
3812 ret = 0;
3813 out:
3814 blk_unref(blk);
3815 bdrv_unref(bs);
3816 bdrv_unref(data_bs);
3817 return ret;
3818 }
3819
3820 static int coroutine_fn GRAPH_RDLOCK
3821 qcow2_co_create_opts(BlockDriver *drv, const char *filename, QemuOpts *opts,
3822 Error **errp)
3823 {
3824 BlockdevCreateOptions *create_options = NULL;
3825 QDict *qdict;
3826 Visitor *v;
3827 BlockDriverState *bs = NULL;
3828 BlockDriverState *data_bs = NULL;
3829 const char *val;
3830 int ret;
3831
3832 /* Only the keyval visitor supports the dotted syntax needed for
3833 * encryption, so go through a QDict before getting a QAPI type. Ignore
3834 * options meant for the protocol layer so that the visitor doesn't
3835 * complain. */
3836 qdict = qemu_opts_to_qdict_filtered(opts, NULL, bdrv_qcow2.create_opts,
3837 true);
3838
3839 /* Handle encryption options */
3840 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT);
3841 if (val && !strcmp(val, "on")) {
3842 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT, "qcow");
3843 } else if (val && !strcmp(val, "off")) {
3844 qdict_del(qdict, BLOCK_OPT_ENCRYPT);
3845 }
3846
3847 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT);
3848 if (val && !strcmp(val, "aes")) {
3849 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT, "qcow");
3850 }
3851
3852 /* Convert compat=0.10/1.1 into compat=v2/v3, to be renamed into
3853 * version=v2/v3 below. */
3854 val = qdict_get_try_str(qdict, BLOCK_OPT_COMPAT_LEVEL);
3855 if (val && !strcmp(val, "0.10")) {
3856 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v2");
3857 } else if (val && !strcmp(val, "1.1")) {
3858 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v3");
3859 }
3860
3861 /* Change legacy command line options into QMP ones */
3862 static const QDictRenames opt_renames[] = {
3863 { BLOCK_OPT_BACKING_FILE, "backing-file" },
3864 { BLOCK_OPT_BACKING_FMT, "backing-fmt" },
3865 { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" },
3866 { BLOCK_OPT_LAZY_REFCOUNTS, "lazy-refcounts" },
3867 { BLOCK_OPT_EXTL2, "extended-l2" },
3868 { BLOCK_OPT_REFCOUNT_BITS, "refcount-bits" },
3869 { BLOCK_OPT_ENCRYPT, BLOCK_OPT_ENCRYPT_FORMAT },
3870 { BLOCK_OPT_COMPAT_LEVEL, "version" },
3871 { BLOCK_OPT_DATA_FILE_RAW, "data-file-raw" },
3872 { BLOCK_OPT_COMPRESSION_TYPE, "compression-type" },
3873 { NULL, NULL },
3874 };
3875
3876 if (!qdict_rename_keys(qdict, opt_renames, errp)) {
3877 ret = -EINVAL;
3878 goto finish;
3879 }
3880
3881 /* Create and open the file (protocol layer) */
3882 ret = bdrv_co_create_file(filename, opts, errp);
3883 if (ret < 0) {
3884 goto finish;
3885 }
3886
3887 bs = bdrv_co_open(filename, NULL, NULL,
3888 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
3889 if (bs == NULL) {
3890 ret = -EIO;
3891 goto finish;
3892 }
3893
3894 /* Create and open an external data file (protocol layer) */
3895 val = qdict_get_try_str(qdict, BLOCK_OPT_DATA_FILE);
3896 if (val) {
3897 ret = bdrv_co_create_file(val, opts, errp);
3898 if (ret < 0) {
3899 goto finish;
3900 }
3901
3902 data_bs = bdrv_co_open(val, NULL, NULL,
3903 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
3904 errp);
3905 if (data_bs == NULL) {
3906 ret = -EIO;
3907 goto finish;
3908 }
3909
3910 qdict_del(qdict, BLOCK_OPT_DATA_FILE);
3911 qdict_put_str(qdict, "data-file", data_bs->node_name);
3912 }
3913
3914 /* Set 'driver' and 'node' options */
3915 qdict_put_str(qdict, "driver", "qcow2");
3916 qdict_put_str(qdict, "file", bs->node_name);
3917
3918 /* Now get the QAPI type BlockdevCreateOptions */
3919 v = qobject_input_visitor_new_flat_confused(qdict, errp);
3920 if (!v) {
3921 ret = -EINVAL;
3922 goto finish;
3923 }
3924
3925 visit_type_BlockdevCreateOptions(v, NULL, &create_options, errp);
3926 visit_free(v);
3927 if (!create_options) {
3928 ret = -EINVAL;
3929 goto finish;
3930 }
3931
3932 /* Silently round up size */
3933 create_options->u.qcow2.size = ROUND_UP(create_options->u.qcow2.size,
3934 BDRV_SECTOR_SIZE);
3935
3936 /* Create the qcow2 image (format layer) */
3937 ret = qcow2_co_create(create_options, errp);
3938 finish:
3939 if (ret < 0) {
3940 bdrv_co_delete_file_noerr(bs);
3941 bdrv_co_delete_file_noerr(data_bs);
3942 } else {
3943 ret = 0;
3944 }
3945
3946 qobject_unref(qdict);
3947 bdrv_unref(bs);
3948 bdrv_unref(data_bs);
3949 qapi_free_BlockdevCreateOptions(create_options);
3950 return ret;
3951 }
3952
3953
3954 static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
3955 {
3956 int64_t nr;
3957 int res;
3958
3959 /* Clamp to image length, before checking status of underlying sectors */
3960 if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) {
3961 bytes = bs->total_sectors * BDRV_SECTOR_SIZE - offset;
3962 }
3963
3964 if (!bytes) {
3965 return true;
3966 }
3967
3968 /*
3969 * bdrv_block_status_above() does not merge different types of zeros: for
3970 * example, zeros from a region that is unallocated in the whole backing
3971 * chain versus zeros that result from a short backing file. So we need
3972 * a loop.
3973 */
3974 do {
3975 res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL);
3976 offset += nr;
3977 bytes -= nr;
3978 } while (res >= 0 && (res & BDRV_BLOCK_ZERO) && nr && bytes);
3979
3980 return res >= 0 && (res & BDRV_BLOCK_ZERO) && bytes == 0;
3981 }
3982
3983 static int coroutine_fn GRAPH_RDLOCK
3984 qcow2_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
3985 BdrvRequestFlags flags)
3986 {
3987 int ret;
3988 BDRVQcow2State *s = bs->opaque;
3989
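/* head and tail are the distances from the request to the previous and
 * next subcluster boundary, respectively; both are zero if the request
 * is already subcluster-aligned. */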
3990 uint32_t head = offset_into_subcluster(s, offset);
3991 uint32_t tail = ROUND_UP(offset + bytes, s->subcluster_size) -
3992 (offset + bytes);
3993
3994 trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes);
3995 if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) {
3996 tail = 0;
3997 }
3998
3999 if (head || tail) {
4000 uint64_t off;
4001 unsigned int nr;
4002 QCow2SubclusterType type;
4003
4004 assert(head + bytes + tail <= s->subcluster_size);
4005
4006 /* check whether the remainder of the subcluster already reads as zero */
4007 if (!(is_zero(bs, offset - head, head) &&
4008 is_zero(bs, offset + bytes, tail))) {
4009 return -ENOTSUP;
4010 }
4011
4012 qemu_co_mutex_lock(&s->lock);
4013 /* A new write may have happened since the check above */
4014 offset -= head;
4015 bytes = s->subcluster_size;
4016 nr = s->subcluster_size;
4017 ret = qcow2_get_host_offset(bs, offset, &nr, &off, &type);
4018 if (ret < 0 ||
4019 (type != QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN &&
4020 type != QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC &&
4021 type != QCOW2_SUBCLUSTER_ZERO_PLAIN &&
4022 type != QCOW2_SUBCLUSTER_ZERO_ALLOC)) {
4023 qemu_co_mutex_unlock(&s->lock);
4024 return ret < 0 ? ret : -ENOTSUP;
4025 }
4026 } else {
4027 qemu_co_mutex_lock(&s->lock);
4028 }
4029
4030 trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes);
4031
4032 /* Whatever is left can use real zero subclusters */
4033 ret = qcow2_subcluster_zeroize(bs, offset, bytes, flags);
4034 qemu_co_mutex_unlock(&s->lock);
4035
4036 return ret;
4037 }
4038
4039 static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs,
4040 int64_t offset, int64_t bytes)
4041 {
4042 int ret;
4043 BDRVQcow2State *s = bs->opaque;
4044
4045 /* If the image does not support QCOW_OFLAG_ZERO then discarding
4046 * clusters could expose stale data from the backing file. */
4047 if (s->qcow_version < 3 && bs->backing) {
4048 return -ENOTSUP;
4049 }
4050
4051 if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) {
4052 assert(bytes < s->cluster_size);
4053 /* Ignore partial clusters, except for the special case of discarding
4054 * the complete partial cluster at the end of an unaligned file */
4055 if (!QEMU_IS_ALIGNED(offset, s->cluster_size) ||
4056 offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) {
4057 return -ENOTSUP;
4058 }
4059 }
4060
4061 qemu_co_mutex_lock(&s->lock);
4062 ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST,
4063 false);
4064 qemu_co_mutex_unlock(&s->lock);
4065 return ret;
4066 }
4067
4068 static int coroutine_fn GRAPH_RDLOCK
4069 qcow2_co_copy_range_from(BlockDriverState *bs,
4070 BdrvChild *src, int64_t src_offset,
4071 BdrvChild *dst, int64_t dst_offset,
4072 int64_t bytes, BdrvRequestFlags read_flags,
4073 BdrvRequestFlags write_flags)
4074 {
4075 BDRVQcow2State *s = bs->opaque;
4076 int ret;
4077 unsigned int cur_bytes; /* number of bytes in current iteration */
4078 BdrvChild *child = NULL;
4079 BdrvRequestFlags cur_write_flags;
4080
4081 assert(!bs->encrypted);
4082 qemu_co_mutex_lock(&s->lock);
4083
4084 while (bytes != 0) {
4085 uint64_t copy_offset = 0;
4086 QCow2SubclusterType type;
4087 /* prepare next request */
4088 cur_bytes = MIN(bytes, INT_MAX);
4089 cur_write_flags = write_flags;
4090
4091 ret = qcow2_get_host_offset(bs, src_offset, &cur_bytes,
4092 &copy_offset, &type);
4093 if (ret < 0) {
4094 goto out;
4095 }
4096
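/*
 * Route the copy depending on how the source range is mapped:
 * unallocated ranges are read from the backing file (or turned into
 * zero writes), zero subclusters become zero writes, normal clusters
 * are copied from the data file, and compressed clusters are not
 * supported.
 */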
4097 switch (type) {
4098 case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
4099 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
4100 if (bs->backing && bs->backing->bs) {
4101 int64_t backing_length = bdrv_getlength(bs->backing->bs);
4102 if (src_offset >= backing_length) {
4103 cur_write_flags |= BDRV_REQ_ZERO_WRITE;
4104 } else {
4105 child = bs->backing;
4106 cur_bytes = MIN(cur_bytes, backing_length - src_offset);
4107 copy_offset = src_offset;
4108 }
4109 } else {
4110 cur_write_flags |= BDRV_REQ_ZERO_WRITE;
4111 }
4112 break;
4113
4114 case QCOW2_SUBCLUSTER_ZERO_PLAIN:
4115 case QCOW2_SUBCLUSTER_ZERO_ALLOC:
4116 cur_write_flags |= BDRV_REQ_ZERO_WRITE;
4117 break;
4118
4119 case QCOW2_SUBCLUSTER_COMPRESSED:
4120 ret = -ENOTSUP;
4121 goto out;
4122
4123 case QCOW2_SUBCLUSTER_NORMAL:
4124 child = s->data_file;
4125 break;
4126
4127 default:
4128 abort();
4129 }
4130 qemu_co_mutex_unlock(&s->lock);
4131 ret = bdrv_co_copy_range_from(child,
4132 copy_offset,
4133 dst, dst_offset,
4134 cur_bytes, read_flags, cur_write_flags);
4135 qemu_co_mutex_lock(&s->lock);
4136 if (ret < 0) {
4137 goto out;
4138 }
4139
4140 bytes -= cur_bytes;
4141 src_offset += cur_bytes;
4142 dst_offset += cur_bytes;
4143 }
4144 ret = 0;
4145
4146 out:
4147 qemu_co_mutex_unlock(&s->lock);
4148 return ret;
4149 }
4150
4151 static int coroutine_fn GRAPH_RDLOCK
4152 qcow2_co_copy_range_to(BlockDriverState *bs,
4153 BdrvChild *src, int64_t src_offset,
4154 BdrvChild *dst, int64_t dst_offset,
4155 int64_t bytes, BdrvRequestFlags read_flags,
4156 BdrvRequestFlags write_flags)
4157 {
4158 BDRVQcow2State *s = bs->opaque;
4159 int ret;
4160 unsigned int cur_bytes; /* number of bytes in current iteration */
4161 uint64_t host_offset;
4162 QCowL2Meta *l2meta = NULL;
4163
4164 assert(!bs->encrypted);
4165
4166 qemu_co_mutex_lock(&s->lock);
4167
4168 while (bytes != 0) {
4169
4170 l2meta = NULL;
4171
4172 cur_bytes = MIN(bytes, INT_MAX);
4173
4174 /* TODO:
4175 * If src->bs == dst->bs, we could simply copy by incrementing
4176 * the refcnt, without copying user data.
4177 * Or if src->bs == dst->bs->backing->bs, we could copy by discarding. */
4178 ret = qcow2_alloc_host_offset(bs, dst_offset, &cur_bytes,
4179 &host_offset, &l2meta);
4180 if (ret < 0) {
4181 goto fail;
4182 }
4183
4184 ret = qcow2_pre_write_overlap_check(bs, 0, host_offset, cur_bytes,
4185 true);
4186 if (ret < 0) {
4187 goto fail;
4188 }
4189
4190 qemu_co_mutex_unlock(&s->lock);
4191 ret = bdrv_co_copy_range_to(src, src_offset, s->data_file, host_offset,
4192 cur_bytes, read_flags, write_flags);
4193 qemu_co_mutex_lock(&s->lock);
4194 if (ret < 0) {
4195 goto fail;
4196 }
4197
4198 ret = qcow2_handle_l2meta(bs, &l2meta, true);
4199 if (ret) {
4200 goto fail;
4201 }
4202
4203 bytes -= cur_bytes;
4204 src_offset += cur_bytes;
4205 dst_offset += cur_bytes;
4206 }
4207 ret = 0;
4208
4209 fail:
4210 qcow2_handle_l2meta(bs, &l2meta, false);
4211
4212 qemu_co_mutex_unlock(&s->lock);
4213
4214 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);
4215
4216 return ret;
4217 }
4218
4219 static int coroutine_fn GRAPH_RDLOCK
4220 qcow2_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
4221 PreallocMode prealloc, BdrvRequestFlags flags, Error **errp)
4222 {
4223 BDRVQcow2State *s = bs->opaque;
4224 uint64_t old_length;
4225 int64_t new_l1_size;
4226 int ret;
4227 QDict *options;
4228
4229 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_METADATA &&
4230 prealloc != PREALLOC_MODE_FALLOC && prealloc != PREALLOC_MODE_FULL)
4231 {
4232 error_setg(errp, "Unsupported preallocation mode '%s'",
4233 PreallocMode_str(prealloc));
4234 return -ENOTSUP;
4235 }
4236
4237 if (!QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)) {
4238 error_setg(errp, "The new size must be a multiple of %u",
4239 (unsigned) BDRV_SECTOR_SIZE);
4240 return -EINVAL;
4241 }
4242
4243 qemu_co_mutex_lock(&s->lock);
4244
4245 /*
4246 * Even though we store snapshot size for all images, it was not
4247 * required until v3, so it is not safe to proceed for v2.
4248 */
4249 if (s->nb_snapshots && s->qcow_version < 3) {
4250 error_setg(errp, "Can't resize a v2 image which has snapshots");
4251 ret = -ENOTSUP;
4252 goto fail;
4253 }
4254
4255 /* See qcow2-bitmap.c for which bitmap scenarios prevent a resize. */
4256 if (qcow2_truncate_bitmaps_check(bs, errp)) {
4257 ret = -ENOTSUP;
4258 goto fail;
4259 }
4260
4261 old_length = bs->total_sectors * BDRV_SECTOR_SIZE;
4262 new_l1_size = size_to_l1(s, offset);
4263
4264 if (offset < old_length) {
4265 int64_t last_cluster, old_file_size;
4266 if (prealloc != PREALLOC_MODE_OFF) {
4267 error_setg(errp,
4268 "Preallocation can't be used for shrinking an image");
4269 ret = -EINVAL;
4270 goto fail;
4271 }
4272
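/*
 * Shrinking: discard the guest clusters beyond the new size, shrink the
 * L1 table and the refcount table accordingly, and finally truncate the
 * now unused tail of the image file (if there is one).
 */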
4273 ret = qcow2_cluster_discard(bs, ROUND_UP(offset, s->cluster_size),
4274 old_length - ROUND_UP(offset,
4275 s->cluster_size),
4276 QCOW2_DISCARD_ALWAYS, true);
4277 if (ret < 0) {
4278 error_setg_errno(errp, -ret, "Failed to discard cropped clusters");
4279 goto fail;
4280 }
4281
4282 ret = qcow2_shrink_l1_table(bs, new_l1_size);
4283 if (ret < 0) {
4284 error_setg_errno(errp, -ret,
4285 "Failed to reduce the number of L2 tables");
4286 goto fail;
4287 }
4288
4289 ret = qcow2_shrink_reftable(bs);
4290 if (ret < 0) {
4291 error_setg_errno(errp, -ret,
4292 "Failed to discard unused refblocks");
4293 goto fail;
4294 }
4295
4296 old_file_size = bdrv_getlength(bs->file->bs);
4297 if (old_file_size < 0) {
4298 error_setg_errno(errp, -old_file_size,
4299 "Failed to inquire current file length");
4300 ret = old_file_size;
4301 goto fail;
4302 }
4303 last_cluster = qcow2_get_last_cluster(bs, old_file_size);
4304 if (last_cluster < 0) {
4305 error_setg_errno(errp, -last_cluster,
4306 "Failed to find the last cluster");
4307 ret = last_cluster;
4308 goto fail;
4309 }
4310 if ((last_cluster + 1) * s->cluster_size < old_file_size) {
4311 Error *local_err = NULL;
4312
4313 /*
4314 * Do not pass @exact here: It will not help the user if
4315 * we get an error here just because they wanted to shrink
4316 * their qcow2 image (on a block device) with qemu-img.
4317 * (And on the qcow2 layer, the @exact requirement is
4318 * always fulfilled, so there is no need to pass it on.)
4319 */
4320 bdrv_co_truncate(bs->file, (last_cluster + 1) * s->cluster_size,
4321 false, PREALLOC_MODE_OFF, 0, &local_err);
4322 if (local_err) {
4323 warn_reportf_err(local_err,
4324 "Failed to truncate the tail of the image: ");
4325 }
4326 }
4327 } else {
4328 ret = qcow2_grow_l1_table(bs, new_l1_size, true);
4329 if (ret < 0) {
4330 error_setg_errno(errp, -ret, "Failed to grow the L1 table");
4331 goto fail;
4332 }
4333
4334 if (data_file_is_raw(bs) && prealloc == PREALLOC_MODE_OFF) {
4335 /*
4336 * When creating a qcow2 image with data-file-raw, we enforce
4337 * at least prealloc=metadata, so that the L1/L2 tables are
4338 * fully allocated and reading from the data file will return
4339 * the same data as reading from the qcow2 image. When the
4340 * image is grown, we must consequently preallocate the
4341 * metadata structures to cover the added area.
4342 */
4343 prealloc = PREALLOC_MODE_METADATA;
4344 }
4345 }
4346
4347 switch (prealloc) {
4348 case PREALLOC_MODE_OFF:
4349 if (has_data_file(bs)) {
4350 /*
4351 * If the caller wants an exact resize, the external data
4352 * file should be resized to the exact target size, too,
4353 * so we pass @exact here.
4354 */
4355 ret = bdrv_co_truncate(s->data_file, offset, exact, prealloc, 0,
4356 errp);
4357 if (ret < 0) {
4358 goto fail;
4359 }
4360 }
4361 break;
4362
4363 case PREALLOC_MODE_METADATA:
4364 ret = preallocate_co(bs, old_length, offset, prealloc, errp);
4365 if (ret < 0) {
4366 goto fail;
4367 }
4368 break;
4369
4370 case PREALLOC_MODE_FALLOC:
4371 case PREALLOC_MODE_FULL:
4372 {
4373 int64_t allocation_start, host_offset, guest_offset;
4374 int64_t clusters_allocated;
4375 int64_t old_file_size, last_cluster, new_file_size;
4376 uint64_t nb_new_data_clusters, nb_new_l2_tables;
4377 bool subclusters_need_allocation = false;
4378
4379 /* With a data file, preallocation means just allocating the metadata
4380 * and forwarding the truncate request to the data file */
4381 if (has_data_file(bs)) {
4382 ret = preallocate_co(bs, old_length, offset, prealloc, errp);
4383 if (ret < 0) {
4384 goto fail;
4385 }
4386 break;
4387 }
4388
4389 old_file_size = bdrv_getlength(bs->file->bs);
4390 if (old_file_size < 0) {
4391 error_setg_errno(errp, -old_file_size,
4392 "Failed to inquire current file length");
4393 ret = old_file_size;
4394 goto fail;
4395 }
4396
4397 last_cluster = qcow2_get_last_cluster(bs, old_file_size);
4398 if (last_cluster >= 0) {
4399 old_file_size = (last_cluster + 1) * s->cluster_size;
4400 } else {
4401 old_file_size = ROUND_UP(old_file_size, s->cluster_size);
4402 }
4403
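/*
 * Number of data clusters needed to cover the range from the cluster
 * containing the old image end up to the new size, rounded up to a
 * whole cluster.
 */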
4404 nb_new_data_clusters = (ROUND_UP(offset, s->cluster_size) -
4405 start_of_cluster(s, old_length)) >> s->cluster_bits;
4406
4407 /* This is an overestimation; we will not actually allocate space for
4408 * these in the file but just make sure the new refcount structures are
4409 * able to cover them so we will not have to allocate new refblocks
4410 * while entering the data blocks in the potentially new L2 tables.
4411 * (We do not actually care where the L2 tables are placed. Maybe they
4412 * are already allocated or they can be placed somewhere before
4413 * @old_file_size. It does not matter because they will be fully
4414 * allocated automatically, so they do not need to be covered by the
4415 * preallocation. All that matters is that we will not have to allocate
4416 * new refcount structures for them.) */
4417 nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters,
4418 s->cluster_size / l2_entry_size(s));
4419 /* The cluster range may not be aligned to L2 boundaries, so add one L2
4420 * table for a potential head/tail */
4421 nb_new_l2_tables++;
4422
4423 allocation_start = qcow2_refcount_area(bs, old_file_size,
4424 nb_new_data_clusters +
4425 nb_new_l2_tables,
4426 true, 0, 0);
4427 if (allocation_start < 0) {
4428 error_setg_errno(errp, -allocation_start,
4429 "Failed to resize refcount structures");
4430 ret = allocation_start;
4431 goto fail;
4432 }
4433
4434 clusters_allocated = qcow2_alloc_clusters_at(bs, allocation_start,
4435 nb_new_data_clusters);
4436 if (clusters_allocated < 0) {
4437 error_setg_errno(errp, -clusters_allocated,
4438 "Failed to allocate data clusters");
4439 ret = clusters_allocated;
4440 goto fail;
4441 }
4442
4443 assert(clusters_allocated == nb_new_data_clusters);
4444
4445 /* Allocate the data area */
4446 new_file_size = allocation_start +
4447 nb_new_data_clusters * s->cluster_size;
4448 /*
4449 * Image file grows, so @exact does not matter.
4450 *
4451 * If we need to zero out the new area, try first whether the protocol
4452 * driver can already take care of this.
4453 */
4454 if (flags & BDRV_REQ_ZERO_WRITE) {
4455 ret = bdrv_co_truncate(bs->file, new_file_size, false, prealloc,
4456 BDRV_REQ_ZERO_WRITE, NULL);
4457 if (ret >= 0) {
4458 flags &= ~BDRV_REQ_ZERO_WRITE;
4459 /* Ensure that we read zeroes and not backing file data */
4460 subclusters_need_allocation = true;
4461 }
4462 } else {
4463 ret = -1;
4464 }
4465 if (ret < 0) {
4466 ret = bdrv_co_truncate(bs->file, new_file_size, false, prealloc, 0,
4467 errp);
4468 }
4469 if (ret < 0) {
4470 error_prepend(errp, "Failed to resize underlying file: ");
4471 qcow2_free_clusters(bs, allocation_start,
4472 nb_new_data_clusters * s->cluster_size,
4473 QCOW2_DISCARD_OTHER);
4474 goto fail;
4475 }
4476
4477 /* Create the necessary L2 entries */
4478 host_offset = allocation_start;
4479 guest_offset = old_length;
4480 while (nb_new_data_clusters) {
4481 int64_t nb_clusters = MIN(
4482 nb_new_data_clusters,
4483 s->l2_slice_size - offset_to_l2_slice_index(s, guest_offset));
4484 unsigned cow_start_length = offset_into_cluster(s, guest_offset);
4485 QCowL2Meta allocation;
4486 guest_offset = start_of_cluster(s, guest_offset);
4487 allocation = (QCowL2Meta) {
4488 .offset = guest_offset,
4489 .alloc_offset = host_offset,
4490 .nb_clusters = nb_clusters,
4491 .cow_start = {
4492 .offset = 0,
4493 .nb_bytes = cow_start_length,
4494 },
4495 .cow_end = {
4496 .offset = nb_clusters << s->cluster_bits,
4497 .nb_bytes = 0,
4498 },
4499 .prealloc = !subclusters_need_allocation,
4500 };
4501 qemu_co_queue_init(&allocation.dependent_requests);
4502
4503 ret = qcow2_alloc_cluster_link_l2(bs, &allocation);
4504 if (ret < 0) {
4505 error_setg_errno(errp, -ret, "Failed to update L2 tables");
4506 qcow2_free_clusters(bs, host_offset,
4507 nb_new_data_clusters * s->cluster_size,
4508 QCOW2_DISCARD_OTHER);
4509 goto fail;
4510 }
4511
4512 guest_offset += nb_clusters * s->cluster_size;
4513 host_offset += nb_clusters * s->cluster_size;
4514 nb_new_data_clusters -= nb_clusters;
4515 }
4516 break;
4517 }
4518
4519 default:
4520 g_assert_not_reached();
4521 }
4522
4523 if ((flags & BDRV_REQ_ZERO_WRITE) && offset > old_length) {
4524 uint64_t zero_start = QEMU_ALIGN_UP(old_length, s->subcluster_size);
4525
4526 /*
4527 * Use zero clusters as much as we can. qcow2_subcluster_zeroize()
4528 * requires a subcluster-aligned start. The end may be unaligned if
4529 * it is at the end of the image (which it is here).
4530 */
4531 if (offset > zero_start) {
4532 ret = qcow2_subcluster_zeroize(bs, zero_start, offset - zero_start,
4533 0);
4534 if (ret < 0) {
4535 error_setg_errno(errp, -ret, "Failed to zero out new clusters");
4536 goto fail;
4537 }
4538 }
4539
4540 /* Write explicit zeros for the unaligned head */
4541 if (zero_start > old_length) {
4542 uint64_t len = MIN(zero_start, offset) - old_length;
4543 uint8_t *buf = qemu_blockalign0(bs, len);
4544 QEMUIOVector qiov;
4545 qemu_iovec_init_buf(&qiov, buf, len);
4546
4547 qemu_co_mutex_unlock(&s->lock);
4548 ret = qcow2_co_pwritev_part(bs, old_length, len, &qiov, 0, 0);
4549 qemu_co_mutex_lock(&s->lock);
4550
4551 qemu_vfree(buf);
4552 if (ret < 0) {
4553 error_setg_errno(errp, -ret, "Failed to zero out the new area");
4554 goto fail;
4555 }
4556 }
4557 }
4558
4559 if (prealloc != PREALLOC_MODE_OFF) {
4560 /* Flush metadata before actually changing the image size */
4561 ret = qcow2_write_caches(bs);
4562 if (ret < 0) {
4563 error_setg_errno(errp, -ret,
4564 "Failed to flush the preallocated area to disk");
4565 goto fail;
4566 }
4567 }
4568
4569 bs->total_sectors = offset / BDRV_SECTOR_SIZE;
4570
4571 /* write updated header.size */
4572 offset = cpu_to_be64(offset);
4573 ret = bdrv_co_pwrite_sync(bs->file, offsetof(QCowHeader, size),
4574 sizeof(offset), &offset, 0);
4575 if (ret < 0) {
4576 error_setg_errno(errp, -ret, "Failed to update the image size");
4577 goto fail;
4578 }
4579
4580 s->l1_vm_state_index = new_l1_size;
4581
4582 /* Update cache sizes */
4583 options = qdict_clone_shallow(bs->options);
4584 ret = qcow2_update_options(bs, options, s->flags, errp);
4585 qobject_unref(options);
4586 if (ret < 0) {
4587 goto fail;
4588 }
4589 ret = 0;
4590 fail:
4591 qemu_co_mutex_unlock(&s->lock);
4592 return ret;
4593 }
4594
4595 static int coroutine_fn GRAPH_RDLOCK
4596 qcow2_co_pwritev_compressed_task(BlockDriverState *bs,
4597 uint64_t offset, uint64_t bytes,
4598 QEMUIOVector *qiov, size_t qiov_offset)
4599 {
4600 BDRVQcow2State *s = bs->opaque;
4601 int ret;
4602 ssize_t out_len;
4603 uint8_t *buf, *out_buf;
4604 uint64_t cluster_offset;
4605
4606 assert(bytes == s->cluster_size || (bytes < s->cluster_size &&
4607 (offset + bytes == bs->total_sectors << BDRV_SECTOR_BITS)));
4608
4609 buf = qemu_blockalign(bs, s->cluster_size);
4610 if (bytes < s->cluster_size) {
4611 /* Zero-pad last write if image size is not cluster aligned */
4612 memset(buf + bytes, 0, s->cluster_size - bytes);
4613 }
4614 qemu_iovec_to_buf(qiov, qiov_offset, buf, bytes);
4615
4616 out_buf = g_malloc(s->cluster_size);
4617
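/*
 * Compress into a buffer one byte smaller than a cluster: if the result
 * does not fit, compression would not save any space, so the data is
 * written as a regular uncompressed cluster below.
 */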
4618 out_len = qcow2_co_compress(bs, out_buf, s->cluster_size - 1,
4619 buf, s->cluster_size);
4620 if (out_len == -ENOMEM) {
4621 /* could not compress: write normal cluster */
4622 ret = qcow2_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset, 0);
4623 if (ret < 0) {
4624 goto fail;
4625 }
4626 goto success;
4627 } else if (out_len < 0) {
4628 ret = -EINVAL;
4629 goto fail;
4630 }
4631
4632 qemu_co_mutex_lock(&s->lock);
4633 ret = qcow2_alloc_compressed_cluster_offset(bs, offset, out_len,
4634 &cluster_offset);
4635 if (ret < 0) {
4636 qemu_co_mutex_unlock(&s->lock);
4637 goto fail;
4638 }
4639
4640 ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len, true);
4641 qemu_co_mutex_unlock(&s->lock);
4642 if (ret < 0) {
4643 goto fail;
4644 }
4645
4646 BLKDBG_EVENT(s->data_file, BLKDBG_WRITE_COMPRESSED);
4647 ret = bdrv_co_pwrite(s->data_file, cluster_offset, out_len, out_buf, 0);
4648 if (ret < 0) {
4649 goto fail;
4650 }
4651 success:
4652 ret = 0;
4653 fail:
4654 qemu_vfree(buf);
4655 g_free(out_buf);
4656 return ret;
4657 }
4658
4659 /*
4660 * This function can count as GRAPH_RDLOCK because
4661 * qcow2_co_pwritev_compressed_part() holds the graph lock and keeps it until
4662 * this coroutine has terminated.
4663 */
4664 static int coroutine_fn GRAPH_RDLOCK
4665 qcow2_co_pwritev_compressed_task_entry(AioTask *task)
4666 {
4667 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task);
4668
4669 assert(!t->subcluster_type && !t->l2meta);
4670
4671 return qcow2_co_pwritev_compressed_task(t->bs, t->offset, t->bytes, t->qiov,
4672 t->qiov_offset);
4673 }
4674
4675 /*
4676 * XXX: put compressed sectors first, then all the cluster aligned
4677 * tables to avoid losing bytes in alignment
4678 */
4679 static int coroutine_fn GRAPH_RDLOCK
4680 qcow2_co_pwritev_compressed_part(BlockDriverState *bs,
4681 int64_t offset, int64_t bytes,
4682 QEMUIOVector *qiov, size_t qiov_offset)
4683 {
4684 BDRVQcow2State *s = bs->opaque;
4685 AioTaskPool *aio = NULL;
4686 int ret = 0;
4687
4688 if (has_data_file(bs)) {
4689 return -ENOTSUP;
4690 }
4691
4692 if (bytes == 0) {
4693 /*
4694 * Align the end of the file to a sector boundary to ease reading
4695 * with sector-based I/O.
4696 */
4697 int64_t len = bdrv_getlength(bs->file->bs);
4698 if (len < 0) {
4699 return len;
4700 }
4701 return bdrv_co_truncate(bs->file, len, false, PREALLOC_MODE_OFF, 0,
4702 NULL);
4703 }
4704
4705 if (offset_into_cluster(s, offset)) {
4706 return -EINVAL;
4707 }
4708
4709 if (offset_into_cluster(s, bytes) &&
4710 (offset + bytes) != (bs->total_sectors << BDRV_SECTOR_BITS)) {
4711 return -EINVAL;
4712 }
4713
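/*
 * Split the request into cluster-sized chunks; if there is more than
 * one chunk, hand them to a pool of worker coroutines.
 */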
4714 while (bytes && aio_task_pool_status(aio) == 0) {
4715 uint64_t chunk_size = MIN(bytes, s->cluster_size);
4716
4717 if (!aio && chunk_size != bytes) {
4718 aio = aio_task_pool_new(QCOW2_MAX_WORKERS);
4719 }
4720
4721 ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_compressed_task_entry,
4722 0, 0, offset, chunk_size, qiov, qiov_offset, NULL);
4723 if (ret < 0) {
4724 break;
4725 }
4726 qiov_offset += chunk_size;
4727 offset += chunk_size;
4728 bytes -= chunk_size;
4729 }
4730
4731 if (aio) {
4732 aio_task_pool_wait_all(aio);
4733 if (ret == 0) {
4734 ret = aio_task_pool_status(aio);
4735 }
4736 g_free(aio);
4737 }
4738
4739 return ret;
4740 }
4741
4742 static int coroutine_fn GRAPH_RDLOCK
4743 qcow2_co_preadv_compressed(BlockDriverState *bs,
4744 uint64_t l2_entry,
4745 uint64_t offset,
4746 uint64_t bytes,
4747 QEMUIOVector *qiov,
4748 size_t qiov_offset)
4749 {
4750 BDRVQcow2State *s = bs->opaque;
4751 int ret = 0, csize;
4752 uint64_t coffset;
4753 uint8_t *buf, *out_buf;
4754 int offset_in_cluster = offset_into_cluster(s, offset);
4755
4756 qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize);
4757
4758 buf = g_try_malloc(csize);
4759 if (!buf) {
4760 return -ENOMEM;
4761 }
4762
4763 out_buf = qemu_blockalign(bs, s->cluster_size);
4764
4765 BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
4766 ret = bdrv_co_pread(bs->file, coffset, csize, buf, 0);
4767 if (ret < 0) {
4768 goto fail;
4769 }
4770
4771 if (qcow2_co_decompress(bs, out_buf, s->cluster_size, buf, csize) < 0) {
4772 ret = -EIO;
4773 goto fail;
4774 }
4775
4776 qemu_iovec_from_buf(qiov, qiov_offset, out_buf + offset_in_cluster, bytes);
4777
4778 fail:
4779 qemu_vfree(out_buf);
4780 g_free(buf);
4781
4782 return ret;
4783 }
4784
4785 static int make_completely_empty(BlockDriverState *bs)
4786 {
4787 BDRVQcow2State *s = bs->opaque;
4788 Error *local_err = NULL;
4789 int ret, l1_clusters;
4790 int64_t offset;
4791 uint64_t *new_reftable = NULL;
4792 uint64_t rt_entry, l1_size2;
4793 struct {
4794 uint64_t l1_offset;
4795 uint64_t reftable_offset;
4796 uint32_t reftable_clusters;
4797 } QEMU_PACKED l1_ofs_rt_ofs_cls;
4798
4799 ret = qcow2_cache_empty(bs, s->l2_table_cache);
4800 if (ret < 0) {
4801 goto fail;
4802 }
4803
4804 ret = qcow2_cache_empty(bs, s->refcount_block_cache);
4805 if (ret < 0) {
4806 goto fail;
4807 }
4808
4809 /* Refcounts will be broken utterly */
4810 ret = qcow2_mark_dirty(bs);
4811 if (ret < 0) {
4812 goto fail;
4813 }
4814
4815 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
4816
4817 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / L1E_SIZE);
4818 l1_size2 = (uint64_t)s->l1_size * L1E_SIZE;
4819
4820 /* After this call, neither the in-memory nor the on-disk refcount
4821 * information accurately describes the actual references */
4822
4823 ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset,
4824 l1_clusters * s->cluster_size, 0);
4825 if (ret < 0) {
4826 goto fail_broken_refcounts;
4827 }
4828 memset(s->l1_table, 0, l1_size2);
4829
4830 BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE);
4831
4832 /* Overwrite enough clusters at the beginning of the image to place
4833 * the refcount table, a refcount block and the L1 table in; this may
4834 * overwrite parts of the existing refcount structures and L1 table, which
4835 * is not an issue because the dirty flag is set, complete data loss is in
4836 * fact desired, and partial data loss is consequently fine as well */
4837 ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size,
4838 (2 + l1_clusters) * s->cluster_size, 0);
4839 /* This call (even if it failed overall) may have overwritten on-disk
4840 * refcount structures; in that case, the in-memory refcount information
4841 * will probably differ from the on-disk information which makes the BDS
4842 * unusable */
4843 if (ret < 0) {
4844 goto fail_broken_refcounts;
4845 }
4846
4847 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
4848 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE);
4849
4850 /* "Create" an empty reftable (one cluster) directly after the image
4851 * header and an empty L1 table three clusters after the image header;
4852 * the cluster between those two will be used as the first refblock */
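/* These three header fields are contiguous on disk, so they can be
 * updated with a single synchronous write. */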
4853 l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size);
4854 l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size);
4855 l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1);
4856 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset),
4857 sizeof(l1_ofs_rt_ofs_cls), &l1_ofs_rt_ofs_cls, 0);
4858 if (ret < 0) {
4859 goto fail_broken_refcounts;
4860 }
4861
4862 s->l1_table_offset = 3 * s->cluster_size;
4863
4864 new_reftable = g_try_new0(uint64_t, s->cluster_size / REFTABLE_ENTRY_SIZE);
4865 if (!new_reftable) {
4866 ret = -ENOMEM;
4867 goto fail_broken_refcounts;
4868 }
4869
4870 s->refcount_table_offset = s->cluster_size;
4871 s->refcount_table_size = s->cluster_size / REFTABLE_ENTRY_SIZE;
4872 s->max_refcount_table_index = 0;
4873
4874 g_free(s->refcount_table);
4875 s->refcount_table = new_reftable;
4876 new_reftable = NULL;
4877
4878 /* Now the in-memory refcount information again corresponds to the on-disk
4879 * information (the reftable is empty and there are no refblocks; the
4880 * refblock cache is empty as well). However, this means some clusters
4881 * (e.g. the image header) are referenced but not refcounted, whereas the
4882 * normal qcow2 code assumes that the in-memory information is always correct */
4883
4884 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);
4885
4886 /* Enter the first refblock into the reftable */
4887 rt_entry = cpu_to_be64(2 * s->cluster_size);
4888 ret = bdrv_pwrite_sync(bs->file, s->cluster_size, sizeof(rt_entry),
4889 &rt_entry, 0);
4890 if (ret < 0) {
4891 goto fail_broken_refcounts;
4892 }
4893 s->refcount_table[0] = 2 * s->cluster_size;
4894
4895 s->free_cluster_index = 0;
4896 assert(3 + l1_clusters <= s->refcount_block_size);
4897 offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2);
4898 if (offset < 0) {
4899 ret = offset;
4900 goto fail_broken_refcounts;
4901 } else if (offset > 0) {
4902 error_report("First cluster in emptied image is in use");
4903 abort();
4904 }
4905
4906 /* Now finally the in-memory information corresponds to the on-disk
4907 * structures and is correct */
4908 ret = qcow2_mark_clean(bs);
4909 if (ret < 0) {
4910 goto fail;
4911 }
4912
4913 ret = bdrv_truncate(bs->file, (3 + l1_clusters) * s->cluster_size, false,
4914 PREALLOC_MODE_OFF, 0, &local_err);
4915 if (ret < 0) {
4916 error_report_err(local_err);
4917 goto fail;
4918 }
4919
4920 return 0;
4921
4922 fail_broken_refcounts:
4923 /* The BDS is unusable at this point. If we wanted to make it usable, we
4924 * would have to call qcow2_refcount_close(), qcow2_refcount_init(),
4925 * qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init()
4926 * again. However, because the functions that could have caused this error
4927 * path to be taken are also used by those functions, that sequence would
4928 * very likely fail as well. Therefore, just eject the BDS. */
4929 bs->drv = NULL;
4930
4931 fail:
4932 g_free(new_reftable);
4933 return ret;
4934 }
4935
4936 static int qcow2_make_empty(BlockDriverState *bs)
4937 {
4938 BDRVQcow2State *s = bs->opaque;
4939 uint64_t offset, end_offset;
4940 int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size);
4941 int l1_clusters, ret = 0;
4942
4943 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / L1E_SIZE);
4944
4945 if (s->qcow_version >= 3 && !s->snapshots && !s->nb_bitmaps &&
4946 3 + l1_clusters <= s->refcount_block_size &&
4947 s->crypt_method_header != QCOW_CRYPT_LUKS &&
4948 !has_data_file(bs)) {
4949 /* The following function only works for qcow2 v3 images (it
4950 * requires the dirty flag) and only as long as there are no
4951 * features that reserve extra clusters (such as snapshots,
4952 * LUKS header, or persistent bitmaps), because it completely
4953 * empties the image. Furthermore, the L1 table and three
4954 * additional clusters (image header, refcount table, one
4955 * refcount block) have to fit inside one refcount block. It
4956 * only resets the image file, i.e. does not work with an
4957 * external data file. */
4958 return make_completely_empty(bs);
4959 }
4960
4961 /* This fallback code simply discards every active cluster; this is slow,
4962 * but works in all cases */
4963 end_offset = bs->total_sectors * BDRV_SECTOR_SIZE;
4964 for (offset = 0; offset < end_offset; offset += step) {
4965 /* As this function is generally used after committing an external
4966 * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the
4967 * default action for this kind of discard is to pass the discard,
4968 * which will ideally result in an actually smaller image file, as
4969 * is probably desired. */
4970 ret = qcow2_cluster_discard(bs, offset, MIN(step, end_offset - offset),
4971 QCOW2_DISCARD_SNAPSHOT, true);
4972 if (ret < 0) {
4973 break;
4974 }
4975 }
4976
4977 return ret;
4978 }
4979
4980 static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs)
4981 {
4982 BDRVQcow2State *s = bs->opaque;
4983 int ret;
4984
4985 qemu_co_mutex_lock(&s->lock);
4986 ret = qcow2_write_caches(bs);
4987 qemu_co_mutex_unlock(&s->lock);
4988
4989 return ret;
4990 }
4991
4992 static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs,
4993 Error **errp)
4994 {
4995 Error *local_err = NULL;
4996 BlockMeasureInfo *info;
4997 uint64_t required = 0; /* bytes that contribute to required size */
4998 uint64_t virtual_size; /* disk size as seen by guest */
4999 uint64_t refcount_bits;
5000 uint64_t l2_tables;
5001 uint64_t luks_payload_size = 0;
5002 size_t cluster_size;
5003 int version;
5004 char *optstr;
5005 PreallocMode prealloc;
5006 bool has_backing_file;
5007 bool has_luks;
5008 bool extended_l2;
5009 size_t l2e_size;
5010
5011 /* Parse image creation options */
5012 extended_l2 = qemu_opt_get_bool_del(opts, BLOCK_OPT_EXTL2, false);
5013
5014 cluster_size = qcow2_opt_get_cluster_size_del(opts, extended_l2,
5015 &local_err);
5016 if (local_err) {
5017 goto err;
5018 }
5019
5020 version = qcow2_opt_get_version_del(opts, &local_err);
5021 if (local_err) {
5022 goto err;
5023 }
5024
5025 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err);
5026 if (local_err) {
5027 goto err;
5028 }
5029
5030 optstr = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
5031 prealloc = qapi_enum_parse(&PreallocMode_lookup, optstr,
5032 PREALLOC_MODE_OFF, &local_err);
5033 g_free(optstr);
5034 if (local_err) {
5035 goto err;
5036 }
5037
5038 optstr = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
5039 has_backing_file = !!optstr;
5040 g_free(optstr);
5041
5042 optstr = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT);
5043 has_luks = optstr && strcmp(optstr, "luks") == 0;
5044 g_free(optstr);
5045
5046 if (has_luks) {
5047 g_autoptr(QCryptoBlockCreateOptions) create_opts = NULL;
5048 QDict *cryptoopts = qcow2_extract_crypto_opts(opts, "luks", errp);
5049 size_t headerlen;
5050
5051 create_opts = block_crypto_create_opts_init(cryptoopts, errp);
5052 qobject_unref(cryptoopts);
5053 if (!create_opts) {
5054 goto err;
5055 }
5056
5057 if (!qcrypto_block_calculate_payload_offset(create_opts,
5058 "encrypt.",
5059 &headerlen,
5060 &local_err)) {
5061 goto err;
5062 }
5063
5064 luks_payload_size = ROUND_UP(headerlen, cluster_size);
5065 }
5066
5067 virtual_size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0);
5068 virtual_size = ROUND_UP(virtual_size, cluster_size);
5069
5070 /* Check that virtual disk size is valid */
5071 l2e_size = extended_l2 ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL;
5072 l2_tables = DIV_ROUND_UP(virtual_size / cluster_size,
5073 cluster_size / l2e_size);
5074 if (l2_tables * L1E_SIZE > QCOW_MAX_L1_SIZE) {
5075 error_setg(&local_err, "The image size is too large "
5076 "(try using a larger cluster size)");
5077 goto err;
5078 }
5079
5080 /* Account for input image */
5081 if (in_bs) {
5082 int64_t ssize = bdrv_getlength(in_bs);
5083 if (ssize < 0) {
5084 error_setg_errno(&local_err, -ssize,
5085 "Unable to get image virtual_size");
5086 goto err;
5087 }
5088
5089 virtual_size = ROUND_UP(ssize, cluster_size);
5090
5091 if (has_backing_file) {
5092 /* We don't know how much of the backing chain is shared by the input
5093 * image and the new image file. In the worst case the new image's
5094 * backing file has nothing in common with the input image. Be
5095 * conservative and assume all clusters need to be written.
5096 */
5097 required = virtual_size;
5098 } else {
5099 int64_t offset;
5100 int64_t pnum = 0;
5101
5102 for (offset = 0; offset < ssize; offset += pnum) {
5103 int ret;
5104
5105 ret = bdrv_block_status_above(in_bs, NULL, offset,
5106 ssize - offset, &pnum, NULL,
5107 NULL);
5108 if (ret < 0) {
5109 error_setg_errno(&local_err, -ret,
5110 "Unable to get block status");
5111 goto err;
5112 }
5113
5114 if (ret & BDRV_BLOCK_ZERO) {
5115 /* Skip zero regions (safe with no backing file) */
5116 } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) ==
5117 (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) {
5118 /* Extend pnum to end of cluster for next iteration */
5119 pnum = ROUND_UP(offset + pnum, cluster_size) - offset;
5120
5121 /* Count clusters we've seen */
5122 required += offset % cluster_size + pnum;
5123 }
5124 }
5125 }
5126 }
5127
5128 /* Take into account preallocation. Nothing special is needed for
5129 * PREALLOC_MODE_METADATA since metadata is always counted.
5130 */
5131 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) {
5132 required = virtual_size;
5133 }
5134
5135 info = g_new0(BlockMeasureInfo, 1);
5136 info->fully_allocated = luks_payload_size +
5137 qcow2_calc_prealloc_size(virtual_size, cluster_size,
5138 ctz32(refcount_bits), extended_l2);
5139
5140 /*
5141 * Remove data clusters that are not required. This overestimates the
5142 * required size because metadata needed for the fully allocated file is
5143 * still counted. Show bitmaps only if both source and destination
5144 * would support them.
5145 */
5146 info->required = info->fully_allocated - virtual_size + required;
5147 info->has_bitmaps = version >= 3 && in_bs &&
5148 bdrv_supports_persistent_dirty_bitmap(in_bs);
5149 if (info->has_bitmaps) {
5150 info->bitmaps = qcow2_get_persistent_dirty_bitmap_size(in_bs,
5151 cluster_size);
5152 }
5153 return info;
5154
5155 err:
5156 error_propagate(errp, local_err);
5157 return NULL;
5158 }
5159
5160 static int coroutine_fn
5161 qcow2_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
5162 {
5163 BDRVQcow2State *s = bs->opaque;
5164 bdi->cluster_size = s->cluster_size;
5165 bdi->vm_state_offset = qcow2_vm_state_offset(s);
5166 bdi->is_dirty = s->incompatible_features & QCOW2_INCOMPAT_DIRTY;
5167 return 0;
5168 }
5169
5170 static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs,
5171 Error **errp)
5172 {
5173 BDRVQcow2State *s = bs->opaque;
5174 ImageInfoSpecific *spec_info;
5175 QCryptoBlockInfo *encrypt_info = NULL;
5176
5177 if (s->crypto != NULL) {
5178 encrypt_info = qcrypto_block_get_info(s->crypto, errp);
5179 if (!encrypt_info) {
5180 return NULL;
5181 }
5182 }
5183
5184 spec_info = g_new(ImageInfoSpecific, 1);
5185 *spec_info = (ImageInfoSpecific){
5186 .type = IMAGE_INFO_SPECIFIC_KIND_QCOW2,
5187 .u.qcow2.data = g_new0(ImageInfoSpecificQCow2, 1),
5188 };
5189 if (s->qcow_version == 2) {
5190 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){
5191 .compat = g_strdup("0.10"),
5192 .refcount_bits = s->refcount_bits,
5193 };
5194 } else if (s->qcow_version == 3) {
5195 Qcow2BitmapInfoList *bitmaps;
5196 if (!qcow2_get_bitmap_info_list(bs, &bitmaps, errp)) {
5197 qapi_free_ImageInfoSpecific(spec_info);
5198 qapi_free_QCryptoBlockInfo(encrypt_info);
5199 return NULL;
5200 }
5201 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){
5202 .compat = g_strdup("1.1"),
5203 .lazy_refcounts = s->compatible_features &
5204 QCOW2_COMPAT_LAZY_REFCOUNTS,
5205 .has_lazy_refcounts = true,
5206 .corrupt = s->incompatible_features &
5207 QCOW2_INCOMPAT_CORRUPT,
5208 .has_corrupt = true,
5209 .has_extended_l2 = true,
5210 .extended_l2 = has_subclusters(s),
5211 .refcount_bits = s->refcount_bits,
5212 .has_bitmaps = !!bitmaps,
5213 .bitmaps = bitmaps,
5214 .data_file = g_strdup(s->image_data_file),
5215 .has_data_file_raw = has_data_file(bs),
5216 .data_file_raw = data_file_is_raw(bs),
5217 .compression_type = s->compression_type,
5218 };
5219 } else {
5220 /* if this assertion fails, this probably means a new version was
5221 * added without having it covered here */
5222 assert(false);
5223 }
5224
5225 if (encrypt_info) {
5226 ImageInfoSpecificQCow2Encryption *qencrypt =
5227 g_new(ImageInfoSpecificQCow2Encryption, 1);
5228 switch (encrypt_info->format) {
5229 case Q_CRYPTO_BLOCK_FORMAT_QCOW:
5230 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES;
5231 break;
5232 case Q_CRYPTO_BLOCK_FORMAT_LUKS:
5233 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS;
5234 qencrypt->u.luks = encrypt_info->u.luks;
5235 break;
5236 default:
5237 abort();
5238 }
5239 /* Since we did a shallow copy above, erase any pointers
5240 * in the original info */
5241 memset(&encrypt_info->u, 0, sizeof(encrypt_info->u));
5242 qapi_free_QCryptoBlockInfo(encrypt_info);
5243
5244 spec_info->u.qcow2.data->encrypt = qencrypt;
5245 }
5246
5247 return spec_info;
5248 }
5249
5250 static int qcow2_has_zero_init(BlockDriverState *bs)
5251 {
5252 BDRVQcow2State *s = bs->opaque;
5253 bool preallocated;
5254
5255 if (qemu_in_coroutine()) {
5256 qemu_co_mutex_lock(&s->lock);
5257 }
5258 /*
5259 * Check preallocation status: Preallocated images have all L2
5260 * tables allocated, nonpreallocated images have none. It is
5261 * therefore enough to check the first one.
5262 */
5263 preallocated = s->l1_size > 0 && s->l1_table[0] != 0;
5264 if (qemu_in_coroutine()) {
5265 qemu_co_mutex_unlock(&s->lock);
5266 }
5267
5268 if (!preallocated) {
5269 return 1;
5270 } else if (bs->encrypted) {
5271 return 0;
5272 } else {
5273 return bdrv_has_zero_init(s->data_file->bs);
5274 }
5275 }
5276
5277 /*
5278 * Check a vmstate read/write request. On success, return
5279 * qcow2_vm_state_offset(bs) + @pos
5280 */
5281 static int64_t qcow2_check_vmstate_request(BlockDriverState *bs,
5282 QEMUIOVector *qiov, int64_t pos)
5283 {
5284 BDRVQcow2State *s = bs->opaque;
5285 int64_t vmstate_offset = qcow2_vm_state_offset(s);
5286 int ret;
5287
5288 /* Incoming requests must be OK */
5289 bdrv_check_qiov_request(pos, qiov->size, qiov, 0, &error_abort);
5290
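/* Reject positions for which pos + vmstate_offset would overflow int64_t */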
5291 if (INT64_MAX - pos < vmstate_offset) {
5292 return -EIO;
5293 }
5294
5295 pos += vmstate_offset;
5296 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
5297 if (ret < 0) {
5298 return ret;
5299 }
5300
5301 return pos;
5302 }
5303
5304 static int coroutine_fn GRAPH_RDLOCK
5305 qcow2_co_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
5306 {
5307 int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos);
5308 if (offset < 0) {
5309 return offset;
5310 }
5311
5312 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
5313 return bs->drv->bdrv_co_pwritev_part(bs, offset, qiov->size, qiov, 0, 0);
5314 }
5315
5316 static int coroutine_fn GRAPH_RDLOCK
5317 qcow2_co_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
5318 {
5319 int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos);
5320 if (offset < 0) {
5321 return offset;
5322 }
5323
5324 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
5325 return bs->drv->bdrv_co_preadv_part(bs, offset, qiov->size, qiov, 0, 0);
5326 }
5327
5328 static int qcow2_has_compressed_clusters(BlockDriverState *bs)
5329 {
5330 int64_t offset = 0;
5331 int64_t bytes = bdrv_getlength(bs);
5332
5333 if (bytes < 0) {
5334 return bytes;
5335 }
5336
5337 while (bytes != 0) {
5338 int ret;
5339 QCow2SubclusterType type;
5340 unsigned int cur_bytes = MIN(INT_MAX, bytes);
5341 uint64_t host_offset;
5342
5343 ret = qcow2_get_host_offset(bs, offset, &cur_bytes, &host_offset,
5344 &type);
5345 if (ret < 0) {
5346 return ret;
5347 }
5348
5349 if (type == QCOW2_SUBCLUSTER_COMPRESSED) {
5350 return 1;
5351 }
5352
5353 offset += cur_bytes;
5354 bytes -= cur_bytes;
5355 }
5356
5357 return 0;
5358 }
5359
5360 /*
5361 * Downgrades an image's version. To achieve this, any incompatible features
5362 * have to be removed.
5363 */
5364 static int qcow2_downgrade(BlockDriverState *bs, int target_version,
5365 BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
5366 Error **errp)
5367 {
5368 BDRVQcow2State *s = bs->opaque;
5369 int current_version = s->qcow_version;
5370 int ret;
5371 int i;
5372
5373 /* This is qcow2_downgrade(), not qcow2_upgrade() */
5374 assert(target_version < current_version);
5375
5376 /* There are no other versions (now) that you can downgrade to */
5377 assert(target_version == 2);
5378
5379 if (s->refcount_order != 4) {
5380 error_setg(errp, "compat=0.10 requires refcount_bits=16");
5381 return -ENOTSUP;
5382 }
5383
5384 if (has_data_file(bs)) {
5385 error_setg(errp, "Cannot downgrade an image with a data file");
5386 return -ENOTSUP;
5387 }
5388
5389 /*
5390 * If any internal snapshot has a different size than the current
5391 * image size, or a VM state size that exceeds 32 bits, downgrading
5392 * is unsafe. Even though we would still use v3-compliant output
5393 * to preserve that data, other v2 programs might not realize
5394 * those optional fields are important.
5395 */
5396 for (i = 0; i < s->nb_snapshots; i++) {
5397 if (s->snapshots[i].vm_state_size > UINT32_MAX ||
5398 s->snapshots[i].disk_size != bs->total_sectors * BDRV_SECTOR_SIZE) {
5399 error_setg(errp, "Internal snapshots prevent downgrade of image");
5400 return -ENOTSUP;
5401 }
5402 }
5403
5404 /* clear incompatible features */
5405 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
5406 ret = qcow2_mark_clean(bs);
5407 if (ret < 0) {
5408 error_setg_errno(errp, -ret, "Failed to make the image clean");
5409 return ret;
5410 }
5411 }
5412
5413 /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in
5414 * the first place; if that happens nonetheless, returning -ENOTSUP is the
5415 * best thing to do anyway */
5416
5417 if (s->incompatible_features & ~QCOW2_INCOMPAT_COMPRESSION) {
5418 error_setg(errp, "Cannot downgrade an image with incompatible features "
5419 "0x%" PRIx64 " set",
5420 s->incompatible_features & ~QCOW2_INCOMPAT_COMPRESSION);
5421 return -ENOTSUP;
5422 }
5423
5424 /* since we can ignore compatible features, we can set them to 0 as well */
5425 s->compatible_features = 0;
5426 /* if lazy refcounts have been used, they have already been fixed through
5427 * clearing the dirty flag */
5428
5429 /* clearing autoclear features is trivial */
5430 s->autoclear_features = 0;
5431
5432 ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque);
5433 if (ret < 0) {
5434 error_setg_errno(errp, -ret, "Failed to turn zero into data clusters");
5435 return ret;
5436 }
5437
5438 if (s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION) {
5439 ret = qcow2_has_compressed_clusters(bs);
5440 if (ret < 0) {
5441 error_setg(errp, "Failed to check block status");
5442 return -EINVAL;
5443 }
5444 if (ret) {
5445 error_setg(errp, "Cannot downgrade an image with zstd compression "
5446 "type and existing compressed clusters");
5447 return -ENOTSUP;
5448 }
5449 /*
5450 * No compressed clusters for now, so just choose the default zlib
5451 * compression.
5452 */
5453 s->incompatible_features &= ~QCOW2_INCOMPAT_COMPRESSION;
5454 s->compression_type = QCOW2_COMPRESSION_TYPE_ZLIB;
5455 }
5456
5457 assert(s->incompatible_features == 0);
5458
5459 s->qcow_version = target_version;
5460 ret = qcow2_update_header(bs);
5461 if (ret < 0) {
5462 s->qcow_version = current_version;
5463 error_setg_errno(errp, -ret, "Failed to update the image header");
5464 return ret;
5465 }
5466 return 0;
5467 }
5468
5469 /*
5470 * Upgrades an image's version. While newer versions encompass all
5471 * features of older versions, some things may have to be presented
5472 * differently.
5473 */
5474 static int qcow2_upgrade(BlockDriverState *bs, int target_version,
5475 BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
5476 Error **errp)
5477 {
5478 BDRVQcow2State *s = bs->opaque;
5479 bool need_snapshot_update;
5480 int current_version = s->qcow_version;
5481 int i;
5482 int ret;
5483
5484 /* This is qcow2_upgrade(), not qcow2_downgrade() */
5485 assert(target_version > current_version);
5486
5487 /* There are no other versions (yet) that you can upgrade to */
5488 assert(target_version == 3);
5489
5490 status_cb(bs, 0, 2, cb_opaque);
5491
5492 /*
5493 * In v2, snapshots do not need to have extra data. v3 requires
5494 * the 64-bit VM state size and the virtual disk size to be
5495 * present.
5496 * qcow2_write_snapshots() will always write the list in the
5497 * v3-compliant format.
5498 */
5499 need_snapshot_update = false;
5500 for (i = 0; i < s->nb_snapshots; i++) {
5501 if (s->snapshots[i].extra_data_size <
5502 sizeof_field(QCowSnapshotExtraData, vm_state_size_large) +
5503 sizeof_field(QCowSnapshotExtraData, disk_size))
5504 {
5505 need_snapshot_update = true;
5506 break;
5507 }
5508 }
5509 if (need_snapshot_update) {
5510 ret = qcow2_write_snapshots(bs);
5511 if (ret < 0) {
5512 error_setg_errno(errp, -ret, "Failed to update the snapshot table");
5513 return ret;
5514 }
5515 }
5516 status_cb(bs, 1, 2, cb_opaque);
5517
5518 s->qcow_version = target_version;
5519 ret = qcow2_update_header(bs);
5520 if (ret < 0) {
5521 s->qcow_version = current_version;
5522 error_setg_errno(errp, -ret, "Failed to update the image header");
5523 return ret;
5524 }
5525 status_cb(bs, 2, 2, cb_opaque);
5526
5527 return 0;
5528 }
5529
5530 typedef enum Qcow2AmendOperation {
5531 /* This is the value Qcow2AmendHelperCBInfo::last_operation will be
5532 * statically initialized to so that the helper CB can discern the first
5533 * invocation from an operation change */
5534 QCOW2_NO_OPERATION = 0,
5535
5536 QCOW2_UPGRADING,
5537 QCOW2_UPDATING_ENCRYPTION,
5538 QCOW2_CHANGING_REFCOUNT_ORDER,
5539 QCOW2_DOWNGRADING,
5540 } Qcow2AmendOperation;
5541
5542 typedef struct Qcow2AmendHelperCBInfo {
5543 /* The code coordinating the amend operations should only modify
5544 * these four fields; the rest will be managed by the CB */
5545 BlockDriverAmendStatusCB *original_status_cb;
5546 void *original_cb_opaque;
5547
5548 Qcow2AmendOperation current_operation;
5549
5550 /* Total number of operations to perform (only set once) */
5551 int total_operations;
5552
5553 /* The following fields are managed by the CB */
5554
5555 /* Number of operations completed */
5556 int operations_completed;
5557
5558 /* Cumulative offset of all completed operations */
5559 int64_t offset_completed;
5560
5561 Qcow2AmendOperation last_operation;
5562 int64_t last_work_size;
5563 } Qcow2AmendHelperCBInfo;
5564
5565 static void qcow2_amend_helper_cb(BlockDriverState *bs,
5566 int64_t operation_offset,
5567 int64_t operation_work_size, void *opaque)
5568 {
5569 Qcow2AmendHelperCBInfo *info = opaque;
5570 int64_t current_work_size;
5571 int64_t projected_work_size;
5572
5573 if (info->current_operation != info->last_operation) {
5574 if (info->last_operation != QCOW2_NO_OPERATION) {
5575 info->offset_completed += info->last_work_size;
5576 info->operations_completed++;
5577 }
5578
5579 info->last_operation = info->current_operation;
5580 }
5581
5582 assert(info->total_operations > 0);
5583 assert(info->operations_completed < info->total_operations);
5584
5585 info->last_work_size = operation_work_size;
5586
5587 current_work_size = info->offset_completed + operation_work_size;
5588
5589 /* current_work_size is the total work size for (operations_completed + 1)
5590 * operations (which includes this one), so multiply it by the number of
5591 * operations not covered and divide it by the number of operations
5592 * covered to get a projection for the operations not covered */
5593 projected_work_size = current_work_size * (info->total_operations -
5594 info->operations_completed - 1)
5595 / (info->operations_completed + 1);
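/*
 * Worked example (hypothetical numbers, for illustration only): with
 * total_operations = 3, operations_completed = 1, offset_completed = 1000
 * and operation_work_size = 500, current_work_size is 1500 and covers two
 * operations, so the one remaining operation is projected at
 * 1500 * 1 / 2 = 750 and the total reported below is 1500 + 750 = 2250.
 */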
5596
5597 info->original_status_cb(bs, info->offset_completed + operation_offset,
5598 current_work_size + projected_work_size,
5599 info->original_cb_opaque);
5600 }
5601
5602 static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
5603 BlockDriverAmendStatusCB *status_cb,
5604 void *cb_opaque,
5605 bool force,
5606 Error **errp)
5607 {
5608 BDRVQcow2State *s = bs->opaque;
5609 int old_version = s->qcow_version, new_version = old_version;
5610 uint64_t new_size = 0;
5611 const char *backing_file = NULL, *backing_format = NULL, *data_file = NULL;
5612 bool lazy_refcounts = s->use_lazy_refcounts;
5613 bool data_file_raw = data_file_is_raw(bs);
5614 const char *compat = NULL;
5615 int refcount_bits = s->refcount_bits;
5616 int ret;
5617 QemuOptDesc *desc = opts->list->desc;
5618 Qcow2AmendHelperCBInfo helper_cb_info;
5619 bool encryption_update = false;
5620
5621 while (desc && desc->name) {
5622 if (!qemu_opt_find(opts, desc->name)) {
5623 /* only change explicitly defined options */
5624 desc++;
5625 continue;
5626 }
5627
5628 if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) {
5629 compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL);
5630 if (!compat) {
5631 /* preserve default */
5632 } else if (!strcmp(compat, "0.10") || !strcmp(compat, "v2")) {
5633 new_version = 2;
5634 } else if (!strcmp(compat, "1.1") || !strcmp(compat, "v3")) {
5635 new_version = 3;
5636 } else {
5637 error_setg(errp, "Unknown compatibility level %s", compat);
5638 return -EINVAL;
5639 }
5640 } else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) {
5641 new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
5642 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) {
5643 backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
5644 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) {
5645 backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
5646 } else if (g_str_has_prefix(desc->name, "encrypt.")) {
5647 if (!s->crypto) {
5648 error_setg(errp,
5649 "Can't amend encryption options - encryption not present");
5650 return -EINVAL;
5651 }
5652 if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
5653 error_setg(errp,
5654 "Only LUKS encryption options can be amended");
5655 return -ENOTSUP;
5656 }
5657 encryption_update = true;
5658 } else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) {
5659 lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS,
5660 lazy_refcounts);
5661 } else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) {
5662 refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS,
5663 refcount_bits);
5664
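/*
 * Sanity-check the requested width: qcow2 only supports power-of-two
 * refcount widths between 1 and 64 bits, i.e. 1, 2, 4, 8, 16, 32 or 64.
 */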
5665 if (refcount_bits <= 0 || refcount_bits > 64 ||
5666 !is_power_of_2(refcount_bits))
5667 {
5668 error_setg(errp, "Refcount width must be a power of two and "
5669 "may not exceed 64 bits");
5670 return -EINVAL;
5671 }
5672 } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE)) {
5673 data_file = qemu_opt_get(opts, BLOCK_OPT_DATA_FILE);
5674 if (data_file && !has_data_file(bs)) {
5675 error_setg(errp, "data-file can only be set for images that "
5676 "use an external data file");
5677 return -EINVAL;
5678 }
5679 } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE_RAW)) {
5680 data_file_raw = qemu_opt_get_bool(opts, BLOCK_OPT_DATA_FILE_RAW,
5681 data_file_raw);
5682 if (data_file_raw && !data_file_is_raw(bs)) {
5683 error_setg(errp, "data-file-raw cannot be set on existing "
5684 "images");
5685 return -EINVAL;
5686 }
5687 } else {
5688 /* if this point is reached, it probably means a new option was
5689 * added without being handled here */
5690 abort();
5691 }
5692
5693 desc++;
5694 }
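/*
 * For reference, these are the options a user can pass to qemu-img amend,
 * e.g. (hypothetical invocation):
 *
 *     qemu-img amend -o compat=v3,refcount_bits=32,lazy_refcounts=on \
 *         test.qcow2
 */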
5695
5696 helper_cb_info = (Qcow2AmendHelperCBInfo){
5697 .original_status_cb = status_cb,
5698 .original_cb_opaque = cb_opaque,
5699 .total_operations = (new_version != old_version) +
5700 (s->refcount_bits != refcount_bits) +
5701 encryption_update
5702 };
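/*
 * Only the version change, the encryption update and the refcount width
 * change are reported as separate progress steps; the header updates, the
 * lazy refcount toggle and the resize below do not go through the helper
 * callback.
 */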
5703
5704 /* Upgrade first (some features may require compat=1.1) */
5705 if (new_version > old_version) {
5706 helper_cb_info.current_operation = QCOW2_UPGRADING;
5707 ret = qcow2_upgrade(bs, new_version, &qcow2_amend_helper_cb,
5708 &helper_cb_info, errp);
5709 if (ret < 0) {
5710 return ret;
5711 }
5712 }
5713
5714 if (encryption_update) {
5715 QDict *amend_opts_dict;
5716 QCryptoBlockAmendOptions *amend_opts;
5717
5718 helper_cb_info.current_operation = QCOW2_UPDATING_ENCRYPTION;
5719 amend_opts_dict = qcow2_extract_crypto_opts(opts, "luks", errp);
5720 if (!amend_opts_dict) {
5721 return -EINVAL;
5722 }
5723 amend_opts = block_crypto_amend_opts_init(amend_opts_dict, errp);
5724 qobject_unref(amend_opts_dict);
5725 if (!amend_opts) {
5726 return -EINVAL;
5727 }
5728 ret = qcrypto_block_amend_options(s->crypto,
5729 qcow2_crypto_hdr_read_func,
5730 qcow2_crypto_hdr_write_func,
5731 bs,
5732 amend_opts,
5733 force,
5734 errp);
5735 qapi_free_QCryptoBlockAmendOptions(amend_opts);
5736 if (ret < 0) {
5737 return ret;
5738 }
5739 }
5740
5741 if (s->refcount_bits != refcount_bits) {
5742 int refcount_order = ctz32(refcount_bits);
5743
5744 if (new_version < 3 && refcount_bits != 16) {
5745 error_setg(errp, "Refcount widths other than 16 bits require "
5746 "compatibility level 1.1 or above (use compat=1.1 or "
5747 "greater)");
5748 return -EINVAL;
5749 }
5750
5751 helper_cb_info.current_operation = QCOW2_CHANGING_REFCOUNT_ORDER;
5752 ret = qcow2_change_refcount_order(bs, refcount_order,
5753 &qcow2_amend_helper_cb,
5754 &helper_cb_info, errp);
5755 if (ret < 0) {
5756 return ret;
5757 }
5758 }
5759
5760 /* data-file-raw is incompatible with backing files, so clear the flag first if requested */
5761 if (data_file_raw) {
5762 s->autoclear_features |= QCOW2_AUTOCLEAR_DATA_FILE_RAW;
5763 } else {
5764 s->autoclear_features &= ~QCOW2_AUTOCLEAR_DATA_FILE_RAW;
5765 }
5766
5767 if (data_file) {
5768 g_free(s->image_data_file);
5769 s->image_data_file = *data_file ? g_strdup(data_file) : NULL;
5770 }
5771
5772 ret = qcow2_update_header(bs);
5773 if (ret < 0) {
5774 error_setg_errno(errp, -ret, "Failed to update the image header");
5775 return ret;
5776 }
5777
5778 if (backing_file || backing_format) {
5779 if (g_strcmp0(backing_file, s->image_backing_file) ||
5780 g_strcmp0(backing_format, s->image_backing_format)) {
5781 error_setg(errp, "Cannot amend the backing file");
5782 error_append_hint(errp,
5783 "You can use 'qemu-img rebase' instead.\n");
5784 return -EINVAL;
5785 }
5786 }
5787
5788 if (s->use_lazy_refcounts != lazy_refcounts) {
5789 if (lazy_refcounts) {
5790 if (new_version < 3) {
5791 error_setg(errp, "Lazy refcounts only supported with "
5792 "compatibility level 1.1 and above (use compat=1.1 "
5793 "or greater)");
5794 return -EINVAL;
5795 }
5796 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
5797 ret = qcow2_update_header(bs);
5798 if (ret < 0) {
5799 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
5800 error_setg_errno(errp, -ret, "Failed to update the image header");
5801 return ret;
5802 }
5803 s->use_lazy_refcounts = true;
5804 } else {
5805 /* make image clean first */
5806 ret = qcow2_mark_clean(bs);
5807 if (ret < 0) {
5808 error_setg_errno(errp, -ret, "Failed to make the image clean");
5809 return ret;
5810 }
5811 /* now disallow lazy refcounts */
5812 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
5813 ret = qcow2_update_header(bs);
5814 if (ret < 0) {
5815 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
5816 error_setg_errno(errp, -ret, "Failed to update the image header");
5817 return ret;
5818 }
5819 s->use_lazy_refcounts = false;
5820 }
5821 }
5822
5823 if (new_size) {
5824 BlockBackend *blk = blk_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL,
5825 errp);
5826 if (!blk) {
5827 return -EPERM;
5828 }
5829
5830 /*
5831 * Amending image options should ensure that the image has
5832 * exactly the given new values, so pass exact=true here.
5833 */
5834 ret = blk_truncate(blk, new_size, true, PREALLOC_MODE_OFF, 0, errp);
5835 blk_unref(blk);
5836 if (ret < 0) {
5837 return ret;
5838 }
5839 }
5840
5841 /* Downgrade last (so unsupported features can be removed before) */
5842 if (new_version < old_version) {
5843 helper_cb_info.current_operation = QCOW2_DOWNGRADING;
5844 ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb,
5845 &helper_cb_info, errp);
5846 if (ret < 0) {
5847 return ret;
5848 }
5849 }
5850
5851 return 0;
5852 }
5853
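/*
 * This is the entry point used by the experimental x-blockdev-amend QMP
 * command. A request adding a LUKS secret might look roughly like the
 * following sketch (field names come from the QAPI schema, values are
 * hypothetical):
 *
 *   { "execute": "x-blockdev-amend",
 *     "arguments": { "job-id": "job0", "node-name": "node0",
 *                    "options": { "driver": "qcow2",
 *                                 "encrypt": { "format": "luks",
 *                                              "state": "active",
 *                                              "new-secret": "sec1",
 *                                              "keyslot": 1 } },
 *                    "force": false } }
 */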
5854 static int coroutine_fn qcow2_co_amend(BlockDriverState *bs,
5855 BlockdevAmendOptions *opts,
5856 bool force,
5857 Error **errp)
5858 {
5859 BlockdevAmendOptionsQcow2 *qopts = &opts->u.qcow2;
5860 BDRVQcow2State *s = bs->opaque;
5861 int ret = 0;
5862
5863 if (qopts->encrypt) {
5864 if (!s->crypto) {
5865 error_setg(errp, "image is not encrypted, can't amend");
5866 return -EOPNOTSUPP;
5867 }
5868
5869 if (qopts->encrypt->format != Q_CRYPTO_BLOCK_FORMAT_LUKS) {
5870 error_setg(errp,
5871 "Amend can't be used to change the qcow2 encryption format");
5872 return -EOPNOTSUPP;
5873 }
5874
5875 if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
5876 error_setg(errp,
5877 "Only LUKS encryption options can be amended for qcow2 with blockdev-amend");
5878 return -EOPNOTSUPP;
5879 }
5880
5881 ret = qcrypto_block_amend_options(s->crypto,
5882 qcow2_crypto_hdr_read_func,
5883 qcow2_crypto_hdr_write_func,
5884 bs,
5885 qopts->encrypt,
5886 force,
5887 errp);
5888 }
5889 return ret;
5890 }
5891
5892 /*
5893 * If offset or size is negative, the respective field is omitted from the
5894 * emitted BLOCK_IMAGE_CORRUPTED event.
5895 * fatal will be ignored for read-only BDS; corruptions found there will always
5896 * be considered non-fatal.
5897 */
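/*
 * Example (hypothetical call site, for illustration only): a caller that
 * detects a misaligned L2 table offset could report a fatal corruption as
 *
 *     qcow2_signal_corruption(bs, true, -1, -1,
 *                             "L2 table offset %#" PRIx64 " unaligned",
 *                             l2_offset);
 *
 * where passing -1 for offset and size omits both fields from the emitted
 * event.
 */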
5898 void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
5899 int64_t size, const char *message_format, ...)
5900 {
5901 BDRVQcow2State *s = bs->opaque;
5902 const char *node_name;
5903 char *message;
5904 va_list ap;
5905
5906 fatal = fatal && bdrv_is_writable(bs);
5907
5908 if (s->signaled_corruption &&
5909 (!fatal || (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT)))
5910 {
5911 return;
5912 }
5913
5914 va_start(ap, message_format);
5915 message = g_strdup_vprintf(message_format, ap);
5916 va_end(ap);
5917
5918 if (fatal) {
5919 fprintf(stderr, "qcow2: Marking image as corrupt: %s; further "
5920 "corruption events will be suppressed\n", message);
5921 } else {
5922 fprintf(stderr, "qcow2: Image is corrupt: %s; further non-fatal "
5923 "corruption events will be suppressed\n", message);
5924 }
5925
5926 node_name = bdrv_get_node_name(bs);
5927 qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs),
5928 *node_name ? node_name : NULL,
5929 message, offset >= 0, offset,
5930 size >= 0, size,
5931 fatal);
5932 g_free(message);
5933
5934 if (fatal) {
5935 qcow2_mark_corrupt(bs);
5936 bs->drv = NULL; /* make BDS unusable */
5937 }
5938
5939 s->signaled_corruption = true;
5940 }
5941
5942 #define QCOW_COMMON_OPTIONS \
5943 { \
5944 .name = BLOCK_OPT_SIZE, \
5945 .type = QEMU_OPT_SIZE, \
5946 .help = "Virtual disk size" \
5947 }, \
5948 { \
5949 .name = BLOCK_OPT_COMPAT_LEVEL, \
5950 .type = QEMU_OPT_STRING, \
5951 .help = "Compatibility level (v2 [0.10] or v3 [1.1])" \
5952 }, \
5953 { \
5954 .name = BLOCK_OPT_BACKING_FILE, \
5955 .type = QEMU_OPT_STRING, \
5956 .help = "File name of a base image" \
5957 }, \
5958 { \
5959 .name = BLOCK_OPT_BACKING_FMT, \
5960 .type = QEMU_OPT_STRING, \
5961 .help = "Image format of the base image" \
5962 }, \
5963 { \
5964 .name = BLOCK_OPT_DATA_FILE, \
5965 .type = QEMU_OPT_STRING, \
5966 .help = "File name of an external data file" \
5967 }, \
5968 { \
5969 .name = BLOCK_OPT_DATA_FILE_RAW, \
5970 .type = QEMU_OPT_BOOL, \
5971 .help = "The external data file must stay valid " \
5972 "as a raw image" \
5973 }, \
5974 { \
5975 .name = BLOCK_OPT_LAZY_REFCOUNTS, \
5976 .type = QEMU_OPT_BOOL, \
5977 .help = "Postpone refcount updates", \
5978 .def_value_str = "off" \
5979 }, \
5980 { \
5981 .name = BLOCK_OPT_REFCOUNT_BITS, \
5982 .type = QEMU_OPT_NUMBER, \
5983 .help = "Width of a reference count entry in bits", \
5984 .def_value_str = "16" \
5985 }
5986
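/*
 * The creation options below surface on the qemu-img command line; an
 * illustrative (hypothetical) invocation would be
 *
 *     qemu-img create -f qcow2 \
 *         -o compat=v3,cluster_size=128k,lazy_refcounts=on \
 *         disk.qcow2 16G
 *
 * (compression_type=zstd could be added on builds configured with zstd
 * support.)
 */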
5987 static QemuOptsList qcow2_create_opts = {
5988 .name = "qcow2-create-opts",
5989 .head = QTAILQ_HEAD_INITIALIZER(qcow2_create_opts.head),
5990 .desc = {
5991 {
5992 .name = BLOCK_OPT_ENCRYPT,
5993 .type = QEMU_OPT_BOOL,
5994 .help = "Encrypt the image with format 'aes'. (Deprecated "
5995 "in favor of " BLOCK_OPT_ENCRYPT_FORMAT "=aes)",
5996 },
5997 {
5998 .name = BLOCK_OPT_ENCRYPT_FORMAT,
5999 .type = QEMU_OPT_STRING,
6000 .help = "Encrypt the image, format choices: 'aes', 'luks'",
6001 },
6002 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",
6003 "ID of secret providing qcow AES key or LUKS passphrase"),
6004 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_ALG("encrypt."),
6005 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_MODE("encrypt."),
6006 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_ALG("encrypt."),
6007 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG("encrypt."),
6008 BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG("encrypt."),
6009 BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."),
6010 {
6011 .name = BLOCK_OPT_CLUSTER_SIZE,
6012 .type = QEMU_OPT_SIZE,
6013 .help = "qcow2 cluster size",
6014 .def_value_str = stringify(DEFAULT_CLUSTER_SIZE)
6015 },
6016 {
6017 .name = BLOCK_OPT_EXTL2,
6018 .type = QEMU_OPT_BOOL,
6019 .help = "Extended L2 tables",
6020 .def_value_str = "off"
6021 },
6022 {
6023 .name = BLOCK_OPT_PREALLOC,
6024 .type = QEMU_OPT_STRING,
6025 .help = "Preallocation mode (allowed values: off, "
6026 "metadata, falloc, full)"
6027 },
6028 {
6029 .name = BLOCK_OPT_COMPRESSION_TYPE,
6030 .type = QEMU_OPT_STRING,
6031 .help = "Compression method used for image cluster "
6032 "compression",
6033 .def_value_str = "zlib"
6034 },
6035 QCOW_COMMON_OPTIONS,
6036 { /* end of list */ }
6037 }
6038 };
6039
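/*
 * The amend options below drive LUKS keyslot updates through qemu-img
 * amend. A rough sketch (secret IDs hypothetical; in practice the
 * encrypted image also has to be opened with its existing key secret,
 * e.g. via --image-opts):
 *
 *     qemu-img amend --object secret,id=sec1,data=newpassword \
 *         -o encrypt.state=active,encrypt.new-secret=sec1,encrypt.keyslot=1 \
 *         encrypted.qcow2
 */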
6040 static QemuOptsList qcow2_amend_opts = {
6041 .name = "qcow2-amend-opts",
6042 .head = QTAILQ_HEAD_INITIALIZER(qcow2_amend_opts.head),
6043 .desc = {
6044 BLOCK_CRYPTO_OPT_DEF_LUKS_STATE("encrypt."),
6045 BLOCK_CRYPTO_OPT_DEF_LUKS_KEYSLOT("encrypt."),
6046 BLOCK_CRYPTO_OPT_DEF_LUKS_OLD_SECRET("encrypt."),
6047 BLOCK_CRYPTO_OPT_DEF_LUKS_NEW_SECRET("encrypt."),
6048 BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."),
6049 QCOW_COMMON_OPTIONS,
6050 { /* end of list */ }
6051 }
6052 };
6053
6054 static const char *const qcow2_strong_runtime_opts[] = {
6055 "encrypt." BLOCK_CRYPTO_OPT_QCOW_KEY_SECRET,
6056
6057 NULL
6058 };
6059
6060 BlockDriver bdrv_qcow2 = {
6061 .format_name = "qcow2",
6062 .instance_size = sizeof(BDRVQcow2State),
6063 .bdrv_probe = qcow2_probe,
6064 .bdrv_open = qcow2_open,
6065 .bdrv_close = qcow2_close,
6066 .bdrv_reopen_prepare = qcow2_reopen_prepare,
6067 .bdrv_reopen_commit = qcow2_reopen_commit,
6068 .bdrv_reopen_commit_post = qcow2_reopen_commit_post,
6069 .bdrv_reopen_abort = qcow2_reopen_abort,
6070 .bdrv_join_options = qcow2_join_options,
6071 .bdrv_child_perm = bdrv_default_perms,
6072 .bdrv_co_create_opts = qcow2_co_create_opts,
6073 .bdrv_co_create = qcow2_co_create,
6074 .bdrv_has_zero_init = qcow2_has_zero_init,
6075 .bdrv_co_block_status = qcow2_co_block_status,
6076
6077 .bdrv_co_preadv_part = qcow2_co_preadv_part,
6078 .bdrv_co_pwritev_part = qcow2_co_pwritev_part,
6079 .bdrv_co_flush_to_os = qcow2_co_flush_to_os,
6080
6081 .bdrv_co_pwrite_zeroes = qcow2_co_pwrite_zeroes,
6082 .bdrv_co_pdiscard = qcow2_co_pdiscard,
6083 .bdrv_co_copy_range_from = qcow2_co_copy_range_from,
6084 .bdrv_co_copy_range_to = qcow2_co_copy_range_to,
6085 .bdrv_co_truncate = qcow2_co_truncate,
6086 .bdrv_co_pwritev_compressed_part = qcow2_co_pwritev_compressed_part,
6087 .bdrv_make_empty = qcow2_make_empty,
6088
6089 .bdrv_snapshot_create = qcow2_snapshot_create,
6090 .bdrv_snapshot_goto = qcow2_snapshot_goto,
6091 .bdrv_snapshot_delete = qcow2_snapshot_delete,
6092 .bdrv_snapshot_list = qcow2_snapshot_list,
6093 .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp,
6094 .bdrv_measure = qcow2_measure,
6095 .bdrv_co_get_info = qcow2_co_get_info,
6096 .bdrv_get_specific_info = qcow2_get_specific_info,
6097
6098 .bdrv_co_save_vmstate = qcow2_co_save_vmstate,
6099 .bdrv_co_load_vmstate = qcow2_co_load_vmstate,
6100
6101 .is_format = true,
6102 .supports_backing = true,
6103 .bdrv_change_backing_file = qcow2_change_backing_file,
6104
6105 .bdrv_refresh_limits = qcow2_refresh_limits,
6106 .bdrv_co_invalidate_cache = qcow2_co_invalidate_cache,
6107 .bdrv_inactivate = qcow2_inactivate,
6108
6109 .create_opts = &qcow2_create_opts,
6110 .amend_opts = &qcow2_amend_opts,
6111 .strong_runtime_opts = qcow2_strong_runtime_opts,
6112 .mutable_opts = mutable_opts,
6113 .bdrv_co_check = qcow2_co_check,
6114 .bdrv_amend_options = qcow2_amend_options,
6115 .bdrv_co_amend = qcow2_co_amend,
6116
6117 .bdrv_detach_aio_context = qcow2_detach_aio_context,
6118 .bdrv_attach_aio_context = qcow2_attach_aio_context,
6119
6120 .bdrv_supports_persistent_dirty_bitmap =
6121 qcow2_supports_persistent_dirty_bitmap,
6122 .bdrv_co_can_store_new_dirty_bitmap = qcow2_co_can_store_new_dirty_bitmap,
6123 .bdrv_co_remove_persistent_dirty_bitmap =
6124 qcow2_co_remove_persistent_dirty_bitmap,
6125 };
6126
6127 static void bdrv_qcow2_init(void)
6128 {
6129 bdrv_register(&bdrv_qcow2);
6130 }
6131
6132 block_init(bdrv_qcow2_init);