1 /*
2 * Block driver for the QCOW version 2 format
3 *
4 * Copyright (c) 2004-2006 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "qemu/osdep.h"
26
27 #include "block/qdict.h"
28 #include "sysemu/block-backend.h"
29 #include "qemu/main-loop.h"
30 #include "qemu/module.h"
31 #include "qcow2.h"
32 #include "qemu/error-report.h"
33 #include "qapi/error.h"
34 #include "qapi/qapi-events-block-core.h"
35 #include "qapi/qmp/qdict.h"
36 #include "qapi/qmp/qstring.h"
37 #include "trace.h"
38 #include "qemu/option_int.h"
39 #include "qemu/cutils.h"
40 #include "qemu/bswap.h"
41 #include "qemu/memalign.h"
42 #include "qapi/qobject-input-visitor.h"
43 #include "qapi/qapi-visit-block-core.h"
44 #include "crypto.h"
45 #include "block/aio_task.h"
46 #include "block/dirty-bitmap.h"
47
48 /*
49 Differences with QCOW:
50
51 - Support for multiple incremental snapshots.
52 - Memory management by reference counts.
53 - Clusters which have a reference count of one have the bit
54 QCOW_OFLAG_COPIED to optimize write performance.
55 - Size of compressed clusters is stored in sectors to reduce bit usage
56 in the cluster offsets.
57 - Support for storing additional data (such as the VM state) in the
58 snapshots.
59 - If a backing store is used, the cluster size is not constrained
60 (could be backported to QCOW).
  61     - L2 tables always have a size of one cluster.
62 */
63
64
65 typedef struct {
66 uint32_t magic;
67 uint32_t len;
68 } QEMU_PACKED QCowExtension;
69
70 #define QCOW2_EXT_MAGIC_END 0
71 #define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xe2792aca
72 #define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857
73 #define QCOW2_EXT_MAGIC_CRYPTO_HEADER 0x0537be77
74 #define QCOW2_EXT_MAGIC_BITMAPS 0x23852875
75 #define QCOW2_EXT_MAGIC_DATA_FILE 0x44415441
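/*
 * Rough sketch of the on-disk layout parsed by qcow2_read_extensions()
 * below: each header extension starts with the big-endian QCowExtension
 * header above (4-byte magic, 4-byte length), followed by 'len' bytes of
 * payload padded up to the next multiple of 8.  For example, a
 * backing-format extension carrying the 5-byte string "qcow2" occupies
 * 4 + 4 + 5 bytes plus 3 bytes of padding, so the next extension header
 * starts 16 bytes later.  The list ends at an extension with magic
 * QCOW2_EXT_MAGIC_END or at end_offset (the start of the backing file
 * name, if any), whichever comes first.
 */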
76
77 static int coroutine_fn
78 qcow2_co_preadv_compressed(BlockDriverState *bs,
79 uint64_t l2_entry,
80 uint64_t offset,
81 uint64_t bytes,
82 QEMUIOVector *qiov,
83 size_t qiov_offset);
84
85 static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
86 {
87 const QCowHeader *cow_header = (const void *)buf;
88
89 if (buf_size >= sizeof(QCowHeader) &&
90 be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
91 be32_to_cpu(cow_header->version) >= 2)
92 return 100;
93 else
94 return 0;
95 }
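/*
 * Note on the scoring above: QCOW_MAGIC is the four bytes 'Q', 'F', 'I',
 * 0xfb (0x514649fb), so the probe accepts any buffer starting with that
 * magic and a version field of 2 or greater.  Returning 100 tells the
 * generic format-probing code that this is a definite match for the qcow2
 * driver; 0 means the buffer does not look like a qcow2 image at all.
 */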
96
97
98 static int GRAPH_RDLOCK
99 qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset,
100 uint8_t *buf, size_t buflen,
101 void *opaque, Error **errp)
102 {
103 BlockDriverState *bs = opaque;
104 BDRVQcow2State *s = bs->opaque;
105 ssize_t ret;
106
107 if ((offset + buflen) > s->crypto_header.length) {
108 error_setg(errp, "Request for data outside of extension header");
109 return -1;
110 }
111
112 ret = bdrv_pread(bs->file, s->crypto_header.offset + offset, buflen, buf,
113 0);
114 if (ret < 0) {
115 error_setg_errno(errp, -ret, "Could not read encryption header");
116 return -1;
117 }
118 return 0;
119 }
120
121
122 static int coroutine_fn GRAPH_RDLOCK
123 qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen, void *opaque,
124 Error **errp)
125 {
126 BlockDriverState *bs = opaque;
127 BDRVQcow2State *s = bs->opaque;
128 int64_t ret;
129 int64_t clusterlen;
130
131 ret = qcow2_alloc_clusters(bs, headerlen);
132 if (ret < 0) {
133 error_setg_errno(errp, -ret,
134 "Cannot allocate cluster for LUKS header size %zu",
135 headerlen);
136 return -1;
137 }
138
139 s->crypto_header.length = headerlen;
140 s->crypto_header.offset = ret;
141
142 /*
143 * Zero fill all space in cluster so it has predictable
144 * content, as we may not initialize some regions of the
145 * header (eg only 1 out of 8 key slots will be initialized)
146 */
147 clusterlen = size_to_clusters(s, headerlen) * s->cluster_size;
148 assert(qcow2_pre_write_overlap_check(bs, 0, ret, clusterlen, false) == 0);
149 ret = bdrv_co_pwrite_zeroes(bs->file, ret, clusterlen, 0);
150 if (ret < 0) {
151 error_setg_errno(errp, -ret, "Could not zero fill encryption header");
152 return -1;
153 }
154
155 return 0;
156 }
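/*
 * Illustrative numbers for the allocation above, assuming the default
 * 64 KiB clusters: a LUKS header of, say, 2 MiB (headerlen) needs
 * size_to_clusters() == 32 clusters, qcow2_alloc_clusters() returns the
 * offset of that contiguous area, and the whole 2 MiB region is zeroed so
 * that key slots which are never written by qcow2_crypto_hdr_write_func()
 * still have predictable content.
 */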
157
158
159 /* The graph lock must be held when called in coroutine context */
160 static int coroutine_mixed_fn GRAPH_RDLOCK
161 qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset,
162 const uint8_t *buf, size_t buflen,
163 void *opaque, Error **errp)
164 {
165 BlockDriverState *bs = opaque;
166 BDRVQcow2State *s = bs->opaque;
167 ssize_t ret;
168
169 if ((offset + buflen) > s->crypto_header.length) {
170 error_setg(errp, "Request for data outside of extension header");
171 return -1;
172 }
173
174 ret = bdrv_pwrite(bs->file, s->crypto_header.offset + offset, buflen, buf,
175 0);
176 if (ret < 0) {
 177         error_setg_errno(errp, -ret, "Could not write encryption header");
178 return -1;
179 }
180 return 0;
181 }
182
183 static QDict*
184 qcow2_extract_crypto_opts(QemuOpts *opts, const char *fmt, Error **errp)
185 {
186 QDict *cryptoopts_qdict;
187 QDict *opts_qdict;
188
189 /* Extract "encrypt." options into a qdict */
190 opts_qdict = qemu_opts_to_qdict(opts, NULL);
191 qdict_extract_subqdict(opts_qdict, &cryptoopts_qdict, "encrypt.");
192 qobject_unref(opts_qdict);
193 qdict_put_str(cryptoopts_qdict, "format", fmt);
194 return cryptoopts_qdict;
195 }
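/*
 * For illustration, with QemuOpts holding
 *     encrypt.format=luks,encrypt.key-secret=sec0
 * and fmt == "luks", this returns a QDict equivalent to
 *     { "key-secret": "sec0", "format": "luks" }
 * i.e. the "encrypt." prefix is stripped and the format is (re)added
 * explicitly so the crypto layer can parse the options on its own
 * ("sec0" is of course just an example secret ID).
 */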
196
197 /*
 198  * Read qcow2 header extensions and fill bs.
 199  * Start reading from start_offset.
 200  * Finish reading upon a magic of value 0 or when end_offset is reached.
 201  * Unknown magics are skipped (future extensions this version knows nothing about).
 202  * Return 0 upon success, non-0 otherwise.
203 */
204 static int coroutine_fn GRAPH_RDLOCK
205 qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
206 uint64_t end_offset, void **p_feature_table,
207 int flags, bool *need_update_header, Error **errp)
208 {
209 BDRVQcow2State *s = bs->opaque;
210 QCowExtension ext;
211 uint64_t offset;
212 int ret;
213 Qcow2BitmapHeaderExt bitmaps_ext;
214
215 if (need_update_header != NULL) {
216 *need_update_header = false;
217 }
218
219 #ifdef DEBUG_EXT
220 printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset);
221 #endif
222 offset = start_offset;
223 while (offset < end_offset) {
224
225 #ifdef DEBUG_EXT
226 /* Sanity check */
227 if (offset > s->cluster_size)
228 printf("qcow2_read_extension: suspicious offset %lu\n", offset);
229
230 printf("attempting to read extended header in offset %lu\n", offset);
231 #endif
232
233 ret = bdrv_co_pread(bs->file, offset, sizeof(ext), &ext, 0);
234 if (ret < 0) {
235 error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: "
236 "pread fail from offset %" PRIu64, offset);
237 return 1;
238 }
239 ext.magic = be32_to_cpu(ext.magic);
240 ext.len = be32_to_cpu(ext.len);
241 offset += sizeof(ext);
242 #ifdef DEBUG_EXT
243 printf("ext.magic = 0x%x\n", ext.magic);
244 #endif
245 if (offset > end_offset || ext.len > end_offset - offset) {
246 error_setg(errp, "Header extension too large");
247 return -EINVAL;
248 }
249
250 switch (ext.magic) {
251 case QCOW2_EXT_MAGIC_END:
252 return 0;
253
254 case QCOW2_EXT_MAGIC_BACKING_FORMAT:
255 if (ext.len >= sizeof(bs->backing_format)) {
256 error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32
257 " too large (>=%zu)", ext.len,
258 sizeof(bs->backing_format));
259 return 2;
260 }
261 ret = bdrv_co_pread(bs->file, offset, ext.len, bs->backing_format, 0);
262 if (ret < 0) {
263 error_setg_errno(errp, -ret, "ERROR: ext_backing_format: "
264 "Could not read format name");
265 return 3;
266 }
267 bs->backing_format[ext.len] = '\0';
268 s->image_backing_format = g_strdup(bs->backing_format);
269 #ifdef DEBUG_EXT
270 printf("Qcow2: Got format extension %s\n", bs->backing_format);
271 #endif
272 break;
273
274 case QCOW2_EXT_MAGIC_FEATURE_TABLE:
275 if (p_feature_table != NULL) {
276 void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
277 ret = bdrv_co_pread(bs->file, offset, ext.len, feature_table, 0);
278 if (ret < 0) {
279 error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
280 "Could not read table");
281 g_free(feature_table);
282 return ret;
283 }
284
285 *p_feature_table = feature_table;
286 }
287 break;
288
289 case QCOW2_EXT_MAGIC_CRYPTO_HEADER: {
290 unsigned int cflags = 0;
291 if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
292 error_setg(errp, "CRYPTO header extension only "
293 "expected with LUKS encryption method");
294 return -EINVAL;
295 }
296 if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) {
297 error_setg(errp, "CRYPTO header extension size %u, "
298 "but expected size %zu", ext.len,
299 sizeof(Qcow2CryptoHeaderExtension));
300 return -EINVAL;
301 }
302
303 ret = bdrv_co_pread(bs->file, offset, ext.len, &s->crypto_header, 0);
304 if (ret < 0) {
305 error_setg_errno(errp, -ret,
306 "Unable to read CRYPTO header extension");
307 return ret;
308 }
309 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset);
310 s->crypto_header.length = be64_to_cpu(s->crypto_header.length);
311
312 if ((s->crypto_header.offset % s->cluster_size) != 0) {
313 error_setg(errp, "Encryption header offset '%" PRIu64 "' is "
314 "not a multiple of cluster size '%u'",
315 s->crypto_header.offset, s->cluster_size);
316 return -EINVAL;
317 }
318
319 if (flags & BDRV_O_NO_IO) {
320 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
321 }
322 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
323 qcow2_crypto_hdr_read_func,
324 bs, cflags, QCOW2_MAX_THREADS, errp);
325 if (!s->crypto) {
326 return -EINVAL;
327 }
328 } break;
329
330 case QCOW2_EXT_MAGIC_BITMAPS:
331 if (ext.len != sizeof(bitmaps_ext)) {
 332                 error_setg(errp, "bitmaps_ext: "
 333                            "Invalid extension length");
334 return -EINVAL;
335 }
336
337 if (!(s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS)) {
338 if (s->qcow_version < 3) {
339 /* Let's be a bit more specific */
340 warn_report("This qcow2 v2 image contains bitmaps, but "
341 "they may have been modified by a program "
342 "without persistent bitmap support; so now "
343 "they must all be considered inconsistent");
344 } else {
345 warn_report("a program lacking bitmap support "
346 "modified this file, so all bitmaps are now "
347 "considered inconsistent");
348 }
349 error_printf("Some clusters may be leaked, "
350 "run 'qemu-img check -r' on the image "
351 "file to fix.");
352 if (need_update_header != NULL) {
353 /* Updating is needed to drop invalid bitmap extension. */
354 *need_update_header = true;
355 }
356 break;
357 }
358
359 ret = bdrv_co_pread(bs->file, offset, ext.len, &bitmaps_ext, 0);
360 if (ret < 0) {
361 error_setg_errno(errp, -ret, "bitmaps_ext: "
362 "Could not read ext header");
363 return ret;
364 }
365
366 if (bitmaps_ext.reserved32 != 0) {
 367                 error_setg(errp, "bitmaps_ext: "
 368                            "Reserved field is not zero");
369 return -EINVAL;
370 }
371
372 bitmaps_ext.nb_bitmaps = be32_to_cpu(bitmaps_ext.nb_bitmaps);
373 bitmaps_ext.bitmap_directory_size =
374 be64_to_cpu(bitmaps_ext.bitmap_directory_size);
375 bitmaps_ext.bitmap_directory_offset =
376 be64_to_cpu(bitmaps_ext.bitmap_directory_offset);
377
378 if (bitmaps_ext.nb_bitmaps > QCOW2_MAX_BITMAPS) {
379 error_setg(errp,
380 "bitmaps_ext: Image has %" PRIu32 " bitmaps, "
381 "exceeding the QEMU supported maximum of %d",
382 bitmaps_ext.nb_bitmaps, QCOW2_MAX_BITMAPS);
383 return -EINVAL;
384 }
385
386 if (bitmaps_ext.nb_bitmaps == 0) {
387 error_setg(errp, "found bitmaps extension with zero bitmaps");
388 return -EINVAL;
389 }
390
391 if (offset_into_cluster(s, bitmaps_ext.bitmap_directory_offset)) {
392 error_setg(errp, "bitmaps_ext: "
393 "invalid bitmap directory offset");
394 return -EINVAL;
395 }
396
397 if (bitmaps_ext.bitmap_directory_size >
398 QCOW2_MAX_BITMAP_DIRECTORY_SIZE) {
399 error_setg(errp, "bitmaps_ext: "
400 "bitmap directory size (%" PRIu64 ") exceeds "
401 "the maximum supported size (%d)",
402 bitmaps_ext.bitmap_directory_size,
403 QCOW2_MAX_BITMAP_DIRECTORY_SIZE);
404 return -EINVAL;
405 }
406
407 s->nb_bitmaps = bitmaps_ext.nb_bitmaps;
408 s->bitmap_directory_offset =
409 bitmaps_ext.bitmap_directory_offset;
410 s->bitmap_directory_size =
411 bitmaps_ext.bitmap_directory_size;
412
413 #ifdef DEBUG_EXT
414 printf("Qcow2: Got bitmaps extension: "
415 "offset=%" PRIu64 " nb_bitmaps=%" PRIu32 "\n",
416 s->bitmap_directory_offset, s->nb_bitmaps);
417 #endif
418 break;
419
420 case QCOW2_EXT_MAGIC_DATA_FILE:
421 {
422 s->image_data_file = g_malloc0(ext.len + 1);
423 ret = bdrv_co_pread(bs->file, offset, ext.len, s->image_data_file, 0);
424 if (ret < 0) {
425 error_setg_errno(errp, -ret,
426 "ERROR: Could not read data file name");
427 return ret;
428 }
429 #ifdef DEBUG_EXT
430 printf("Qcow2: Got external data file %s\n", s->image_data_file);
431 #endif
432 break;
433 }
434
435 default:
436 /* unknown magic - save it in case we need to rewrite the header */
437 /* If you add a new feature, make sure to also update the fast
438 * path of qcow2_make_empty() to deal with it. */
439 {
440 Qcow2UnknownHeaderExtension *uext;
441
442 uext = g_malloc0(sizeof(*uext) + ext.len);
443 uext->magic = ext.magic;
444 uext->len = ext.len;
445 QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);
446
447 ret = bdrv_co_pread(bs->file, offset, uext->len, uext->data, 0);
448 if (ret < 0) {
449 error_setg_errno(errp, -ret, "ERROR: unknown extension: "
450 "Could not read data");
451 return ret;
452 }
453 }
454 break;
455 }
456
457 offset += ((ext.len + 7) & ~7);
458 }
459
460 return 0;
461 }
462
463 static void cleanup_unknown_header_ext(BlockDriverState *bs)
464 {
465 BDRVQcow2State *s = bs->opaque;
466 Qcow2UnknownHeaderExtension *uext, *next;
467
468 QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) {
469 QLIST_REMOVE(uext, next);
470 g_free(uext);
471 }
472 }
473
474 static void report_unsupported_feature(Error **errp, Qcow2Feature *table,
475 uint64_t mask)
476 {
477 g_autoptr(GString) features = g_string_sized_new(60);
478
479 while (table && table->name[0] != '\0') {
480 if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) {
481 if (mask & (1ULL << table->bit)) {
482 if (features->len > 0) {
483 g_string_append(features, ", ");
484 }
485 g_string_append_printf(features, "%.46s", table->name);
486 mask &= ~(1ULL << table->bit);
487 }
488 }
489 table++;
490 }
491
492 if (mask) {
493 if (features->len > 0) {
494 g_string_append(features, ", ");
495 }
496 g_string_append_printf(features,
497 "Unknown incompatible feature: %" PRIx64, mask);
498 }
499
500 error_setg(errp, "Unsupported qcow2 feature(s): %s", features->str);
501 }
502
503 /*
504 * Sets the dirty bit and flushes afterwards if necessary.
505 *
506 * The incompatible_features bit is only set if the image file header was
507 * updated successfully. Therefore it is not required to check the return
508 * value of this function.
509 */
510 int qcow2_mark_dirty(BlockDriverState *bs)
511 {
512 BDRVQcow2State *s = bs->opaque;
513 uint64_t val;
514 int ret;
515
516 assert(s->qcow_version >= 3);
517
518 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
519 return 0; /* already dirty */
520 }
521
522 val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY);
523 ret = bdrv_pwrite_sync(bs->file,
524 offsetof(QCowHeader, incompatible_features),
525 sizeof(val), &val, 0);
526 if (ret < 0) {
527 return ret;
528 }
529
530 /* Only treat image as dirty if the header was updated successfully */
531 s->incompatible_features |= QCOW2_INCOMPAT_DIRTY;
532 return 0;
533 }
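/*
 * Sketch of how the dirty bit interacts with lazy refcounts (see the rest
 * of this file): the image is marked dirty before data is written whose
 * refcount updates are postponed, and qcow2_mark_clean() below flushes the
 * caches and clears the bit again on a clean shutdown.  If QEMU crashes in
 * between, the bit stays set and the image is repaired on the next
 * read-write open (see the qcow2_co_check_locked() call in qcow2_do_open())
 * or by "qemu-img check -r all".
 */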
534
535 /*
536 * Clears the dirty bit and flushes before if necessary. Only call this
 537  * function when there are no pending requests; it does not guard against
538 * concurrent requests dirtying the image.
539 */
540 static int GRAPH_RDLOCK qcow2_mark_clean(BlockDriverState *bs)
541 {
542 BDRVQcow2State *s = bs->opaque;
543
544 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
545 int ret;
546
547 s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY;
548
549 ret = qcow2_flush_caches(bs);
550 if (ret < 0) {
551 return ret;
552 }
553
554 return qcow2_update_header(bs);
555 }
556 return 0;
557 }
558
559 /*
560 * Marks the image as corrupt.
561 */
562 int qcow2_mark_corrupt(BlockDriverState *bs)
563 {
564 BDRVQcow2State *s = bs->opaque;
565
566 s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT;
567 return qcow2_update_header(bs);
568 }
569
570 /*
571 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes
572 * before if necessary.
573 */
574 static int coroutine_fn GRAPH_RDLOCK
575 qcow2_mark_consistent(BlockDriverState *bs)
576 {
577 BDRVQcow2State *s = bs->opaque;
578
579 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
580 int ret = qcow2_flush_caches(bs);
581 if (ret < 0) {
582 return ret;
583 }
584
585 s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT;
586 return qcow2_update_header(bs);
587 }
588 return 0;
589 }
590
591 static void qcow2_add_check_result(BdrvCheckResult *out,
592 const BdrvCheckResult *src,
593 bool set_allocation_info)
594 {
595 out->corruptions += src->corruptions;
596 out->leaks += src->leaks;
597 out->check_errors += src->check_errors;
598 out->corruptions_fixed += src->corruptions_fixed;
599 out->leaks_fixed += src->leaks_fixed;
600
601 if (set_allocation_info) {
602 out->image_end_offset = src->image_end_offset;
603 out->bfi = src->bfi;
604 }
605 }
606
607 static int coroutine_fn GRAPH_RDLOCK
608 qcow2_co_check_locked(BlockDriverState *bs, BdrvCheckResult *result,
609 BdrvCheckMode fix)
610 {
611 BdrvCheckResult snapshot_res = {};
612 BdrvCheckResult refcount_res = {};
613 int ret;
614
615 memset(result, 0, sizeof(*result));
616
617 ret = qcow2_check_read_snapshot_table(bs, &snapshot_res, fix);
618 if (ret < 0) {
619 qcow2_add_check_result(result, &snapshot_res, false);
620 return ret;
621 }
622
623 ret = qcow2_check_refcounts(bs, &refcount_res, fix);
624 qcow2_add_check_result(result, &refcount_res, true);
625 if (ret < 0) {
626 qcow2_add_check_result(result, &snapshot_res, false);
627 return ret;
628 }
629
630 ret = qcow2_check_fix_snapshot_table(bs, &snapshot_res, fix);
631 qcow2_add_check_result(result, &snapshot_res, false);
632 if (ret < 0) {
633 return ret;
634 }
635
636 if (fix && result->check_errors == 0 && result->corruptions == 0) {
637 ret = qcow2_mark_clean(bs);
638 if (ret < 0) {
639 return ret;
640 }
641 return qcow2_mark_consistent(bs);
642 }
643 return ret;
644 }
645
646 static int coroutine_fn GRAPH_RDLOCK
647 qcow2_co_check(BlockDriverState *bs, BdrvCheckResult *result,
648 BdrvCheckMode fix)
649 {
650 BDRVQcow2State *s = bs->opaque;
651 int ret;
652
653 qemu_co_mutex_lock(&s->lock);
654 ret = qcow2_co_check_locked(bs, result, fix);
655 qemu_co_mutex_unlock(&s->lock);
656 return ret;
657 }
658
659 int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
660 uint64_t entries, size_t entry_len,
661 int64_t max_size_bytes, const char *table_name,
662 Error **errp)
663 {
664 BDRVQcow2State *s = bs->opaque;
665
666 if (entries > max_size_bytes / entry_len) {
667 error_setg(errp, "%s too large", table_name);
668 return -EFBIG;
669 }
670
671 /* Use signed INT64_MAX as the maximum even for uint64_t header fields,
672 * because values will be passed to qemu functions taking int64_t. */
673 if ((INT64_MAX - entries * entry_len < offset) ||
674 (offset_into_cluster(s, offset) != 0)) {
675 error_setg(errp, "%s offset invalid", table_name);
676 return -EINVAL;
677 }
678
679 return 0;
680 }
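/*
 * Usage sketch: qcow2_do_open() below calls this as, for example,
 *
 *     qcow2_validate_table(bs, header.l1_table_offset, header.l1_size,
 *                          L1E_SIZE, QCOW_MAX_L1_SIZE, "Active L1 table",
 *                          errp);
 *
 * which rejects an L1 table that is larger than the allowed maximum, not
 * cluster aligned, or placed so close to INT64_MAX that later offset
 * arithmetic could overflow a signed 64-bit value.
 */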
681
682 static const char *const mutable_opts[] = {
683 QCOW2_OPT_LAZY_REFCOUNTS,
684 QCOW2_OPT_DISCARD_REQUEST,
685 QCOW2_OPT_DISCARD_SNAPSHOT,
686 QCOW2_OPT_DISCARD_OTHER,
687 QCOW2_OPT_DISCARD_NO_UNREF,
688 QCOW2_OPT_OVERLAP,
689 QCOW2_OPT_OVERLAP_TEMPLATE,
690 QCOW2_OPT_OVERLAP_MAIN_HEADER,
691 QCOW2_OPT_OVERLAP_ACTIVE_L1,
692 QCOW2_OPT_OVERLAP_ACTIVE_L2,
693 QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
694 QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
695 QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
696 QCOW2_OPT_OVERLAP_INACTIVE_L1,
697 QCOW2_OPT_OVERLAP_INACTIVE_L2,
698 QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
699 QCOW2_OPT_CACHE_SIZE,
700 QCOW2_OPT_L2_CACHE_SIZE,
701 QCOW2_OPT_L2_CACHE_ENTRY_SIZE,
702 QCOW2_OPT_REFCOUNT_CACHE_SIZE,
703 QCOW2_OPT_CACHE_CLEAN_INTERVAL,
704 NULL
705 };
706
707 static QemuOptsList qcow2_runtime_opts = {
708 .name = "qcow2",
709 .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head),
710 .desc = {
711 {
712 .name = QCOW2_OPT_LAZY_REFCOUNTS,
713 .type = QEMU_OPT_BOOL,
714 .help = "Postpone refcount updates",
715 },
716 {
717 .name = QCOW2_OPT_DISCARD_REQUEST,
718 .type = QEMU_OPT_BOOL,
719 .help = "Pass guest discard requests to the layer below",
720 },
721 {
722 .name = QCOW2_OPT_DISCARD_SNAPSHOT,
723 .type = QEMU_OPT_BOOL,
724 .help = "Generate discard requests when snapshot related space "
725 "is freed",
726 },
727 {
728 .name = QCOW2_OPT_DISCARD_OTHER,
729 .type = QEMU_OPT_BOOL,
730 .help = "Generate discard requests when other clusters are freed",
731 },
732 {
733 .name = QCOW2_OPT_DISCARD_NO_UNREF,
734 .type = QEMU_OPT_BOOL,
735 .help = "Do not unreference discarded clusters",
736 },
737 {
738 .name = QCOW2_OPT_OVERLAP,
739 .type = QEMU_OPT_STRING,
740 .help = "Selects which overlap checks to perform from a range of "
741 "templates (none, constant, cached, all)",
742 },
743 {
744 .name = QCOW2_OPT_OVERLAP_TEMPLATE,
745 .type = QEMU_OPT_STRING,
746 .help = "Selects which overlap checks to perform from a range of "
747 "templates (none, constant, cached, all)",
748 },
749 {
750 .name = QCOW2_OPT_OVERLAP_MAIN_HEADER,
751 .type = QEMU_OPT_BOOL,
752 .help = "Check for unintended writes into the main qcow2 header",
753 },
754 {
755 .name = QCOW2_OPT_OVERLAP_ACTIVE_L1,
756 .type = QEMU_OPT_BOOL,
757 .help = "Check for unintended writes into the active L1 table",
758 },
759 {
760 .name = QCOW2_OPT_OVERLAP_ACTIVE_L2,
761 .type = QEMU_OPT_BOOL,
762 .help = "Check for unintended writes into an active L2 table",
763 },
764 {
765 .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
766 .type = QEMU_OPT_BOOL,
767 .help = "Check for unintended writes into the refcount table",
768 },
769 {
770 .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
771 .type = QEMU_OPT_BOOL,
772 .help = "Check for unintended writes into a refcount block",
773 },
774 {
775 .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
776 .type = QEMU_OPT_BOOL,
777 .help = "Check for unintended writes into the snapshot table",
778 },
779 {
780 .name = QCOW2_OPT_OVERLAP_INACTIVE_L1,
781 .type = QEMU_OPT_BOOL,
782 .help = "Check for unintended writes into an inactive L1 table",
783 },
784 {
785 .name = QCOW2_OPT_OVERLAP_INACTIVE_L2,
786 .type = QEMU_OPT_BOOL,
787 .help = "Check for unintended writes into an inactive L2 table",
788 },
789 {
790 .name = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
791 .type = QEMU_OPT_BOOL,
792 .help = "Check for unintended writes into the bitmap directory",
793 },
794 {
795 .name = QCOW2_OPT_CACHE_SIZE,
796 .type = QEMU_OPT_SIZE,
797 .help = "Maximum combined metadata (L2 tables and refcount blocks) "
798 "cache size",
799 },
800 {
801 .name = QCOW2_OPT_L2_CACHE_SIZE,
802 .type = QEMU_OPT_SIZE,
803 .help = "Maximum L2 table cache size",
804 },
805 {
806 .name = QCOW2_OPT_L2_CACHE_ENTRY_SIZE,
807 .type = QEMU_OPT_SIZE,
808 .help = "Size of each entry in the L2 cache",
809 },
810 {
811 .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE,
812 .type = QEMU_OPT_SIZE,
813 .help = "Maximum refcount block cache size",
814 },
815 {
816 .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL,
817 .type = QEMU_OPT_NUMBER,
818 .help = "Clean unused cache entries after this time (in seconds)",
819 },
820 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",
821 "ID of secret providing qcow2 AES key or LUKS passphrase"),
822 { /* end of list */ }
823 },
824 };
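/*
 * These are runtime (open-time) options rather than creation options.  A
 * hedged example of how they might be given on a -blockdev command line
 * (the file name and sizes are made up):
 *
 *   -blockdev driver=qcow2,node-name=disk0,file.driver=file,
 *             file.filename=disk.qcow2,l2-cache-size=16M,
 *             cache-clean-interval=600,overlap-check=constant
 *
 * The option names correspond to the QCOW2_OPT_* strings in the table above.
 */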
825
826 static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = {
827 [QCOW2_OL_MAIN_HEADER_BITNR] = QCOW2_OPT_OVERLAP_MAIN_HEADER,
828 [QCOW2_OL_ACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L1,
829 [QCOW2_OL_ACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L2,
830 [QCOW2_OL_REFCOUNT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
831 [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
832 [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
833 [QCOW2_OL_INACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L1,
834 [QCOW2_OL_INACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L2,
835 [QCOW2_OL_BITMAP_DIRECTORY_BITNR] = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
836 };
837
838 static void cache_clean_timer_cb(void *opaque)
839 {
840 BlockDriverState *bs = opaque;
841 BDRVQcow2State *s = bs->opaque;
842 qcow2_cache_clean_unused(s->l2_table_cache);
843 qcow2_cache_clean_unused(s->refcount_block_cache);
844 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
845 (int64_t) s->cache_clean_interval * 1000);
846 }
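/*
 * The timer above re-arms itself every cache_clean_interval seconds and,
 * roughly speaking, drops cache entries that have not been used since the
 * previous run.  For example, with cache-clean-interval=600 the L2 and
 * refcount caches are scanned every ten minutes and unused tables are
 * released back to the host.
 */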
847
848 static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context)
849 {
850 BDRVQcow2State *s = bs->opaque;
851 if (s->cache_clean_interval > 0) {
852 s->cache_clean_timer =
853 aio_timer_new_with_attrs(context, QEMU_CLOCK_VIRTUAL,
854 SCALE_MS, QEMU_TIMER_ATTR_EXTERNAL,
855 cache_clean_timer_cb, bs);
856 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
857 (int64_t) s->cache_clean_interval * 1000);
858 }
859 }
860
861 static void cache_clean_timer_del(BlockDriverState *bs)
862 {
863 BDRVQcow2State *s = bs->opaque;
864 if (s->cache_clean_timer) {
865 timer_free(s->cache_clean_timer);
866 s->cache_clean_timer = NULL;
867 }
868 }
869
870 static void qcow2_detach_aio_context(BlockDriverState *bs)
871 {
872 cache_clean_timer_del(bs);
873 }
874
875 static void qcow2_attach_aio_context(BlockDriverState *bs,
876 AioContext *new_context)
877 {
878 cache_clean_timer_init(bs, new_context);
879 }
880
881 static bool read_cache_sizes(BlockDriverState *bs, QemuOpts *opts,
882 uint64_t *l2_cache_size,
883 uint64_t *l2_cache_entry_size,
884 uint64_t *refcount_cache_size, Error **errp)
885 {
886 BDRVQcow2State *s = bs->opaque;
887 uint64_t combined_cache_size, l2_cache_max_setting;
888 bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set;
889 bool l2_cache_entry_size_set;
890 int min_refcount_cache = MIN_REFCOUNT_CACHE_SIZE * s->cluster_size;
891 uint64_t virtual_disk_size = bs->total_sectors * BDRV_SECTOR_SIZE;
892 uint64_t max_l2_entries = DIV_ROUND_UP(virtual_disk_size, s->cluster_size);
893 /* An L2 table is always one cluster in size so the max cache size
894 * should be a multiple of the cluster size. */
895 uint64_t max_l2_cache = ROUND_UP(max_l2_entries * l2_entry_size(s),
896 s->cluster_size);
897
898 combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE);
899 l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE);
900 refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
901 l2_cache_entry_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE);
902
903 combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0);
904 l2_cache_max_setting = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE,
905 DEFAULT_L2_CACHE_MAX_SIZE);
906 *refcount_cache_size = qemu_opt_get_size(opts,
907 QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0);
908
909 *l2_cache_entry_size = qemu_opt_get_size(
910 opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE, s->cluster_size);
911
912 *l2_cache_size = MIN(max_l2_cache, l2_cache_max_setting);
913
914 if (combined_cache_size_set) {
915 if (l2_cache_size_set && refcount_cache_size_set) {
916 error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE
917 " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set "
918 "at the same time");
919 return false;
920 } else if (l2_cache_size_set &&
921 (l2_cache_max_setting > combined_cache_size)) {
922 error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed "
923 QCOW2_OPT_CACHE_SIZE);
924 return false;
925 } else if (*refcount_cache_size > combined_cache_size) {
926 error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed "
927 QCOW2_OPT_CACHE_SIZE);
928 return false;
929 }
930
931 if (l2_cache_size_set) {
932 *refcount_cache_size = combined_cache_size - *l2_cache_size;
933 } else if (refcount_cache_size_set) {
934 *l2_cache_size = combined_cache_size - *refcount_cache_size;
935 } else {
936 /* Assign as much memory as possible to the L2 cache, and
937 * use the remainder for the refcount cache */
938 if (combined_cache_size >= max_l2_cache + min_refcount_cache) {
939 *l2_cache_size = max_l2_cache;
940 *refcount_cache_size = combined_cache_size - *l2_cache_size;
941 } else {
942 *refcount_cache_size =
943 MIN(combined_cache_size, min_refcount_cache);
944 *l2_cache_size = combined_cache_size - *refcount_cache_size;
945 }
946 }
947 }
948
949 /*
950 * If the L2 cache is not enough to cover the whole disk then
951 * default to 4KB entries. Smaller entries reduce the cost of
952 * loads and evictions and increase I/O performance.
953 */
954 if (*l2_cache_size < max_l2_cache && !l2_cache_entry_size_set) {
955 *l2_cache_entry_size = MIN(s->cluster_size, 4096);
956 }
957
958 /* l2_cache_size and refcount_cache_size are ensured to have at least
959 * their minimum values in qcow2_update_options_prepare() */
960
961 if (*l2_cache_entry_size < (1 << MIN_CLUSTER_BITS) ||
962 *l2_cache_entry_size > s->cluster_size ||
963 !is_power_of_2(*l2_cache_entry_size)) {
964 error_setg(errp, "L2 cache entry size must be a power of two "
965 "between %d and the cluster size (%d)",
966 1 << MIN_CLUSTER_BITS, s->cluster_size);
967 return false;
968 }
969
970 return true;
971 }
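/*
 * Worked example (illustrative numbers only): for a 1 TiB image with the
 * default 64 KiB clusters and 8-byte L2 entries, covering the whole disk
 * would need max_l2_entries = 2^40 / 2^16 = 16M entries, i.e. a 128 MiB
 * L2 cache.  Since the default maximum (DEFAULT_L2_CACHE_MAX_SIZE) is much
 * smaller than that, the cache cannot cover the whole disk, so unless the
 * user set l2-cache-entry-size explicitly the entry size is lowered to
 * 4 KiB to make partial caching cheaper.
 */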
972
973 typedef struct Qcow2ReopenState {
974 Qcow2Cache *l2_table_cache;
975 Qcow2Cache *refcount_block_cache;
976 int l2_slice_size; /* Number of entries in a slice of the L2 table */
977 bool use_lazy_refcounts;
978 int overlap_check;
979 bool discard_passthrough[QCOW2_DISCARD_MAX];
980 bool discard_no_unref;
981 uint64_t cache_clean_interval;
982 QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */
983 } Qcow2ReopenState;
984
985 static int GRAPH_RDLOCK
986 qcow2_update_options_prepare(BlockDriverState *bs, Qcow2ReopenState *r,
987 QDict *options, int flags, Error **errp)
988 {
989 BDRVQcow2State *s = bs->opaque;
990 QemuOpts *opts = NULL;
991 const char *opt_overlap_check, *opt_overlap_check_template;
992 int overlap_check_template = 0;
993 uint64_t l2_cache_size, l2_cache_entry_size, refcount_cache_size;
994 int i;
995 const char *encryptfmt;
996 QDict *encryptopts = NULL;
997 int ret;
998
999 qdict_extract_subqdict(options, &encryptopts, "encrypt.");
1000 encryptfmt = qdict_get_try_str(encryptopts, "format");
1001
1002 opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort);
1003 if (!qemu_opts_absorb_qdict(opts, options, errp)) {
1004 ret = -EINVAL;
1005 goto fail;
1006 }
1007
1008 /* get L2 table/refcount block cache size from command line options */
1009 if (!read_cache_sizes(bs, opts, &l2_cache_size, &l2_cache_entry_size,
1010 &refcount_cache_size, errp)) {
1011 ret = -EINVAL;
1012 goto fail;
1013 }
1014
1015 l2_cache_size /= l2_cache_entry_size;
1016 if (l2_cache_size < MIN_L2_CACHE_SIZE) {
1017 l2_cache_size = MIN_L2_CACHE_SIZE;
1018 }
1019 if (l2_cache_size > INT_MAX) {
1020 error_setg(errp, "L2 cache size too big");
1021 ret = -EINVAL;
1022 goto fail;
1023 }
1024
1025 refcount_cache_size /= s->cluster_size;
1026 if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) {
1027 refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE;
1028 }
1029 if (refcount_cache_size > INT_MAX) {
1030 error_setg(errp, "Refcount cache size too big");
1031 ret = -EINVAL;
1032 goto fail;
1033 }
1034
1035 /* alloc new L2 table/refcount block cache, flush old one */
1036 if (s->l2_table_cache) {
1037 ret = qcow2_cache_flush(bs, s->l2_table_cache);
1038 if (ret) {
1039 error_setg_errno(errp, -ret, "Failed to flush the L2 table cache");
1040 goto fail;
1041 }
1042 }
1043
1044 if (s->refcount_block_cache) {
1045 ret = qcow2_cache_flush(bs, s->refcount_block_cache);
1046 if (ret) {
1047 error_setg_errno(errp, -ret,
1048 "Failed to flush the refcount block cache");
1049 goto fail;
1050 }
1051 }
1052
1053 r->l2_slice_size = l2_cache_entry_size / l2_entry_size(s);
1054 r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size,
1055 l2_cache_entry_size);
1056 r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size,
1057 s->cluster_size);
1058 if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) {
1059 error_setg(errp, "Could not allocate metadata caches");
1060 ret = -ENOMEM;
1061 goto fail;
1062 }
1063
1064 /* New interval for cache cleanup timer */
1065 r->cache_clean_interval =
1066 qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL,
1067 DEFAULT_CACHE_CLEAN_INTERVAL);
1068 #ifndef CONFIG_LINUX
1069 if (r->cache_clean_interval != 0) {
1070 error_setg(errp, QCOW2_OPT_CACHE_CLEAN_INTERVAL
1071 " not supported on this host");
1072 ret = -EINVAL;
1073 goto fail;
1074 }
1075 #endif
1076 if (r->cache_clean_interval > UINT_MAX) {
1077 error_setg(errp, "Cache clean interval too big");
1078 ret = -EINVAL;
1079 goto fail;
1080 }
1081
1082 /* lazy-refcounts; flush if going from enabled to disabled */
1083 r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,
1084 (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));
1085 if (r->use_lazy_refcounts && s->qcow_version < 3) {
1086 error_setg(errp, "Lazy refcounts require a qcow2 image with at least "
1087 "qemu 1.1 compatibility level");
1088 ret = -EINVAL;
1089 goto fail;
1090 }
1091
1092 if (s->use_lazy_refcounts && !r->use_lazy_refcounts) {
1093 ret = qcow2_mark_clean(bs);
1094 if (ret < 0) {
1095 error_setg_errno(errp, -ret, "Failed to disable lazy refcounts");
1096 goto fail;
1097 }
1098 }
1099
1100 /* Overlap check options */
1101 opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP);
1102 opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE);
1103 if (opt_overlap_check_template && opt_overlap_check &&
1104 strcmp(opt_overlap_check_template, opt_overlap_check))
1105 {
1106 error_setg(errp, "Conflicting values for qcow2 options '"
1107 QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE
1108 "' ('%s')", opt_overlap_check, opt_overlap_check_template);
1109 ret = -EINVAL;
1110 goto fail;
1111 }
1112 if (!opt_overlap_check) {
1113 opt_overlap_check = opt_overlap_check_template ?: "cached";
1114 }
1115
1116 if (!strcmp(opt_overlap_check, "none")) {
1117 overlap_check_template = 0;
1118 } else if (!strcmp(opt_overlap_check, "constant")) {
1119 overlap_check_template = QCOW2_OL_CONSTANT;
1120 } else if (!strcmp(opt_overlap_check, "cached")) {
1121 overlap_check_template = QCOW2_OL_CACHED;
1122 } else if (!strcmp(opt_overlap_check, "all")) {
1123 overlap_check_template = QCOW2_OL_ALL;
1124 } else {
1125 error_setg(errp, "Unsupported value '%s' for qcow2 option "
1126 "'overlap-check'. Allowed are any of the following: "
1127 "none, constant, cached, all", opt_overlap_check);
1128 ret = -EINVAL;
1129 goto fail;
1130 }
1131
1132 r->overlap_check = 0;
1133 for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) {
1134 /* overlap-check defines a template bitmask, but every flag may be
1135 * overwritten through the associated boolean option */
1136 r->overlap_check |=
1137 qemu_opt_get_bool(opts, overlap_bool_option_names[i],
1138 overlap_check_template & (1 << i)) << i;
1139 }
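    /*
     * In other words, the chosen template provides the default for each bit
     * and the per-bit boolean options override it.  For example (an
     * illustrative combination, not a recommendation), overlap-check=none
     * together with overlap-check.main-header=on enables only the main
     * header check.
     */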
1140
1141 r->discard_passthrough[QCOW2_DISCARD_NEVER] = false;
1142 r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true;
1143 r->discard_passthrough[QCOW2_DISCARD_REQUEST] =
1144 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST,
1145 flags & BDRV_O_UNMAP);
1146 r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] =
1147 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true);
1148 r->discard_passthrough[QCOW2_DISCARD_OTHER] =
1149 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false);
1150
1151 r->discard_no_unref = qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_NO_UNREF,
1152 false);
1153 if (r->discard_no_unref && s->qcow_version < 3) {
1154 error_setg(errp,
1155 "discard-no-unref is only supported since qcow2 version 3");
1156 ret = -EINVAL;
1157 goto fail;
1158 }
1159
1160 switch (s->crypt_method_header) {
1161 case QCOW_CRYPT_NONE:
1162 if (encryptfmt) {
1163 error_setg(errp, "No encryption in image header, but options "
1164 "specified format '%s'", encryptfmt);
1165 ret = -EINVAL;
1166 goto fail;
1167 }
1168 break;
1169
1170 case QCOW_CRYPT_AES:
1171 if (encryptfmt && !g_str_equal(encryptfmt, "aes")) {
1172 error_setg(errp,
1173 "Header reported 'aes' encryption format but "
1174 "options specify '%s'", encryptfmt);
1175 ret = -EINVAL;
1176 goto fail;
1177 }
1178 qdict_put_str(encryptopts, "format", "qcow");
1179 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp);
1180 if (!r->crypto_opts) {
1181 ret = -EINVAL;
1182 goto fail;
1183 }
1184 break;
1185
1186 case QCOW_CRYPT_LUKS:
1187 if (encryptfmt && !g_str_equal(encryptfmt, "luks")) {
1188 error_setg(errp,
1189 "Header reported 'luks' encryption format but "
1190 "options specify '%s'", encryptfmt);
1191 ret = -EINVAL;
1192 goto fail;
1193 }
1194 qdict_put_str(encryptopts, "format", "luks");
1195 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp);
1196 if (!r->crypto_opts) {
1197 ret = -EINVAL;
1198 goto fail;
1199 }
1200 break;
1201
1202 default:
1203 error_setg(errp, "Unsupported encryption method %d",
1204 s->crypt_method_header);
1205 ret = -EINVAL;
1206 goto fail;
1207 }
1208
1209 ret = 0;
1210 fail:
1211 qobject_unref(encryptopts);
1212 qemu_opts_del(opts);
1213 opts = NULL;
1214 return ret;
1215 }
1216
1217 static void qcow2_update_options_commit(BlockDriverState *bs,
1218 Qcow2ReopenState *r)
1219 {
1220 BDRVQcow2State *s = bs->opaque;
1221 int i;
1222
1223 if (s->l2_table_cache) {
1224 qcow2_cache_destroy(s->l2_table_cache);
1225 }
1226 if (s->refcount_block_cache) {
1227 qcow2_cache_destroy(s->refcount_block_cache);
1228 }
1229 s->l2_table_cache = r->l2_table_cache;
1230 s->refcount_block_cache = r->refcount_block_cache;
1231 s->l2_slice_size = r->l2_slice_size;
1232
1233 s->overlap_check = r->overlap_check;
1234 s->use_lazy_refcounts = r->use_lazy_refcounts;
1235
1236 for (i = 0; i < QCOW2_DISCARD_MAX; i++) {
1237 s->discard_passthrough[i] = r->discard_passthrough[i];
1238 }
1239
1240 s->discard_no_unref = r->discard_no_unref;
1241
1242 if (s->cache_clean_interval != r->cache_clean_interval) {
1243 cache_clean_timer_del(bs);
1244 s->cache_clean_interval = r->cache_clean_interval;
1245 cache_clean_timer_init(bs, bdrv_get_aio_context(bs));
1246 }
1247
1248 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
1249 s->crypto_opts = r->crypto_opts;
1250 }
1251
1252 static void qcow2_update_options_abort(BlockDriverState *bs,
1253 Qcow2ReopenState *r)
1254 {
1255 if (r->l2_table_cache) {
1256 qcow2_cache_destroy(r->l2_table_cache);
1257 }
1258 if (r->refcount_block_cache) {
1259 qcow2_cache_destroy(r->refcount_block_cache);
1260 }
1261 qapi_free_QCryptoBlockOpenOptions(r->crypto_opts);
1262 }
1263
1264 static int coroutine_fn GRAPH_RDLOCK
1265 qcow2_update_options(BlockDriverState *bs, QDict *options, int flags,
1266 Error **errp)
1267 {
1268 Qcow2ReopenState r = {};
1269 int ret;
1270
1271 ret = qcow2_update_options_prepare(bs, &r, options, flags, errp);
1272 if (ret >= 0) {
1273 qcow2_update_options_commit(bs, &r);
1274 } else {
1275 qcow2_update_options_abort(bs, &r);
1276 }
1277
1278 return ret;
1279 }
1280
1281 static int validate_compression_type(BDRVQcow2State *s, Error **errp)
1282 {
1283 switch (s->compression_type) {
1284 case QCOW2_COMPRESSION_TYPE_ZLIB:
1285 #ifdef CONFIG_ZSTD
1286 case QCOW2_COMPRESSION_TYPE_ZSTD:
1287 #endif
1288 break;
1289
1290 default:
1291 error_setg(errp, "qcow2: unknown compression type: %u",
1292 s->compression_type);
1293 return -ENOTSUP;
1294 }
1295
1296 /*
1297 * if the compression type differs from QCOW2_COMPRESSION_TYPE_ZLIB
1298 * the incompatible feature flag must be set
1299 */
1300 if (s->compression_type == QCOW2_COMPRESSION_TYPE_ZLIB) {
1301 if (s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION) {
1302 error_setg(errp, "qcow2: Compression type incompatible feature "
1303 "bit must not be set");
1304 return -EINVAL;
1305 }
1306 } else {
1307 if (!(s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION)) {
1308 error_setg(errp, "qcow2: Compression type incompatible feature "
1309 "bit must be set");
1310 return -EINVAL;
1311 }
1312 }
1313
1314 return 0;
1315 }
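/*
 * Put differently: a zlib-compressed (or uncompressed) image must not have
 * the compression type incompatible feature bit set, while an image using
 * any other compression type (currently only zstd, and only when QEMU was
 * built with CONFIG_ZSTD) must have it set, so that older binaries refuse
 * to open data they cannot decompress correctly.
 */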
1316
1317 /* Called with s->lock held. */
1318 static int coroutine_fn GRAPH_RDLOCK
1319 qcow2_do_open(BlockDriverState *bs, QDict *options, int flags,
1320 bool open_data_file, Error **errp)
1321 {
1322 ERRP_GUARD();
1323 BDRVQcow2State *s = bs->opaque;
1324 unsigned int len, i;
1325 int ret = 0;
1326 QCowHeader header;
1327 uint64_t ext_end;
1328 uint64_t l1_vm_state_index;
1329 bool update_header = false;
1330
1331 ret = bdrv_co_pread(bs->file, 0, sizeof(header), &header, 0);
1332 if (ret < 0) {
1333 error_setg_errno(errp, -ret, "Could not read qcow2 header");
1334 goto fail;
1335 }
1336 header.magic = be32_to_cpu(header.magic);
1337 header.version = be32_to_cpu(header.version);
1338 header.backing_file_offset = be64_to_cpu(header.backing_file_offset);
1339 header.backing_file_size = be32_to_cpu(header.backing_file_size);
1340 header.size = be64_to_cpu(header.size);
1341 header.cluster_bits = be32_to_cpu(header.cluster_bits);
1342 header.crypt_method = be32_to_cpu(header.crypt_method);
1343 header.l1_table_offset = be64_to_cpu(header.l1_table_offset);
1344 header.l1_size = be32_to_cpu(header.l1_size);
1345 header.refcount_table_offset = be64_to_cpu(header.refcount_table_offset);
1346 header.refcount_table_clusters =
1347 be32_to_cpu(header.refcount_table_clusters);
1348 header.snapshots_offset = be64_to_cpu(header.snapshots_offset);
1349 header.nb_snapshots = be32_to_cpu(header.nb_snapshots);
1350
1351 if (header.magic != QCOW_MAGIC) {
1352 error_setg(errp, "Image is not in qcow2 format");
1353 ret = -EINVAL;
1354 goto fail;
1355 }
1356 if (header.version < 2 || header.version > 3) {
1357 error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version);
1358 ret = -ENOTSUP;
1359 goto fail;
1360 }
1361
1362 s->qcow_version = header.version;
1363
1364 /* Initialise cluster size */
1365 if (header.cluster_bits < MIN_CLUSTER_BITS ||
1366 header.cluster_bits > MAX_CLUSTER_BITS) {
1367 error_setg(errp, "Unsupported cluster size: 2^%" PRIu32,
1368 header.cluster_bits);
1369 ret = -EINVAL;
1370 goto fail;
1371 }
1372
1373 s->cluster_bits = header.cluster_bits;
1374 s->cluster_size = 1 << s->cluster_bits;
1375
1376 /* Initialise version 3 header fields */
1377 if (header.version == 2) {
1378 header.incompatible_features = 0;
1379 header.compatible_features = 0;
1380 header.autoclear_features = 0;
1381 header.refcount_order = 4;
1382 header.header_length = 72;
1383 } else {
1384 header.incompatible_features =
1385 be64_to_cpu(header.incompatible_features);
1386 header.compatible_features = be64_to_cpu(header.compatible_features);
1387 header.autoclear_features = be64_to_cpu(header.autoclear_features);
1388 header.refcount_order = be32_to_cpu(header.refcount_order);
1389 header.header_length = be32_to_cpu(header.header_length);
1390
1391 if (header.header_length < 104) {
1392 error_setg(errp, "qcow2 header too short");
1393 ret = -EINVAL;
1394 goto fail;
1395 }
1396 }
1397
1398 if (header.header_length > s->cluster_size) {
1399 error_setg(errp, "qcow2 header exceeds cluster size");
1400 ret = -EINVAL;
1401 goto fail;
1402 }
1403
1404 if (header.header_length > sizeof(header)) {
1405 s->unknown_header_fields_size = header.header_length - sizeof(header);
1406 s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
1407 ret = bdrv_co_pread(bs->file, sizeof(header),
1408 s->unknown_header_fields_size,
1409 s->unknown_header_fields, 0);
1410 if (ret < 0) {
1411 error_setg_errno(errp, -ret, "Could not read unknown qcow2 header "
1412 "fields");
1413 goto fail;
1414 }
1415 }
1416
1417 if (header.backing_file_offset > s->cluster_size) {
1418 error_setg(errp, "Invalid backing file offset");
1419 ret = -EINVAL;
1420 goto fail;
1421 }
1422
1423 if (header.backing_file_offset) {
1424 ext_end = header.backing_file_offset;
1425 } else {
1426 ext_end = 1 << header.cluster_bits;
1427 }
1428
1429 /* Handle feature bits */
1430 s->incompatible_features = header.incompatible_features;
1431 s->compatible_features = header.compatible_features;
1432 s->autoclear_features = header.autoclear_features;
1433
1434 /*
1435 * Handle compression type
1436 * Older qcow2 images don't contain the compression type header.
1437 * Distinguish them by the header length and use
1438 * the only valid (default) compression type in that case
1439 */
1440 if (header.header_length > offsetof(QCowHeader, compression_type)) {
1441 s->compression_type = header.compression_type;
1442 } else {
1443 s->compression_type = QCOW2_COMPRESSION_TYPE_ZLIB;
1444 }
1445
1446 ret = validate_compression_type(s, errp);
1447 if (ret) {
1448 goto fail;
1449 }
1450
1451 if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
1452 void *feature_table = NULL;
1453 qcow2_read_extensions(bs, header.header_length, ext_end,
1454 &feature_table, flags, NULL, NULL);
1455 report_unsupported_feature(errp, feature_table,
1456 s->incompatible_features &
1457 ~QCOW2_INCOMPAT_MASK);
1458 ret = -ENOTSUP;
1459 g_free(feature_table);
1460 goto fail;
1461 }
1462
1463 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
1464 /* Corrupt images may not be written to unless they are being repaired
1465 */
1466 if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {
1467 error_setg(errp, "qcow2: Image is corrupt; cannot be opened "
1468 "read/write");
1469 ret = -EACCES;
1470 goto fail;
1471 }
1472 }
1473
1474 s->subclusters_per_cluster =
1475 has_subclusters(s) ? QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER : 1;
1476 s->subcluster_size = s->cluster_size / s->subclusters_per_cluster;
1477 s->subcluster_bits = ctz32(s->subcluster_size);
1478
1479 if (s->subcluster_size < (1 << MIN_CLUSTER_BITS)) {
1480 error_setg(errp, "Unsupported subcluster size: %d", s->subcluster_size);
1481 ret = -EINVAL;
1482 goto fail;
1483 }
1484
1485 /* Check support for various header values */
1486 if (header.refcount_order > 6) {
1487 error_setg(errp, "Reference count entry width too large; may not "
1488 "exceed 64 bits");
1489 ret = -EINVAL;
1490 goto fail;
1491 }
1492 s->refcount_order = header.refcount_order;
1493 s->refcount_bits = 1 << s->refcount_order;
1494 s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1);
1495 s->refcount_max += s->refcount_max - 1;
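    /*
     * The two-step computation above yields 2^refcount_bits - 1 without an
     * undefined 64-bit shift when refcount_bits == 64: e.g. the default
     * refcount_order = 4 gives refcount_bits = 16 and refcount_max = 0xffff,
     * while refcount_order = 6 gives refcount_bits = 64 and
     * refcount_max = UINT64_MAX without ever shifting by 64.
     */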
1496
1497 s->crypt_method_header = header.crypt_method;
1498 if (s->crypt_method_header) {
1499 if (bdrv_uses_whitelist() &&
1500 s->crypt_method_header == QCOW_CRYPT_AES) {
1501 error_setg(errp,
1502 "Use of AES-CBC encrypted qcow2 images is no longer "
1503 "supported in system emulators");
1504 error_append_hint(errp,
1505 "You can use 'qemu-img convert' to convert your "
1506 "image to an alternative supported format, such "
1507 "as unencrypted qcow2, or raw with the LUKS "
1508 "format instead.\n");
1509 ret = -ENOSYS;
1510 goto fail;
1511 }
1512
1513 if (s->crypt_method_header == QCOW_CRYPT_AES) {
1514 s->crypt_physical_offset = false;
1515 } else {
1516 /* Assuming LUKS and any future crypt methods we
1517 * add will all use physical offsets, due to the
1518 * fact that the alternative is insecure... */
1519 s->crypt_physical_offset = true;
1520 }
1521
1522 bs->encrypted = true;
1523 }
1524
1525 s->l2_bits = s->cluster_bits - ctz32(l2_entry_size(s));
1526 s->l2_size = 1 << s->l2_bits;
1527 /* 2^(s->refcount_order - 3) is the refcount width in bytes */
1528 s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3);
1529 s->refcount_block_size = 1 << s->refcount_block_bits;
1530 bs->total_sectors = header.size / BDRV_SECTOR_SIZE;
1531 s->csize_shift = (62 - (s->cluster_bits - 8));
1532 s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
1533 s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
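    /*
     * Example with the default 64 KiB clusters (cluster_bits = 16):
     * compressed cluster descriptors then use the low csize_shift = 62 - 8
     * = 54 bits for the host offset and the next cluster_bits - 8 = 8 bits
     * for the compressed size in 512-byte sectors, so csize_mask = 0xff and
     * cluster_offset_mask = (1 << 54) - 1.
     */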
1534
1535 s->refcount_table_offset = header.refcount_table_offset;
1536 s->refcount_table_size =
1537 header.refcount_table_clusters << (s->cluster_bits - 3);
1538
1539 if (header.refcount_table_clusters == 0 && !(flags & BDRV_O_CHECK)) {
1540 error_setg(errp, "Image does not contain a reference count table");
1541 ret = -EINVAL;
1542 goto fail;
1543 }
1544
1545 ret = qcow2_validate_table(bs, s->refcount_table_offset,
1546 header.refcount_table_clusters,
1547 s->cluster_size, QCOW_MAX_REFTABLE_SIZE,
1548 "Reference count table", errp);
1549 if (ret < 0) {
1550 goto fail;
1551 }
1552
1553 if (!(flags & BDRV_O_CHECK)) {
1554 /*
1555 * The total size in bytes of the snapshot table is checked in
1556 * qcow2_read_snapshots() because the size of each snapshot is
1557 * variable and we don't know it yet.
1558 * Here we only check the offset and number of snapshots.
1559 */
1560 ret = qcow2_validate_table(bs, header.snapshots_offset,
1561 header.nb_snapshots,
1562 sizeof(QCowSnapshotHeader),
1563 sizeof(QCowSnapshotHeader) *
1564 QCOW_MAX_SNAPSHOTS,
1565 "Snapshot table", errp);
1566 if (ret < 0) {
1567 goto fail;
1568 }
1569 }
1570
1571 /* read the level 1 table */
1572 ret = qcow2_validate_table(bs, header.l1_table_offset,
1573 header.l1_size, L1E_SIZE,
1574 QCOW_MAX_L1_SIZE, "Active L1 table", errp);
1575 if (ret < 0) {
1576 goto fail;
1577 }
1578 s->l1_size = header.l1_size;
1579 s->l1_table_offset = header.l1_table_offset;
1580
1581 l1_vm_state_index = size_to_l1(s, header.size);
1582 if (l1_vm_state_index > INT_MAX) {
1583 error_setg(errp, "Image is too big");
1584 ret = -EFBIG;
1585 goto fail;
1586 }
1587 s->l1_vm_state_index = l1_vm_state_index;
1588
1589     /* the L1 table must contain at least enough entries to cover
1590 header.size bytes */
1591 if (s->l1_size < s->l1_vm_state_index) {
1592 error_setg(errp, "L1 table is too small");
1593 ret = -EINVAL;
1594 goto fail;
1595 }
1596
1597 if (s->l1_size > 0) {
1598 s->l1_table = qemu_try_blockalign(bs->file->bs, s->l1_size * L1E_SIZE);
1599 if (s->l1_table == NULL) {
1600 error_setg(errp, "Could not allocate L1 table");
1601 ret = -ENOMEM;
1602 goto fail;
1603 }
1604 ret = bdrv_co_pread(bs->file, s->l1_table_offset, s->l1_size * L1E_SIZE,
1605 s->l1_table, 0);
1606 if (ret < 0) {
1607 error_setg_errno(errp, -ret, "Could not read L1 table");
1608 goto fail;
1609 }
1610         for (i = 0; i < s->l1_size; i++) {
1611 s->l1_table[i] = be64_to_cpu(s->l1_table[i]);
1612 }
1613 }
1614
1615 /* Parse driver-specific options */
1616 ret = qcow2_update_options(bs, options, flags, errp);
1617 if (ret < 0) {
1618 goto fail;
1619 }
1620
1621 s->flags = flags;
1622
1623 ret = qcow2_refcount_init(bs);
1624 if (ret != 0) {
1625 error_setg_errno(errp, -ret, "Could not initialize refcount handling");
1626 goto fail;
1627 }
1628
1629 QLIST_INIT(&s->cluster_allocs);
1630 QTAILQ_INIT(&s->discards);
1631
1632 /* read qcow2 extensions */
1633 if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL,
1634 flags, &update_header, errp)) {
1635 ret = -EINVAL;
1636 goto fail;
1637 }
1638
1639 if (open_data_file) {
1640 /* Open external data file */
1641 bdrv_graph_co_rdunlock();
1642 s->data_file = bdrv_co_open_child(NULL, options, "data-file", bs,
1643 &child_of_bds, BDRV_CHILD_DATA,
1644 true, errp);
1645 bdrv_graph_co_rdlock();
1646 if (*errp) {
1647 ret = -EINVAL;
1648 goto fail;
1649 }
1650
1651 if (s->incompatible_features & QCOW2_INCOMPAT_DATA_FILE) {
1652 if (!s->data_file && s->image_data_file) {
1653 bdrv_graph_co_rdunlock();
1654 s->data_file = bdrv_co_open_child(s->image_data_file, options,
1655 "data-file", bs,
1656 &child_of_bds,
1657 BDRV_CHILD_DATA, false, errp);
1658 bdrv_graph_co_rdlock();
1659 if (!s->data_file) {
1660 ret = -EINVAL;
1661 goto fail;
1662 }
1663 }
1664 if (!s->data_file) {
1665 error_setg(errp, "'data-file' is required for this image");
1666 ret = -EINVAL;
1667 goto fail;
1668 }
1669
1670 /* No data here */
1671 bs->file->role &= ~BDRV_CHILD_DATA;
1672
1673 /* Must succeed because we have given up permissions if anything */
1674 bdrv_child_refresh_perms(bs, bs->file, &error_abort);
1675 } else {
1676 if (s->data_file) {
1677 error_setg(errp, "'data-file' can only be set for images with "
1678 "an external data file");
1679 ret = -EINVAL;
1680 goto fail;
1681 }
1682
1683 s->data_file = bs->file;
1684
1685 if (data_file_is_raw(bs)) {
1686 error_setg(errp, "data-file-raw requires a data file");
1687 ret = -EINVAL;
1688 goto fail;
1689 }
1690 }
1691 }
1692
1693     /* qcow2_read_extensions() may have set up the crypto context
1694      * if the crypt method needs a header region; some methods
1695      * don't need header extensions, so we must check here
1696      */
1697 if (s->crypt_method_header && !s->crypto) {
1698 if (s->crypt_method_header == QCOW_CRYPT_AES) {
1699 unsigned int cflags = 0;
1700 if (flags & BDRV_O_NO_IO) {
1701 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
1702 }
1703 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
1704 NULL, NULL, cflags,
1705 QCOW2_MAX_THREADS, errp);
1706 if (!s->crypto) {
1707 ret = -EINVAL;
1708 goto fail;
1709 }
1710 } else if (!(flags & BDRV_O_NO_IO)) {
1711 error_setg(errp, "Missing CRYPTO header for crypt method %d",
1712 s->crypt_method_header);
1713 ret = -EINVAL;
1714 goto fail;
1715 }
1716 }
1717
1718 /* read the backing file name */
1719 if (header.backing_file_offset != 0) {
1720 len = header.backing_file_size;
1721 if (len > MIN(1023, s->cluster_size - header.backing_file_offset) ||
1722 len >= sizeof(bs->backing_file)) {
1723 error_setg(errp, "Backing file name too long");
1724 ret = -EINVAL;
1725 goto fail;
1726 }
1727
1728 s->image_backing_file = g_malloc(len + 1);
1729 ret = bdrv_co_pread(bs->file, header.backing_file_offset, len,
1730 s->image_backing_file, 0);
1731 if (ret < 0) {
1732 error_setg_errno(errp, -ret, "Could not read backing file name");
1733 goto fail;
1734 }
1735 s->image_backing_file[len] = '\0';
1736
1737 /*
1738 * Update only when something has changed. This function is called by
1739 * qcow2_co_invalidate_cache(), and we do not want to reset
1740 * auto_backing_file unless necessary.
1741 */
1742 if (!g_str_equal(s->image_backing_file, bs->backing_file)) {
1743 pstrcpy(bs->backing_file, sizeof(bs->backing_file),
1744 s->image_backing_file);
1745 pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file),
1746 s->image_backing_file);
1747 }
1748 }
1749
1750 /*
1751 * Internal snapshots; skip reading them in check mode, because
1752 * we do not need them then, and we do not want to abort because
1753 * of a broken table.
1754 */
1755 if (!(flags & BDRV_O_CHECK)) {
1756 s->snapshots_offset = header.snapshots_offset;
1757 s->nb_snapshots = header.nb_snapshots;
1758
1759 ret = qcow2_read_snapshots(bs, errp);
1760 if (ret < 0) {
1761 goto fail;
1762 }
1763 }
1764
1765 /* Clear unknown autoclear feature bits */
1766 update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK;
1767 update_header = update_header && bdrv_is_writable(bs);
1768 if (update_header) {
1769 s->autoclear_features &= QCOW2_AUTOCLEAR_MASK;
1770 }
1771
1772 /* == Handle persistent dirty bitmaps ==
1773 *
1774      * We want to load dirty bitmaps in three cases:
1775 *
1776 * 1. Normal open of the disk in active mode, not related to invalidation
1777 * after migration.
1778 *
1779 * 2. Invalidation of the target vm after pre-copy phase of migration, if
1780 * bitmaps are _not_ migrating through migration channel, i.e.
1781 * 'dirty-bitmaps' capability is disabled.
1782 *
1783 * 3. Invalidation of source vm after failed or canceled migration.
1784 * This is a very interesting case. There are two possible types of
1785 * bitmaps:
1786 *
1787 * A. Stored on inactivation and removed. They should be loaded from the
1788 * image.
1789 *
1790 * B. Not stored: not-persistent bitmaps and bitmaps, migrated through
1791 * the migration channel (with dirty-bitmaps capability).
1792 *
1793 * On the other hand, there are two possible sub-cases:
1794 *
1795 * 3.1 The disk was changed by somebody else while we were inactive. In
1796 * this case all in-RAM dirty bitmaps (both persistent and not) are
1797 * definitely invalid, and we have no way to detect this.
1798 *
1799 * The simple and safe thing is to just drop all the bitmaps of type B
1800 * on inactivation. But then we would lose bitmaps in the valid
1801 * case 3.2 below.
1802 *
1803 * On the other hand, resuming the source VM after the disk has
1804 * already been changed is a bad thing anyway: not only the bitmaps,
1805 * but the whole VM state is out of sync with the disk.
1806 *
1807 * This means that a user or management tool who, for some reason,
1808 * decided to resume the source VM after the disk was already changed
1809 * by the target VM should at least drop all dirty bitmaps by hand.
1810 *
1811 * So, we can ignore this case for now, but TODO: add a "generation"
1812 * extension to qcow2 to determine whether the image was changed after
1813 * the last inactivation. If it was changed, we will drop (or at least
1814 * mark as 'invalid') all the bitmaps of type B, both persistent
1815 * and not.
1816 *
1817 * 3.2 The disk was _not_ changed while we were inactive. Bitmaps may be
1818 * saved to disk ('dirty-bitmaps' capability disabled), or not saved
1819 * ('dirty-bitmaps' capability enabled), but we don't need to care:
1820 * just load bitmaps as always. Stored bitmaps will be loaded, and
1821 * bitmaps that were not stored have the IN_USE=1 flag in the image
1822 * and will be skipped on loading.
1823 *
1824 * One remaining case in which we don't want to load bitmaps:
1825 *
1826 * 4. Opening the disk in inactive mode in the target VM (bitmaps are
1827 * migrating or will be loaded on invalidation; no need to load them earlier)
1828 */
1829
1830 if (!(bdrv_get_flags(bs) & BDRV_O_INACTIVE)) {
1831 /* It's case 1, 2 or 3.2. Or 3.1, which is a BUG in the management layer. */
1832 bool header_updated;
1833 if (!qcow2_load_dirty_bitmaps(bs, &header_updated, errp)) {
1834 ret = -EINVAL;
1835 goto fail;
1836 }
1837
1838 update_header = update_header && !header_updated;
1839 }
1840
1841 if (update_header) {
1842 ret = qcow2_update_header(bs);
1843 if (ret < 0) {
1844 error_setg_errno(errp, -ret, "Could not update qcow2 header");
1845 goto fail;
1846 }
1847 }
1848
1849 bs->supported_zero_flags = header.version >= 3 ?
1850 BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK : 0;
1851 bs->supported_truncate_flags = BDRV_REQ_ZERO_WRITE;
1852
1853 /* Repair image if dirty */
1854 if (!(flags & BDRV_O_CHECK) && bdrv_is_writable(bs) &&
1855 (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) {
1856 BdrvCheckResult result = {0};
1857
1858 ret = qcow2_co_check_locked(bs, &result,
1859 BDRV_FIX_ERRORS | BDRV_FIX_LEAKS);
1860 if (ret < 0 || result.check_errors) {
1861 if (ret >= 0) {
1862 ret = -EIO;
1863 }
1864 error_setg_errno(errp, -ret, "Could not repair dirty image");
1865 goto fail;
1866 }
1867 }
1868
1869 #ifdef DEBUG_ALLOC
1870 {
1871 BdrvCheckResult result = {0};
1872 qcow2_check_refcounts(bs, &result, 0);
1873 }
1874 #endif
1875
1876 qemu_co_queue_init(&s->thread_task_queue);
1877
1878 return ret;
1879
1880 fail:
1881 g_free(s->image_data_file);
1882 if (open_data_file && has_data_file(bs)) {
1883 bdrv_graph_co_rdunlock();
1884 bdrv_co_unref_child(bs, s->data_file);
1885 bdrv_graph_co_rdlock();
1886 s->data_file = NULL;
1887 }
1888 g_free(s->unknown_header_fields);
1889 cleanup_unknown_header_ext(bs);
1890 qcow2_free_snapshots(bs);
1891 qcow2_refcount_close(bs);
1892 qemu_vfree(s->l1_table);
1893 /* else pre-write overlap checks in cache_destroy may crash */
1894 s->l1_table = NULL;
1895 cache_clean_timer_del(bs);
1896 if (s->l2_table_cache) {
1897 qcow2_cache_destroy(s->l2_table_cache);
1898 }
1899 if (s->refcount_block_cache) {
1900 qcow2_cache_destroy(s->refcount_block_cache);
1901 }
1902 qcrypto_block_free(s->crypto);
1903 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
1904 return ret;
1905 }
1906
1907 typedef struct QCow2OpenCo {
1908 BlockDriverState *bs;
1909 QDict *options;
1910 int flags;
1911 Error **errp;
1912 int ret;
1913 } QCow2OpenCo;
1914
1915 static void coroutine_fn qcow2_open_entry(void *opaque)
1916 {
1917 QCow2OpenCo *qoc = opaque;
1918 BDRVQcow2State *s = qoc->bs->opaque;
1919
1920 GRAPH_RDLOCK_GUARD();
1921
1922 qemu_co_mutex_lock(&s->lock);
1923 qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, true,
1924 qoc->errp);
1925 qemu_co_mutex_unlock(&s->lock);
1926
1927 aio_wait_kick();
1928 }
1929
1930 static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
1931 Error **errp)
1932 {
1933 BDRVQcow2State *s = bs->opaque;
1934 QCow2OpenCo qoc = {
1935 .bs = bs,
1936 .options = options,
1937 .flags = flags,
1938 .errp = errp,
1939 .ret = -EINPROGRESS
1940 };
1941 int ret;
1942
1943 ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
1944 if (ret < 0) {
1945 return ret;
1946 }
1947
1948 /* Initialise locks */
1949 qemu_co_mutex_init(&s->lock);
1950
1951 assert(!qemu_in_coroutine());
1952 assert(qemu_get_current_aio_context() == qemu_get_aio_context());
1953
1954 aio_co_enter(bdrv_get_aio_context(bs),
1955 qemu_coroutine_create(qcow2_open_entry, &qoc));
1956 AIO_WAIT_WHILE_UNLOCKED(NULL, qoc.ret == -EINPROGRESS);
1957
1958 return qoc.ret;
1959 }
1960
1961 static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp)
1962 {
1963 BDRVQcow2State *s = bs->opaque;
1964
1965 if (bs->encrypted) {
1966 /* Encryption works on a sector granularity */
1967 bs->bl.request_alignment = qcrypto_block_get_sector_size(s->crypto);
1968 }
1969 bs->bl.pwrite_zeroes_alignment = s->subcluster_size;
1970 bs->bl.pdiscard_alignment = s->cluster_size;
1971 }
1972
1973 static int GRAPH_UNLOCKED
1974 qcow2_reopen_prepare(BDRVReopenState *state, BlockReopenQueue *queue,
1975 Error **errp)
1976 {
1977 BDRVQcow2State *s = state->bs->opaque;
1978 Qcow2ReopenState *r;
1979 int ret;
1980
1981 GLOBAL_STATE_CODE();
1982 GRAPH_RDLOCK_GUARD_MAINLOOP();
1983
1984 r = g_new0(Qcow2ReopenState, 1);
1985 state->opaque = r;
1986
1987 ret = qcow2_update_options_prepare(state->bs, r, state->options,
1988 state->flags, errp);
1989 if (ret < 0) {
1990 goto fail;
1991 }
1992
1993 /* We need to write out any unwritten data if we reopen read-only. */
1994 if ((state->flags & BDRV_O_RDWR) == 0) {
1995 ret = qcow2_reopen_bitmaps_ro(state->bs, errp);
1996 if (ret < 0) {
1997 goto fail;
1998 }
1999
2000 ret = bdrv_flush(state->bs);
2001 if (ret < 0) {
2002 goto fail;
2003 }
2004
2005 ret = qcow2_mark_clean(state->bs);
2006 if (ret < 0) {
2007 goto fail;
2008 }
2009 }
2010
2011 /*
2012 * Without an external data file, s->data_file points to the same BdrvChild
2013 * as bs->file. It needs to be resynced after reopen because bs->file may
2014 * be changed. We can't use it in the meantime.
2015 */
2016 if (!has_data_file(state->bs)) {
2017 assert(s->data_file == state->bs->file);
2018 s->data_file = NULL;
2019 }
2020
2021 return 0;
2022
2023 fail:
2024 qcow2_update_options_abort(state->bs, r);
2025 g_free(r);
2026 return ret;
2027 }
2028
2029 static void qcow2_reopen_commit(BDRVReopenState *state)
2030 {
2031 BDRVQcow2State *s = state->bs->opaque;
2032
2033 GRAPH_RDLOCK_GUARD_MAINLOOP();
2034
2035 qcow2_update_options_commit(state->bs, state->opaque);
2036 if (!s->data_file) {
2037 /*
2038 * If we don't have an external data file, s->data_file was cleared by
2039 * qcow2_reopen_prepare() and needs to be updated.
2040 */
2041 s->data_file = state->bs->file;
2042 }
2043 g_free(state->opaque);
2044 }
2045
2046 static void qcow2_reopen_commit_post(BDRVReopenState *state)
2047 {
2048 GRAPH_RDLOCK_GUARD_MAINLOOP();
2049
2050 if (state->flags & BDRV_O_RDWR) {
2051 Error *local_err = NULL;
2052
2053 if (qcow2_reopen_bitmaps_rw(state->bs, &local_err) < 0) {
2054 /*
2055 * This is not fatal: the bitmaps are just left read-only, so all
2056 * following writes will fail. The user can remove read-only bitmaps
2057 * to unblock writes, or retry the reopen.
2058 */
2059 error_reportf_err(local_err,
2060 "%s: Failed to make dirty bitmaps writable: ",
2061 bdrv_get_node_name(state->bs));
2062 }
2063 }
2064 }
2065
2066 static void qcow2_reopen_abort(BDRVReopenState *state)
2067 {
2068 BDRVQcow2State *s = state->bs->opaque;
2069
2070 GRAPH_RDLOCK_GUARD_MAINLOOP();
2071
2072 if (!s->data_file) {
2073 /*
2074 * If we don't have an external data file, s->data_file was cleared by
2075 * qcow2_reopen_prepare() and needs to be restored.
2076 */
2077 s->data_file = state->bs->file;
2078 }
2079 qcow2_update_options_abort(state->bs, state->opaque);
2080 g_free(state->opaque);
2081 }
2082
2083 static void qcow2_join_options(QDict *options, QDict *old_options)
2084 {
2085 bool has_new_overlap_template =
2086 qdict_haskey(options, QCOW2_OPT_OVERLAP) ||
2087 qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE);
2088 bool has_new_total_cache_size =
2089 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE);
2090 bool has_all_cache_options;
2091
2092 /* New overlap template overrides all old overlap options */
2093 if (has_new_overlap_template) {
2094 qdict_del(old_options, QCOW2_OPT_OVERLAP);
2095 qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE);
2096 qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER);
2097 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1);
2098 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2);
2099 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE);
2100 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK);
2101 qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE);
2102 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1);
2103 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2);
2104 }
2105
2106 /* New total cache size overrides all old options */
2107 if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) {
2108 qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE);
2109 qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
2110 }
2111
2112 qdict_join(options, old_options, false);
2113
2114 /*
2115 * If, after merging, all cache size options are set, an old total size is
2116 * dropped. Do keep all options, however, if all three are new: the
2117 * resulting error message is what we want to happen.
2118 */
2119 has_all_cache_options =
2120 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) ||
2121 qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) ||
2122 qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
2123
2124 if (has_all_cache_options && !has_new_total_cache_size) {
2125 qdict_del(options, QCOW2_OPT_CACHE_SIZE);
2126 }
2127 }
2128
2129 static int coroutine_fn GRAPH_RDLOCK
2130 qcow2_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset,
2131 int64_t count, int64_t *pnum, int64_t *map,
2132 BlockDriverState **file)
2133 {
2134 BDRVQcow2State *s = bs->opaque;
2135 uint64_t host_offset;
2136 unsigned int bytes;
2137 QCow2SubclusterType type;
2138 int ret, status = 0;
2139
2140 qemu_co_mutex_lock(&s->lock);
2141
2142 if (!s->metadata_preallocation_checked) {
2143 ret = qcow2_detect_metadata_preallocation(bs);
2144 s->metadata_preallocation = (ret == 1);
2145 s->metadata_preallocation_checked = true;
2146 }
2147
2148 bytes = MIN(INT_MAX, count);
2149 ret = qcow2_get_host_offset(bs, offset, &bytes, &host_offset, &type);
2150 qemu_co_mutex_unlock(&s->lock);
2151 if (ret < 0) {
2152 return ret;
2153 }
2154
2155 *pnum = bytes;
2156
2157 if ((type == QCOW2_SUBCLUSTER_NORMAL ||
2158 type == QCOW2_SUBCLUSTER_ZERO_ALLOC ||
2159 type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC) && !s->crypto) {
2160 *map = host_offset;
2161 *file = s->data_file->bs;
2162 status |= BDRV_BLOCK_OFFSET_VALID;
2163 }
2164 if (type == QCOW2_SUBCLUSTER_ZERO_PLAIN ||
2165 type == QCOW2_SUBCLUSTER_ZERO_ALLOC) {
2166 status |= BDRV_BLOCK_ZERO;
2167 } else if (type != QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN &&
2168 type != QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC) {
2169 status |= BDRV_BLOCK_DATA;
2170 }
2171 if (s->metadata_preallocation && (status & BDRV_BLOCK_DATA) &&
2172 (status & BDRV_BLOCK_OFFSET_VALID))
2173 {
2174 status |= BDRV_BLOCK_RECURSE;
2175 }
2176 if (type == QCOW2_SUBCLUSTER_COMPRESSED) {
2177 status |= BDRV_BLOCK_COMPRESSED;
2178 }
2179 return status;
2180 }
2181
2182 static int coroutine_fn GRAPH_RDLOCK
2183 qcow2_handle_l2meta(BlockDriverState *bs, QCowL2Meta **pl2meta, bool link_l2)
2184 {
2185 int ret = 0;
2186 QCowL2Meta *l2meta = *pl2meta;
2187
2188 while (l2meta != NULL) {
2189 QCowL2Meta *next;
2190
2191 if (link_l2) {
2192 ret = qcow2_alloc_cluster_link_l2(bs, l2meta);
2193 if (ret) {
2194 goto out;
2195 }
2196 } else {
2197 qcow2_alloc_cluster_abort(bs, l2meta);
2198 }
2199
2200 /* Take the request off the list of running requests */
2201 QLIST_REMOVE(l2meta, next_in_flight);
2202
2203 qemu_co_queue_restart_all(&l2meta->dependent_requests);
2204
2205 next = l2meta->next;
2206 g_free(l2meta);
2207 l2meta = next;
2208 }
2209 out:
2210 *pl2meta = l2meta;
2211 return ret;
2212 }
2213
2214 static int coroutine_fn GRAPH_RDLOCK
2215 qcow2_co_preadv_encrypted(BlockDriverState *bs,
2216 uint64_t host_offset,
2217 uint64_t offset,
2218 uint64_t bytes,
2219 QEMUIOVector *qiov,
2220 uint64_t qiov_offset)
2221 {
2222 int ret;
2223 BDRVQcow2State *s = bs->opaque;
2224 uint8_t *buf;
2225
2226 assert(bs->encrypted && s->crypto);
2227 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
2228
2229 /*
2230 * For encrypted images, read everything into a temporary
2231 * contiguous buffer on which the AES functions can work.
2232 * Also, decryption in a separate buffer is better as it
2233 * prevents the guest from learning information about the
2234 * encrypted nature of the virtual disk.
2235 */
2236
2237 buf = qemu_try_blockalign(s->data_file->bs, bytes);
2238 if (buf == NULL) {
2239 return -ENOMEM;
2240 }
2241
2242 BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);
2243 ret = bdrv_co_pread(s->data_file, host_offset, bytes, buf, 0);
2244 if (ret < 0) {
2245 goto fail;
2246 }
2247
2248 if (qcow2_co_decrypt(bs, host_offset, offset, buf, bytes) < 0)
2249 {
2250 ret = -EIO;
2251 goto fail;
2252 }
2253 qemu_iovec_from_buf(qiov, qiov_offset, buf, bytes);
2254
2255 fail:
2256 qemu_vfree(buf);
2257
2258 return ret;
2259 }
2260
2261 typedef struct Qcow2AioTask {
2262 AioTask task;
2263
2264 BlockDriverState *bs;
2265 QCow2SubclusterType subcluster_type; /* only for read */
2266 uint64_t host_offset; /* or l2_entry for compressed read */
2267 uint64_t offset;
2268 uint64_t bytes;
2269 QEMUIOVector *qiov;
2270 uint64_t qiov_offset;
2271 QCowL2Meta *l2meta; /* only for write */
2272 } Qcow2AioTask;
2273
2274 static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task);
2275 static coroutine_fn int qcow2_add_task(BlockDriverState *bs,
2276 AioTaskPool *pool,
2277 AioTaskFunc func,
2278 QCow2SubclusterType subcluster_type,
2279 uint64_t host_offset,
2280 uint64_t offset,
2281 uint64_t bytes,
2282 QEMUIOVector *qiov,
2283 size_t qiov_offset,
2284 QCowL2Meta *l2meta)
2285 {
2286 Qcow2AioTask local_task;
2287 Qcow2AioTask *task = pool ? g_new(Qcow2AioTask, 1) : &local_task;
2288
2289 *task = (Qcow2AioTask) {
2290 .task.func = func,
2291 .bs = bs,
2292 .subcluster_type = subcluster_type,
2293 .qiov = qiov,
2294 .host_offset = host_offset,
2295 .offset = offset,
2296 .bytes = bytes,
2297 .qiov_offset = qiov_offset,
2298 .l2meta = l2meta,
2299 };
2300
2301 trace_qcow2_add_task(qemu_coroutine_self(), bs, pool,
2302 func == qcow2_co_preadv_task_entry ? "read" : "write",
2303 subcluster_type, host_offset, offset, bytes,
2304 qiov, qiov_offset);
2305
2306 if (!pool) {
2307 return func(&task->task);
2308 }
2309
2310 aio_task_pool_start_task(pool, &task->task);
2311
2312 return 0;
2313 }
2314
2315 static int coroutine_fn GRAPH_RDLOCK
2316 qcow2_co_preadv_task(BlockDriverState *bs, QCow2SubclusterType subc_type,
2317 uint64_t host_offset, uint64_t offset, uint64_t bytes,
2318 QEMUIOVector *qiov, size_t qiov_offset)
2319 {
2320 BDRVQcow2State *s = bs->opaque;
2321
2322 switch (subc_type) {
2323 case QCOW2_SUBCLUSTER_ZERO_PLAIN:
2324 case QCOW2_SUBCLUSTER_ZERO_ALLOC:
2325 /* Both zero types are handled in qcow2_co_preadv_part */
2326 g_assert_not_reached();
2327
2328 case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
2329 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
2330 assert(bs->backing); /* otherwise handled in qcow2_co_preadv_part */
2331
2332 BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
2333 return bdrv_co_preadv_part(bs->backing, offset, bytes,
2334 qiov, qiov_offset, 0);
2335
2336 case QCOW2_SUBCLUSTER_COMPRESSED:
2337 return qcow2_co_preadv_compressed(bs, host_offset,
2338 offset, bytes, qiov, qiov_offset);
2339
2340 case QCOW2_SUBCLUSTER_NORMAL:
2341 if (bs->encrypted) {
2342 return qcow2_co_preadv_encrypted(bs, host_offset,
2343 offset, bytes, qiov, qiov_offset);
2344 }
2345
2346 BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);
2347 return bdrv_co_preadv_part(s->data_file, host_offset,
2348 bytes, qiov, qiov_offset, 0);
2349
2350 default:
2351 g_assert_not_reached();
2352 }
2353
2354 g_assert_not_reached();
2355 }
2356
2357 /*
2358 * This function can count as GRAPH_RDLOCK because qcow2_co_preadv_part() holds
2359 * the graph lock and keeps it until this coroutine has terminated.
2360 */
2361 static int coroutine_fn GRAPH_RDLOCK qcow2_co_preadv_task_entry(AioTask *task)
2362 {
2363 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task);
2364
2365 assert(!t->l2meta);
2366
2367 return qcow2_co_preadv_task(t->bs, t->subcluster_type,
2368 t->host_offset, t->offset, t->bytes,
2369 t->qiov, t->qiov_offset);
2370 }
2371
2372 static int coroutine_fn GRAPH_RDLOCK
2373 qcow2_co_preadv_part(BlockDriverState *bs, int64_t offset, int64_t bytes,
2374 QEMUIOVector *qiov, size_t qiov_offset,
2375 BdrvRequestFlags flags)
2376 {
2377 BDRVQcow2State *s = bs->opaque;
2378 int ret = 0;
2379 unsigned int cur_bytes; /* number of bytes in current iteration */
2380 uint64_t host_offset = 0;
2381 QCow2SubclusterType type;
2382 AioTaskPool *aio = NULL;
2383
2384 while (bytes != 0 && aio_task_pool_status(aio) == 0) {
2385 /* prepare next request */
2386 cur_bytes = MIN(bytes, INT_MAX);
2387 if (s->crypto) {
2388 cur_bytes = MIN(cur_bytes,
2389 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
2390 }
2391
2392 qemu_co_mutex_lock(&s->lock);
2393 ret = qcow2_get_host_offset(bs, offset, &cur_bytes,
2394 &host_offset, &type);
2395 qemu_co_mutex_unlock(&s->lock);
2396 if (ret < 0) {
2397 goto out;
2398 }
2399
2400 if (type == QCOW2_SUBCLUSTER_ZERO_PLAIN ||
2401 type == QCOW2_SUBCLUSTER_ZERO_ALLOC ||
2402 (type == QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN && !bs->backing) ||
2403 (type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC && !bs->backing))
2404 {
2405 qemu_iovec_memset(qiov, qiov_offset, 0, cur_bytes);
2406 } else {
2407 if (!aio && cur_bytes != bytes) {
2408 aio = aio_task_pool_new(QCOW2_MAX_WORKERS);
2409 }
2410 ret = qcow2_add_task(bs, aio, qcow2_co_preadv_task_entry, type,
2411 host_offset, offset, cur_bytes,
2412 qiov, qiov_offset, NULL);
2413 if (ret < 0) {
2414 goto out;
2415 }
2416 }
2417
2418 bytes -= cur_bytes;
2419 offset += cur_bytes;
2420 qiov_offset += cur_bytes;
2421 }
2422
2423 out:
2424 if (aio) {
2425 aio_task_pool_wait_all(aio);
2426 if (ret == 0) {
2427 ret = aio_task_pool_status(aio);
2428 }
2429 g_free(aio);
2430 }
2431
2432 return ret;
2433 }
2434
2435 /* Check if it's possible to merge a write request with the writing of
2436 * the data from the COW regions */
2437 static bool merge_cow(uint64_t offset, unsigned bytes,
2438 QEMUIOVector *qiov, size_t qiov_offset,
2439 QCowL2Meta *l2meta)
2440 {
2441 QCowL2Meta *m;
2442
2443 for (m = l2meta; m != NULL; m = m->next) {
2444 /* If both COW regions are empty then there's nothing to merge */
2445 if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) {
2446 continue;
2447 }
2448
2449 /* If COW regions are handled already, skip this too */
2450 if (m->skip_cow) {
2451 continue;
2452 }
2453
2454 /*
2455 * The write request should start immediately after the first
2456 * COW region. This does not always happen because the area
2457 * touched by the request can be larger than the one defined
2458 * by @m (a single request can span an area consisting of a
2459 * mix of previously unallocated and allocated clusters, which
2460 * is why @l2meta is a list).
2461 */
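/*
 * In the case that allows merging (as checked below), the guest write
 * fills exactly the gap between the two COW regions of @m:
 *
 *   l2meta_cow_start(m)                        m->offset + m->cow_end.offset
 *   |<- cow_start.nb_bytes ->|<---- guest write ---->|<- cow_end.nb_bytes ->|
 *                          offset            offset + bytes
 */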
2462 if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) {
2463 /* In this case the request starts before this region */
2464 assert(offset < l2meta_cow_start(m));
2465 assert(m->cow_start.nb_bytes == 0);
2466 continue;
2467 }
2468
2469 /* The write request should end immediately before the second
2470 * COW region (see above for why it does not always happen) */
2471 if (m->offset + m->cow_end.offset != offset + bytes) {
2472 assert(offset + bytes > m->offset + m->cow_end.offset);
2473 assert(m->cow_end.nb_bytes == 0);
2474 continue;
2475 }
2476
2477 /* Make sure that adding both COW regions to the QEMUIOVector
2478 * does not exceed IOV_MAX */
2479 if (qemu_iovec_subvec_niov(qiov, qiov_offset, bytes) > IOV_MAX - 2) {
2480 continue;
2481 }
2482
2483 m->data_qiov = qiov;
2484 m->data_qiov_offset = qiov_offset;
2485 return true;
2486 }
2487
2488 return false;
2489 }
2490
2491 /*
2492 * Return 1 if the COW regions read as zeroes, 0 if not, < 0 on error.
2493 * Note that returning 0 does not guarantee non-zero data.
2494 */
2495 static int coroutine_fn GRAPH_RDLOCK
2496 is_zero_cow(BlockDriverState *bs, QCowL2Meta *m)
2497 {
2498 /*
2499 * This check is designed as an optimization shortcut, so it must be
2500 * efficient.
2501 * Instead of is_zero(), use bdrv_co_is_zero_fast() as it is
2502 * faster (but not as accurate, and it can result in false negatives).
2503 */
2504 int ret = bdrv_co_is_zero_fast(bs, m->offset + m->cow_start.offset,
2505 m->cow_start.nb_bytes);
2506 if (ret <= 0) {
2507 return ret;
2508 }
2509
2510 return bdrv_co_is_zero_fast(bs, m->offset + m->cow_end.offset,
2511 m->cow_end.nb_bytes);
2512 }
2513
2514 static int coroutine_fn GRAPH_RDLOCK
2515 handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta)
2516 {
2517 BDRVQcow2State *s = bs->opaque;
2518 QCowL2Meta *m;
2519
2520 if (!(s->data_file->bs->supported_zero_flags & BDRV_REQ_NO_FALLBACK)) {
2521 return 0;
2522 }
2523
2524 if (bs->encrypted) {
2525 return 0;
2526 }
2527
2528 for (m = l2meta; m != NULL; m = m->next) {
2529 int ret;
2530 uint64_t start_offset = m->alloc_offset + m->cow_start.offset;
2531 unsigned nb_bytes = m->cow_end.offset + m->cow_end.nb_bytes -
2532 m->cow_start.offset;
2533
2534 if (!m->cow_start.nb_bytes && !m->cow_end.nb_bytes) {
2535 continue;
2536 }
2537
2538 ret = is_zero_cow(bs, m);
2539 if (ret < 0) {
2540 return ret;
2541 } else if (ret == 0) {
2542 continue;
2543 }
2544
2545 /*
2546 * Instead of writing zero COW buffers,
2547 * efficiently zero out the whole clusters.
2548 */
2549
2550 ret = qcow2_pre_write_overlap_check(bs, 0, start_offset, nb_bytes,
2551 true);
2552 if (ret < 0) {
2553 return ret;
2554 }
2555
2556 BLKDBG_CO_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_SPACE);
2557 ret = bdrv_co_pwrite_zeroes(s->data_file, start_offset, nb_bytes,
2558 BDRV_REQ_NO_FALLBACK);
2559 if (ret < 0) {
2560 if (ret != -ENOTSUP && ret != -EAGAIN) {
2561 return ret;
2562 }
2563 continue;
2564 }
2565
2566 trace_qcow2_skip_cow(qemu_coroutine_self(), m->offset, m->nb_clusters);
2567 m->skip_cow = true;
2568 }
2569 return 0;
2570 }
2571
2572 /*
2573 * qcow2_co_pwritev_task
2574 * Called with s->lock unlocked
2575 * l2meta - if not NULL, qcow2_co_pwritev_task() will consume it. The caller
2576 * must not use it in any way after the qcow2_co_pwritev_task() call.
2577 */
2578 static coroutine_fn GRAPH_RDLOCK
2579 int qcow2_co_pwritev_task(BlockDriverState *bs, uint64_t host_offset,
2580 uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
2581 uint64_t qiov_offset, QCowL2Meta *l2meta)
2582 {
2583 int ret;
2584 BDRVQcow2State *s = bs->opaque;
2585 void *crypt_buf = NULL;
2586 QEMUIOVector encrypted_qiov;
2587
2588 if (bs->encrypted) {
2589 assert(s->crypto);
2590 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
2591 crypt_buf = qemu_try_blockalign(bs->file->bs, bytes);
2592 if (crypt_buf == NULL) {
2593 ret = -ENOMEM;
2594 goto out_unlocked;
2595 }
2596 qemu_iovec_to_buf(qiov, qiov_offset, crypt_buf, bytes);
2597
2598 if (qcow2_co_encrypt(bs, host_offset, offset, crypt_buf, bytes) < 0) {
2599 ret = -EIO;
2600 goto out_unlocked;
2601 }
2602
2603 qemu_iovec_init_buf(&encrypted_qiov, crypt_buf, bytes);
2604 qiov = &encrypted_qiov;
2605 qiov_offset = 0;
2606 }
2607
2608 /* Try to efficiently initialize the physical space with zeroes */
2609 ret = handle_alloc_space(bs, l2meta);
2610 if (ret < 0) {
2611 goto out_unlocked;
2612 }
2613
2614 /*
2615 * If we need to do COW, check if it's possible to merge the
2616 * writing of the guest data together with that of the COW regions.
2617 * If it's not possible (or not necessary) then write the
2618 * guest data now.
2619 */
2620 if (!merge_cow(offset, bytes, qiov, qiov_offset, l2meta)) {
2621 BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO);
2622 trace_qcow2_writev_data(qemu_coroutine_self(), host_offset);
2623 ret = bdrv_co_pwritev_part(s->data_file, host_offset,
2624 bytes, qiov, qiov_offset, 0);
2625 if (ret < 0) {
2626 goto out_unlocked;
2627 }
2628 }
2629
2630 qemu_co_mutex_lock(&s->lock);
2631
2632 ret = qcow2_handle_l2meta(bs, &l2meta, true);
2633 goto out_locked;
2634
2635 out_unlocked:
2636 qemu_co_mutex_lock(&s->lock);
2637
2638 out_locked:
2639 qcow2_handle_l2meta(bs, &l2meta, false);
2640 qemu_co_mutex_unlock(&s->lock);
2641
2642 qemu_vfree(crypt_buf);
2643
2644 return ret;
2645 }
2646
2647 /*
2648 * This function can count as GRAPH_RDLOCK because qcow2_co_pwritev_part() holds
2649 * the graph lock and keeps it until this coroutine has terminated.
2650 */
2651 static coroutine_fn GRAPH_RDLOCK int qcow2_co_pwritev_task_entry(AioTask *task)
2652 {
2653 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task);
2654
2655 assert(!t->subcluster_type);
2656
2657 return qcow2_co_pwritev_task(t->bs, t->host_offset,
2658 t->offset, t->bytes, t->qiov, t->qiov_offset,
2659 t->l2meta);
2660 }
2661
2662 static int coroutine_fn GRAPH_RDLOCK
2663 qcow2_co_pwritev_part(BlockDriverState *bs, int64_t offset, int64_t bytes,
2664 QEMUIOVector *qiov, size_t qiov_offset,
2665 BdrvRequestFlags flags)
2666 {
2667 BDRVQcow2State *s = bs->opaque;
2668 int offset_in_cluster;
2669 int ret;
2670 unsigned int cur_bytes; /* number of bytes in current iteration */
2671 uint64_t host_offset;
2672 QCowL2Meta *l2meta = NULL;
2673 AioTaskPool *aio = NULL;
2674
2675 trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes);
2676
2677 while (bytes != 0 && aio_task_pool_status(aio) == 0) {
2678
2679 l2meta = NULL;
2680
2681 trace_qcow2_writev_start_part(qemu_coroutine_self());
2682 offset_in_cluster = offset_into_cluster(s, offset);
2683 cur_bytes = MIN(bytes, INT_MAX);
2684 if (bs->encrypted) {
2685 cur_bytes = MIN(cur_bytes,
2686 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
2687 - offset_in_cluster);
2688 }
2689
2690 qemu_co_mutex_lock(&s->lock);
2691
2692 ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes,
2693 &host_offset, &l2meta);
2694 if (ret < 0) {
2695 goto out_locked;
2696 }
2697
2698 ret = qcow2_pre_write_overlap_check(bs, 0, host_offset,
2699 cur_bytes, true);
2700 if (ret < 0) {
2701 goto out_locked;
2702 }
2703
2704 qemu_co_mutex_unlock(&s->lock);
2705
2706 if (!aio && cur_bytes != bytes) {
2707 aio = aio_task_pool_new(QCOW2_MAX_WORKERS);
2708 }
2709 ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_task_entry, 0,
2710 host_offset, offset,
2711 cur_bytes, qiov, qiov_offset, l2meta);
2712 l2meta = NULL; /* l2meta is consumed by qcow2_co_pwritev_task() */
2713 if (ret < 0) {
2714 goto fail_nometa;
2715 }
2716
2717 bytes -= cur_bytes;
2718 offset += cur_bytes;
2719 qiov_offset += cur_bytes;
2720 trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes);
2721 }
2722 ret = 0;
2723
2724 qemu_co_mutex_lock(&s->lock);
2725
2726 out_locked:
2727 qcow2_handle_l2meta(bs, &l2meta, false);
2728
2729 qemu_co_mutex_unlock(&s->lock);
2730
2731 fail_nometa:
2732 if (aio) {
2733 aio_task_pool_wait_all(aio);
2734 if (ret == 0) {
2735 ret = aio_task_pool_status(aio);
2736 }
2737 g_free(aio);
2738 }
2739
2740 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);
2741
2742 return ret;
2743 }
2744
2745 static int GRAPH_RDLOCK qcow2_inactivate(BlockDriverState *bs)
2746 {
2747 BDRVQcow2State *s = bs->opaque;
2748 int ret, result = 0;
2749 Error *local_err = NULL;
2750
2751 qcow2_store_persistent_dirty_bitmaps(bs, true, &local_err);
2752 if (local_err != NULL) {
2753 result = -EINVAL;
2754 error_reportf_err(local_err, "Lost persistent bitmaps during "
2755 "inactivation of node '%s': ",
2756 bdrv_get_device_or_node_name(bs));
2757 }
2758
2759 ret = qcow2_cache_flush(bs, s->l2_table_cache);
2760 if (ret) {
2761 result = ret;
2762 error_report("Failed to flush the L2 table cache: %s",
2763 strerror(-ret));
2764 }
2765
2766 ret = qcow2_cache_flush(bs, s->refcount_block_cache);
2767 if (ret) {
2768 result = ret;
2769 error_report("Failed to flush the refcount block cache: %s",
2770 strerror(-ret));
2771 }
2772
2773 if (result == 0) {
2774 qcow2_mark_clean(bs);
2775 }
2776
2777 return result;
2778 }
2779
2780 static void coroutine_mixed_fn GRAPH_RDLOCK
2781 qcow2_do_close(BlockDriverState *bs, bool close_data_file)
2782 {
2783 BDRVQcow2State *s = bs->opaque;
2784 qemu_vfree(s->l1_table);
2785 /* else pre-write overlap checks in cache_destroy may crash */
2786 s->l1_table = NULL;
2787
2788 if (!(s->flags & BDRV_O_INACTIVE)) {
2789 qcow2_inactivate(bs);
2790 }
2791
2792 cache_clean_timer_del(bs);
2793 qcow2_cache_destroy(s->l2_table_cache);
2794 qcow2_cache_destroy(s->refcount_block_cache);
2795
2796 qcrypto_block_free(s->crypto);
2797 s->crypto = NULL;
2798 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
2799
2800 g_free(s->unknown_header_fields);
2801 cleanup_unknown_header_ext(bs);
2802
2803 g_free(s->image_data_file);
2804 g_free(s->image_backing_file);
2805 g_free(s->image_backing_format);
2806
2807 if (close_data_file && has_data_file(bs)) {
2808 GLOBAL_STATE_CODE();
2809 bdrv_graph_rdunlock_main_loop();
2810 bdrv_graph_wrlock(NULL);
2811 bdrv_unref_child(bs, s->data_file);
2812 bdrv_graph_wrunlock(NULL);
2813 s->data_file = NULL;
2814 bdrv_graph_rdlock_main_loop();
2815 }
2816
2817 qcow2_refcount_close(bs);
2818 qcow2_free_snapshots(bs);
2819 }
2820
2821 static void GRAPH_UNLOCKED qcow2_close(BlockDriverState *bs)
2822 {
2823 GLOBAL_STATE_CODE();
2824 GRAPH_RDLOCK_GUARD_MAINLOOP();
2825
2826 qcow2_do_close(bs, true);
2827 }
2828
2829 static void coroutine_fn GRAPH_RDLOCK
2830 qcow2_co_invalidate_cache(BlockDriverState *bs, Error **errp)
2831 {
2832 ERRP_GUARD();
2833 BDRVQcow2State *s = bs->opaque;
2834 BdrvChild *data_file;
2835 int flags = s->flags;
2836 QCryptoBlock *crypto = NULL;
2837 QDict *options;
2838 int ret;
2839
2840 /*
2841 * Backing files are read-only, which makes all of their metadata immutable;
2842 * that means we don't have to worry about reopening them here.
2843 */
2844
2845 crypto = s->crypto;
2846 s->crypto = NULL;
2847
2848 /*
2849 * Do not reopen s->data_file (i.e., have qcow2_do_close() not close it,
2850 * and then prevent qcow2_do_open() from opening it), because this function
2851 * runs in the I/O path and as such we must not invoke global-state
2852 * functions like bdrv_unref_child() and bdrv_open_child().
2853 */
2854
2855 qcow2_do_close(bs, false);
2856
2857 data_file = s->data_file;
2858 memset(s, 0, sizeof(BDRVQcow2State));
2859 s->data_file = data_file;
2860
2861 options = qdict_clone_shallow(bs->options);
2862
2863 flags &= ~BDRV_O_INACTIVE;
2864 qemu_co_mutex_lock(&s->lock);
2865 ret = qcow2_do_open(bs, options, flags, false, errp);
2866 qemu_co_mutex_unlock(&s->lock);
2867 qobject_unref(options);
2868 if (ret < 0) {
2869 error_prepend(errp, "Could not reopen qcow2 layer: ");
2870 bs->drv = NULL;
2871 return;
2872 }
2873
2874 s->crypto = crypto;
2875 }
2876
2877 static size_t header_ext_add(char *buf, uint32_t magic, const void *s,
2878 size_t len, size_t buflen)
2879 {
2880 QCowExtension *ext_backing_fmt = (QCowExtension*) buf;
2881 size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7);
2882
2883 if (buflen < ext_len) {
2884 return -ENOSPC;
2885 }
2886
2887 *ext_backing_fmt = (QCowExtension) {
2888 .magic = cpu_to_be32(magic),
2889 .len = cpu_to_be32(len),
2890 };
2891
2892 if (len) {
2893 memcpy(buf + sizeof(QCowExtension), s, len);
2894 }
2895
2896 return ext_len;
2897 }
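/*
 * For example (the numbers follow from the definitions above): adding a
 * backing format extension with the 5-byte string "qcow2" pads the data to
 * 8 bytes, so header_ext_add() returns sizeof(QCowExtension) + 8 = 16, and
 * the caller advances buf by that amount before adding the next extension.
 */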
2898
2899 /*
2900 * Updates the qcow2 header, including the variable length parts of it, i.e.
2901 * the backing file name and all extensions. qcow2 was not designed to allow
2902 * such changes, so if we run out of space (we can only use the first cluster)
2903 * this function may fail.
2904 *
2905 * Returns 0 on success, -errno in error cases.
2906 */
2907 int qcow2_update_header(BlockDriverState *bs)
2908 {
2909 BDRVQcow2State *s = bs->opaque;
2910 QCowHeader *header;
2911 char *buf;
2912 size_t buflen = s->cluster_size;
2913 int ret;
2914 uint64_t total_size;
2915 uint32_t refcount_table_clusters;
2916 size_t header_length;
2917 Qcow2UnknownHeaderExtension *uext;
2918
2919 buf = qemu_blockalign(bs, buflen);
2920
2921 /* Header structure */
2922 header = (QCowHeader*) buf;
2923
2924 if (buflen < sizeof(*header)) {
2925 ret = -ENOSPC;
2926 goto fail;
2927 }
2928
2929 header_length = sizeof(*header) + s->unknown_header_fields_size;
2930 total_size = bs->total_sectors * BDRV_SECTOR_SIZE;
2931 refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);
2932
2933 ret = validate_compression_type(s, NULL);
2934 if (ret) {
2935 goto fail;
2936 }
2937
2938 *header = (QCowHeader) {
2939 /* Version 2 fields */
2940 .magic = cpu_to_be32(QCOW_MAGIC),
2941 .version = cpu_to_be32(s->qcow_version),
2942 .backing_file_offset = 0,
2943 .backing_file_size = 0,
2944 .cluster_bits = cpu_to_be32(s->cluster_bits),
2945 .size = cpu_to_be64(total_size),
2946 .crypt_method = cpu_to_be32(s->crypt_method_header),
2947 .l1_size = cpu_to_be32(s->l1_size),
2948 .l1_table_offset = cpu_to_be64(s->l1_table_offset),
2949 .refcount_table_offset = cpu_to_be64(s->refcount_table_offset),
2950 .refcount_table_clusters = cpu_to_be32(refcount_table_clusters),
2951 .nb_snapshots = cpu_to_be32(s->nb_snapshots),
2952 .snapshots_offset = cpu_to_be64(s->snapshots_offset),
2953
2954 /* Version 3 fields */
2955 .incompatible_features = cpu_to_be64(s->incompatible_features),
2956 .compatible_features = cpu_to_be64(s->compatible_features),
2957 .autoclear_features = cpu_to_be64(s->autoclear_features),
2958 .refcount_order = cpu_to_be32(s->refcount_order),
2959 .header_length = cpu_to_be32(header_length),
2960 .compression_type = s->compression_type,
2961 };
2962
2963 /* For older versions, write a shorter header */
2964 switch (s->qcow_version) {
2965 case 2:
2966 ret = offsetof(QCowHeader, incompatible_features);
2967 break;
2968 case 3:
2969 ret = sizeof(*header);
2970 break;
2971 default:
2972 ret = -EINVAL;
2973 goto fail;
2974 }
2975
2976 buf += ret;
2977 buflen -= ret;
2978 memset(buf, 0, buflen);
2979
2980 /* Preserve any unknown field in the header */
2981 if (s->unknown_header_fields_size) {
2982 if (buflen < s->unknown_header_fields_size) {
2983 ret = -ENOSPC;
2984 goto fail;
2985 }
2986
2987 memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size);
2988 buf += s->unknown_header_fields_size;
2989 buflen -= s->unknown_header_fields_size;
2990 }
2991
2992 /* Backing file format header extension */
2993 if (s->image_backing_format) {
2994 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT,
2995 s->image_backing_format,
2996 strlen(s->image_backing_format),
2997 buflen);
2998 if (ret < 0) {
2999 goto fail;
3000 }
3001
3002 buf += ret;
3003 buflen -= ret;
3004 }
3005
3006 /* External data file header extension */
3007 if (has_data_file(bs) && s->image_data_file) {
3008 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_DATA_FILE,
3009 s->image_data_file, strlen(s->image_data_file),
3010 buflen);
3011 if (ret < 0) {
3012 goto fail;
3013 }
3014
3015 buf += ret;
3016 buflen -= ret;
3017 }
3018
3019 /* Full disk encryption header pointer extension */
3020 if (s->crypto_header.offset != 0) {
3021 s->crypto_header.offset = cpu_to_be64(s->crypto_header.offset);
3022 s->crypto_header.length = cpu_to_be64(s->crypto_header.length);
3023 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_CRYPTO_HEADER,
3024 &s->crypto_header, sizeof(s->crypto_header),
3025 buflen);
3026 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset);
3027 s->crypto_header.length = be64_to_cpu(s->crypto_header.length);
3028 if (ret < 0) {
3029 goto fail;
3030 }
3031 buf += ret;
3032 buflen -= ret;
3033 }
3034
3035 /*
3036 * Feature table. A mere 8 feature names occupies 392 bytes, and
3037 * when coupled with the v3 minimum header of 104 bytes plus the
3038 * 8-byte end-of-extension marker, that would leave only 8 bytes
3039 * for a backing file name in an image with 512-byte clusters.
3040 * Thus, we choose to omit this header for cluster sizes 4k and
3041 * smaller.
3042 */
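/*
 * (Assuming the usual 48-byte Qcow2Feature entries, the 392 bytes above are
 * 8 * 48 bytes of table data plus the 8-byte extension header.)
 */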
3043 if (s->qcow_version >= 3 && s->cluster_size > 4096) {
3044 static const Qcow2Feature features[] = {
3045 {
3046 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
3047 .bit = QCOW2_INCOMPAT_DIRTY_BITNR,
3048 .name = "dirty bit",
3049 },
3050 {
3051 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
3052 .bit = QCOW2_INCOMPAT_CORRUPT_BITNR,
3053 .name = "corrupt bit",
3054 },
3055 {
3056 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
3057 .bit = QCOW2_INCOMPAT_DATA_FILE_BITNR,
3058 .name = "external data file",
3059 },
3060 {
3061 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
3062 .bit = QCOW2_INCOMPAT_COMPRESSION_BITNR,
3063 .name = "compression type",
3064 },
3065 {
3066 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
3067 .bit = QCOW2_INCOMPAT_EXTL2_BITNR,
3068 .name = "extended L2 entries",
3069 },
3070 {
3071 .type = QCOW2_FEAT_TYPE_COMPATIBLE,
3072 .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR,
3073 .name = "lazy refcounts",
3074 },
3075 {
3076 .type = QCOW2_FEAT_TYPE_AUTOCLEAR,
3077 .bit = QCOW2_AUTOCLEAR_BITMAPS_BITNR,
3078 .name = "bitmaps",
3079 },
3080 {
3081 .type = QCOW2_FEAT_TYPE_AUTOCLEAR,
3082 .bit = QCOW2_AUTOCLEAR_DATA_FILE_RAW_BITNR,
3083 .name = "raw external data",
3084 },
3085 };
3086
3087 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE,
3088 features, sizeof(features), buflen);
3089 if (ret < 0) {
3090 goto fail;
3091 }
3092 buf += ret;
3093 buflen -= ret;
3094 }
3095
3096 /* Bitmap extension */
3097 if (s->nb_bitmaps > 0) {
3098 Qcow2BitmapHeaderExt bitmaps_header = {
3099 .nb_bitmaps = cpu_to_be32(s->nb_bitmaps),
3100 .bitmap_directory_size =
3101 cpu_to_be64(s->bitmap_directory_size),
3102 .bitmap_directory_offset =
3103 cpu_to_be64(s->bitmap_directory_offset)
3104 };
3105 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BITMAPS,
3106 &bitmaps_header, sizeof(bitmaps_header),
3107 buflen);
3108 if (ret < 0) {
3109 goto fail;
3110 }
3111 buf += ret;
3112 buflen -= ret;
3113 }
3114
3115 /* Keep unknown header extensions */
3116 QLIST_FOREACH(uext, &s->unknown_header_ext, next) {
3117 ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen);
3118 if (ret < 0) {
3119 goto fail;
3120 }
3121
3122 buf += ret;
3123 buflen -= ret;
3124 }
3125
3126 /* End of header extensions */
3127 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen);
3128 if (ret < 0) {
3129 goto fail;
3130 }
3131
3132 buf += ret;
3133 buflen -= ret;
3134
3135 /* Backing file name */
3136 if (s->image_backing_file) {
3137 size_t backing_file_len = strlen(s->image_backing_file);
3138
3139 if (buflen < backing_file_len) {
3140 ret = -ENOSPC;
3141 goto fail;
3142 }
3143
3144 /* Using strncpy is ok here, since buf does not need to be NUL-terminated. */
3145 strncpy(buf, s->image_backing_file, buflen);
3146
3147 header->backing_file_offset = cpu_to_be64(buf - ((char*) header));
3148 header->backing_file_size = cpu_to_be32(backing_file_len);
3149 }
3150
3151 /* Write the new header */
3152 ret = bdrv_pwrite(bs->file, 0, s->cluster_size, header, 0);
3153 if (ret < 0) {
3154 goto fail;
3155 }
3156
3157 ret = 0;
3158 fail:
3159 qemu_vfree(header);
3160 return ret;
3161 }
3162
3163 static int coroutine_fn GRAPH_RDLOCK
3164 qcow2_co_change_backing_file(BlockDriverState *bs, const char *backing_file,
3165 const char *backing_fmt)
3166 {
3167 BDRVQcow2State *s = bs->opaque;
3168
3169 /* Adding a backing file means that the external data file alone won't be
3170 * enough to make sense of the content */
3171 if (backing_file && data_file_is_raw(bs)) {
3172 return -EINVAL;
3173 }
3174
3175 if (backing_file && strlen(backing_file) > 1023) {
3176 return -EINVAL;
3177 }
3178
3179 pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file),
3180 backing_file ?: "");
3181 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
3182 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
3183
3184 g_free(s->image_backing_file);
3185 g_free(s->image_backing_format);
3186
3187 s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL;
3188 s->image_backing_format = backing_fmt ? g_strdup(bs->backing_format) : NULL;
3189
3190 return qcow2_update_header(bs);
3191 }
3192
3193 static int coroutine_fn GRAPH_RDLOCK
3194 qcow2_set_up_encryption(BlockDriverState *bs,
3195 QCryptoBlockCreateOptions *cryptoopts,
3196 Error **errp)
3197 {
3198 BDRVQcow2State *s = bs->opaque;
3199 QCryptoBlock *crypto = NULL;
3200 int fmt, ret;
3201
3202 switch (cryptoopts->format) {
3203 case Q_CRYPTO_BLOCK_FORMAT_LUKS:
3204 fmt = QCOW_CRYPT_LUKS;
3205 break;
3206 case Q_CRYPTO_BLOCK_FORMAT_QCOW:
3207 fmt = QCOW_CRYPT_AES;
3208 break;
3209 default:
3210 error_setg(errp, "Crypto format not supported in qcow2");
3211 return -EINVAL;
3212 }
3213
3214 s->crypt_method_header = fmt;
3215
3216 crypto = qcrypto_block_create(cryptoopts, "encrypt.",
3217 qcow2_crypto_hdr_init_func,
3218 qcow2_crypto_hdr_write_func,
3219 bs, errp);
3220 if (!crypto) {
3221 return -EINVAL;
3222 }
3223
3224 ret = qcow2_update_header(bs);
3225 if (ret < 0) {
3226 error_setg_errno(errp, -ret, "Could not write encryption header");
3227 goto out;
3228 }
3229
3230 ret = 0;
3231 out:
3232 qcrypto_block_free(crypto);
3233 return ret;
3234 }
3235
3236 /**
3237 * Preallocates metadata structures for data clusters between @offset (in the
3238 * guest disk) and @new_length (which is thus generally the new guest disk
3239 * size).
3240 *
3241 * Returns: 0 on success, -errno on failure.
3242 */
3243 static int coroutine_fn GRAPH_RDLOCK
3244 preallocate_co(BlockDriverState *bs, uint64_t offset, uint64_t new_length,
3245 PreallocMode mode, Error **errp)
3246 {
3247 BDRVQcow2State *s = bs->opaque;
3248 uint64_t bytes;
3249 uint64_t host_offset = 0;
3250 int64_t file_length;
3251 unsigned int cur_bytes;
3252 int ret;
3253 QCowL2Meta *meta = NULL, *m;
3254
3255 assert(offset <= new_length);
3256 bytes = new_length - offset;
3257
3258 while (bytes) {
3259 cur_bytes = MIN(bytes, QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size));
3260 ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes,
3261 &host_offset, &meta);
3262 if (ret < 0) {
3263 error_setg_errno(errp, -ret, "Allocating clusters failed");
3264 goto out;
3265 }
3266
3267 for (m = meta; m != NULL; m = m->next) {
3268 m->prealloc = true;
3269 }
3270
3271 ret = qcow2_handle_l2meta(bs, &meta, true);
3272 if (ret < 0) {
3273 error_setg_errno(errp, -ret, "Mapping clusters failed");
3274 goto out;
3275 }
3276
3277 /* TODO Preallocate data if requested */
3278
3279 bytes -= cur_bytes;
3280 offset += cur_bytes;
3281 }
3282
3283 /*
3284 * It is expected that the image file is large enough to actually contain
3285 * all of the allocated clusters (otherwise we get failing reads after
3286 * EOF). Extend the image to the last allocated sector.
3287 */
3288 file_length = bdrv_co_getlength(s->data_file->bs);
3289 if (file_length < 0) {
3290 error_setg_errno(errp, -file_length, "Could not get file size");
3291 ret = file_length;
3292 goto out;
3293 }
3294
3295 if (host_offset + cur_bytes > file_length) {
3296 if (mode == PREALLOC_MODE_METADATA) {
3297 mode = PREALLOC_MODE_OFF;
3298 }
3299 ret = bdrv_co_truncate(s->data_file, host_offset + cur_bytes, false,
3300 mode, 0, errp);
3301 if (ret < 0) {
3302 goto out;
3303 }
3304 }
3305
3306 ret = 0;
3307
3308 out:
3309 qcow2_handle_l2meta(bs, &meta, false);
3310 return ret;
3311 }
3312
3313 /* qcow2_refcount_metadata_size:
3314 * @clusters: number of clusters to refcount (including data and L1/L2 tables)
3315 * @cluster_size: size of a cluster, in bytes
3316 * @refcount_order: refcount bits power-of-2 exponent
3317 * @generous_increase: allow for the refcount table to be 1.5x as large as it
3318 * needs to be
3319 *
3320 * Returns: Number of bytes required for refcount blocks and table metadata.
3321 */
3322 int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size,
3323 int refcount_order, bool generous_increase,
3324 uint64_t *refblock_count)
3325 {
3326 /*
3327 * Every host cluster is reference-counted, including metadata (even
3328 * refcount metadata is recursively included).
3329 *
3330 * An accurate formula for the size of the refcount metadata is difficult
3331 * to derive. An easier method of calculation is finding the fixed point
3332 * where no further refcount blocks or table clusters are required to
3333 * reference count every cluster.
3334 */
3335 int64_t blocks_per_table_cluster = cluster_size / REFTABLE_ENTRY_SIZE;
3336 int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order);
3337 int64_t table = 0; /* number of refcount table clusters */
3338 int64_t blocks = 0; /* number of refcount block clusters */
3339 int64_t last;
3340 int64_t n = 0;
3341
3342 do {
3343 last = n;
3344 blocks = DIV_ROUND_UP(clusters + table + blocks, refcounts_per_block);
3345 table = DIV_ROUND_UP(blocks, blocks_per_table_cluster);
3346 n = clusters + blocks + table;
3347
3348 if (n == last && generous_increase) {
3349 clusters += DIV_ROUND_UP(table, 2);
3350 n = 0; /* force another loop */
3351 generous_increase = false;
3352 }
3353 } while (n != last);
3354
3355 if (refblock_count) {
3356 *refblock_count = blocks;
3357 }
3358
3359 return (blocks + table) * cluster_size;
3360 }
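/*
 * Worked example, assuming 64 KiB clusters and the default 16-bit refcounts:
 * refcounts_per_block = 65536 * 8 / 16 = 32768 and blocks_per_table_cluster
 * = 8192. For clusters = 32768, the iteration converges to blocks = 2 and
 * table = 1, so the function returns (2 + 1) * 65536 = 196608 bytes.
 */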
3361
3362 /**
3363 * qcow2_calc_prealloc_size:
3364 * @total_size: virtual disk size in bytes
3365 * @cluster_size: cluster size in bytes
3366 * @refcount_order: refcount bits power-of-2 exponent
3367 * @extended_l2: true if the image has extended L2 entries
3368 *
3369 * Returns: Total number of bytes required for the fully allocated image
3370 * (including metadata).
3371 */
3372 static int64_t qcow2_calc_prealloc_size(int64_t total_size,
3373 size_t cluster_size,
3374 int refcount_order,
3375 bool extended_l2)
3376 {
3377 int64_t meta_size = 0;
3378 uint64_t nl1e, nl2e;
3379 int64_t aligned_total_size = ROUND_UP(total_size, cluster_size);
3380 size_t l2e_size = extended_l2 ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL;
3381
3382 /* header: 1 cluster */
3383 meta_size += cluster_size;
3384
3385 /* total size of L2 tables */
3386 nl2e = aligned_total_size / cluster_size;
3387 nl2e = ROUND_UP(nl2e, cluster_size / l2e_size);
3388 meta_size += nl2e * l2e_size;
3389
3390 /* total size of L1 tables */
3391 nl1e = nl2e * l2e_size / cluster_size;
3392 nl1e = ROUND_UP(nl1e, cluster_size / L1E_SIZE);
3393 meta_size += nl1e * L1E_SIZE;
3394
3395 /* total size of refcount table and blocks */
3396 meta_size += qcow2_refcount_metadata_size(
3397 (meta_size + aligned_total_size) / cluster_size,
3398 cluster_size, refcount_order, false, NULL);
3399
3400 return meta_size + aligned_total_size;
3401 }
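/*
 * Worked example, assuming a 1 GiB image with 64 KiB clusters, 16-bit
 * refcounts and no extended L2 entries: one header cluster (64 KiB),
 * 16384 L2 entries * 8 bytes = 128 KiB of L2 tables, one cluster's worth
 * of L1 entries (64 KiB after rounding up), and 128 KiB of refcount
 * metadata, i.e. about 384 KiB of metadata on top of the 1 GiB of data.
 */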
3402
3403 static bool validate_cluster_size(size_t cluster_size, bool extended_l2,
3404 Error **errp)
3405 {
3406 int cluster_bits = ctz32(cluster_size);
3407 if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS ||
3408 (1 << cluster_bits) != cluster_size)
3409 {
3410 error_setg(errp, "Cluster size must be a power of two between %d and "
3411 "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10));
3412 return false;
3413 }
3414
3415 if (extended_l2) {
3416 unsigned min_cluster_size =
3417 (1 << MIN_CLUSTER_BITS) * QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER;
3418 if (cluster_size < min_cluster_size) {
3419 error_setg(errp, "Extended L2 entries are only supported with "
3420 "cluster sizes of at least %u bytes", min_cluster_size);
3421 return false;
3422 }
3423 }
3424
3425 return true;
3426 }
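/*
 * Note: with MIN_CLUSTER_BITS of 9 (512-byte clusters) and 32 subclusters
 * per cluster, this means extended L2 entries require a cluster size of at
 * least 16 KiB.
 */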
3427
3428 static size_t qcow2_opt_get_cluster_size_del(QemuOpts *opts, bool extended_l2,
3429 Error **errp)
3430 {
3431 size_t cluster_size;
3432
3433 cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE,
3434 DEFAULT_CLUSTER_SIZE);
3435 if (!validate_cluster_size(cluster_size, extended_l2, errp)) {
3436 return 0;
3437 }
3438 return cluster_size;
3439 }
3440
3441 static int qcow2_opt_get_version_del(QemuOpts *opts, Error **errp)
3442 {
3443 char *buf;
3444 int ret;
3445
3446 buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL);
3447 if (!buf) {
3448 ret = 3; /* default */
3449 } else if (!strcmp(buf, "0.10")) {
3450 ret = 2;
3451 } else if (!strcmp(buf, "1.1")) {
3452 ret = 3;
3453 } else {
3454 error_setg(errp, "Invalid compatibility level: '%s'", buf);
3455 ret = -EINVAL;
3456 }
3457 g_free(buf);
3458 return ret;
3459 }
3460
3461 static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version,
3462 Error **errp)
3463 {
3464 uint64_t refcount_bits;
3465
3466 refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, 16);
3467 if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) {
3468 error_setg(errp, "Refcount width must be a power of two and may not "
3469 "exceed 64 bits");
3470 return 0;
3471 }
3472
3473 if (version < 3 && refcount_bits != 16) {
3474 error_setg(errp, "Different refcount widths than 16 bits require "
3475 "compatibility level 1.1 or above (use compat=1.1 or "
3476 "greater)");
3477 return 0;
3478 }
3479
3480 return refcount_bits;
3481 }
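/*
 * In other words, the accepted refcount widths are 1, 2, 4, 8, 16, 32 and
 * 64 bits, and any width other than 16 bits additionally requires a v3
 * (compat=1.1) image.
 */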
3482
3483 static int coroutine_fn GRAPH_UNLOCKED
3484 qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
3485 {
3486 BlockdevCreateOptionsQcow2 *qcow2_opts;
3487 QDict *options;
3488
3489 /*
3490 * Open the image file and write a minimal qcow2 header.
3491 *
3492 * We keep things simple and start with a zero-sized image. We also
3493 * do without refcount blocks or an L1 table for now. We'll fix the
3494 * inconsistency later.
3495 *
3496 * We do need a refcount table because growing the refcount table means
3497 * allocating two new refcount blocks - the second of which would be at
3498 * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file
3499 * size for any qcow2 image.
3500 */
3501 BlockBackend *blk = NULL;
3502 BlockDriverState *bs = NULL;
3503 BlockDriverState *data_bs = NULL;
3504 QCowHeader *header;
3505 size_t cluster_size;
3506 int version;
3507 int refcount_order;
3508 uint64_t *refcount_table;
3509 int ret;
3510 uint8_t compression_type = QCOW2_COMPRESSION_TYPE_ZLIB;
3511
3512 assert(create_options->driver == BLOCKDEV_DRIVER_QCOW2);
3513 qcow2_opts = &create_options->u.qcow2;
3514
3515 bs = bdrv_co_open_blockdev_ref(qcow2_opts->file, errp);
3516 if (bs == NULL) {
3517 return -EIO;
3518 }
3519
3520 /* Validate options and set default values */
3521 if (!QEMU_IS_ALIGNED(qcow2_opts->size, BDRV_SECTOR_SIZE)) {
3522 error_setg(errp, "Image size must be a multiple of %u bytes",
3523 (unsigned) BDRV_SECTOR_SIZE);
3524 ret = -EINVAL;
3525 goto out;
3526 }
3527
3528 if (qcow2_opts->has_version) {
3529 switch (qcow2_opts->version) {
3530 case BLOCKDEV_QCOW2_VERSION_V2:
3531 version = 2;
3532 break;
3533 case BLOCKDEV_QCOW2_VERSION_V3:
3534 version = 3;
3535 break;
3536 default:
3537 g_assert_not_reached();
3538 }
3539 } else {
3540 version = 3;
3541 }
3542
3543 if (qcow2_opts->has_cluster_size) {
3544 cluster_size = qcow2_opts->cluster_size;
3545 } else {
3546 cluster_size = DEFAULT_CLUSTER_SIZE;
3547 }
3548
3549 if (!qcow2_opts->has_extended_l2) {
3550 qcow2_opts->extended_l2 = false;
3551 }
3552 if (qcow2_opts->extended_l2) {
3553 if (version < 3) {
3554 error_setg(errp, "Extended L2 entries are only supported with "
3555 "compatibility level 1.1 and above (use version=v3 or "
3556 "greater)");
3557 ret = -EINVAL;
3558 goto out;
3559 }
3560 }
3561
3562 if (!validate_cluster_size(cluster_size, qcow2_opts->extended_l2, errp)) {
3563 ret = -EINVAL;
3564 goto out;
3565 }
3566
3567 if (!qcow2_opts->has_preallocation) {
3568 qcow2_opts->preallocation = PREALLOC_MODE_OFF;
3569 }
3570 if (qcow2_opts->backing_file &&
3571 qcow2_opts->preallocation != PREALLOC_MODE_OFF &&
3572 !qcow2_opts->extended_l2)
3573 {
3574 error_setg(errp, "Backing file and preallocation can only be used at "
3575 "the same time if extended_l2 is on");
3576 ret = -EINVAL;
3577 goto out;
3578 }
3579 if (qcow2_opts->has_backing_fmt && !qcow2_opts->backing_file) {
3580 error_setg(errp, "Backing format cannot be used without backing file");
3581 ret = -EINVAL;
3582 goto out;
3583 }
3584
3585 if (!qcow2_opts->has_lazy_refcounts) {
3586 qcow2_opts->lazy_refcounts = false;
3587 }
3588 if (version < 3 && qcow2_opts->lazy_refcounts) {
3589 error_setg(errp, "Lazy refcounts only supported with compatibility "
3590 "level 1.1 and above (use version=v3 or greater)");
3591 ret = -EINVAL;
3592 goto out;
3593 }
3594
3595 if (!qcow2_opts->has_refcount_bits) {
3596 qcow2_opts->refcount_bits = 16;
3597 }
3598 if (qcow2_opts->refcount_bits > 64 ||
3599 !is_power_of_2(qcow2_opts->refcount_bits))
3600 {
3601 error_setg(errp, "Refcount width must be a power of two and may not "
3602 "exceed 64 bits");
3603 ret = -EINVAL;
3604 goto out;
3605 }
3606 if (version < 3 && qcow2_opts->refcount_bits != 16) {
3607 error_setg(errp, "Different refcount widths than 16 bits require "
3608 "compatibility level 1.1 or above (use version=v3 or "
3609 "greater)");
3610 ret = -EINVAL;
3611 goto out;
3612 }
3613 refcount_order = ctz32(qcow2_opts->refcount_bits);
3614
3615 if (qcow2_opts->data_file_raw && !qcow2_opts->data_file) {
3616 error_setg(errp, "data-file-raw requires data-file");
3617 ret = -EINVAL;
3618 goto out;
3619 }
3620 if (qcow2_opts->data_file_raw && qcow2_opts->backing_file) {
3621 error_setg(errp, "Backing file and data-file-raw cannot be used at "
3622 "the same time");
3623 ret = -EINVAL;
3624 goto out;
3625 }
3626 if (qcow2_opts->data_file_raw &&
3627 qcow2_opts->preallocation == PREALLOC_MODE_OFF)
3628 {
3629 /*
3630 * data-file-raw means that "the external data file can be
3631 * read as a consistent standalone raw image without looking
3632 * at the qcow2 metadata." It does not say that the metadata
3633 * must be ignored, though (and the qcow2 driver in fact does
3634 * not ignore it), so the L1/L2 tables must be present and
3635 * give a 1:1 mapping; that way you get the same result
3636 * regardless of whether you look at the metadata or ignore
3637 * it.
3638 */
3639 qcow2_opts->preallocation = PREALLOC_MODE_METADATA;
3640
3641 /*
3642 * Cannot use preallocation with backing files, but giving a
3643 * backing file when specifying data_file_raw is an error
3644 * anyway.
3645 */
3646 assert(!qcow2_opts->backing_file);
3647 }
3648
3649 if (qcow2_opts->data_file) {
3650 if (version < 3) {
3651 error_setg(errp, "External data files are only supported with "
3652 "compatibility level 1.1 and above (use version=v3 or "
3653 "greater)");
3654 ret = -EINVAL;
3655 goto out;
3656 }
3657 data_bs = bdrv_co_open_blockdev_ref(qcow2_opts->data_file, errp);
3658 if (data_bs == NULL) {
3659 ret = -EIO;
3660 goto out;
3661 }
3662 }
3663
3664 if (qcow2_opts->has_compression_type &&
3665 qcow2_opts->compression_type != QCOW2_COMPRESSION_TYPE_ZLIB) {
3666
3667 ret = -EINVAL;
3668
3669 if (version < 3) {
3670 error_setg(errp, "Non-zlib compression type is only supported with "
3671 "compatibility level 1.1 and above (use version=v3 or "
3672 "greater)");
3673 goto out;
3674 }
3675
3676 switch (qcow2_opts->compression_type) {
3677 #ifdef CONFIG_ZSTD
3678 case QCOW2_COMPRESSION_TYPE_ZSTD:
3679 break;
3680 #endif
3681 default:
3682 error_setg(errp, "Unknown compression type");
3683 goto out;
3684 }
3685
3686 compression_type = qcow2_opts->compression_type;
3687 }
3688
3689 /* Create BlockBackend to write to the image */
3690 blk = blk_co_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL,
3691 errp);
3692 if (!blk) {
3693 ret = -EPERM;
3694 goto out;
3695 }
3696 blk_set_allow_write_beyond_eof(blk, true);
3697
3698 /* Write the header */
3699 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header));
3700 header = g_malloc0(cluster_size);
3701 *header = (QCowHeader) {
3702 .magic = cpu_to_be32(QCOW_MAGIC),
3703 .version = cpu_to_be32(version),
3704 .cluster_bits = cpu_to_be32(ctz32(cluster_size)),
3705 .size = cpu_to_be64(0),
3706 .l1_table_offset = cpu_to_be64(0),
3707 .l1_size = cpu_to_be32(0),
3708 .refcount_table_offset = cpu_to_be64(cluster_size),
3709 .refcount_table_clusters = cpu_to_be32(1),
3710 .refcount_order = cpu_to_be32(refcount_order),
3711 /* don't deal with endianness since compression_type is 1 byte long */
3712 .compression_type = compression_type,
3713 .header_length = cpu_to_be32(sizeof(*header)),
3714 };
3715
3716 /* We'll update this to the correct value later */
3717 header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
3718
3719 if (qcow2_opts->lazy_refcounts) {
3720 header->compatible_features |=
3721 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS);
3722 }
3723 if (data_bs) {
3724 header->incompatible_features |=
3725 cpu_to_be64(QCOW2_INCOMPAT_DATA_FILE);
3726 }
3727 if (qcow2_opts->data_file_raw) {
3728 header->autoclear_features |=
3729 cpu_to_be64(QCOW2_AUTOCLEAR_DATA_FILE_RAW);
3730 }
3731 if (compression_type != QCOW2_COMPRESSION_TYPE_ZLIB) {
3732 header->incompatible_features |=
3733 cpu_to_be64(QCOW2_INCOMPAT_COMPRESSION);
3734 }
3735
3736 if (qcow2_opts->extended_l2) {
3737 header->incompatible_features |=
3738 cpu_to_be64(QCOW2_INCOMPAT_EXTL2);
3739 }
3740
3741 ret = blk_co_pwrite(blk, 0, cluster_size, header, 0);
3742 g_free(header);
3743 if (ret < 0) {
3744 error_setg_errno(errp, -ret, "Could not write qcow2 header");
3745 goto out;
3746 }
3747
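/*
 * Minimal bootstrap layout written below (before the image is reopened
 * and made self-consistent):
 *   cluster 0: qcow2 header
 *   cluster 1: refcount table with a single entry pointing to cluster 2
 *   cluster 2: first refcount block (still all zeroes at this point)
 * The refcounts of these three clusters are only bumped further down via
 * qcow2_alloc_clusters(), which therefore must return offset 0.
 */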
3748 /* Write a refcount table with one refcount block */
3749 refcount_table = g_malloc0(2 * cluster_size);
3750 refcount_table[0] = cpu_to_be64(2 * cluster_size);
3751 ret = blk_co_pwrite(blk, cluster_size, 2 * cluster_size, refcount_table, 0);
3752 g_free(refcount_table);
3753
3754 if (ret < 0) {
3755 error_setg_errno(errp, -ret, "Could not write refcount table");
3756 goto out;
3757 }
3758
3759 blk_co_unref(blk);
3760 blk = NULL;
3761
3762 /*
3763 * And now open the image and make it consistent first (i.e. increase the
3764 * refcount of the cluster that is occupied by the header and the refcount
3765 * table)
3766 */
3767 options = qdict_new();
3768 qdict_put_str(options, "driver", "qcow2");
3769 qdict_put_str(options, "file", bs->node_name);
3770 if (data_bs) {
3771 qdict_put_str(options, "data-file", data_bs->node_name);
3772 }
3773 blk = blk_co_new_open(NULL, NULL, options,
3774 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_NO_FLUSH,
3775 errp);
3776 if (blk == NULL) {
3777 ret = -EIO;
3778 goto out;
3779 }
3780
3781 bdrv_graph_co_rdlock();
3782 ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size);
3783 if (ret < 0) {
3784 bdrv_graph_co_rdunlock();
3785 error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 "
3786 "header and refcount table");
3787 goto out;
3788
3789 } else if (ret != 0) {
3790 error_report("Huh, first cluster in empty image is already in use?");
3791 abort();
3792 }
3793
3794 /* Set the external data file if necessary */
3795 if (data_bs) {
3796 BDRVQcow2State *s = blk_bs(blk)->opaque;
3797 s->image_data_file = g_strdup(data_bs->filename);
3798 }
3799
3800 /* Create a full header (including things like feature table) */
3801 ret = qcow2_update_header(blk_bs(blk));
3802 bdrv_graph_co_rdunlock();
3803
3804 if (ret < 0) {
3805 error_setg_errno(errp, -ret, "Could not update qcow2 header");
3806 goto out;
3807 }
3808
3809 /* Okay, now that we have a valid image, let's give it the right size */
3810 ret = blk_co_truncate(blk, qcow2_opts->size, false,
3811 qcow2_opts->preallocation, 0, errp);
3812 if (ret < 0) {
3813 error_prepend(errp, "Could not resize image: ");
3814 goto out;
3815 }
3816
3817 /* Want a backing file? There you go. */
3818 if (qcow2_opts->backing_file) {
3819 const char *backing_format = NULL;
3820
3821 if (qcow2_opts->has_backing_fmt) {
3822 backing_format = BlockdevDriver_str(qcow2_opts->backing_fmt);
3823 }
3824
3825 bdrv_graph_co_rdlock();
3826 ret = bdrv_co_change_backing_file(blk_bs(blk), qcow2_opts->backing_file,
3827 backing_format, false);
3828 bdrv_graph_co_rdunlock();
3829
3830 if (ret < 0) {
3831 error_setg_errno(errp, -ret, "Could not assign backing file '%s' "
3832 "with format '%s'", qcow2_opts->backing_file,
3833 backing_format);
3834 goto out;
3835 }
3836 }
3837
3838 /* Want encryption? There you go. */
3839 if (qcow2_opts->encrypt) {
3840 bdrv_graph_co_rdlock();
3841 ret = qcow2_set_up_encryption(blk_bs(blk), qcow2_opts->encrypt, errp);
3842 bdrv_graph_co_rdunlock();
3843
3844 if (ret < 0) {
3845 goto out;
3846 }
3847 }
3848
3849 blk_co_unref(blk);
3850 blk = NULL;
3851
3852 /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning.
3853 * Use BDRV_O_NO_IO: encryption is now set up and we don't want to have to
3854 * set up a decryption context. We're not doing any I/O on the top level
3855 * BlockDriverState, only on lower layers, where BDRV_O_NO_IO has no
3856 * effect.
3857 */
3858 options = qdict_new();
3859 qdict_put_str(options, "driver", "qcow2");
3860 qdict_put_str(options, "file", bs->node_name);
3861 if (data_bs) {
3862 qdict_put_str(options, "data-file", data_bs->node_name);
3863 }
3864 blk = blk_co_new_open(NULL, NULL, options,
3865 BDRV_O_RDWR | BDRV_O_NO_BACKING | BDRV_O_NO_IO,
3866 errp);
3867 if (blk == NULL) {
3868 ret = -EIO;
3869 goto out;
3870 }
3871
3872 ret = 0;
3873 out:
3874 blk_co_unref(blk);
3875 bdrv_co_unref(bs);
3876 bdrv_co_unref(data_bs);
3877 return ret;
3878 }
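/*
 * For illustration only (the node name "proto0" and the job id below are
 * assumed values, not taken from this file): qcow2_co_create() above is
 * where a QMP blockdev-create request for the qcow2 driver ends up, so a
 * matching request looks roughly like:
 *
 *   { "execute": "blockdev-create",
 *     "arguments": { "job-id": "create0",
 *                    "options": { "driver": "qcow2",
 *                                 "file": "proto0",
 *                                 "size": 1073741824 } } }
 */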
3879
3880 static int coroutine_fn GRAPH_UNLOCKED
3881 qcow2_co_create_opts(BlockDriver *drv, const char *filename, QemuOpts *opts,
3882 Error **errp)
3883 {
3884 BlockdevCreateOptions *create_options = NULL;
3885 QDict *qdict;
3886 Visitor *v;
3887 BlockDriverState *bs = NULL;
3888 BlockDriverState *data_bs = NULL;
3889 const char *val;
3890 int ret;
3891
3892 /* Only the keyval visitor supports the dotted syntax needed for
3893 * encryption, so go through a QDict before getting a QAPI type. Ignore
3894 * options meant for the protocol layer so that the visitor doesn't
3895 * complain. */
3896 qdict = qemu_opts_to_qdict_filtered(opts, NULL, bdrv_qcow2.create_opts,
3897 true);
3898
3899 /* Handle encryption options */
3900 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT);
3901 if (val && !strcmp(val, "on")) {
3902 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT, "qcow");
3903 } else if (val && !strcmp(val, "off")) {
3904 qdict_del(qdict, BLOCK_OPT_ENCRYPT);
3905 }
3906
3907 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT);
3908 if (val && !strcmp(val, "aes")) {
3909 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT, "qcow");
3910 }
3911
3912 /* Convert compat=0.10/1.1 into compat=v2/v3, to be renamed into
3913 * version=v2/v3 below. */
3914 val = qdict_get_try_str(qdict, BLOCK_OPT_COMPAT_LEVEL);
3915 if (val && !strcmp(val, "0.10")) {
3916 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v2");
3917 } else if (val && !strcmp(val, "1.1")) {
3918 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v3");
3919 }
3920
3921 /* Change legacy command line options into QMP ones */
3922 static const QDictRenames opt_renames[] = {
3923 { BLOCK_OPT_BACKING_FILE, "backing-file" },
3924 { BLOCK_OPT_BACKING_FMT, "backing-fmt" },
3925 { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" },
3926 { BLOCK_OPT_LAZY_REFCOUNTS, "lazy-refcounts" },
3927 { BLOCK_OPT_EXTL2, "extended-l2" },
3928 { BLOCK_OPT_REFCOUNT_BITS, "refcount-bits" },
3929 { BLOCK_OPT_ENCRYPT, BLOCK_OPT_ENCRYPT_FORMAT },
3930 { BLOCK_OPT_COMPAT_LEVEL, "version" },
3931 { BLOCK_OPT_DATA_FILE_RAW, "data-file-raw" },
3932 { BLOCK_OPT_COMPRESSION_TYPE, "compression-type" },
3933 { NULL, NULL },
3934 };
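/*
 * For example, a typical legacy invocation such as
 * "-o compat=1.1,lazy_refcounts=on,cluster_size=65536" (option spellings as
 * assumed for qemu-img create) ends up as the QMP-style keys version=v3,
 * lazy-refcounts=on and cluster-size=65536 after the compat conversion
 * above and the renames applied just below.
 */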
3935
3936 if (!qdict_rename_keys(qdict, opt_renames, errp)) {
3937 ret = -EINVAL;
3938 goto finish;
3939 }
3940
3941 /* Create and open the file (protocol layer) */
3942 ret = bdrv_co_create_file(filename, opts, errp);
3943 if (ret < 0) {
3944 goto finish;
3945 }
3946
3947 bs = bdrv_co_open(filename, NULL, NULL,
3948 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
3949 if (bs == NULL) {
3950 ret = -EIO;
3951 goto finish;
3952 }
3953
3954 /* Create and open an external data file (protocol layer) */
3955 val = qdict_get_try_str(qdict, BLOCK_OPT_DATA_FILE);
3956 if (val) {
3957 ret = bdrv_co_create_file(val, opts, errp);
3958 if (ret < 0) {
3959 goto finish;
3960 }
3961
3962 data_bs = bdrv_co_open(val, NULL, NULL,
3963 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
3964 errp);
3965 if (data_bs == NULL) {
3966 ret = -EIO;
3967 goto finish;
3968 }
3969
3970 qdict_del(qdict, BLOCK_OPT_DATA_FILE);
3971 qdict_put_str(qdict, "data-file", data_bs->node_name);
3972 }
3973
3974 /* Set 'driver' and 'node' options */
3975 qdict_put_str(qdict, "driver", "qcow2");
3976 qdict_put_str(qdict, "file", bs->node_name);
3977
3978 /* Now get the QAPI type BlockdevCreateOptions */
3979 v = qobject_input_visitor_new_flat_confused(qdict, errp);
3980 if (!v) {
3981 ret = -EINVAL;
3982 goto finish;
3983 }
3984
3985 visit_type_BlockdevCreateOptions(v, NULL, &create_options, errp);
3986 visit_free(v);
3987 if (!create_options) {
3988 ret = -EINVAL;
3989 goto finish;
3990 }
3991
3992 /* Silently round up size */
3993 create_options->u.qcow2.size = ROUND_UP(create_options->u.qcow2.size,
3994 BDRV_SECTOR_SIZE);
3995
3996 /* Create the qcow2 image (format layer) */
3997 ret = qcow2_co_create(create_options, errp);
3998 finish:
3999 if (ret < 0) {
4000 bdrv_graph_co_rdlock();
4001 bdrv_co_delete_file_noerr(bs);
4002 bdrv_co_delete_file_noerr(data_bs);
4003 bdrv_graph_co_rdunlock();
4004 } else {
4005 ret = 0;
4006 }
4007
4008 qobject_unref(qdict);
4009 bdrv_co_unref(bs);
4010 bdrv_co_unref(data_bs);
4011 qapi_free_BlockdevCreateOptions(create_options);
4012 return ret;
4013 }
4014
4015
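/*
 * Return true if the @bytes bytes starting at @offset are guaranteed to read
 * back as zeroes through the whole backing chain; the range is clamped to
 * the image size first.
 */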
4016 static bool coroutine_fn GRAPH_RDLOCK
4017 is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
4018 {
4019 int64_t nr;
4020 int res;
4021
4022 /* Clamp to image length, before checking status of underlying sectors */
4023 if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) {
4024 bytes = bs->total_sectors * BDRV_SECTOR_SIZE - offset;
4025 }
4026
4027 if (!bytes) {
4028 return true;
4029 }
4030
4031 /*
4032 * bdrv_block_status_above doesn't merge different types of zeros, for
4033 * example, zeros which come from the region which is unallocated in
4034 * the whole backing chain, and zeros which come because of a short
4035 * backing file. So, we need a loop.
4036 */
4037 do {
4038 res = bdrv_co_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL);
4039 offset += nr;
4040 bytes -= nr;
4041 } while (res >= 0 && (res & BDRV_BLOCK_ZERO) && nr && bytes);
4042
4043 return res >= 0 && (res & BDRV_BLOCK_ZERO) && bytes == 0;
4044 }
4045
4046 static int coroutine_fn GRAPH_RDLOCK
4047 qcow2_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
4048 BdrvRequestFlags flags)
4049 {
4050 int ret;
4051 BDRVQcow2State *s = bs->opaque;
4052
4053 uint32_t head = offset_into_subcluster(s, offset);
4054 uint32_t tail = ROUND_UP(offset + bytes, s->subcluster_size) -
4055 (offset + bytes);
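/*
 * Example with a 4 KiB subcluster size: a request at offset 6144 with
 * bytes == 1024 gets head == 2048 (distance into the containing subcluster)
 * and tail == 1024 (distance from the end of the request to the next
 * subcluster boundary).
 */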
4056
4057 trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes);
4058 if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) {
4059 tail = 0;
4060 }
4061
4062 if (head || tail) {
4063 uint64_t off;
4064 unsigned int nr;
4065 QCow2SubclusterType type;
4066
4067 assert(head + bytes + tail <= s->subcluster_size);
4068
4069 /* check whether remainder of cluster already reads as zero */
4070 if (!(is_zero(bs, offset - head, head) &&
4071 is_zero(bs, offset + bytes, tail))) {
4072 return -ENOTSUP;
4073 }
4074
4075 qemu_co_mutex_lock(&s->lock);
4076 /* A new write may have happened since the check above */
4077 offset -= head;
4078 bytes = s->subcluster_size;
4079 nr = s->subcluster_size;
4080 ret = qcow2_get_host_offset(bs, offset, &nr, &off, &type);
4081 if (ret < 0 ||
4082 (type != QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN &&
4083 type != QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC &&
4084 type != QCOW2_SUBCLUSTER_ZERO_PLAIN &&
4085 type != QCOW2_SUBCLUSTER_ZERO_ALLOC)) {
4086 qemu_co_mutex_unlock(&s->lock);
4087 return ret < 0 ? ret : -ENOTSUP;
4088 }
4089 } else {
4090 qemu_co_mutex_lock(&s->lock);
4091 }
4092
4093 trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes);
4094
4095 /* Whatever is left can use real zero subclusters */
4096 ret = qcow2_subcluster_zeroize(bs, offset, bytes, flags);
4097 qemu_co_mutex_unlock(&s->lock);
4098
4099 return ret;
4100 }
4101
4102 static int coroutine_fn GRAPH_RDLOCK
4103 qcow2_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
4104 {
4105 int ret;
4106 BDRVQcow2State *s = bs->opaque;
4107
4108 /* If the image does not support QCOW_OFLAG_ZERO then discarding
4109 * clusters could expose stale data from the backing file. */
4110 if (s->qcow_version < 3 && bs->backing) {
4111 return -ENOTSUP;
4112 }
4113
4114 if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) {
4115 assert(bytes < s->cluster_size);
4116 /* Ignore partial clusters, except for the special case of the
4117 * complete partial cluster at the end of an unaligned file */
4118 if (!QEMU_IS_ALIGNED(offset, s->cluster_size) ||
4119 offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) {
4120 return -ENOTSUP;
4121 }
4122 }
4123
4124 qemu_co_mutex_lock(&s->lock);
4125 ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST,
4126 false);
4127 qemu_co_mutex_unlock(&s->lock);
4128 return ret;
4129 }
4130
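/*
 * Offloaded-copy source: walk the request fragment by fragment, map each
 * guest range either to the data file, to the backing file or to an
 * efficient zero write on the destination, and forward each piece with
 * bdrv_co_copy_range_from().
 */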
4131 static int coroutine_fn GRAPH_RDLOCK
4132 qcow2_co_copy_range_from(BlockDriverState *bs,
4133 BdrvChild *src, int64_t src_offset,
4134 BdrvChild *dst, int64_t dst_offset,
4135 int64_t bytes, BdrvRequestFlags read_flags,
4136 BdrvRequestFlags write_flags)
4137 {
4138 BDRVQcow2State *s = bs->opaque;
4139 int ret;
4140 unsigned int cur_bytes; /* number of bytes in current iteration */
4141 BdrvChild *child = NULL;
4142 BdrvRequestFlags cur_write_flags;
4143
4144 assert(!bs->encrypted);
4145 qemu_co_mutex_lock(&s->lock);
4146
4147 while (bytes != 0) {
4148 uint64_t copy_offset = 0;
4149 QCow2SubclusterType type;
4150 /* prepare next request */
4151 cur_bytes = MIN(bytes, INT_MAX);
4152 cur_write_flags = write_flags;
4153
4154 ret = qcow2_get_host_offset(bs, src_offset, &cur_bytes,
4155 &copy_offset, &type);
4156 if (ret < 0) {
4157 goto out;
4158 }
4159
4160 switch (type) {
4161 case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
4162 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
4163 if (bs->backing && bs->backing->bs) {
4164 int64_t backing_length = bdrv_co_getlength(bs->backing->bs);
4165 if (src_offset >= backing_length) {
4166 cur_write_flags |= BDRV_REQ_ZERO_WRITE;
4167 } else {
4168 child = bs->backing;
4169 cur_bytes = MIN(cur_bytes, backing_length - src_offset);
4170 copy_offset = src_offset;
4171 }
4172 } else {
4173 cur_write_flags |= BDRV_REQ_ZERO_WRITE;
4174 }
4175 break;
4176
4177 case QCOW2_SUBCLUSTER_ZERO_PLAIN:
4178 case QCOW2_SUBCLUSTER_ZERO_ALLOC:
4179 cur_write_flags |= BDRV_REQ_ZERO_WRITE;
4180 break;
4181
4182 case QCOW2_SUBCLUSTER_COMPRESSED:
4183 ret = -ENOTSUP;
4184 goto out;
4185
4186 case QCOW2_SUBCLUSTER_NORMAL:
4187 child = s->data_file;
4188 break;
4189
4190 default:
4191 abort();
4192 }
4193 qemu_co_mutex_unlock(&s->lock);
4194 ret = bdrv_co_copy_range_from(child,
4195 copy_offset,
4196 dst, dst_offset,
4197 cur_bytes, read_flags, cur_write_flags);
4198 qemu_co_mutex_lock(&s->lock);
4199 if (ret < 0) {
4200 goto out;
4201 }
4202
4203 bytes -= cur_bytes;
4204 src_offset += cur_bytes;
4205 dst_offset += cur_bytes;
4206 }
4207 ret = 0;
4208
4209 out:
4210 qemu_co_mutex_unlock(&s->lock);
4211 return ret;
4212 }
4213
4214 static int coroutine_fn GRAPH_RDLOCK
4215 qcow2_co_copy_range_to(BlockDriverState *bs,
4216 BdrvChild *src, int64_t src_offset,
4217 BdrvChild *dst, int64_t dst_offset,
4218 int64_t bytes, BdrvRequestFlags read_flags,
4219 BdrvRequestFlags write_flags)
4220 {
4221 BDRVQcow2State *s = bs->opaque;
4222 int ret;
4223 unsigned int cur_bytes; /* number of bytes in current iteration */
4224 uint64_t host_offset;
4225 QCowL2Meta *l2meta = NULL;
4226
4227 assert(!bs->encrypted);
4228
4229 qemu_co_mutex_lock(&s->lock);
4230
4231 while (bytes != 0) {
4232
4233 l2meta = NULL;
4234
4235 cur_bytes = MIN(bytes, INT_MAX);
4236
4237 /* TODO:
4238 * If src->bs == dst->bs, we could simply copy by incrementing
4239 * the refcnt, without copying user data.
4240 * Or if src->bs == dst->bs->backing->bs, we could copy by discarding. */
4241 ret = qcow2_alloc_host_offset(bs, dst_offset, &cur_bytes,
4242 &host_offset, &l2meta);
4243 if (ret < 0) {
4244 goto fail;
4245 }
4246
4247 ret = qcow2_pre_write_overlap_check(bs, 0, host_offset, cur_bytes,
4248 true);
4249 if (ret < 0) {
4250 goto fail;
4251 }
4252
4253 qemu_co_mutex_unlock(&s->lock);
4254 ret = bdrv_co_copy_range_to(src, src_offset, s->data_file, host_offset,
4255 cur_bytes, read_flags, write_flags);
4256 qemu_co_mutex_lock(&s->lock);
4257 if (ret < 0) {
4258 goto fail;
4259 }
4260
4261 ret = qcow2_handle_l2meta(bs, &l2meta, true);
4262 if (ret) {
4263 goto fail;
4264 }
4265
4266 bytes -= cur_bytes;
4267 src_offset += cur_bytes;
4268 dst_offset += cur_bytes;
4269 }
4270 ret = 0;
4271
4272 fail:
4273 qcow2_handle_l2meta(bs, &l2meta, false);
4274
4275 qemu_co_mutex_unlock(&s->lock);
4276
4277 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);
4278
4279 return ret;
4280 }
4281
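/*
 * Resize the virtual disk. Shrinking discards the cropped clusters, shrinks
 * the L1 table and refcount structures and may drop the file tail; growing
 * extends the L1 table and, depending on @prealloc, preallocates metadata
 * and/or data clusters before the new header.size is written out.
 */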
4282 static int coroutine_fn GRAPH_RDLOCK
4283 qcow2_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
4284 PreallocMode prealloc, BdrvRequestFlags flags, Error **errp)
4285 {
4286 BDRVQcow2State *s = bs->opaque;
4287 uint64_t old_length;
4288 int64_t new_l1_size;
4289 int ret;
4290 QDict *options;
4291
4292 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_METADATA &&
4293 prealloc != PREALLOC_MODE_FALLOC && prealloc != PREALLOC_MODE_FULL)
4294 {
4295 error_setg(errp, "Unsupported preallocation mode '%s'",
4296 PreallocMode_str(prealloc));
4297 return -ENOTSUP;
4298 }
4299
4300 if (!QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)) {
4301 error_setg(errp, "The new size must be a multiple of %u",
4302 (unsigned) BDRV_SECTOR_SIZE);
4303 return -EINVAL;
4304 }
4305
4306 qemu_co_mutex_lock(&s->lock);
4307
4308 /*
4309 * Even though we store snapshot size for all images, it was not
4310 * required until v3, so it is not safe to proceed for v2.
4311 */
4312 if (s->nb_snapshots && s->qcow_version < 3) {
4313 error_setg(errp, "Can't resize a v2 image which has snapshots");
4314 ret = -ENOTSUP;
4315 goto fail;
4316 }
4317
4318 /* See qcow2-bitmap.c for which bitmap scenarios prevent a resize. */
4319 if (qcow2_truncate_bitmaps_check(bs, errp)) {
4320 ret = -ENOTSUP;
4321 goto fail;
4322 }
4323
4324 old_length = bs->total_sectors * BDRV_SECTOR_SIZE;
4325 new_l1_size = size_to_l1(s, offset);
4326
4327 if (offset < old_length) {
4328 int64_t last_cluster, old_file_size;
4329 if (prealloc != PREALLOC_MODE_OFF) {
4330 error_setg(errp,
4331 "Preallocation can't be used for shrinking an image");
4332 ret = -EINVAL;
4333 goto fail;
4334 }
4335
4336 ret = qcow2_cluster_discard(bs, ROUND_UP(offset, s->cluster_size),
4337 old_length - ROUND_UP(offset,
4338 s->cluster_size),
4339 QCOW2_DISCARD_ALWAYS, true);
4340 if (ret < 0) {
4341 error_setg_errno(errp, -ret, "Failed to discard cropped clusters");
4342 goto fail;
4343 }
4344
4345 ret = qcow2_shrink_l1_table(bs, new_l1_size);
4346 if (ret < 0) {
4347 error_setg_errno(errp, -ret,
4348 "Failed to reduce the number of L2 tables");
4349 goto fail;
4350 }
4351
4352 ret = qcow2_shrink_reftable(bs);
4353 if (ret < 0) {
4354 error_setg_errno(errp, -ret,
4355 "Failed to discard unused refblocks");
4356 goto fail;
4357 }
4358
4359 old_file_size = bdrv_co_getlength(bs->file->bs);
4360 if (old_file_size < 0) {
4361 error_setg_errno(errp, -old_file_size,
4362 "Failed to inquire current file length");
4363 ret = old_file_size;
4364 goto fail;
4365 }
4366 last_cluster = qcow2_get_last_cluster(bs, old_file_size);
4367 if (last_cluster < 0) {
4368 error_setg_errno(errp, -last_cluster,
4369 "Failed to find the last cluster");
4370 ret = last_cluster;
4371 goto fail;
4372 }
4373 if ((last_cluster + 1) * s->cluster_size < old_file_size) {
4374 Error *local_err = NULL;
4375
4376 /*
4377 * Do not pass @exact here: It will not help the user if
4378 * we get an error here just because they wanted to shrink
4379 * their qcow2 image (on a block device) with qemu-img.
4380 * (And on the qcow2 layer, the @exact requirement is
4381 * always fulfilled, so there is no need to pass it on.)
4382 */
4383 bdrv_co_truncate(bs->file, (last_cluster + 1) * s->cluster_size,
4384 false, PREALLOC_MODE_OFF, 0, &local_err);
4385 if (local_err) {
4386 warn_reportf_err(local_err,
4387 "Failed to truncate the tail of the image: ");
4388 }
4389 }
4390 } else {
4391 ret = qcow2_grow_l1_table(bs, new_l1_size, true);
4392 if (ret < 0) {
4393 error_setg_errno(errp, -ret, "Failed to grow the L1 table");
4394 goto fail;
4395 }
4396
4397 if (data_file_is_raw(bs) && prealloc == PREALLOC_MODE_OFF) {
4398 /*
4399 * When creating a qcow2 image with data-file-raw, we enforce
4400 * at least prealloc=metadata, so that the L1/L2 tables are
4401 * fully allocated and reading from the data file will return
4402 * the same data as reading from the qcow2 image. When the
4403 * image is grown, we must consequently preallocate the
4404 * metadata structures to cover the added area.
4405 */
4406 prealloc = PREALLOC_MODE_METADATA;
4407 }
4408 }
4409
4410 switch (prealloc) {
4411 case PREALLOC_MODE_OFF:
4412 if (has_data_file(bs)) {
4413 /*
4414 * If the caller wants an exact resize, the external data
4415 * file should be resized to the exact target size, too,
4416 * so we pass @exact here.
4417 */
4418 ret = bdrv_co_truncate(s->data_file, offset, exact, prealloc, 0,
4419 errp);
4420 if (ret < 0) {
4421 goto fail;
4422 }
4423 }
4424 break;
4425
4426 case PREALLOC_MODE_METADATA:
4427 ret = preallocate_co(bs, old_length, offset, prealloc, errp);
4428 if (ret < 0) {
4429 goto fail;
4430 }
4431 break;
4432
4433 case PREALLOC_MODE_FALLOC:
4434 case PREALLOC_MODE_FULL:
4435 {
4436 int64_t allocation_start, host_offset, guest_offset;
4437 int64_t clusters_allocated;
4438 int64_t old_file_size, last_cluster, new_file_size;
4439 uint64_t nb_new_data_clusters, nb_new_l2_tables;
4440 bool subclusters_need_allocation = false;
4441
4442 /* With a data file, preallocation means just allocating the metadata
4443 * and forwarding the truncate request to the data file */
4444 if (has_data_file(bs)) {
4445 ret = preallocate_co(bs, old_length, offset, prealloc, errp);
4446 if (ret < 0) {
4447 goto fail;
4448 }
4449 break;
4450 }
4451
4452 old_file_size = bdrv_co_getlength(bs->file->bs);
4453 if (old_file_size < 0) {
4454 error_setg_errno(errp, -old_file_size,
4455 "Failed to inquire current file length");
4456 ret = old_file_size;
4457 goto fail;
4458 }
4459
4460 last_cluster = qcow2_get_last_cluster(bs, old_file_size);
4461 if (last_cluster >= 0) {
4462 old_file_size = (last_cluster + 1) * s->cluster_size;
4463 } else {
4464 old_file_size = ROUND_UP(old_file_size, s->cluster_size);
4465 }
4466
4467 nb_new_data_clusters = (ROUND_UP(offset, s->cluster_size) -
4468 start_of_cluster(s, old_length)) >> s->cluster_bits;
4469
4470 /* This is an overestimation; we will not actually allocate space for
4471 * these in the file but just make sure the new refcount structures are
4472 * able to cover them so we will not have to allocate new refblocks
4473 * while entering the data blocks in the potentially new L2 tables.
4474 * (We do not actually care where the L2 tables are placed. Maybe they
4475 * are already allocated or they can be placed somewhere before
4476 * @old_file_size. It does not matter because they will be fully
4477 * allocated automatically, so they do not need to be covered by the
4478 * preallocation. All that matters is that we will not have to allocate
4479 * new refcount structures for them.) */
4480 nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters,
4481 s->cluster_size / l2_entry_size(s));
4482 /* The cluster range may not be aligned to L2 boundaries, so add one L2
4483 * table for a potential head/tail */
4484 nb_new_l2_tables++;
4485
4486 allocation_start = qcow2_refcount_area(bs, old_file_size,
4487 nb_new_data_clusters +
4488 nb_new_l2_tables,
4489 true, 0, 0);
4490 if (allocation_start < 0) {
4491 error_setg_errno(errp, -allocation_start,
4492 "Failed to resize refcount structures");
4493 ret = allocation_start;
4494 goto fail;
4495 }
4496
4497 clusters_allocated = qcow2_alloc_clusters_at(bs, allocation_start,
4498 nb_new_data_clusters);
4499 if (clusters_allocated < 0) {
4500 error_setg_errno(errp, -clusters_allocated,
4501 "Failed to allocate data clusters");
4502 ret = clusters_allocated;
4503 goto fail;
4504 }
4505
4506 assert(clusters_allocated == nb_new_data_clusters);
4507
4508 /* Allocate the data area */
4509 new_file_size = allocation_start +
4510 nb_new_data_clusters * s->cluster_size;
4511 /*
4512 * Image file grows, so @exact does not matter.
4513 *
4514 * If we need to zero out the new area, try first whether the protocol
4515 * driver can already take care of this.
4516 */
4517 if (flags & BDRV_REQ_ZERO_WRITE) {
4518 ret = bdrv_co_truncate(bs->file, new_file_size, false, prealloc,
4519 BDRV_REQ_ZERO_WRITE, NULL);
4520 if (ret >= 0) {
4521 flags &= ~BDRV_REQ_ZERO_WRITE;
4522 /* Ensure that we read zeroes and not backing file data */
4523 subclusters_need_allocation = true;
4524 }
4525 } else {
4526 ret = -1;
4527 }
4528 if (ret < 0) {
4529 ret = bdrv_co_truncate(bs->file, new_file_size, false, prealloc, 0,
4530 errp);
4531 }
4532 if (ret < 0) {
4533 error_prepend(errp, "Failed to resize underlying file: ");
4534 qcow2_free_clusters(bs, allocation_start,
4535 nb_new_data_clusters * s->cluster_size,
4536 QCOW2_DISCARD_OTHER);
4537 goto fail;
4538 }
4539
4540 /* Create the necessary L2 entries */
4541 host_offset = allocation_start;
4542 guest_offset = old_length;
4543 while (nb_new_data_clusters) {
4544 int64_t nb_clusters = MIN(
4545 nb_new_data_clusters,
4546 s->l2_slice_size - offset_to_l2_slice_index(s, guest_offset));
4547 unsigned cow_start_length = offset_into_cluster(s, guest_offset);
4548 QCowL2Meta allocation;
4549 guest_offset = start_of_cluster(s, guest_offset);
4550 allocation = (QCowL2Meta) {
4551 .offset = guest_offset,
4552 .alloc_offset = host_offset,
4553 .nb_clusters = nb_clusters,
4554 .cow_start = {
4555 .offset = 0,
4556 .nb_bytes = cow_start_length,
4557 },
4558 .cow_end = {
4559 .offset = nb_clusters << s->cluster_bits,
4560 .nb_bytes = 0,
4561 },
4562 .prealloc = !subclusters_need_allocation,
4563 };
4564 qemu_co_queue_init(&allocation.dependent_requests);
4565
4566 ret = qcow2_alloc_cluster_link_l2(bs, &allocation);
4567 if (ret < 0) {
4568 error_setg_errno(errp, -ret, "Failed to update L2 tables");
4569 qcow2_free_clusters(bs, host_offset,
4570 nb_new_data_clusters * s->cluster_size,
4571 QCOW2_DISCARD_OTHER);
4572 goto fail;
4573 }
4574
4575 guest_offset += nb_clusters * s->cluster_size;
4576 host_offset += nb_clusters * s->cluster_size;
4577 nb_new_data_clusters -= nb_clusters;
4578 }
4579 break;
4580 }
4581
4582 default:
4583 g_assert_not_reached();
4584 }
4585
4586 if ((flags & BDRV_REQ_ZERO_WRITE) && offset > old_length) {
4587 uint64_t zero_start = QEMU_ALIGN_UP(old_length, s->subcluster_size);
4588
4589 /*
4590 * Use zero clusters as much as we can. qcow2_subcluster_zeroize()
4591 * requires a subcluster-aligned start. The end may be unaligned if
4592 * it is at the end of the image (which it is here).
4593 */
4594 if (offset > zero_start) {
4595 ret = qcow2_subcluster_zeroize(bs, zero_start, offset - zero_start,
4596 0);
4597 if (ret < 0) {
4598 error_setg_errno(errp, -ret, "Failed to zero out new clusters");
4599 goto fail;
4600 }
4601 }
4602
4603 /* Write explicit zeros for the unaligned head */
4604 if (zero_start > old_length) {
4605 uint64_t len = MIN(zero_start, offset) - old_length;
4606 uint8_t *buf = qemu_blockalign0(bs, len);
4607 QEMUIOVector qiov;
4608 qemu_iovec_init_buf(&qiov, buf, len);
4609
4610 qemu_co_mutex_unlock(&s->lock);
4611 ret = qcow2_co_pwritev_part(bs, old_length, len, &qiov, 0, 0);
4612 qemu_co_mutex_lock(&s->lock);
4613
4614 qemu_vfree(buf);
4615 if (ret < 0) {
4616 error_setg_errno(errp, -ret, "Failed to zero out the new area");
4617 goto fail;
4618 }
4619 }
4620 }
4621
4622 if (prealloc != PREALLOC_MODE_OFF) {
4623 /* Flush metadata before actually changing the image size */
4624 ret = qcow2_write_caches(bs);
4625 if (ret < 0) {
4626 error_setg_errno(errp, -ret,
4627 "Failed to flush the preallocated area to disk");
4628 goto fail;
4629 }
4630 }
4631
4632 bs->total_sectors = offset / BDRV_SECTOR_SIZE;
4633
4634 /* write updated header.size */
4635 offset = cpu_to_be64(offset);
4636 ret = bdrv_co_pwrite_sync(bs->file, offsetof(QCowHeader, size),
4637 sizeof(offset), &offset, 0);
4638 if (ret < 0) {
4639 error_setg_errno(errp, -ret, "Failed to update the image size");
4640 goto fail;
4641 }
4642
4643 s->l1_vm_state_index = new_l1_size;
4644
4645 /* Update cache sizes */
4646 options = qdict_clone_shallow(bs->options);
4647 ret = qcow2_update_options(bs, options, s->flags, errp);
4648 qobject_unref(options);
4649 if (ret < 0) {
4650 goto fail;
4651 }
4652 ret = 0;
4653 fail:
4654 qemu_co_mutex_unlock(&s->lock);
4655 return ret;
4656 }
4657
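/*
 * Compress and write a single cluster (or the zero-padded final cluster of
 * an unaligned image). If the cluster cannot be compressed into less than a
 * cluster (qcow2_co_compress() returns -ENOMEM), fall back to a regular
 * uncompressed write of the same range.
 */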
4658 static int coroutine_fn GRAPH_RDLOCK
4659 qcow2_co_pwritev_compressed_task(BlockDriverState *bs,
4660 uint64_t offset, uint64_t bytes,
4661 QEMUIOVector *qiov, size_t qiov_offset)
4662 {
4663 BDRVQcow2State *s = bs->opaque;
4664 int ret;
4665 ssize_t out_len;
4666 uint8_t *buf, *out_buf;
4667 uint64_t cluster_offset;
4668
4669 assert(bytes == s->cluster_size || (bytes < s->cluster_size &&
4670 (offset + bytes == bs->total_sectors << BDRV_SECTOR_BITS)));
4671
4672 buf = qemu_blockalign(bs, s->cluster_size);
4673 if (bytes < s->cluster_size) {
4674 /* Zero-pad last write if image size is not cluster aligned */
4675 memset(buf + bytes, 0, s->cluster_size - bytes);
4676 }
4677 qemu_iovec_to_buf(qiov, qiov_offset, buf, bytes);
4678
4679 out_buf = g_malloc(s->cluster_size);
4680
4681 out_len = qcow2_co_compress(bs, out_buf, s->cluster_size - 1,
4682 buf, s->cluster_size);
4683 if (out_len == -ENOMEM) {
4684 /* could not compress: write normal cluster */
4685 ret = qcow2_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset, 0);
4686 if (ret < 0) {
4687 goto fail;
4688 }
4689 goto success;
4690 } else if (out_len < 0) {
4691 ret = -EINVAL;
4692 goto fail;
4693 }
4694
4695 qemu_co_mutex_lock(&s->lock);
4696 ret = qcow2_alloc_compressed_cluster_offset(bs, offset, out_len,
4697 &cluster_offset);
4698 if (ret < 0) {
4699 qemu_co_mutex_unlock(&s->lock);
4700 goto fail;
4701 }
4702
4703 ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len, true);
4704 qemu_co_mutex_unlock(&s->lock);
4705 if (ret < 0) {
4706 goto fail;
4707 }
4708
4709 BLKDBG_CO_EVENT(s->data_file, BLKDBG_WRITE_COMPRESSED);
4710 ret = bdrv_co_pwrite(s->data_file, cluster_offset, out_len, out_buf, 0);
4711 if (ret < 0) {
4712 goto fail;
4713 }
4714 success:
4715 ret = 0;
4716 fail:
4717 qemu_vfree(buf);
4718 g_free(out_buf);
4719 return ret;
4720 }
4721
4722 /*
4723 * This function can count as GRAPH_RDLOCK because
4724 * qcow2_co_pwritev_compressed_part() holds the graph lock and keeps it until
4725 * this coroutine has terminated.
4726 */
4727 static int coroutine_fn GRAPH_RDLOCK
4728 qcow2_co_pwritev_compressed_task_entry(AioTask *task)
4729 {
4730 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task);
4731
4732 assert(!t->subcluster_type && !t->l2meta);
4733
4734 return qcow2_co_pwritev_compressed_task(t->bs, t->offset, t->bytes, t->qiov,
4735 t->qiov_offset);
4736 }
4737
4738 /*
4739 * XXX: put compressed sectors first, then all the cluster aligned
4740 * tables to avoid losing bytes in alignment
4741 */
4742 static int coroutine_fn GRAPH_RDLOCK
4743 qcow2_co_pwritev_compressed_part(BlockDriverState *bs,
4744 int64_t offset, int64_t bytes,
4745 QEMUIOVector *qiov, size_t qiov_offset)
4746 {
4747 BDRVQcow2State *s = bs->opaque;
4748 AioTaskPool *aio = NULL;
4749 int ret = 0;
4750
4751 if (has_data_file(bs)) {
4752 return -ENOTSUP;
4753 }
4754
4755 if (bytes == 0) {
4756 /*
4757 * align end of file to a sector boundary to ease reading with
4758 * sector based I/Os
4759 */
4760 int64_t len = bdrv_co_getlength(bs->file->bs);
4761 if (len < 0) {
4762 return len;
4763 }
4764 return bdrv_co_truncate(bs->file, len, false, PREALLOC_MODE_OFF, 0,
4765 NULL);
4766 }
4767
4768 if (offset_into_cluster(s, offset)) {
4769 return -EINVAL;
4770 }
4771
4772 if (offset_into_cluster(s, bytes) &&
4773 (offset + bytes) != (bs->total_sectors << BDRV_SECTOR_BITS)) {
4774 return -EINVAL;
4775 }
4776
4777 while (bytes && aio_task_pool_status(aio) == 0) {
4778 uint64_t chunk_size = MIN(bytes, s->cluster_size);
4779
4780 if (!aio && chunk_size != bytes) {
4781 aio = aio_task_pool_new(QCOW2_MAX_WORKERS);
4782 }
4783
4784 ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_compressed_task_entry,
4785 0, 0, offset, chunk_size, qiov, qiov_offset, NULL);
4786 if (ret < 0) {
4787 break;
4788 }
4789 qiov_offset += chunk_size;
4790 offset += chunk_size;
4791 bytes -= chunk_size;
4792 }
4793
4794 if (aio) {
4795 aio_task_pool_wait_all(aio);
4796 if (ret == 0) {
4797 ret = aio_task_pool_status(aio);
4798 }
4799 g_free(aio);
4800 }
4801
4802 return ret;
4803 }
4804
4805 static int coroutine_fn GRAPH_RDLOCK
4806 qcow2_co_preadv_compressed(BlockDriverState *bs,
4807 uint64_t l2_entry,
4808 uint64_t offset,
4809 uint64_t bytes,
4810 QEMUIOVector *qiov,
4811 size_t qiov_offset)
4812 {
4813 BDRVQcow2State *s = bs->opaque;
4814 int ret = 0, csize;
4815 uint64_t coffset;
4816 uint8_t *buf, *out_buf;
4817 int offset_in_cluster = offset_into_cluster(s, offset);
4818
4819 qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize);
4820
4821 buf = g_try_malloc(csize);
4822 if (!buf) {
4823 return -ENOMEM;
4824 }
4825
4826 out_buf = qemu_blockalign(bs, s->cluster_size);
4827
4828 BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
4829 ret = bdrv_co_pread(bs->file, coffset, csize, buf, 0);
4830 if (ret < 0) {
4831 goto fail;
4832 }
4833
4834 if (qcow2_co_decompress(bs, out_buf, s->cluster_size, buf, csize) < 0) {
4835 ret = -EIO;
4836 goto fail;
4837 }
4838
4839 qemu_iovec_from_buf(qiov, qiov_offset, out_buf + offset_in_cluster, bytes);
4840
4841 fail:
4842 qemu_vfree(out_buf);
4843 g_free(buf);
4844
4845 return ret;
4846 }
4847
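/*
 * Fast path for qcow2_make_empty(): drop all data by rewriting the image as
 * a minimal layout of header (cluster 0), refcount table (cluster 1), one
 * refcount block (cluster 2) and an empty L1 table starting at cluster 3,
 * then truncate the file accordingly. The image is marked dirty while the
 * refcount information is inconsistent and marked clean again at the end.
 */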
4848 static int GRAPH_RDLOCK make_completely_empty(BlockDriverState *bs)
4849 {
4850 BDRVQcow2State *s = bs->opaque;
4851 Error *local_err = NULL;
4852 int ret, l1_clusters;
4853 int64_t offset;
4854 uint64_t *new_reftable = NULL;
4855 uint64_t rt_entry, l1_size2;
4856 struct {
4857 uint64_t l1_offset;
4858 uint64_t reftable_offset;
4859 uint32_t reftable_clusters;
4860 } QEMU_PACKED l1_ofs_rt_ofs_cls;
4861
4862 ret = qcow2_cache_empty(bs, s->l2_table_cache);
4863 if (ret < 0) {
4864 goto fail;
4865 }
4866
4867 ret = qcow2_cache_empty(bs, s->refcount_block_cache);
4868 if (ret < 0) {
4869 goto fail;
4870 }
4871
4872 /* Refcounts will be broken utterly */
4873 ret = qcow2_mark_dirty(bs);
4874 if (ret < 0) {
4875 goto fail;
4876 }
4877
4878 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
4879
4880 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / L1E_SIZE);
4881 l1_size2 = (uint64_t)s->l1_size * L1E_SIZE;
4882
4883 /* After this call, neither the in-memory nor the on-disk refcount
4884 * information accurately describes the actual references */
4885
4886 ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset,
4887 l1_clusters * s->cluster_size, 0);
4888 if (ret < 0) {
4889 goto fail_broken_refcounts;
4890 }
4891 memset(s->l1_table, 0, l1_size2);
4892
4893 BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE);
4894
4895 /* Overwrite enough clusters at the beginning of the image to place
4896 * the refcount table, a refcount block and the L1 table in; this may
4897 * overwrite parts of the existing refcount and L1 table, which is not
4898 * an issue because the dirty flag is set, complete data loss is in fact
4899 * desired and partial data loss is consequently fine as well */
4900 ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size,
4901 (2 + l1_clusters) * s->cluster_size, 0);
4902 /* This call (even if it failed overall) may have overwritten on-disk
4903 * refcount structures; in that case, the in-memory refcount information
4904 * will probably differ from the on-disk information which makes the BDS
4905 * unusable */
4906 if (ret < 0) {
4907 goto fail_broken_refcounts;
4908 }
4909
4910 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
4911 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE);
4912
4913 /* "Create" an empty reftable (one cluster) directly after the image
4914 * header and an empty L1 table three clusters after the image header;
4915 * the cluster between those two will be used as the first refblock */
4916 l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size);
4917 l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size);
4918 l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1);
4919 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset),
4920 sizeof(l1_ofs_rt_ofs_cls), &l1_ofs_rt_ofs_cls, 0);
4921 if (ret < 0) {
4922 goto fail_broken_refcounts;
4923 }
4924
4925 s->l1_table_offset = 3 * s->cluster_size;
4926
4927 new_reftable = g_try_new0(uint64_t, s->cluster_size / REFTABLE_ENTRY_SIZE);
4928 if (!new_reftable) {
4929 ret = -ENOMEM;
4930 goto fail_broken_refcounts;
4931 }
4932
4933 s->refcount_table_offset = s->cluster_size;
4934 s->refcount_table_size = s->cluster_size / REFTABLE_ENTRY_SIZE;
4935 s->max_refcount_table_index = 0;
4936
4937 g_free(s->refcount_table);
4938 s->refcount_table = new_reftable;
4939 new_reftable = NULL;
4940
4941 /* Now the in-memory refcount information again corresponds to the on-disk
4942 * information (reftable is empty and no refblocks (the refblock cache is
4943 * empty)); however, this means some clusters (e.g. the image header) are
4944 * referenced but not refcounted, whereas the normal qcow2 code assumes that
4945 * the in-memory information is always correct */
4946
4947 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);
4948
4949 /* Enter the first refblock into the reftable */
4950 rt_entry = cpu_to_be64(2 * s->cluster_size);
4951 ret = bdrv_pwrite_sync(bs->file, s->cluster_size, sizeof(rt_entry),
4952 &rt_entry, 0);
4953 if (ret < 0) {
4954 goto fail_broken_refcounts;
4955 }
4956 s->refcount_table[0] = 2 * s->cluster_size;
4957
4958 s->free_cluster_index = 0;
4959 assert(3 + l1_clusters <= s->refcount_block_size);
4960 offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2);
4961 if (offset < 0) {
4962 ret = offset;
4963 goto fail_broken_refcounts;
4964 } else if (offset > 0) {
4965 error_report("First cluster in emptied image is in use");
4966 abort();
4967 }
4968
4969 /* Now finally the in-memory information corresponds to the on-disk
4970 * structures and is correct */
4971 ret = qcow2_mark_clean(bs);
4972 if (ret < 0) {
4973 goto fail;
4974 }
4975
4976 ret = bdrv_truncate(bs->file, (3 + l1_clusters) * s->cluster_size, false,
4977 PREALLOC_MODE_OFF, 0, &local_err);
4978 if (ret < 0) {
4979 error_report_err(local_err);
4980 goto fail;
4981 }
4982
4983 return 0;
4984
4985 fail_broken_refcounts:
4986 /* The BDS is unusable at this point. If we wanted to make it usable, we
4987 * would have to call qcow2_refcount_close(), qcow2_refcount_init(),
4988 * qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init()
4989 * again. However, because the functions which could have caused this error
4990 * path to be taken are used by those functions as well, it's very likely
4991 * that that sequence will fail as well. Therefore, just eject the BDS. */
4992 bs->drv = NULL;
4993
4994 fail:
4995 g_free(new_reftable);
4996 return ret;
4997 }
4998
4999 static int GRAPH_RDLOCK qcow2_make_empty(BlockDriverState *bs)
5000 {
5001 BDRVQcow2State *s = bs->opaque;
5002 uint64_t offset, end_offset;
5003 int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size);
5004 int l1_clusters, ret = 0;
5005
5006 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / L1E_SIZE);
5007
5008 if (s->qcow_version >= 3 && !s->snapshots && !s->nb_bitmaps &&
5009 3 + l1_clusters <= s->refcount_block_size &&
5010 s->crypt_method_header != QCOW_CRYPT_LUKS &&
5011 !has_data_file(bs)) {
5012 /* The following function only works for qcow2 v3 images (it
5013 * requires the dirty flag) and only as long as there are no
5014 * features that reserve extra clusters (such as snapshots,
5015 * LUKS header, or persistent bitmaps), because it completely
5016 * empties the image. Furthermore, the L1 table and three
5017 * additional clusters (image header, refcount table, one
5018 * refcount block) have to fit inside one refcount block. It
5019 * only resets the image file, i.e. does not work with an
5020 * external data file. */
5021 return make_completely_empty(bs);
5022 }
5023
5024 /* This fallback code simply discards every active cluster; this is slow,
5025 * but works in all cases */
5026 end_offset = bs->total_sectors * BDRV_SECTOR_SIZE;
5027 for (offset = 0; offset < end_offset; offset += step) {
5028 /* As this function is generally used after committing an external
5029 * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the
5030 * default action for this kind of discard is to pass the discard,
5031 * which will ideally result in an actually smaller image file, as
5032 * is probably desired. */
5033 ret = qcow2_cluster_discard(bs, offset, MIN(step, end_offset - offset),
5034 QCOW2_DISCARD_SNAPSHOT, true);
5035 if (ret < 0) {
5036 break;
5037 }
5038 }
5039
5040 return ret;
5041 }
5042
5043 static coroutine_fn GRAPH_RDLOCK int qcow2_co_flush_to_os(BlockDriverState *bs)
5044 {
5045 BDRVQcow2State *s = bs->opaque;
5046 int ret;
5047
5048 qemu_co_mutex_lock(&s->lock);
5049 ret = qcow2_write_caches(bs);
5050 qemu_co_mutex_unlock(&s->lock);
5051
5052 return ret;
5053 }
5054
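/*
 * Estimate the host file sizes for an image created with @opts: "required"
 * assumes that only the data allocated in @in_bs (if given) is written,
 * "fully-allocated" assumes that every cluster ends up written, plus the
 * space for a LUKS header if encryption is requested.
 */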
5055 static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs,
5056 Error **errp)
5057 {
5058 Error *local_err = NULL;
5059 BlockMeasureInfo *info;
5060 uint64_t required = 0; /* bytes that contribute to required size */
5061 uint64_t virtual_size; /* disk size as seen by guest */
5062 uint64_t refcount_bits;
5063 uint64_t l2_tables;
5064 uint64_t luks_payload_size = 0;
5065 size_t cluster_size;
5066 int version;
5067 char *optstr;
5068 PreallocMode prealloc;
5069 bool has_backing_file;
5070 bool has_luks;
5071 bool extended_l2;
5072 size_t l2e_size;
5073
5074 /* Parse image creation options */
5075 extended_l2 = qemu_opt_get_bool_del(opts, BLOCK_OPT_EXTL2, false);
5076
5077 cluster_size = qcow2_opt_get_cluster_size_del(opts, extended_l2,
5078 &local_err);
5079 if (local_err) {
5080 goto err;
5081 }
5082
5083 version = qcow2_opt_get_version_del(opts, &local_err);
5084 if (local_err) {
5085 goto err;
5086 }
5087
5088 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err);
5089 if (local_err) {
5090 goto err;
5091 }
5092
5093 optstr = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
5094 prealloc = qapi_enum_parse(&PreallocMode_lookup, optstr,
5095 PREALLOC_MODE_OFF, &local_err);
5096 g_free(optstr);
5097 if (local_err) {
5098 goto err;
5099 }
5100
5101 optstr = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
5102 has_backing_file = !!optstr;
5103 g_free(optstr);
5104
5105 optstr = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT);
5106 has_luks = optstr && strcmp(optstr, "luks") == 0;
5107 g_free(optstr);
5108
5109 if (has_luks) {
5110 g_autoptr(QCryptoBlockCreateOptions) create_opts = NULL;
5111 QDict *cryptoopts = qcow2_extract_crypto_opts(opts, "luks", errp);
5112 size_t headerlen;
5113
5114 create_opts = block_crypto_create_opts_init(cryptoopts, errp);
5115 qobject_unref(cryptoopts);
5116 if (!create_opts) {
5117 goto err;
5118 }
5119
5120 if (!qcrypto_block_calculate_payload_offset(create_opts,
5121 "encrypt.",
5122 &headerlen,
5123 &local_err)) {
5124 goto err;
5125 }
5126
5127 luks_payload_size = ROUND_UP(headerlen, cluster_size);
5128 }
5129
5130 virtual_size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0);
5131 virtual_size = ROUND_UP(virtual_size, cluster_size);
5132
5133 /* Check that virtual disk size is valid */
5134 l2e_size = extended_l2 ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL;
5135 l2_tables = DIV_ROUND_UP(virtual_size / cluster_size,
5136 cluster_size / l2e_size);
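/*
 * For example, with the default 64 KiB clusters and normal (8-byte) L2
 * entries, one L2 table holds 8192 entries and thus maps 512 MiB of guest
 * data, so l2_tables grows by one for every 512 MiB of virtual_size.
 */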
5137 if (l2_tables * L1E_SIZE > QCOW_MAX_L1_SIZE) {
5138 error_setg(&local_err, "The image size is too large "
5139 "(try using a larger cluster size)");
5140 goto err;
5141 }
5142
5143 /* Account for input image */
5144 if (in_bs) {
5145 int64_t ssize = bdrv_getlength(in_bs);
5146 if (ssize < 0) {
5147 error_setg_errno(&local_err, -ssize,
5148 "Unable to get image virtual_size");
5149 goto err;
5150 }
5151
5152 virtual_size = ROUND_UP(ssize, cluster_size);
5153
5154 if (has_backing_file) {
5155 /* We don't know how much of the backing chain is shared by the input
5156 * image and the new image file. In the worst case the new image's
5157 * backing file has nothing in common with the input image. Be
5158 * conservative and assume all clusters need to be written.
5159 */
5160 required = virtual_size;
5161 } else {
5162 int64_t offset;
5163 int64_t pnum = 0;
5164
5165 for (offset = 0; offset < ssize; offset += pnum) {
5166 int ret;
5167
5168 ret = bdrv_block_status_above(in_bs, NULL, offset,
5169 ssize - offset, &pnum, NULL,
5170 NULL);
5171 if (ret < 0) {
5172 error_setg_errno(&local_err, -ret,
5173 "Unable to get block status");
5174 goto err;
5175 }
5176
5177 if (ret & BDRV_BLOCK_ZERO) {
5178 /* Skip zero regions (safe with no backing file) */
5179 } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) ==
5180 (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) {
5181 /* Extend pnum to end of cluster for next iteration */
5182 pnum = ROUND_UP(offset + pnum, cluster_size) - offset;
5183
5184 /* Count clusters we've seen */
5185 required += offset % cluster_size + pnum;
5186 }
5187 }
5188 }
5189 }
5190
5191 /* Take into account preallocation. Nothing special is needed for
5192 * PREALLOC_MODE_METADATA since metadata is always counted.
5193 */
5194 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) {
5195 required = virtual_size;
5196 }
5197
5198 info = g_new0(BlockMeasureInfo, 1);
5199 info->fully_allocated = luks_payload_size +
5200 qcow2_calc_prealloc_size(virtual_size, cluster_size,
5201 ctz32(refcount_bits), extended_l2);
5202
5203 /*
5204 * Remove data clusters that are not required. This overestimates the
5205 * required size because metadata needed for the fully allocated file is
5206 * still counted. Show bitmaps only if both source and destination
5207 * would support them.
5208 */
5209 info->required = info->fully_allocated - virtual_size + required;
5210 info->has_bitmaps = version >= 3 && in_bs &&
5211 bdrv_supports_persistent_dirty_bitmap(in_bs);
5212 if (info->has_bitmaps) {
5213 info->bitmaps = qcow2_get_persistent_dirty_bitmap_size(in_bs,
5214 cluster_size);
5215 }
5216 return info;
5217
5218 err:
5219 error_propagate(errp, local_err);
5220 return NULL;
5221 }
5222
5223 static int coroutine_fn
5224 qcow2_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
5225 {
5226 BDRVQcow2State *s = bs->opaque;
5227 bdi->cluster_size = s->cluster_size;
5228 bdi->subcluster_size = s->subcluster_size;
5229 bdi->vm_state_offset = qcow2_vm_state_offset(s);
5230 bdi->is_dirty = s->incompatible_features & QCOW2_INCOMPAT_DIRTY;
5231 return 0;
5232 }
5233
5234 static ImageInfoSpecific * GRAPH_RDLOCK
5235 qcow2_get_specific_info(BlockDriverState *bs, Error **errp)
5236 {
5237 BDRVQcow2State *s = bs->opaque;
5238 ImageInfoSpecific *spec_info;
5239 QCryptoBlockInfo *encrypt_info = NULL;
5240
5241 if (s->crypto != NULL) {
5242 encrypt_info = qcrypto_block_get_info(s->crypto, errp);
5243 if (!encrypt_info) {
5244 return NULL;
5245 }
5246 }
5247
5248 spec_info = g_new(ImageInfoSpecific, 1);
5249 *spec_info = (ImageInfoSpecific){
5250 .type = IMAGE_INFO_SPECIFIC_KIND_QCOW2,
5251 .u.qcow2.data = g_new0(ImageInfoSpecificQCow2, 1),
5252 };
5253 if (s->qcow_version == 2) {
5254 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){
5255 .compat = g_strdup("0.10"),
5256 .refcount_bits = s->refcount_bits,
5257 };
5258 } else if (s->qcow_version == 3) {
5259 Qcow2BitmapInfoList *bitmaps;
5260 if (!qcow2_get_bitmap_info_list(bs, &bitmaps, errp)) {
5261 qapi_free_ImageInfoSpecific(spec_info);
5262 qapi_free_QCryptoBlockInfo(encrypt_info);
5263 return NULL;
5264 }
5265 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){
5266 .compat = g_strdup("1.1"),
5267 .lazy_refcounts = s->compatible_features &
5268 QCOW2_COMPAT_LAZY_REFCOUNTS,
5269 .has_lazy_refcounts = true,
5270 .corrupt = s->incompatible_features &
5271 QCOW2_INCOMPAT_CORRUPT,
5272 .has_corrupt = true,
5273 .has_extended_l2 = true,
5274 .extended_l2 = has_subclusters(s),
5275 .refcount_bits = s->refcount_bits,
5276 .has_bitmaps = !!bitmaps,
5277 .bitmaps = bitmaps,
5278 .data_file = g_strdup(s->image_data_file),
5279 .has_data_file_raw = has_data_file(bs),
5280 .data_file_raw = data_file_is_raw(bs),
5281 .compression_type = s->compression_type,
5282 };
5283 } else {
5284 /* if this assertion fails, this probably means a new version was
5285 * added without having it covered here */
5286 assert(false);
5287 }
5288
5289 if (encrypt_info) {
5290 ImageInfoSpecificQCow2Encryption *qencrypt =
5291 g_new(ImageInfoSpecificQCow2Encryption, 1);
5292 switch (encrypt_info->format) {
5293 case Q_CRYPTO_BLOCK_FORMAT_QCOW:
5294 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES;
5295 break;
5296 case Q_CRYPTO_BLOCK_FORMAT_LUKS:
5297 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS;
5298 qencrypt->u.luks = encrypt_info->u.luks;
5299 break;
5300 default:
5301 abort();
5302 }
5303 /* Since we did a shallow copy above, erase any pointers
5304 * in the original info */
5305 memset(&encrypt_info->u, 0, sizeof(encrypt_info->u));
5306 qapi_free_QCryptoBlockInfo(encrypt_info);
5307
5308 spec_info->u.qcow2.data->encrypt = qencrypt;
5309 }
5310
5311 return spec_info;
5312 }
5313
5314 static int coroutine_mixed_fn GRAPH_RDLOCK
5315 qcow2_has_zero_init(BlockDriverState *bs)
5316 {
5317 BDRVQcow2State *s = bs->opaque;
5318 bool preallocated;
5319
5320 if (qemu_in_coroutine()) {
5321 qemu_co_mutex_lock(&s->lock);
5322 }
5323 /*
5324 * Check preallocation status: Preallocated images have all L2
5325 * tables allocated, nonpreallocated images have none. It is
5326 * therefore enough to check the first one.
5327 */
5328 preallocated = s->l1_size > 0 && s->l1_table[0] != 0;
5329 if (qemu_in_coroutine()) {
5330 qemu_co_mutex_unlock(&s->lock);
5331 }
5332
5333 if (!preallocated) {
5334 return 1;
5335 } else if (bs->encrypted) {
5336 return 0;
5337 } else {
5338 return bdrv_has_zero_init(s->data_file->bs);
5339 }
5340 }
5341
5342 /*
5343 * Check the request to vmstate. On success return
5344 * qcow2_vm_state_offset(bs) + @pos
5345 */
5346 static int64_t qcow2_check_vmstate_request(BlockDriverState *bs,
5347 QEMUIOVector *qiov, int64_t pos)
5348 {
5349 BDRVQcow2State *s = bs->opaque;
5350 int64_t vmstate_offset = qcow2_vm_state_offset(s);
5351 int ret;
5352
5353 /* Incoming requests must be OK */
5354 bdrv_check_qiov_request(pos, qiov->size, qiov, 0, &error_abort);
5355
5356 if (INT64_MAX - pos < vmstate_offset) {
5357 return -EIO;
5358 }
5359
5360 pos += vmstate_offset;
5361 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
5362 if (ret < 0) {
5363 return ret;
5364 }
5365
5366 return pos;
5367 }
5368
5369 static int coroutine_fn GRAPH_RDLOCK
5370 qcow2_co_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
5371 {
5372 int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos);
5373 if (offset < 0) {
5374 return offset;
5375 }
5376
5377 BLKDBG_CO_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
5378 return bs->drv->bdrv_co_pwritev_part(bs, offset, qiov->size, qiov, 0, 0);
5379 }
5380
5381 static int coroutine_fn GRAPH_RDLOCK
5382 qcow2_co_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
5383 {
5384 int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos);
5385 if (offset < 0) {
5386 return offset;
5387 }
5388
5389 BLKDBG_CO_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
5390 return bs->drv->bdrv_co_preadv_part(bs, offset, qiov->size, qiov, 0, 0);
5391 }
5392
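/*
 * Scan all guest clusters and return 1 if at least one compressed cluster
 * exists, 0 if none do, or a negative errno on failure. Currently used by
 * qcow2_downgrade() to decide whether an image using the zstd compression
 * type can be downgraded.
 */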
5393 static int GRAPH_RDLOCK qcow2_has_compressed_clusters(BlockDriverState *bs)
5394 {
5395 int64_t offset = 0;
5396 int64_t bytes = bdrv_getlength(bs);
5397
5398 if (bytes < 0) {
5399 return bytes;
5400 }
5401
5402 while (bytes != 0) {
5403 int ret;
5404 QCow2SubclusterType type;
5405 unsigned int cur_bytes = MIN(INT_MAX, bytes);
5406 uint64_t host_offset;
5407
5408 ret = qcow2_get_host_offset(bs, offset, &cur_bytes, &host_offset,
5409 &type);
5410 if (ret < 0) {
5411 return ret;
5412 }
5413
5414 if (type == QCOW2_SUBCLUSTER_COMPRESSED) {
5415 return 1;
5416 }
5417
5418 offset += cur_bytes;
5419 bytes -= cur_bytes;
5420 }
5421
5422 return 0;
5423 }
5424
5425 /*
5426 * Downgrades an image's version. To achieve this, any incompatible features
5427 * have to be removed.
5428 */
5429 static int GRAPH_RDLOCK
5430 qcow2_downgrade(BlockDriverState *bs, int target_version,
5431 BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
5432 Error **errp)
5433 {
5434 BDRVQcow2State *s = bs->opaque;
5435 int current_version = s->qcow_version;
5436 int ret;
5437 int i;
5438
5439 /* This is qcow2_downgrade(), not qcow2_upgrade() */
5440 assert(target_version < current_version);
5441
5442 /* There are no other versions (now) that you can downgrade to */
5443 assert(target_version == 2);
5444
5445 if (s->refcount_order != 4) {
5446 error_setg(errp, "compat=0.10 requires refcount_bits=16");
5447 return -ENOTSUP;
5448 }
5449
5450 if (has_data_file(bs)) {
5451 error_setg(errp, "Cannot downgrade an image with a data file");
5452 return -ENOTSUP;
5453 }
5454
5455 /*
5456 * If any internal snapshot has a different size than the current
5457 * image size, or a VM state size that exceeds 32 bits, downgrading
5458 * is unsafe. Even though we would still use v3-compliant output
5459 * to preserve that data, other v2 programs might not realize
5460 * those optional fields are important.
5461 */
5462 for (i = 0; i < s->nb_snapshots; i++) {
5463 if (s->snapshots[i].vm_state_size > UINT32_MAX ||
5464 s->snapshots[i].disk_size != bs->total_sectors * BDRV_SECTOR_SIZE) {
5465 error_setg(errp, "Internal snapshots prevent downgrade of image");
5466 return -ENOTSUP;
5467 }
5468 }
5469
5470 /* clear incompatible features */
5471 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
5472 ret = qcow2_mark_clean(bs);
5473 if (ret < 0) {
5474 error_setg_errno(errp, -ret, "Failed to make the image clean");
5475 return ret;
5476 }
5477 }
5478
5479 /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in
5480 * the first place; if that happens nonetheless, returning -ENOTSUP is the
5481 * best thing to do anyway */
5482
5483 if (s->incompatible_features & ~QCOW2_INCOMPAT_COMPRESSION) {
5484 error_setg(errp, "Cannot downgrade an image with incompatible features "
5485 "0x%" PRIx64 " set",
5486 s->incompatible_features & ~QCOW2_INCOMPAT_COMPRESSION);
5487 return -ENOTSUP;
5488 }
5489
5490 /* since we can ignore compatible features, we can set them to 0 as well */
5491 s->compatible_features = 0;
5492 /* if lazy refcounts have been used, they have already been fixed through
5493 * clearing the dirty flag */
5494
5495 /* clearing autoclear features is trivial */
5496 s->autoclear_features = 0;
5497
5498 ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque);
5499 if (ret < 0) {
5500 error_setg_errno(errp, -ret, "Failed to turn zero into data clusters");
5501 return ret;
5502 }
5503
5504 if (s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION) {
5505 ret = qcow2_has_compressed_clusters(bs);
5506 if (ret < 0) {
5507 error_setg(errp, "Failed to check block status");
5508 return -EINVAL;
5509 }
5510 if (ret) {
5511 error_setg(errp, "Cannot downgrade an image with zstd compression "
5512 "type and existing compressed clusters");
5513 return -ENOTSUP;
5514 }
5515 /*
5516 * No compressed clusters remain at this point, so just choose the
5517 * default zlib compression.
5518 */
5519 s->incompatible_features &= ~QCOW2_INCOMPAT_COMPRESSION;
5520 s->compression_type = QCOW2_COMPRESSION_TYPE_ZLIB;
5521 }
5522
5523 assert(s->incompatible_features == 0);
5524
5525 s->qcow_version = target_version;
5526 ret = qcow2_update_header(bs);
5527 if (ret < 0) {
5528 s->qcow_version = current_version;
5529 error_setg_errno(errp, -ret, "Failed to update the image header");
5530 return ret;
5531 }
5532 return 0;
5533 }
5534
5535 /*
5536 * Upgrades an image's version. While newer versions encompass all
5537 * features of older versions, some things may have to be presented
5538 * differently.
5539 */
5540 static int GRAPH_RDLOCK
5541 qcow2_upgrade(BlockDriverState *bs, int target_version,
5542 BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
5543 Error **errp)
5544 {
5545 BDRVQcow2State *s = bs->opaque;
5546 bool need_snapshot_update;
5547 int current_version = s->qcow_version;
5548 int i;
5549 int ret;
5550
5551 /* This is qcow2_upgrade(), not qcow2_downgrade() */
5552 assert(target_version > current_version);
5553
5554 /* There are no other versions (yet) that you can upgrade to */
5555 assert(target_version == 3);
5556
5557 status_cb(bs, 0, 2, cb_opaque);
5558
5559 /*
5560 * In v2, snapshots do not need to have extra data. v3 requires
5561 * the 64-bit VM state size and the virtual disk size to be
5562 * present.
5563 * qcow2_write_snapshots() will always write the list in the
5564 * v3-compliant format.
5565 */
5566 need_snapshot_update = false;
5567 for (i = 0; i < s->nb_snapshots; i++) {
5568 if (s->snapshots[i].extra_data_size <
5569 sizeof_field(QCowSnapshotExtraData, vm_state_size_large) +
5570 sizeof_field(QCowSnapshotExtraData, disk_size))
5571 {
5572 need_snapshot_update = true;
5573 break;
5574 }
5575 }
5576 if (need_snapshot_update) {
5577 ret = qcow2_write_snapshots(bs);
5578 if (ret < 0) {
5579 error_setg_errno(errp, -ret, "Failed to update the snapshot table");
5580 return ret;
5581 }
5582 }
5583 status_cb(bs, 1, 2, cb_opaque);
5584
5585 s->qcow_version = target_version;
5586 ret = qcow2_update_header(bs);
5587 if (ret < 0) {
5588 s->qcow_version = current_version;
5589 error_setg_errno(errp, -ret, "Failed to update the image header");
5590 return ret;
5591 }
5592 status_cb(bs, 2, 2, cb_opaque);
5593
5594 return 0;
5595 }
5596
5597 typedef enum Qcow2AmendOperation {
5598 /* This is the value Qcow2AmendHelperCBInfo::last_operation will be
5599 * statically initialized to so that the helper CB can discern the first
5600 * invocation from an operation change */
5601 QCOW2_NO_OPERATION = 0,
5602
5603 QCOW2_UPGRADING,
5604 QCOW2_UPDATING_ENCRYPTION,
5605 QCOW2_CHANGING_REFCOUNT_ORDER,
5606 QCOW2_DOWNGRADING,
5607 } Qcow2AmendOperation;
5608
5609 typedef struct Qcow2AmendHelperCBInfo {
5610 /* The code coordinating the amend operations should only modify
5611 * these four fields; the rest will be managed by the CB */
5612 BlockDriverAmendStatusCB *original_status_cb;
5613 void *original_cb_opaque;
5614
5615 Qcow2AmendOperation current_operation;
5616
5617 /* Total number of operations to perform (only set once) */
5618 int total_operations;
5619
5620 /* The following fields are managed by the CB */
5621
5622 /* Number of operations completed */
5623 int operations_completed;
5624
5625 /* Cumulative offset of all completed operations */
5626 int64_t offset_completed;
5627
5628 Qcow2AmendOperation last_operation;
5629 int64_t last_work_size;
5630 } Qcow2AmendHelperCBInfo;
5631
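/*
 * Folds the per-operation progress reports of the individual amend
 * sub-operations into one overall progress report: work from completed
 * operations is accumulated in offset_completed, and the size of operations
 * that have not started yet is projected from the operations seen so far.
 */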
5632 static void qcow2_amend_helper_cb(BlockDriverState *bs,
5633 int64_t operation_offset,
5634 int64_t operation_work_size, void *opaque)
5635 {
5636 Qcow2AmendHelperCBInfo *info = opaque;
5637 int64_t current_work_size;
5638 int64_t projected_work_size;
5639
5640 if (info->current_operation != info->last_operation) {
5641 if (info->last_operation != QCOW2_NO_OPERATION) {
5642 info->offset_completed += info->last_work_size;
5643 info->operations_completed++;
5644 }
5645
5646 info->last_operation = info->current_operation;
5647 }
5648
5649 assert(info->total_operations > 0);
5650 assert(info->operations_completed < info->total_operations);
5651
5652 info->last_work_size = operation_work_size;
5653
5654 current_work_size = info->offset_completed + operation_work_size;
5655
5656 /* current_work_size is the total work size for (operations_completed + 1)
5657 * operations (which includes this one), so multiply it by the number of
5658 * operations not covered and divide it by the number of operations
5659 * covered to get a projection for the operations not covered */
5660 projected_work_size = current_work_size * (info->total_operations -
5661 info->operations_completed - 1)
5662 / (info->operations_completed + 1);
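/*
 * Worked example (illustrative numbers): with total_operations = 3, one
 * operation already completed with offset_completed = 100, and the current
 * operation reporting operation_work_size = 60, current_work_size is 160 for
 * the two covered operations, so the one uncovered operation is projected at
 * 160 * 1 / 2 = 80 and the status CB sees a total work size of 240.
 */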
5663
5664 info->original_status_cb(bs, info->offset_completed + operation_offset,
5665 current_work_size + projected_work_size,
5666 info->original_cb_opaque);
5667 }
5668
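/*
 * Implements image amendment for qemu-img amend (the .bdrv_amend_options
 * callback). Only options explicitly given in @opts are changed; a typical
 * invocation (illustrative) would be:
 *
 *   qemu-img amend -o compat=1.1,lazy_refcounts=on test.qcow2
 *
 * Upgrades are performed first and downgrades last, so every intermediate
 * step runs on the most capable format version involved.
 */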
5669 static int GRAPH_RDLOCK
5670 qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
5671 BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
5672 bool force, Error **errp)
5673 {
5674 BDRVQcow2State *s = bs->opaque;
5675 int old_version = s->qcow_version, new_version = old_version;
5676 uint64_t new_size = 0;
5677 const char *backing_file = NULL, *backing_format = NULL, *data_file = NULL;
5678 bool lazy_refcounts = s->use_lazy_refcounts;
5679 bool data_file_raw = data_file_is_raw(bs);
5680 const char *compat = NULL;
5681 int refcount_bits = s->refcount_bits;
5682 int ret;
5683 QemuOptDesc *desc = opts->list->desc;
5684 Qcow2AmendHelperCBInfo helper_cb_info;
5685 bool encryption_update = false;
5686
5687 while (desc && desc->name) {
5688 if (!qemu_opt_find(opts, desc->name)) {
5689 /* only change explicitly defined options */
5690 desc++;
5691 continue;
5692 }
5693
5694 if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) {
5695 compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL);
5696 if (!compat) {
5697 /* preserve default */
5698 } else if (!strcmp(compat, "0.10") || !strcmp(compat, "v2")) {
5699 new_version = 2;
5700 } else if (!strcmp(compat, "1.1") || !strcmp(compat, "v3")) {
5701 new_version = 3;
5702 } else {
5703 error_setg(errp, "Unknown compatibility level %s", compat);
5704 return -EINVAL;
5705 }
5706 } else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) {
5707 new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
5708 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) {
5709 backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
5710 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) {
5711 backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
5712 } else if (g_str_has_prefix(desc->name, "encrypt.")) {
5713 if (!s->crypto) {
5714 error_setg(errp,
5715 "Can't amend encryption options - encryption not present");
5716 return -EINVAL;
5717 }
5718 if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
5719 error_setg(errp,
5720 "Only LUKS encryption options can be amended");
5721 return -ENOTSUP;
5722 }
5723 encryption_update = true;
5724 } else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) {
5725 lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS,
5726 lazy_refcounts);
5727 } else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) {
5728 refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS,
5729 refcount_bits);
5730
5731 if (refcount_bits <= 0 || refcount_bits > 64 ||
5732 !is_power_of_2(refcount_bits))
5733 {
5734 error_setg(errp, "Refcount width must be a power of two and "
5735 "may not exceed 64 bits");
5736 return -EINVAL;
5737 }
5738 } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE)) {
5739 data_file = qemu_opt_get(opts, BLOCK_OPT_DATA_FILE);
5740 if (data_file && !has_data_file(bs)) {
5741 error_setg(errp, "data-file can only be set for images that "
5742 "use an external data file");
5743 return -EINVAL;
5744 }
5745 } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE_RAW)) {
5746 data_file_raw = qemu_opt_get_bool(opts, BLOCK_OPT_DATA_FILE_RAW,
5747 data_file_raw);
5748 if (data_file_raw && !data_file_is_raw(bs)) {
5749 error_setg(errp, "data-file-raw cannot be set on existing "
5750 "images");
5751 return -EINVAL;
5752 }
5753 } else {
5754 /* if this point is reached, this probably means a new option was
5755 * added without having it covered here */
5756 abort();
5757 }
5758
5759 desc++;
5760 }
5761
5762 helper_cb_info = (Qcow2AmendHelperCBInfo){
5763 .original_status_cb = status_cb,
5764 .original_cb_opaque = cb_opaque,
5765 .total_operations = (new_version != old_version)
5766 + (s->refcount_bits != refcount_bits) +
5767 (encryption_update == true)
5768 };
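/*
 * Note that the (new_version != old_version) term covers either an upgrade
 * or a downgrade; at most one of the two can happen per amend call.
 */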
5769
5770 /* Upgrade first (some features may require compat=1.1) */
5771 if (new_version > old_version) {
5772 helper_cb_info.current_operation = QCOW2_UPGRADING;
5773 ret = qcow2_upgrade(bs, new_version, &qcow2_amend_helper_cb,
5774 &helper_cb_info, errp);
5775 if (ret < 0) {
5776 return ret;
5777 }
5778 }
5779
5780 if (encryption_update) {
5781 QDict *amend_opts_dict;
5782 QCryptoBlockAmendOptions *amend_opts;
5783
5784 helper_cb_info.current_operation = QCOW2_UPDATING_ENCRYPTION;
5785 amend_opts_dict = qcow2_extract_crypto_opts(opts, "luks", errp);
5786 if (!amend_opts_dict) {
5787 return -EINVAL;
5788 }
5789 amend_opts = block_crypto_amend_opts_init(amend_opts_dict, errp);
5790 qobject_unref(amend_opts_dict);
5791 if (!amend_opts) {
5792 return -EINVAL;
5793 }
5794 ret = qcrypto_block_amend_options(s->crypto,
5795 qcow2_crypto_hdr_read_func,
5796 qcow2_crypto_hdr_write_func,
5797 bs,
5798 amend_opts,
5799 force,
5800 errp);
5801 qapi_free_QCryptoBlockAmendOptions(amend_opts);
5802 if (ret < 0) {
5803 return ret;
5804 }
5805 }
5806
5807 if (s->refcount_bits != refcount_bits) {
5808 int refcount_order = ctz32(refcount_bits);
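/* e.g. refcount_bits = 16 yields refcount_order = 4 (2^4 = 16) */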
5809
5810 if (new_version < 3 && refcount_bits != 16) {
5811 error_setg(errp, "Refcount widths other than 16 bits require "
5812 "compatibility level 1.1 or above (use compat=1.1 or "
5813 "greater)");
5814 return -EINVAL;
5815 }
5816
5817 helper_cb_info.current_operation = QCOW2_CHANGING_REFCOUNT_ORDER;
5818 ret = qcow2_change_refcount_order(bs, refcount_order,
5819 &qcow2_amend_helper_cb,
5820 &helper_cb_info, errp);
5821 if (ret < 0) {
5822 return ret;
5823 }
5824 }
5825
5826 /* data-file-raw blocks backing files, so clear it first if requested */
5827 if (data_file_raw) {
5828 s->autoclear_features |= QCOW2_AUTOCLEAR_DATA_FILE_RAW;
5829 } else {
5830 s->autoclear_features &= ~QCOW2_AUTOCLEAR_DATA_FILE_RAW;
5831 }
5832
5833 if (data_file) {
5834 g_free(s->image_data_file);
5835 s->image_data_file = *data_file ? g_strdup(data_file) : NULL;
5836 }
5837
5838 ret = qcow2_update_header(bs);
5839 if (ret < 0) {
5840 error_setg_errno(errp, -ret, "Failed to update the image header");
5841 return ret;
5842 }
5843
5844 if (backing_file || backing_format) {
5845 if (g_strcmp0(backing_file, s->image_backing_file) ||
5846 g_strcmp0(backing_format, s->image_backing_format)) {
5847 error_setg(errp, "Cannot amend the backing file");
5848 error_append_hint(errp,
5849 "You can use 'qemu-img rebase' instead.\n");
5850 return -EINVAL;
5851 }
5852 }
5853
5854 if (s->use_lazy_refcounts != lazy_refcounts) {
5855 if (lazy_refcounts) {
5856 if (new_version < 3) {
5857 error_setg(errp, "Lazy refcounts only supported with "
5858 "compatibility level 1.1 and above (use compat=1.1 "
5859 "or greater)");
5860 return -EINVAL;
5861 }
5862 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
5863 ret = qcow2_update_header(bs);
5864 if (ret < 0) {
5865 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
5866 error_setg_errno(errp, -ret, "Failed to update the image header");
5867 return ret;
5868 }
5869 s->use_lazy_refcounts = true;
5870 } else {
5871 /* make image clean first */
5872 ret = qcow2_mark_clean(bs);
5873 if (ret < 0) {
5874 error_setg_errno(errp, -ret, "Failed to make the image clean");
5875 return ret;
5876 }
5877 /* now disallow lazy refcounts */
5878 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
5879 ret = qcow2_update_header(bs);
5880 if (ret < 0) {
5881 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
5882 error_setg_errno(errp, -ret, "Failed to update the image header");
5883 return ret;
5884 }
5885 s->use_lazy_refcounts = false;
5886 }
5887 }
5888
5889 if (new_size) {
5890 BlockBackend *blk = blk_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL,
5891 errp);
5892 if (!blk) {
5893 return -EPERM;
5894 }
5895
5896 /*
5897 * Amending image options should ensure that the image has
5898 * exactly the given new values, so pass exact=true here.
5899 */
5900 ret = blk_truncate(blk, new_size, true, PREALLOC_MODE_OFF, 0, errp);
5901 blk_unref(blk);
5902 if (ret < 0) {
5903 return ret;
5904 }
5905 }
5906
5907 /* Downgrade last (so unsupported features can be removed before) */
5908 if (new_version < old_version) {
5909 helper_cb_info.current_operation = QCOW2_DOWNGRADING;
5910 ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb,
5911 &helper_cb_info, errp);
5912 if (ret < 0) {
5913 return ret;
5914 }
5915 }
5916
5917 return 0;
5918 }
5919
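/*
 * .bdrv_co_amend implementation, reached through the experimental
 * x-blockdev-amend QMP command (qemu-img amend goes through
 * qcow2_amend_options() above instead). For qcow2 this currently only
 * supports updating the options of an existing LUKS encryption header.
 */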
5920 static int coroutine_fn qcow2_co_amend(BlockDriverState *bs,
5921 BlockdevAmendOptions *opts,
5922 bool force,
5923 Error **errp)
5924 {
5925 BlockdevAmendOptionsQcow2 *qopts = &opts->u.qcow2;
5926 BDRVQcow2State *s = bs->opaque;
5927 int ret = 0;
5928
5929 if (qopts->encrypt) {
5930 if (!s->crypto) {
5931 error_setg(errp, "image is not encrypted, can't amend");
5932 return -EOPNOTSUPP;
5933 }
5934
5935 if (qopts->encrypt->format != Q_CRYPTO_BLOCK_FORMAT_LUKS) {
5936 error_setg(errp,
5937 "Amend can't be used to change the qcow2 encryption format");
5938 return -EOPNOTSUPP;
5939 }
5940
5941 if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
5942 error_setg(errp,
5943 "Only LUKS encryption options can be amended for qcow2 with blockdev-amend");
5944 return -EOPNOTSUPP;
5945 }
5946
5947 ret = qcrypto_block_amend_options(s->crypto,
5948 qcow2_crypto_hdr_read_func,
5949 qcow2_crypto_hdr_write_func,
5950 bs,
5951 qopts->encrypt,
5952 force,
5953 errp);
5954 }
5955 return ret;
5956 }
5957
5958 /*
5959 * If offset or size is negative, the respective value will not be included
5960 * in the BLOCK_IMAGE_CORRUPTED event that is emitted.
5961 * fatal will be ignored for read-only BDS; corruptions found there will always
5962 * be considered non-fatal.
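*
* Illustrative use from a caller performing metadata checks (the exact
* message and arguments vary by call site; l2_offset is hypothetical here):
*
*   qcow2_signal_corruption(bs, true, -1, -1,
*                           "L2 table offset %#" PRIx64 " unaligned",
*                           l2_offset);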
5963 */
5964 void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
5965 int64_t size, const char *message_format, ...)
5966 {
5967 BDRVQcow2State *s = bs->opaque;
5968 const char *node_name;
5969 char *message;
5970 va_list ap;
5971
5972 fatal = fatal && bdrv_is_writable(bs);
5973
5974 if (s->signaled_corruption &&
5975 (!fatal || (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT)))
5976 {
5977 return;
5978 }
5979
5980 va_start(ap, message_format);
5981 message = g_strdup_vprintf(message_format, ap);
5982 va_end(ap);
5983
5984 if (fatal) {
5985 fprintf(stderr, "qcow2: Marking image as corrupt: %s; further "
5986 "corruption events will be suppressed\n", message);
5987 } else {
5988 fprintf(stderr, "qcow2: Image is corrupt: %s; further non-fatal "
5989 "corruption events will be suppressed\n", message);
5990 }
5991
5992 node_name = bdrv_get_node_name(bs);
5993 qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs),
5994 *node_name ? node_name : NULL,
5995 message, offset >= 0, offset,
5996 size >= 0, size,
5997 fatal);
5998 g_free(message);
5999
6000 if (fatal) {
6001 qcow2_mark_corrupt(bs);
6002 bs->drv = NULL; /* make BDS unusable */
6003 }
6004
6005 s->signaled_corruption = true;
6006 }
6007
6008 #define QCOW_COMMON_OPTIONS \
6009 { \
6010 .name = BLOCK_OPT_SIZE, \
6011 .type = QEMU_OPT_SIZE, \
6012 .help = "Virtual disk size" \
6013 }, \
6014 { \
6015 .name = BLOCK_OPT_COMPAT_LEVEL, \
6016 .type = QEMU_OPT_STRING, \
6017 .help = "Compatibility level (v2 [0.10] or v3 [1.1])" \
6018 }, \
6019 { \
6020 .name = BLOCK_OPT_BACKING_FILE, \
6021 .type = QEMU_OPT_STRING, \
6022 .help = "File name of a base image" \
6023 }, \
6024 { \
6025 .name = BLOCK_OPT_BACKING_FMT, \
6026 .type = QEMU_OPT_STRING, \
6027 .help = "Image format of the base image" \
6028 }, \
6029 { \
6030 .name = BLOCK_OPT_DATA_FILE, \
6031 .type = QEMU_OPT_STRING, \
6032 .help = "File name of an external data file" \
6033 }, \
6034 { \
6035 .name = BLOCK_OPT_DATA_FILE_RAW, \
6036 .type = QEMU_OPT_BOOL, \
6037 .help = "The external data file must stay valid " \
6038 "as a raw image" \
6039 }, \
6040 { \
6041 .name = BLOCK_OPT_LAZY_REFCOUNTS, \
6042 .type = QEMU_OPT_BOOL, \
6043 .help = "Postpone refcount updates", \
6044 .def_value_str = "off" \
6045 }, \
6046 { \
6047 .name = BLOCK_OPT_REFCOUNT_BITS, \
6048 .type = QEMU_OPT_NUMBER, \
6049 .help = "Width of a reference count entry in bits", \
6050 .def_value_str = "16" \
6051 }
6052
6053 static QemuOptsList qcow2_create_opts = {
6054 .name = "qcow2-create-opts",
6055 .head = QTAILQ_HEAD_INITIALIZER(qcow2_create_opts.head),
6056 .desc = {
6057 {
6058 .name = BLOCK_OPT_ENCRYPT,
6059 .type = QEMU_OPT_BOOL,
6060 .help = "Encrypt the image with format 'aes'. (Deprecated "
6061 "in favor of " BLOCK_OPT_ENCRYPT_FORMAT "=aes)",
6062 },
6063 {
6064 .name = BLOCK_OPT_ENCRYPT_FORMAT,
6065 .type = QEMU_OPT_STRING,
6066 .help = "Encrypt the image, format choices: 'aes', 'luks'",
6067 },
6068 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",
6069 "ID of secret providing qcow AES key or LUKS passphrase"),
6070 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_ALG("encrypt."),
6071 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_MODE("encrypt."),
6072 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_ALG("encrypt."),
6073 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG("encrypt."),
6074 BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG("encrypt."),
6075 BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."),
6076 {
6077 .name = BLOCK_OPT_CLUSTER_SIZE,
6078 .type = QEMU_OPT_SIZE,
6079 .help = "qcow2 cluster size",
6080 .def_value_str = stringify(DEFAULT_CLUSTER_SIZE)
6081 },
6082 {
6083 .name = BLOCK_OPT_EXTL2,
6084 .type = QEMU_OPT_BOOL,
6085 .help = "Extended L2 tables",
6086 .def_value_str = "off"
6087 },
6088 {
6089 .name = BLOCK_OPT_PREALLOC,
6090 .type = QEMU_OPT_STRING,
6091 .help = "Preallocation mode (allowed values: off, "
6092 "metadata, falloc, full)"
6093 },
6094 {
6095 .name = BLOCK_OPT_COMPRESSION_TYPE,
6096 .type = QEMU_OPT_STRING,
6097 .help = "Compression method used for image cluster "
6098 "compression",
6099 .def_value_str = "zlib"
6100 },
6101 QCOW_COMMON_OPTIONS,
6102 { /* end of list */ }
6103 }
6104 };
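/*
 * Illustrative creation command exercising a few of the options above
 * (assumes a QEMU build with zstd support; option names are the BLOCK_OPT_*
 * strings, e.g. "cluster_size" and "compression_type"):
 *
 *   qemu-img create -f qcow2 \
 *       -o cluster_size=65536,compression_type=zstd,lazy_refcounts=on \
 *       test.qcow2 16G
 */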
6105
6106 static QemuOptsList qcow2_amend_opts = {
6107 .name = "qcow2-amend-opts",
6108 .head = QTAILQ_HEAD_INITIALIZER(qcow2_amend_opts.head),
6109 .desc = {
6110 BLOCK_CRYPTO_OPT_DEF_LUKS_STATE("encrypt."),
6111 BLOCK_CRYPTO_OPT_DEF_LUKS_KEYSLOT("encrypt."),
6112 BLOCK_CRYPTO_OPT_DEF_LUKS_OLD_SECRET("encrypt."),
6113 BLOCK_CRYPTO_OPT_DEF_LUKS_NEW_SECRET("encrypt."),
6114 BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."),
6115 QCOW_COMMON_OPTIONS,
6116 { /* end of list */ }
6117 }
6118 };
6119
6120 static const char *const qcow2_strong_runtime_opts[] = {
6121 "encrypt." BLOCK_CRYPTO_OPT_QCOW_KEY_SECRET,
6122
6123 NULL
6124 };
6125
6126 BlockDriver bdrv_qcow2 = {
6127 .format_name = "qcow2",
6128 .instance_size = sizeof(BDRVQcow2State),
6129 .bdrv_probe = qcow2_probe,
6130 .bdrv_open = qcow2_open,
6131 .bdrv_close = qcow2_close,
6132 .bdrv_reopen_prepare = qcow2_reopen_prepare,
6133 .bdrv_reopen_commit = qcow2_reopen_commit,
6134 .bdrv_reopen_commit_post = qcow2_reopen_commit_post,
6135 .bdrv_reopen_abort = qcow2_reopen_abort,
6136 .bdrv_join_options = qcow2_join_options,
6137 .bdrv_child_perm = bdrv_default_perms,
6138 .bdrv_co_create_opts = qcow2_co_create_opts,
6139 .bdrv_co_create = qcow2_co_create,
6140 .bdrv_has_zero_init = qcow2_has_zero_init,
6141 .bdrv_co_block_status = qcow2_co_block_status,
6142
6143 .bdrv_co_preadv_part = qcow2_co_preadv_part,
6144 .bdrv_co_pwritev_part = qcow2_co_pwritev_part,
6145 .bdrv_co_flush_to_os = qcow2_co_flush_to_os,
6146
6147 .bdrv_co_pwrite_zeroes = qcow2_co_pwrite_zeroes,
6148 .bdrv_co_pdiscard = qcow2_co_pdiscard,
6149 .bdrv_co_copy_range_from = qcow2_co_copy_range_from,
6150 .bdrv_co_copy_range_to = qcow2_co_copy_range_to,
6151 .bdrv_co_truncate = qcow2_co_truncate,
6152 .bdrv_co_pwritev_compressed_part = qcow2_co_pwritev_compressed_part,
6153 .bdrv_make_empty = qcow2_make_empty,
6154
6155 .bdrv_snapshot_create = qcow2_snapshot_create,
6156 .bdrv_snapshot_goto = qcow2_snapshot_goto,
6157 .bdrv_snapshot_delete = qcow2_snapshot_delete,
6158 .bdrv_snapshot_list = qcow2_snapshot_list,
6159 .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp,
6160 .bdrv_measure = qcow2_measure,
6161 .bdrv_co_get_info = qcow2_co_get_info,
6162 .bdrv_get_specific_info = qcow2_get_specific_info,
6163
6164 .bdrv_co_save_vmstate = qcow2_co_save_vmstate,
6165 .bdrv_co_load_vmstate = qcow2_co_load_vmstate,
6166
6167 .is_format = true,
6168 .supports_backing = true,
6169 .bdrv_co_change_backing_file = qcow2_co_change_backing_file,
6170
6171 .bdrv_refresh_limits = qcow2_refresh_limits,
6172 .bdrv_co_invalidate_cache = qcow2_co_invalidate_cache,
6173 .bdrv_inactivate = qcow2_inactivate,
6174
6175 .create_opts = &qcow2_create_opts,
6176 .amend_opts = &qcow2_amend_opts,
6177 .strong_runtime_opts = qcow2_strong_runtime_opts,
6178 .mutable_opts = mutable_opts,
6179 .bdrv_co_check = qcow2_co_check,
6180 .bdrv_amend_options = qcow2_amend_options,
6181 .bdrv_co_amend = qcow2_co_amend,
6182
6183 .bdrv_detach_aio_context = qcow2_detach_aio_context,
6184 .bdrv_attach_aio_context = qcow2_attach_aio_context,
6185
6186 .bdrv_supports_persistent_dirty_bitmap =
6187 qcow2_supports_persistent_dirty_bitmap,
6188 .bdrv_co_can_store_new_dirty_bitmap = qcow2_co_can_store_new_dirty_bitmap,
6189 .bdrv_co_remove_persistent_dirty_bitmap =
6190 qcow2_co_remove_persistent_dirty_bitmap,
6191 };
6192
6193 static void bdrv_qcow2_init(void)
6194 {
6195 bdrv_register(&bdrv_qcow2);
6196 }
6197
6198 block_init(bdrv_qcow2_init);