/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "qemu/option.h"
#include "trace.h"
#include "qed.h"
#include "sysemu/block-backend.h"

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}
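
/* Probe scoring note: format probes return a confidence score and the block
 * layer picks the driver with the highest one.  Matching the QED magic is
 * treated as a definitive claim (100); anything else scores 0.  (This is a
 * hedged description of the generic probe convention, not logic local to
 * this file.)
 */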

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 *
 * No new allocating requests can start while this function runs.
 */
static int coroutine_fn qed_write_header(BDRVQEDState *s)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    uint8_t *buf;
    struct iovec iov;
    QEMUIOVector qiov;
    int ret;

    assert(s->allocating_acb || s->allocating_write_reqs_plugged);

    buf = qemu_blockalign(s->bs, len);
    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = len,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = bdrv_co_preadv(s->bs->file, 0, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *) buf);

    ret = bdrv_co_pwritev(s->bs->file, 0, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(buf);
    return ret;
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
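
/* Worked example, assuming the defaults defined in qed.h
 * (QED_DEFAULT_CLUSTER_SIZE = 64 KiB, QED_DEFAULT_TABLE_SIZE = 4 clusters):
 *
 *   table_entries = (4 * 65536) / 8  = 32768 entries per table
 *   l2_size       = 32768 * 65536    = 2 GiB mapped per L2 table
 *   maximum size  = 2 GiB * 32768    = 64 TiB
 *
 * So a default-formatted QED image can address at most 64 TiB.
 */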

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}
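
/* The power-of-two test above uses the usual bit trick: a power of two has
 * exactly one bit set, so x & (x - 1) clears it to zero.  For example,
 * 0x10000 & 0xFFFF == 0 (64 KiB is valid) while 0x18000 & 0x17FFF != 0.
 */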

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 *
 * Called with table_lock held.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}
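
/* Allocation is append-only: clusters are handed out from the current end of
 * the image file and s->file_size is advanced immediately, while the file
 * itself only grows when something is written at the returned offset.  A crash
 * in between can therefore at worst leak clusters; see
 * qed_should_set_need_check() below for how that is repaired.
 */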

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 *
 * Called with table_lock held.
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}
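
/* A zeroed table is already meaningful: per the QED on-disk format, offset 0
 * is the unallocated cluster marker, so every entry of a fresh L2 table reads
 * as "not backed here; fall through to the backing file or zeroes" until it
 * is linked to real data.
 */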

static bool qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);

    /* No reentrancy is allowed. */
    assert(!s->allocating_write_reqs_plugged);
    if (s->allocating_acb != NULL) {
        /* Another allocating write came concurrently.  This cannot happen
         * from bdrv_qed_co_drain_begin, but it can happen when the timer runs.
         */
        qemu_co_mutex_unlock(&s->table_lock);
        return false;
    }

    s->allocating_write_reqs_plugged = true;
    qemu_co_mutex_unlock(&s->table_lock);
    return true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);
    assert(s->allocating_write_reqs_plugged);
    s->allocating_write_reqs_plugged = false;
    qemu_co_queue_next(&s->allocating_write_reqs);
    qemu_co_mutex_unlock(&s->table_lock);
}

static void coroutine_fn qed_need_check_timer_entry(void *opaque)
{
    BDRVQEDState *s = opaque;
    int ret;

    trace_qed_need_check_timer_cb(s);

    if (!qed_plug_allocating_write_reqs(s)) {
        return;
    }

    /* Ensure writes are on disk before clearing flag */
    ret = bdrv_co_flush(s->bs->file->bs);
    if (ret < 0) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    ret = qed_write_header(s);
    /* If the header write fails, QED_F_NEED_CHECK simply remains set on disk
     * and the image will be checked again on the next open, so the error can
     * be safely ignored here.
     */
    (void) ret;

    qed_unplug_allocating_write_reqs(s);

    ret = bdrv_co_flush(s->bs);
    (void) ret;
}

static void qed_need_check_timer_cb(void *opaque)
{
    Coroutine *co = qemu_coroutine_create(qed_need_check_timer_entry, opaque);
    qemu_coroutine_enter(co);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}
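
/* The timer implements lazy clearing of the dirty bit: once no allocating
 * write has been seen for QED_NEED_CHECK_TIMEOUT (defined in qed.h; 5 seconds
 * at the time of writing), qed_need_check_timer_entry() flushes and rewrites
 * the header with QED_F_NEED_CHECK cleared, so a crash after that point no
 * longer forces a consistency check on the next open.
 */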

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static void coroutine_fn bdrv_qed_co_drain_begin(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        qed_cancel_need_check_timer(s);
        qed_need_check_timer_entry(s);
    }
}

static void bdrv_qed_init_state(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    memset(s, 0, sizeof(BDRVQEDState));
    s->bs = bs;
    qemu_co_mutex_init(&s->table_lock);
    qemu_co_queue_init(&s->allocating_write_reqs);
}

static int bdrv_qed_do_open(BlockDriverState *bs, QDict *options, int flags,
                            Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);
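
    /* Sketch of how these shifts decompose a guest byte position "pos",
     * mirroring the qed_l1_index()/qed_l2_index() helpers in qed.h:
     *
     *   L1 index          = pos >> l1_shift
     *   L2 index          = (pos >> l2_shift) & l2_mask
     *   offset in cluster = pos & (cluster_size - 1)
     *
     * With the defaults (64 KiB clusters, 32768 table entries) this yields
     * l2_shift = 16 and l1_shift = 31, i.e. each L2 table maps 2 GiB.
     */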

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    bdrv_qed_init_state(bs);
    return bdrv_qed_do_open(bs, options, flags, errp);
}

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
}

/* We have nothing to do for QED reopen, stubs just return success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockBackend *blk;

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    blk = blk_new_open(filename, NULL, NULL,
                       BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
                       &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        return -EIO;
    }

    blk_set_allow_write_beyond_eof(blk, true);

    /* File must start empty and grow, check truncate is supported */
    ret = blk_truncate(blk, 0, PREALLOC_MODE_OFF, errp);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), backing_file,
                     header.backing_filename_size, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    return ret;
}
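
/* Resulting file layout for a freshly created image (header_size is 1 cluster
 * and l1_table_offset == cluster_size above):
 *
 *   byte 0:            QEDHeader, little-endian
 *   after the header:  backing filename, if any (not NUL-terminated)
 *   cluster_size:      zeroed L1 table, table_size clusters long
 *
 * Data clusters and L2 tables are then appended past the L1 table on demand
 * via qed_alloc_clusters().
 */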

static int coroutine_fn bdrv_qed_co_create_opts(const char *filename,
                                                QemuOpts *opts,
                                                Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                   "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                   "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                   "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}

static int coroutine_fn bdrv_qed_co_block_status(BlockDriverState *bs,
                                                 bool want_zero,
                                                 int64_t pos, int64_t bytes,
                                                 int64_t *pnum, int64_t *map,
                                                 BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = MIN(bytes, SIZE_MAX);
    int status;
    QEDRequest request = { .l2_table = NULL };
    uint64_t offset;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    ret = qed_find_cluster(s, &request, pos, &len, &offset);

    *pnum = len;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        *map = offset | qed_offset_into_cluster(s, pos);
        status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
        *file = bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        status = 0;
        break;
    default:
        assert(ret < 0);
        status = ret;
        break;
    }

    qed_unref_l2_cache_entry(request.l2_table);
    qemu_co_mutex_unlock(&s->table_lock);

    return status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static int coroutine_fn qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                              QEMUIOVector *qiov,
                                              QEMUIOVector **backing_qiov)
{
    uint64_t backing_length = 0;
    size_t size;
    int ret;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            return l;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        return 0;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    ret = bdrv_co_preadv(s->bs->backing, pos, size, *backing_qiov, 0);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
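
/* Ownership note: *backing_qiov aliases the caller's buffers, so it carries
 * no data of its own; the caller is responsible for qemu_iovec_destroy() plus
 * g_free() once the read has completed (see qed_copy_from_backing_file() and
 * qed_aio_next_io() below).
 */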

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 */
static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s,
                                                   uint64_t pos, uint64_t len,
                                                   uint64_t offset)
{
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov = NULL;
    struct iovec iov;
    int ret;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        return 0;
    }

    iov = (struct iovec) {
        .iov_base = qemu_blockalign(s->bs, len),
        .iov_len = len,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = qed_read_backing_file(s, pos, &qiov, &backing_qiov);

    if (backing_qiov) {
        qemu_iovec_destroy(backing_qiov);
        g_free(backing_qiov);
        backing_qiov = NULL;
    }

    if (ret) {
        goto out;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->bs->file, offset, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 *
 * Called with table_lock held.
 */
static void coroutine_fn qed_update_l2_table(BDRVQEDState *s, QEDTable *table,
                                             int index, unsigned int n,
                                             uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
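
/* The conditional increment above is what lets one call handle both cases:
 * real allocations advance cluster by cluster_size for each entry, while the
 * zero and unallocated markers are plain sentinels and must be stored
 * unchanged in every entry.
 */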

/* Called with table_lock held. */
static void coroutine_fn qed_aio_complete(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == s->allocating_acb) {
        s->allocating_acb = NULL;
        if (!qemu_co_queue_empty(&s->allocating_write_reqs)) {
            qemu_co_queue_next(&s->allocating_write_reqs);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Update L1 table with new L2 table offset and write it out
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_l1_update(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;
    int index, ret;

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = l2_table->offset;

    ret = qed_write_l1_table(s, index, 1);

    /* Commit the current L2 table to the cache */
    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    return ret;
}

/**
 * Update L2 table with new cluster offsets and write them out
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_l2_update(QEDAIOCB *acb, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index, ret;

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        ret = qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true);
        if (ret) {
            return ret;
        }
        return qed_aio_write_l1_update(acb);
    } else {
        /* Write out only the updated part of the L2 table */
        ret = qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters,
                                 false);
        if (ret) {
            return ret;
        }
    }
    return 0;
}

/**
 * Write data to the image file
 *
 * Called with table_lock *not* held.
 */
static int coroutine_fn qed_aio_write_main(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    return bdrv_co_pwritev(s->bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
}

/**
 * Populate untouched regions of new data cluster
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_cow(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start, len, offset;
    int ret;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Populate front untouched region of new data cluster */
    start = qed_start_of_cluster(s, acb->cur_pos);
    len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    ret = qed_copy_from_backing_file(s, start, len, acb->cur_cluster);
    if (ret < 0) {
        goto out;
    }

    /* Populate back untouched region of new data cluster */
    start = acb->cur_pos + acb->cur_qiov.size;
    len = qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    offset = acb->cur_cluster +
             qed_offset_into_cluster(s, acb->cur_pos) +
             acb->cur_qiov.size;

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    ret = qed_copy_from_backing_file(s, start, len, offset);
    if (ret < 0) {
        goto out;
    }

    ret = qed_aio_write_main(acb);
    if (ret < 0) {
        goto out;
    }

    if (s->bs->backing) {
        /*
         * Flush new data clusters before updating the L2 table
         *
         * This flush is necessary when a backing file is in use.  A crash
         * during an allocating write could result in empty clusters in the
         * image.  If the write only touched a subregion of the cluster,
         * then backing image sectors have been lost in the untouched
         * region.  The solution is to flush after writing a new data
         * cluster and before updating the L2 table.
         */
        ret = bdrv_co_flush(s->bs->file->bs);
    }

out:
    qemu_co_mutex_lock(&s->table_lock);
    return ret;
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}
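
/* Consistency model in short: with a backing file, qed_aio_write_cow()
 * flushes data clusters before the L2 update, so metadata never points at
 * unwritten data and the dirty bit is unnecessary.  Without a backing file, a
 * crash between cluster allocation and the L2 update can at worst leak
 * clusters, so it is enough to set QED_F_NEED_CHECK once and let qed_check()
 * reclaim the leaks on the next open.
 */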

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int ret;

    /* Cancel timer when the first allocating request comes in */
    if (s->allocating_acb == NULL) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (s->allocating_acb != acb || s->allocating_write_reqs_plugged) {
        if (s->allocating_acb != NULL) {
            qemu_co_queue_wait(&s->allocating_write_reqs, &s->table_lock);
            assert(s->allocating_acb == NULL);
        }
        s->allocating_acb = acb;
        return -EAGAIN; /* start over with looking up table entries */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
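
    /* Zero writes do not allocate data clusters.  Per the QED on-disk format,
     * an L2 entry of 1 is the zero cluster marker (0 being the unallocated
     * marker), so cur_cluster below is set to that sentinel instead of a real
     * file offset and flows through to qed_aio_write_l2_update().
     */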
    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            return 0;
        }
        acb->cur_cluster = 1;
    } else {
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        ret = qed_write_header(s);
        if (ret < 0) {
            return ret;
        }
    }

    if (!(acb->flags & QED_AIOCB_ZERO)) {
        ret = qed_aio_write_cow(acb);
        if (ret < 0) {
            return ret;
        }
    }

    return qed_aio_write_l2_update(acb, acb->cur_cluster);
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset,
                                              size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                r = -ENOMEM;
                goto out;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write. */
    r = qed_aio_write_main(acb);
out:
    qemu_co_mutex_lock(&s->table_lock);
    return r;
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1 or
 *              QED_CLUSTER_ZERO
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_data(void *opaque, int ret,
                                           uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        return qed_aio_write_inplace(acb, offset, len);

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        return qed_aio_write_alloc(acb, len);

    default:
        g_assert_not_reached();
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1 or
 *              QED_CLUSTER_ZERO
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_read_data(void *opaque, int ret,
                                          uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->bs;
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads, otherwise read
     * data cluster directly.
     */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        r = 0;
    } else if (ret != QED_CLUSTER_FOUND) {
        r = qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                                  &acb->backing_qiov);
    } else {
        BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
        r = bdrv_co_preadv(bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
    }

    qemu_co_mutex_lock(&s->table_lock);
    return r;
}

/**
 * Begin next I/O or complete the request
 *
 * Loops over the remaining byte range one cluster run at a time.  A return of
 * -EAGAIN from qed_aio_write_data() means the request queued itself behind
 * another allocating write and must redo the cluster lookup, so it is not
 * treated as an error here.
 */
static int coroutine_fn qed_aio_next_io(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset;
    size_t len;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    while (1) {
        trace_qed_aio_next_io(s, acb, 0, acb->cur_pos + acb->cur_qiov.size);

        if (acb->backing_qiov) {
            qemu_iovec_destroy(acb->backing_qiov);
            g_free(acb->backing_qiov);
            acb->backing_qiov = NULL;
        }

        acb->qiov_offset += acb->cur_qiov.size;
        acb->cur_pos += acb->cur_qiov.size;
        qemu_iovec_reset(&acb->cur_qiov);

        /* Complete request */
        if (acb->cur_pos >= acb->end_pos) {
            ret = 0;
            break;
        }

        /* Find next cluster and start I/O */
        len = acb->end_pos - acb->cur_pos;
        ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset);
        if (ret < 0) {
            break;
        }

        if (acb->flags & QED_AIOCB_WRITE) {
            ret = qed_aio_write_data(acb, ret, offset, len);
        } else {
            ret = qed_aio_read_data(acb, ret, offset, len);
        }

        if (ret < 0 && ret != -EAGAIN) {
            break;
        }
    }

    trace_qed_aio_complete(s, acb, ret);
    qed_aio_complete(acb);
    qemu_co_mutex_unlock(&s->table_lock);
    return ret;
}

static int coroutine_fn qed_co_request(BlockDriverState *bs, int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       int flags)
{
    QEDAIOCB acb = {
        .bs = bs,
        .cur_pos = (uint64_t) sector_num * BDRV_SECTOR_SIZE,
        .end_pos = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE,
        .qiov = qiov,
        .flags = flags,
    };
    qemu_iovec_init(&acb.cur_qiov, qiov->niov);

    trace_qed_aio_setup(bs->opaque, &acb, sector_num, nb_sectors, NULL, flags);

    /* Start request */
    return qed_aio_next_io(&acb);
}

static int coroutine_fn bdrv_qed_co_readv(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, 0);
}

static int coroutine_fn bdrv_qed_co_writev(BlockDriverState *bs,
                                           int64_t sector_num, int nb_sectors,
                                           QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, QED_AIOCB_WRITE);
}

static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset,
                                                  int bytes,
                                                  BdrvRequestFlags flags)
{
    BDRVQEDState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;

    /* Fall back if the request is not aligned */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, bytes)) {
        return -ENOTSUP;
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = bytes;

    qemu_iovec_init_external(&qiov, &iov, 1);
    return qed_co_request(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                          bytes >> BDRV_SECTOR_BITS,
                          QED_AIOCB_WRITE | QED_AIOCB_ZERO);
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset,
                             PreallocMode prealloc, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Unsupported preallocation mode '%s'",
                   PreallocMode_str(prealloc));
        return -ENOTSUP;
    }

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "Invalid image size specified");
        return -EINVAL;
    }

    if ((uint64_t)offset < s->header.image_size) {
        error_setg(errp, "Shrinking images is currently not supported");
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
        error_setg_errno(errp, -ret, "Failed to update the image size");
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot
     * safely add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    bdrv_qed_init_state(bs);
    if (qemu_in_coroutine()) {
        qemu_co_mutex_lock(&s->table_lock);
    }
    ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, &local_err);
    if (qemu_in_coroutine()) {
        qemu_co_mutex_unlock(&s->table_lock);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        error_prepend(errp, "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_child_perm          = bdrv_format_default_perms,
    .bdrv_co_create_opts      = bdrv_qed_co_create_opts,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_block_status     = bdrv_qed_co_block_status,
    .bdrv_co_readv            = bdrv_qed_co_readv,
    .bdrv_co_writev           = bdrv_qed_co_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_co_drain_begin      = bdrv_qed_co_drain_begin,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);