/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "migration/migration.h"
#include "sysemu/block-backend.h"

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size = sizeof(QEDAIOCB),
};

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
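    /* The magic matched, so claim the image with a high probe score; more
     * generic drivers such as raw report lower scores.
     */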
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

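/* All QED header fields are stored little-endian on disk; the two helpers
 * below convert between the on-disk and the CPU-native representation.
 */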
static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active. Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}

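/**
 * Compute the maximum image size representable by the L1/L2 tables
 *
 * As a worked example, with the defaults from qed.h (64 KiB clusters and a
 * table_size of 4 clusters):
 *   table_entries = (4 * 65536) / 8 = 32768
 *   l2_size       = 32768 * 65536  = 2 GiB covered per L2 table
 *   maximum image = 2 GiB * 32768 L1 entries = 64 TiB
 */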
static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written. It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(QEDAIOCB *acb, int ret);

static void qed_aio_start_io(QEDAIOCB *acb)
{
    qed_aio_next_io(acb, 0);
}

static void qed_aio_next_io_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    qed_aio_next_io(acb, ret);
}

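/* QED_F_NEED_CHECK is cleared lazily. Once allocating writes have drained,
 * a timer plugs new allocating writes, flushes outstanding data, clears the
 * flag in the header, writes the header out, issues another flush, and
 * unplugs allocating writes again. The helpers below implement the steps of
 * that sequence.
 */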
static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_start_io(acb);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_acquire(s);
    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs->file->bs, qed_clear_need_check, s);
    qed_release(s);
}

void qed_acquire(BDRVQEDState *s)
{
    aio_context_acquire(bdrv_get_aio_context(s->bs));
}

void qed_release(BDRVQEDState *s)
{
    aio_context_release(bdrv_get_aio_context(s->bs));
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static void bdrv_qed_drain(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        qed_cancel_need_check_timer(s);
        qed_need_check_timer_cb(s);
    }
}

static int bdrv_qed_do_open(BlockDriverState *bs, QDict *options, int flags,
                            Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);
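
    /* With the default 64 KiB clusters and 32768-entry tables this yields
     * l2_shift = 16, l2_mask = 0x7fff and l1_shift = 31, so a byte position
     * decomposes as in the qed_l1_index()/qed_l2_index() helpers in qed.h:
     *   L1 index            = pos >> l1_shift
     *   L2 index            = (pos >> l2_shift) & l2_mask
     *   offset into cluster = pos & (cluster_size - 1)
     */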

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits. This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits. When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed. There is no risk of corruption
         * since write operations are not possible. Therefore, allow
         * potentially inconsistent images to be opened read-only. This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    return bdrv_qed_do_open(bs, options, flags, errp);
}

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
}

/* We have nothing to do for QED reopen, stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

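/* qed_create() lays out a new image as follows: the QED header at offset 0
 * (header_size is one cluster), the backing filename, if any, directly after
 * the header structure within the first cluster, and a zeroed L1 table at
 * l1_table_offset, which is set to exactly one cluster.
 */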
static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockBackend *blk;

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    blk = blk_new_open(filename, NULL, NULL,
                       BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
                       &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        return -EIO;
    }

    blk_set_allow_write_beyond_eof(blk, true);

    /* File must start empty and grow, check truncate is supported */
    ret = blk_truncate(blk, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), backing_file,
                     header.backing_filename_size, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    return ret;
}

static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}

typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
    BlockDriverState **file;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        *cb->file = cb->bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        aio_co_wake(cb->co);
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                         int64_t sector_num,
                                                         int nb_sectors, int *pnum,
                                                         BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
        .file = file,
    };
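    /* qed_is_allocated_cb() never produces BDRV_BLOCK_OFFSET_MASK as a
     * status, so it doubles as a sentinel meaning "callback has not run
     * yet".
     */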
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 * @cb:             Completion function
 * @opaque:         User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  QEMUIOVector **backing_qiov,
                                  BlockCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length. Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing, pos / BDRV_SECTOR_SIZE,
                   *backing_qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (copy_cb->backing_qiov) {
        qemu_iovec_destroy(copy_cb->backing_qiov);
        g_free(copy_cb->backing_qiov);
        copy_cb->backing_qiov = NULL;
    }

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->backing_qiov = NULL;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov, &copy_cb->backing_qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
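 *
 * Offsets for allocated clusters advance by cluster_size from one entry to
 * the next; the zero and unallocated markers are stored unchanged in every
 * entry.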
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}

static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;

    qemu_aio_unref(acb);

    /* Invoke callback */
    qed_acquire(s);
    cb(user_opaque, ret);
    qed_release(s);
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
                            qed_aio_complete_bh, acb);

    /* Start next allocating write request waiting behind this one. Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue. This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_start_io(acb);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(acb, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                        offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io_cb, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use. A crash during an
 * allocating write could result in empty clusters in the image. If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region. The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file->bs, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io_cb;
    } else {
        if (s->bs->backing) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}

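/* An allocating write populates a new data cluster in three steps: the
 * untouched region in front of the request is copied from the backing file
 * (prefill), then the untouched region behind it (postfill), and finally the
 * request data itself is written by qed_aio_write_main().
 */
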
/**
 * Populate the untouched region at the back of a new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate the untouched region at the front of a new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_start_io(acb);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->common.bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                qed_aio_complete(acb, -ENOMEM);
                return;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_start_io(acb);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              &acb->backing_qiov, qed_aio_next_io_cb, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io_cb, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    if (acb->backing_qiov) {
        qemu_iovec_destroy(acb->backing_qiov);
        g_free(acb->backing_qiov);
        acb->backing_qiov = NULL;
    }

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}

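/* Request lifecycle: qed_aio_setup() initializes the AIOCB and starts the
 * request. Each qed_aio_next_io() iteration looks up the next run of
 * clusters with qed_find_cluster(), the qed_aio_read_data()/
 * qed_aio_write_data() callbacks perform the I/O, and qed_aio_complete()
 * finishes the request once cur_pos reaches end_pos.
 */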
static BlockAIOCB *qed_aio_setup(BlockDriverState *bs,
                                 int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockCompletionFunc *cb,
                                 void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->backing_qiov = NULL;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_start_io(acb);
    return &acb->common;
}

static BlockAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                      int64_t sector_num,
                                      QEMUIOVector *qiov, int nb_sectors,
                                      BlockCompletionFunc *cb,
                                      void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}

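/* The completion callback below can run before the coroutine has a chance
 * to yield, so a separate 'done' flag is needed; 'co' is only set (and
 * later woken) when the coroutine actually had to yield.
 */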
typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_pwrite_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        aio_co_wake(cb->co);
    }
}

static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset,
                                                  int count,
                                                  BdrvRequestFlags flags)
{
    BlockAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Fall back if the request is not aligned */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, count)) {
        return -ENOTSUP;
    }

    /* Zero writes start without an I/O buffer. If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = count;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                             count >> BDRV_SECTOR_BITS,
                             qed_co_pwrite_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active. If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    memset(s, 0, sizeof(BDRVQEDState));
    ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        error_prepend(errp, "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_child_perm          = bdrv_format_default_perms,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_drain               = bdrv_qed_drain,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);