/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "block/qdict.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/memalign.h"
#include "trace.h"
#include "qed.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"

static QemuOptsList qed_create_opts;

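/*
 * Editorial note on probe scoring (an assumption from the usual bdrv_probe
 * convention, not stated in this file): returning 100 reports a definite
 * match on the QED magic, while 0 rejects the buffer.
 */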
static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt: Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;

    qed_header_cpu_to_le(&s->header, &le);
    return bdrv_pwrite(s->bs->file, 0, sizeof(le), &le, 0);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 *
 * No new allocating reqs can start while this function runs.
 */
static int coroutine_fn GRAPH_RDLOCK qed_write_header(BDRVQEDState *s)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active. Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    uint8_t *buf;
    int ret;

    assert(s->allocating_acb || s->allocating_write_reqs_plugged);

    buf = qemu_blockalign(s->bs, len);

    ret = bdrv_co_pread(s->bs->file, 0, len, buf, 0);
    if (ret < 0) {
        goto out;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *) buf);

    ret = bdrv_co_pwrite(s->bs->file, 0, len, buf, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(buf);
    return ret;
}

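/*
 * Sizing sketch for the function below, assuming the qed.h defaults of a
 * 64 KiB cluster_size and a table_size of 4 clusters:
 *   table_entries = (4 * 65536) / 8 = 32768
 *   l2_size       = 32768 * 65536  = 2 GiB covered per L2 table
 *   maximum image = 32768 * 2 GiB  = 64 TiB
 */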
static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, n, buf, 0);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written. It updates BDRVQEDState but does not make any changes to the image
 * file.
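 *
 * Allocation is implicit in QED: the format associates file length with
 * allocation status (see the note in bdrv_qed_co_create() below), so bumping
 * s->file_size is the only bookkeeping needed; the clusters become real once
 * data is actually written at the returned offset.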
 *
 * Called with table_lock held.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 *
 * Called with table_lock held.
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static bool coroutine_fn qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);

    /* No reentrancy is allowed. */
    assert(!s->allocating_write_reqs_plugged);
    if (s->allocating_acb != NULL) {
        /* Another allocating write came concurrently. This cannot happen
         * from bdrv_qed_drain_begin, but it can happen when the timer runs.
         */
        qemu_co_mutex_unlock(&s->table_lock);
        return false;
    }

    s->allocating_write_reqs_plugged = true;
    qemu_co_mutex_unlock(&s->table_lock);
    return true;
}

static void coroutine_fn qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);
    assert(s->allocating_write_reqs_plugged);
    s->allocating_write_reqs_plugged = false;
    qemu_co_queue_next(&s->allocating_write_reqs);
    qemu_co_mutex_unlock(&s->table_lock);
}

static void coroutine_fn GRAPH_RDLOCK qed_need_check_timer(BDRVQEDState *s)
{
    int ret;

    trace_qed_need_check_timer_cb(s);
    assert_bdrv_graph_readable();

    if (!qed_plug_allocating_write_reqs(s)) {
        return;
    }

    /* Ensure writes are on disk before clearing flag */
    ret = bdrv_co_flush(s->bs->file->bs);
    if (ret < 0) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    ret = qed_write_header(s);
    (void) ret;

    qed_unplug_allocating_write_reqs(s);

    ret = bdrv_co_flush(s->bs);
    (void) ret;
}

static void coroutine_fn qed_need_check_timer_entry(void *opaque)
{
    BDRVQEDState *s = opaque;
    GRAPH_RDLOCK_GUARD();

    qed_need_check_timer(opaque);
    bdrv_dec_in_flight(s->bs);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;
    Coroutine *co = qemu_coroutine_create(qed_need_check_timer_entry, opaque);

    bdrv_inc_in_flight(s->bs);
    qemu_coroutine_enter(co);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static void bdrv_qed_drain_begin(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        Coroutine *co;

        qed_cancel_need_check_timer(s);
        co = qemu_coroutine_create(qed_need_check_timer_entry, s);
        bdrv_inc_in_flight(bs);
        aio_co_enter(bdrv_get_aio_context(bs), co);
    }
}

static void bdrv_qed_init_state(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    memset(s, 0, sizeof(BDRVQEDState));
    s->bs = bs;
    qemu_co_mutex_init(&s->table_lock);
    qemu_co_queue_init(&s->allocating_write_reqs);
}

/* Called with table_lock held. */
static int coroutine_fn GRAPH_RDLOCK
bdrv_qed_do_open(BlockDriverState *bs, QDict *options, int flags, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    ret = bdrv_co_pread(bs->file, 0, sizeof(le_header), &le_header, 0);
    if (ret < 0) {
        error_setg(errp, "Failed to read QED header");
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        error_setg(errp, "QED cluster size is invalid");
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_co_getlength(bs->file->bs);
    if (file_size < 0) {
        error_setg(errp, "Failed to get file length");
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        error_setg(errp, "QED table size is invalid");
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "QED image size is invalid");
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        error_setg(errp, "QED table offset is invalid");
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);
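
    /*
     * These shifts drive the virtual offset decomposition performed by the
     * qed_l1_index()/qed_l2_index() helpers (a sketch, assuming the inline
     * helpers in qed.h):
     *
     *   L1 index          = pos >> l1_shift
     *   L2 index          = (pos >> l2_shift) & l2_mask
     *   offset in cluster = pos & (cluster_size - 1)
     */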

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        error_setg(errp, "QED header size is too large");
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        g_autofree char *backing_file_str = NULL;

        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            error_setg(errp, "QED backing filename offset is invalid");
            return -EINVAL;
        }

        backing_file_str = g_malloc(sizeof(bs->backing_file));
        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size,
                              backing_file_str, sizeof(bs->backing_file));
        if (ret < 0) {
            error_setg(errp, "Failed to read backing filename");
            return ret;
        }

        if (!g_str_equal(backing_file_str, bs->backing_file)) {
            pstrcpy(bs->backing_file, sizeof(bs->backing_file),
                    backing_file_str);
            pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file),
                    backing_file_str);
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits. This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits. When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            error_setg(errp, "Failed to update header");
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_co_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        error_setg(errp, "Failed to read L1 table");
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed. There is no risk of corruption
         * since write operations are not possible. Therefore, allow
         * potentially inconsistent images to be opened read-only. This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                error_setg(errp, "Image corrupted");
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

typedef struct QEDOpenCo {
    BlockDriverState *bs;
    QDict *options;
    int flags;
    Error **errp;
    int ret;
} QEDOpenCo;

static void coroutine_fn GRAPH_RDLOCK bdrv_qed_open_entry(void *opaque)
{
    QEDOpenCo *qoc = opaque;
    BDRVQEDState *s = qoc->bs->opaque;

    qemu_co_mutex_lock(&s->table_lock);
    qoc->ret = bdrv_qed_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp);
    qemu_co_mutex_unlock(&s->table_lock);
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    QEDOpenCo qoc = {
        .bs = bs,
        .options = options,
        .flags = flags,
        .errp = errp,
        .ret = -EINPROGRESS
    };
    int ret;

    assume_graph_lock(); /* FIXME */

    ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
    if (ret < 0) {
        return ret;
    }

    bdrv_qed_init_state(bs);
    if (qemu_in_coroutine()) {
        bdrv_qed_open_entry(&qoc);
    } else {
        assert(qemu_get_current_aio_context() == qemu_get_aio_context());
        qemu_coroutine_enter(qemu_coroutine_create(bdrv_qed_open_entry, &qoc));
        BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
    }
    return qoc.ret;
}

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
    bs->bl.max_pwrite_zeroes = QEMU_ALIGN_DOWN(INT_MAX, s->header.cluster_size);
}

/* We have nothing to do for QED reopen; the stub just returns success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts,
                                           Error **errp)
{
    BlockdevCreateOptionsQed *qed_opts;
    BlockBackend *blk = NULL;
    BlockDriverState *bs = NULL;

    QEDHeader header;
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size;
    int ret = 0;

    assert(opts->driver == BLOCKDEV_DRIVER_QED);
    qed_opts = &opts->u.qed;

    /* Validate options and set default values */
    if (!qed_opts->has_cluster_size) {
        qed_opts->cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    }
    if (!qed_opts->has_table_size) {
        qed_opts->table_size = QED_DEFAULT_TABLE_SIZE;
    }

    if (!qed_is_cluster_size_valid(qed_opts->cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                   "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(qed_opts->table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                   "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(qed_opts->size, qed_opts->cluster_size,
                                 qed_opts->table_size))
    {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                   "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(qed_opts->cluster_size,
                                      qed_opts->table_size));
        return -EINVAL;
    }

    /* Create BlockBackend to write to the image */
    bs = bdrv_co_open_blockdev_ref(qed_opts->file, errp);
    if (bs == NULL) {
        return -EIO;
    }

    blk = blk_co_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL,
                             errp);
    if (!blk) {
        ret = -EPERM;
        goto out;
    }
    blk_set_allow_write_beyond_eof(blk, true);

    /* Prepare image format */
    header = (QEDHeader) {
        .magic = QED_MAGIC,
        .cluster_size = qed_opts->cluster_size,
        .table_size = qed_opts->table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = qed_opts->cluster_size,
        .image_size = qed_opts->size,
    };

    l1_size = header.cluster_size * header.table_size;

    /*
     * The QED format associates file length with allocation status,
     * so a new file (which is empty) must have a length of 0.
     */
    ret = blk_co_truncate(blk, 0, true, PREALLOC_MODE_OFF, 0, errp);
    if (ret < 0) {
        goto out;
    }

    if (qed_opts->backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(qed_opts->backing_file);

        if (qed_opts->has_backing_fmt) {
            const char *backing_fmt = BlockdevDriver_str(qed_opts->backing_fmt);
            if (qed_fmt_is_raw(backing_fmt)) {
                header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
            }
        }
    }

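    /*
     * Layout of the fresh image produced by the writes below (a sketch
     * derived from the offsets chosen above): the header and the optional
     * backing filename share cluster 0, the zeroed L1 table starts at
     * l1_table_offset (cluster 1), and data clusters are allocated past the
     * tables as the image gets written.
     */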
    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_co_pwrite(blk, 0, sizeof(le_header), &le_header, 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_co_pwrite(blk, sizeof(le_header), header.backing_filename_size,
                        qed_opts->backing_file, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_co_pwrite(blk, header.l1_table_offset, l1_size, l1_table, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    bdrv_unref(bs);
    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_qed_co_create_opts(BlockDriver *drv, const char *filename,
                        QemuOpts *opts, Error **errp)
{
    BlockdevCreateOptions *create_options = NULL;
    QDict *qdict;
    Visitor *v;
    BlockDriverState *bs = NULL;
    int ret;

    static const QDictRenames opt_renames[] = {
        { BLOCK_OPT_BACKING_FILE,       "backing-file" },
        { BLOCK_OPT_BACKING_FMT,        "backing-fmt" },
        { BLOCK_OPT_CLUSTER_SIZE,       "cluster-size" },
        { BLOCK_OPT_TABLE_SIZE,         "table-size" },
        { NULL, NULL },
    };

    /* Parse options and convert legacy syntax */
    qdict = qemu_opts_to_qdict_filtered(opts, NULL, &qed_create_opts, true);

    if (!qdict_rename_keys(qdict, opt_renames, errp)) {
        ret = -EINVAL;
        goto fail;
    }

    /* Create and open the file (protocol layer) */
    ret = bdrv_co_create_file(filename, opts, errp);
    if (ret < 0) {
        goto fail;
    }

    bs = bdrv_co_open(filename, NULL, NULL,
                      BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
    if (bs == NULL) {
        ret = -EIO;
        goto fail;
    }

    /* Now get the QAPI type BlockdevCreateOptions */
    qdict_put_str(qdict, "driver", "qed");
    qdict_put_str(qdict, "file", bs->node_name);

    v = qobject_input_visitor_new_flat_confused(qdict, errp);
    if (!v) {
        ret = -EINVAL;
        goto fail;
    }

    visit_type_BlockdevCreateOptions(v, NULL, &create_options, errp);
    visit_free(v);
    if (!create_options) {
        ret = -EINVAL;
        goto fail;
    }

    /* Silently round up size */
    assert(create_options->driver == BLOCKDEV_DRIVER_QED);
    create_options->u.qed.size =
        ROUND_UP(create_options->u.qed.size, BDRV_SECTOR_SIZE);

    /* Create the qed image (format layer) */
    ret = bdrv_qed_co_create(create_options, errp);

fail:
    qobject_unref(qdict);
    bdrv_unref(bs);
    qapi_free_BlockdevCreateOptions(create_options);
    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_qed_co_block_status(BlockDriverState *bs, bool want_zero, int64_t pos,
                         int64_t bytes, int64_t *pnum, int64_t *map,
                         BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = MIN(bytes, SIZE_MAX);
    int status;
    QEDRequest request = { .l2_table = NULL };
    uint64_t offset;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    ret = qed_find_cluster(s, &request, pos, &len, &offset);

    *pnum = len;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        *map = offset | qed_offset_into_cluster(s, pos);
        status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
        *file = bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        status = 0;
        break;
    default:
        assert(ret < 0);
        status = ret;
        break;
    }

    qed_unref_l2_cache_entry(request.l2_table);
    qemu_co_mutex_unlock(&s->table_lock);

    return status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @qiov:       Destination I/O vector
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static int coroutine_fn GRAPH_RDLOCK
qed_read_backing_file(BDRVQEDState *s, uint64_t pos, QEMUIOVector *qiov)
{
    if (s->bs->backing) {
        BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
        return bdrv_co_preadv(s->bs->backing, pos, qiov->size, qiov, 0);
    }
    qemu_iovec_memset(qiov, 0, 0, qiov->size);
    return 0;
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 */
static int coroutine_fn GRAPH_RDLOCK
qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos, uint64_t len,
                           uint64_t offset)
{
    QEMUIOVector qiov;
    int ret;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        return 0;
    }

    qemu_iovec_init_buf(&qiov, qemu_blockalign(s->bs, len), len);

    ret = qed_read_backing_file(s, pos, &qiov);

    if (ret) {
        goto out;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->bs->file, offset, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
out:
    qemu_vfree(qemu_iovec_buf(&qiov));
    return ret;
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
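 *
 * For example, with an allocated byte offset X, index=0 and n=3, the loop
 * below stores X, X + cluster_size and X + 2 * cluster_size into
 * offsets[0..2]; a zero or unallocated marker is instead repeated unchanged
 * for all n entries.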
 *
 * Called with table_lock held.
 */
static void coroutine_fn qed_update_l2_table(BDRVQEDState *s, QEDTable *table,
                                             int index, unsigned int n,
                                             uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}

/* Called with table_lock held. */
static void coroutine_fn qed_aio_complete(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Start next allocating write request waiting behind this one. Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue. This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == s->allocating_acb) {
        s->allocating_acb = NULL;
        if (!qemu_co_queue_empty(&s->allocating_write_reqs)) {
            qemu_co_queue_next(&s->allocating_write_reqs);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Update L1 table with new L2 table offset and write it out
 *
 * Called with table_lock held.
 */
static int coroutine_fn GRAPH_RDLOCK qed_aio_write_l1_update(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;
    int index, ret;

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = l2_table->offset;

    ret = qed_write_l1_table(s, index, 1);

    /* Commit the current L2 table to the cache */
    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    return ret;
}


/**
 * Update L2 table with new cluster offsets and write them out
 *
 * Called with table_lock held.
 */
static int coroutine_fn GRAPH_RDLOCK
qed_aio_write_l2_update(QEDAIOCB *acb, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index, ret;

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        ret = qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true);
        if (ret) {
            return ret;
        }
        return qed_aio_write_l1_update(acb);
    } else {
        /* Write out only the updated part of the L2 table */
        ret = qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters,
                                 false);
        if (ret) {
            return ret;
        }
    }
    return 0;
}

/**
 * Write data to the image file
 *
 * Called with table_lock *not* held.
 */
static int coroutine_fn GRAPH_RDLOCK qed_aio_write_main(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    return bdrv_co_pwritev(s->bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
}

/**
 * Populate untouched regions of new data cluster
 *
 * Called with table_lock held.
 */
static int coroutine_fn GRAPH_RDLOCK qed_aio_write_cow(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start, len, offset;
    int ret;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Populate front untouched region of new data cluster */
    start = qed_start_of_cluster(s, acb->cur_pos);
    len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    ret = qed_copy_from_backing_file(s, start, len, acb->cur_cluster);
    if (ret < 0) {
        goto out;
    }

    /* Populate back untouched region of new data cluster */
    start = acb->cur_pos + acb->cur_qiov.size;
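    /* Length from 'start' up to the next cluster boundary */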
    len = qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    offset = acb->cur_cluster +
             qed_offset_into_cluster(s, acb->cur_pos) +
             acb->cur_qiov.size;

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    ret = qed_copy_from_backing_file(s, start, len, offset);
    if (ret < 0) {
        goto out;
    }

    ret = qed_aio_write_main(acb);
    if (ret < 0) {
        goto out;
    }

    if (s->bs->backing) {
        /*
         * Flush new data clusters before updating the L2 table
         *
         * This flush is necessary when a backing file is in use. A crash
         * during an allocating write could result in empty clusters in the
         * image. If the write only touched a subregion of the cluster,
         * then backing image sectors have been lost in the untouched
         * region. The solution is to flush after writing a new data
         * cluster and before updating the L2 table.
         */
        ret = bdrv_co_flush(s->bs->file->bs);
    }

out:
    qemu_co_mutex_lock(&s->table_lock);
    return ret;
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
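 *
 * The bit is QED's crash-consistency marker: it is set (and the header
 * written out) before the first allocating write proceeds, and the
 * need-check timer clears it again once allocating writes have been idle
 * for QED_NEED_CHECK_TIMEOUT seconds (see qed_start_need_check_timer()).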
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn GRAPH_RDLOCK
qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int ret;

    /* Cancel timer when the first allocating request comes in */
    if (s->allocating_acb == NULL) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (s->allocating_acb != acb || s->allocating_write_reqs_plugged) {
        if (s->allocating_acb != NULL) {
            qemu_co_queue_wait(&s->allocating_write_reqs, &s->table_lock);
            assert(s->allocating_acb == NULL);
        }
        s->allocating_acb = acb;
        return -EAGAIN; /* start over with looking up table entries */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            return 0;
        }
        acb->cur_cluster = 1;
    } else {
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        ret = qed_write_header(s);
        if (ret < 0) {
            return ret;
        }
    }

    if (!(acb->flags & QED_AIOCB_ZERO)) {
        ret = qed_aio_write_cow(acb);
        if (ret < 0) {
            return ret;
        }
    }

    return qed_aio_write_l2_update(acb, acb->cur_cluster);
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn GRAPH_RDLOCK
qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                r = -ENOMEM;
                goto out;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write. */
    r = qed_aio_write_main(acb);
out:
    qemu_co_mutex_lock(&s->table_lock);
    return r;
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2 or QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn GRAPH_RDLOCK
qed_aio_write_data(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        return qed_aio_write_inplace(acb, offset, len);

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        return qed_aio_write_alloc(acb, len);

    default:
        g_assert_not_reached();
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2 or QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn GRAPH_RDLOCK
qed_aio_read_data(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->bs;
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads, otherwise read
     * data cluster directly.
     */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        r = 0;
    } else if (ret != QED_CLUSTER_FOUND) {
        r = qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov);
    } else {
        BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
        r = bdrv_co_preadv(bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
    }

    qemu_co_mutex_lock(&s->table_lock);
    return r;
}

/**
 * Begin next I/O or complete the request
 */
static int coroutine_fn GRAPH_RDLOCK qed_aio_next_io(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset;
    size_t len;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    while (1) {
        trace_qed_aio_next_io(s, acb, 0, acb->cur_pos + acb->cur_qiov.size);

        acb->qiov_offset += acb->cur_qiov.size;
        acb->cur_pos += acb->cur_qiov.size;
        qemu_iovec_reset(&acb->cur_qiov);

        /* Complete request */
        if (acb->cur_pos >= acb->end_pos) {
            ret = 0;
            break;
        }

        /* Find next cluster and start I/O */
        len = acb->end_pos - acb->cur_pos;
        ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset);
        if (ret < 0) {
            break;
        }

        if (acb->flags & QED_AIOCB_WRITE) {
            ret = qed_aio_write_data(acb, ret, offset, len);
        } else {
            ret = qed_aio_read_data(acb, ret, offset, len);
        }

        if (ret < 0 && ret != -EAGAIN) {
            break;
        }
    }

    trace_qed_aio_complete(s, acb, ret);
    qed_aio_complete(acb);
    qemu_co_mutex_unlock(&s->table_lock);
    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
qed_co_request(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov,
               int nb_sectors, int flags)
{
    QEDAIOCB acb = {
        .bs         = bs,
        .cur_pos    = (uint64_t) sector_num * BDRV_SECTOR_SIZE,
        .end_pos    = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE,
        .qiov       = qiov,
        .flags      = flags,
    };
    qemu_iovec_init(&acb.cur_qiov, qiov->niov);

    trace_qed_aio_setup(bs->opaque, &acb, sector_num, nb_sectors, NULL, flags);

    /* Start request */
    return qed_aio_next_io(&acb);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_qed_co_readv(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
                  QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, 0);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_qed_co_writev(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
                   QEMUIOVector *qiov, int flags)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, QED_AIOCB_WRITE);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
                          BdrvRequestFlags flags)
{
    BDRVQEDState *s = bs->opaque;

    /*
     * Zero writes start without an I/O buffer. If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);

    /*
     * QED is not prepared for 63bit write-zero requests, so rely on
     * max_pwrite_zeroes.
     */
    assert(bytes <= INT_MAX);

    /* Fall back if the request is not aligned */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, bytes)) {
        return -ENOTSUP;
    }

    return qed_co_request(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                          bytes >> BDRV_SECTOR_BITS,
                          QED_AIOCB_WRITE | QED_AIOCB_ZERO);
}

static int coroutine_fn bdrv_qed_co_truncate(BlockDriverState *bs,
                                             int64_t offset,
                                             bool exact,
                                             PreallocMode prealloc,
                                             BdrvRequestFlags flags,
                                             Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Unsupported preallocation mode '%s'",
                   PreallocMode_str(prealloc));
        return -ENOTSUP;
    }

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "Invalid image size specified");
        return -EINVAL;
    }

    if ((uint64_t)offset < s->header.image_size) {
        error_setg(errp, "Shrinking images is currently not supported");
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
        error_setg_errno(errp, -ret, "Failed to update the image size");
    }
    return ret;
}

static int64_t coroutine_fn bdrv_qed_co_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int coroutine_fn
bdrv_qed_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active. If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer_len, buffer, 0);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void coroutine_fn GRAPH_RDLOCK
bdrv_qed_co_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    int ret;

    bdrv_qed_close(bs);

    bdrv_qed_init_state(bs);
    qemu_co_mutex_lock(&s->table_lock);
    ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, errp);
    qemu_co_mutex_unlock(&s->table_lock);
    if (ret < 0) {
        error_prepend(errp, "Could not reopen qed layer: ");
    }
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_qed_co_check(BlockDriverState *bs, BdrvCheckResult *result,
                  BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    ret = qed_check(s, result, !!fix);
    qemu_co_mutex_unlock(&s->table_lock);

    return ret;
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .is_format                = true,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_child_perm          = bdrv_default_perms,
    .bdrv_co_create           = bdrv_qed_co_create,
    .bdrv_co_create_opts      = bdrv_qed_co_create_opts,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_block_status     = bdrv_qed_co_block_status,
    .bdrv_co_readv            = bdrv_qed_co_readv,
    .bdrv_co_writev           = bdrv_qed_co_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_co_truncate         = bdrv_qed_co_truncate,
    .bdrv_co_getlength        = bdrv_qed_co_getlength,
    .bdrv_co_get_info         = bdrv_qed_co_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_co_invalidate_cache = bdrv_qed_co_invalidate_cache,
    .bdrv_co_check            = bdrv_qed_co_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_drain_begin         = bdrv_qed_drain_begin,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);