/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "block/qdict.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "trace.h"
#include "qed.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"

static QemuOptsList qed_create_opts;

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt: Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}
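
/* Illustration (not part of the driver): all header fields are stored
 * little-endian, so the first four bytes of any QED file are 'Q', 'E', 'D',
 * 0x00 regardless of host byte order.  Even on a big-endian host:
 *
 *     QEDHeader le;
 *     qed_header_cpu_to_le(&s->header, &le);
 *     // le.magic now holds the bytes "QED\0" in memory order
 */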

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 *
 * No new allocating reqs can start while this function runs.
 */
static int coroutine_fn qed_write_header(BDRVQEDState *s)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active. Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    uint8_t *buf;
    int ret;

    assert(s->allocating_acb || s->allocating_write_reqs_plugged);

    buf = qemu_blockalign(s->bs, len);

    ret = bdrv_co_pread(s->bs->file, 0, len, buf, 0);
    if (ret < 0) {
        goto out;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *) buf);

    ret = bdrv_co_pwrite(s->bs->file, 0, len, buf, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(buf);
    return ret;
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
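
/* Worked example (illustration only): with the default 64 KiB cluster size
 * and the default table size of 4 clusters:
 *
 *     table_entries = (4 * 65536) / sizeof(uint64_t) = 32768
 *     l2_size       = 32768 * 65536                  = 2 GiB per L2 table
 *     max image     = 2 GiB * 32768 L1 entries       = 64 TiB
 */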

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}
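
/* The (x & (x - 1)) test clears the lowest set bit of x, so it yields zero
 * only for powers of two.  For example, 65536 & 65535 == 0 (accepted) while
 * 65537 & 65536 != 0 (rejected).  qed_is_table_size_valid() below uses the
 * same trick.
 */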

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file: Image file
 * @offset: File offset to start of string, in bytes
 * @n: String length in bytes
 * @buf: Destination buffer
 * @buflen: Destination buffer length in bytes
 * @ret: 0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s: QED state
 * @n: Number of contiguous clusters to allocate
 * @ret: Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written. It updates BDRVQEDState but does not make any changes to the image
 * file.
 *
 * Called with table_lock held.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}
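
/* Illustration: allocation just bumps the in-memory end-of-file marker.
 * With 64 KiB clusters and s->file_size == 0x100000, qed_alloc_clusters(s, 2)
 * returns 0x100000 and advances s->file_size to 0x120000; the image file
 * itself grows only when data or tables are actually written there.
 */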

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 *
 * Called with table_lock held.
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static bool qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);

    /* No reentrancy is allowed. */
    assert(!s->allocating_write_reqs_plugged);
    if (s->allocating_acb != NULL) {
        /* Another allocating write came concurrently. This cannot happen
         * from bdrv_qed_co_drain_begin, but it can happen when the timer runs.
         */
        qemu_co_mutex_unlock(&s->table_lock);
        return false;
    }

    s->allocating_write_reqs_plugged = true;
    qemu_co_mutex_unlock(&s->table_lock);
    return true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);
    assert(s->allocating_write_reqs_plugged);
    s->allocating_write_reqs_plugged = false;
    qemu_co_queue_next(&s->allocating_write_reqs);
    qemu_co_mutex_unlock(&s->table_lock);
}

static void coroutine_fn qed_need_check_timer_entry(void *opaque)
{
    BDRVQEDState *s = opaque;
    int ret;

    trace_qed_need_check_timer_cb(s);

    if (!qed_plug_allocating_write_reqs(s)) {
        return;
    }

    /* Ensure writes are on disk before clearing flag */
    ret = bdrv_co_flush(s->bs->file->bs);
    if (ret < 0) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    ret = qed_write_header(s);
    (void) ret;

    qed_unplug_allocating_write_reqs(s);

    ret = bdrv_co_flush(s->bs);
    (void) ret;
}
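
/* The "mark clean" sequence above is, in order:
 *
 *     1. plug   - refuse new allocating writes (bail out if one is active)
 *     2. flush  - ensure all data and table writes have reached the disk
 *     3. clear  - rewrite the header without QED_F_NEED_CHECK
 *     4. unplug - wake any queued allocating writes
 *
 * A crash at any point either leaves QED_F_NEED_CHECK set (the image is
 * checked on next open) or leaves a fully consistent image.
 */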

static void qed_need_check_timer_cb(void *opaque)
{
    Coroutine *co = qemu_coroutine_create(qed_need_check_timer_entry, opaque);
    qemu_coroutine_enter(co);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static void coroutine_fn bdrv_qed_co_drain_begin(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        qed_cancel_need_check_timer(s);
        qed_need_check_timer_entry(s);
    }
}

static void bdrv_qed_init_state(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    memset(s, 0, sizeof(BDRVQEDState));
    s->bs = bs;
    qemu_co_mutex_init(&s->table_lock);
    qemu_co_queue_init(&s->allocating_write_reqs);
}

/* Called with table_lock held. */
static int coroutine_fn bdrv_qed_do_open(BlockDriverState *bs, QDict *options,
                                         int flags, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size,
                              bs->auto_backing_file,
                              sizeof(bs->auto_backing_file));
        if (ret < 0) {
            return ret;
        }
        pstrcpy(bs->backing_file, sizeof(bs->backing_file),
                bs->auto_backing_file);

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits. This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits. When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed. There is no risk of corruption
         * since write operations are not possible. Therefore, allow
         * potentially inconsistent images to be opened read-only. This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

typedef struct QEDOpenCo {
    BlockDriverState *bs;
    QDict *options;
    int flags;
    Error **errp;
    int ret;
} QEDOpenCo;

static void coroutine_fn bdrv_qed_open_entry(void *opaque)
{
    QEDOpenCo *qoc = opaque;
    BDRVQEDState *s = qoc->bs->opaque;

    qemu_co_mutex_lock(&s->table_lock);
    qoc->ret = bdrv_qed_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp);
    qemu_co_mutex_unlock(&s->table_lock);
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    QEDOpenCo qoc = {
        .bs = bs,
        .options = options,
        .flags = flags,
        .errp = errp,
        .ret = -EINPROGRESS
    };

    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    bdrv_qed_init_state(bs);
    if (qemu_in_coroutine()) {
        bdrv_qed_open_entry(&qoc);
    } else {
        assert(qemu_get_current_aio_context() == qemu_get_aio_context());
        qemu_coroutine_enter(qemu_coroutine_create(bdrv_qed_open_entry, &qoc));
        BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
    }
    return qoc.ret;
}

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
}

/* We have nothing to do for QED reopen, stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts,
                                           Error **errp)
{
    BlockdevCreateOptionsQed *qed_opts;
    BlockBackend *blk = NULL;
    BlockDriverState *bs = NULL;

    QEDHeader header;
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size;
    int ret = 0;

    assert(opts->driver == BLOCKDEV_DRIVER_QED);
    qed_opts = &opts->u.qed;

    /* Validate options and set default values */
    if (!qed_opts->has_cluster_size) {
        qed_opts->cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    }
    if (!qed_opts->has_table_size) {
        qed_opts->table_size = QED_DEFAULT_TABLE_SIZE;
    }

    if (!qed_is_cluster_size_valid(qed_opts->cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(qed_opts->table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(qed_opts->size, qed_opts->cluster_size,
                                 qed_opts->table_size))
    {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(qed_opts->cluster_size,
                                      qed_opts->table_size));
        return -EINVAL;
    }

    /* Create BlockBackend to write to the image */
    bs = bdrv_open_blockdev_ref(qed_opts->file, errp);
    if (bs == NULL) {
        return -EIO;
    }

    blk = blk_new(bdrv_get_aio_context(bs),
                  BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        goto out;
    }
    blk_set_allow_write_beyond_eof(blk, true);

    /* Prepare image format */
    header = (QEDHeader) {
        .magic = QED_MAGIC,
        .cluster_size = qed_opts->cluster_size,
        .table_size = qed_opts->table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = qed_opts->cluster_size,
        .image_size = qed_opts->size,
    };

    l1_size = header.cluster_size * header.table_size;

    /* File must start empty and grow, check truncate is supported */
    ret = blk_truncate(blk, 0, PREALLOC_MODE_OFF, errp);
    if (ret < 0) {
        goto out;
    }

    if (qed_opts->has_backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(qed_opts->backing_file);

        if (qed_opts->has_backing_fmt) {
            const char *backing_fmt = BlockdevDriver_str(qed_opts->backing_fmt);
            if (qed_fmt_is_raw(backing_fmt)) {
                header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
            }
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), qed_opts->backing_file,
                     header.backing_filename_size, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    bdrv_unref(bs);
    return ret;
}
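
/* Usage sketch (illustration only; the node name "proto" is hypothetical):
 * this function implements QMP blockdev-create for QED, e.g.
 *
 *     { "execute": "blockdev-create",
 *       "arguments": { "job-id": "create0",
 *                      "options": { "driver": "qed",
 *                                   "file": "proto",
 *                                   "size": 10737418240,
 *                                   "cluster-size": 65536 } } }
 */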

static int coroutine_fn bdrv_qed_co_create_opts(const char *filename,
                                                QemuOpts *opts,
                                                Error **errp)
{
    BlockdevCreateOptions *create_options = NULL;
    QDict *qdict;
    Visitor *v;
    BlockDriverState *bs = NULL;
    Error *local_err = NULL;
    int ret;

    static const QDictRenames opt_renames[] = {
        { BLOCK_OPT_BACKING_FILE, "backing-file" },
        { BLOCK_OPT_BACKING_FMT, "backing-fmt" },
        { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" },
        { BLOCK_OPT_TABLE_SIZE, "table-size" },
        { NULL, NULL },
    };

    /* Parse options and convert legacy syntax */
    qdict = qemu_opts_to_qdict_filtered(opts, NULL, &qed_create_opts, true);

    if (!qdict_rename_keys(qdict, opt_renames, errp)) {
        ret = -EINVAL;
        goto fail;
    }

    /* Create and open the file (protocol layer) */
    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto fail;
    }

    bs = bdrv_open(filename, NULL, NULL,
                   BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
    if (bs == NULL) {
        ret = -EIO;
        goto fail;
    }

    /* Now get the QAPI type BlockdevCreateOptions */
    qdict_put_str(qdict, "driver", "qed");
    qdict_put_str(qdict, "file", bs->node_name);

    v = qobject_input_visitor_new_flat_confused(qdict, errp);
    if (!v) {
        ret = -EINVAL;
        goto fail;
    }

    visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err);
    visit_free(v);

    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    /* Silently round up size */
    assert(create_options->driver == BLOCKDEV_DRIVER_QED);
    create_options->u.qed.size =
        ROUND_UP(create_options->u.qed.size, BDRV_SECTOR_SIZE);

    /* Create the qed image (format layer) */
    ret = bdrv_qed_co_create(create_options, errp);

fail:
    qobject_unref(qdict);
    bdrv_unref(bs);
    qapi_free_BlockdevCreateOptions(create_options);
    return ret;
}
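
/* Usage sketch (illustration only; file names are hypothetical): this is the
 * legacy creation path used by e.g.
 *
 *     qemu-img create -f qed \
 *         -o cluster_size=65536,backing_file=base.img,backing_fmt=raw \
 *         overlay.qed 10G
 *
 * The underscore option names are renamed to their dashed QAPI equivalents
 * via opt_renames above before visiting BlockdevCreateOptions.
 */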

static int coroutine_fn bdrv_qed_co_block_status(BlockDriverState *bs,
                                                 bool want_zero,
                                                 int64_t pos, int64_t bytes,
                                                 int64_t *pnum, int64_t *map,
                                                 BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = MIN(bytes, SIZE_MAX);
    int status;
    QEDRequest request = { .l2_table = NULL };
    uint64_t offset;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    ret = qed_find_cluster(s, &request, pos, &len, &offset);

    *pnum = len;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        *map = offset | qed_offset_into_cluster(s, pos);
        status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
        *file = bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        status = 0;
        break;
    default:
        assert(ret < 0);
        status = ret;
        break;
    }

    qed_unref_l2_cache_entry(request.l2_table);
    qemu_co_mutex_unlock(&s->table_lock);

    return status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s: QED state
 * @pos: Byte position in device
 * @qiov: Destination I/O vector
 * @backing_qiov: Possibly shortened copy of qiov, to be allocated here
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static int coroutine_fn qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                              QEMUIOVector *qiov,
                                              QEMUIOVector **backing_qiov)
{
    uint64_t backing_length = 0;
    size_t size;
    int ret;

    /* If there is a backing file, get its length. Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            return l;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        return 0;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    ret = bdrv_co_preadv(s->bs->backing, pos, size, *backing_qiov, 0);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
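
/* Worked example (illustration only): with backing_length == 0x100000, a read
 * of qiov->size == 0x2000 bytes at pos == 0xff000 first zeroes the whole
 * buffer (the read straddles EOF), then reads only the first 0x1000 bytes
 * from the backing file; the trailing 0x1000 bytes remain zero.
 */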

/**
 * Copy data from backing file into the image
 *
 * @s: QED state
 * @pos: Byte position in device
 * @len: Number of bytes
 * @offset: Byte offset in image file
 */
static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s,
                                                   uint64_t pos, uint64_t len,
                                                   uint64_t offset)
{
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov = NULL;
    int ret;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        return 0;
    }

    qemu_iovec_init_buf(&qiov, qemu_blockalign(s->bs, len), len);

    ret = qed_read_backing_file(s, pos, &qiov, &backing_qiov);

    if (backing_qiov) {
        qemu_iovec_destroy(backing_qiov);
        g_free(backing_qiov);
        backing_qiov = NULL;
    }

    if (ret) {
        goto out;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->bs->file, offset, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
out:
    qemu_vfree(qemu_iovec_buf(&qiov));
    return ret;
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s: QED state
 * @table: L2 table
 * @index: First cluster index
 * @n: Number of contiguous clusters
 * @cluster: First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 *
 * Called with table_lock held.
 */
static void coroutine_fn qed_update_l2_table(BDRVQEDState *s, QEDTable *table,
                                             int index, unsigned int n,
                                             uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
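
/* Illustration: with 64 KiB clusters, linking n == 3 clusters at index 8 with
 * cluster == 0x200000 stores 0x200000, 0x210000 and 0x220000 in entries
 * 8..10. If cluster is the zero or unallocated marker, the marker itself is
 * repeated for all n entries because markers are not byte offsets.
 */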

/* Called with table_lock held. */
static void coroutine_fn qed_aio_complete(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Start next allocating write request waiting behind this one. Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue. This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == s->allocating_acb) {
        s->allocating_acb = NULL;
        if (!qemu_co_queue_empty(&s->allocating_write_reqs)) {
            qemu_co_queue_next(&s->allocating_write_reqs);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Update L1 table with new L2 table offset and write it out
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_l1_update(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;
    int index, ret;

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = l2_table->offset;

    ret = qed_write_l1_table(s, index, 1);

    /* Commit the current L2 table to the cache */
    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    return ret;
}


/**
 * Update L2 table with new cluster offsets and write them out
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_l2_update(QEDAIOCB *acb, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index, ret;

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        ret = qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true);
        if (ret) {
            return ret;
        }
        return qed_aio_write_l1_update(acb);
    } else {
        /* Write out only the updated part of the L2 table */
        ret = qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters,
                                 false);
        if (ret) {
            return ret;
        }
    }
    return 0;
}

/**
 * Write data to the image file
 *
 * Called with table_lock *not* held.
 */
static int coroutine_fn qed_aio_write_main(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    return bdrv_co_pwritev(s->bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
}

/**
 * Populate untouched regions of new data cluster
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_cow(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start, len, offset;
    int ret;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Populate front untouched region of new data cluster */
    start = qed_start_of_cluster(s, acb->cur_pos);
    len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    ret = qed_copy_from_backing_file(s, start, len, acb->cur_cluster);
    if (ret < 0) {
        goto out;
    }

    /* Populate back untouched region of new data cluster */
    start = acb->cur_pos + acb->cur_qiov.size;
    len = qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    offset = acb->cur_cluster +
             qed_offset_into_cluster(s, acb->cur_pos) +
             acb->cur_qiov.size;

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    ret = qed_copy_from_backing_file(s, start, len, offset);
    if (ret < 0) {
        goto out;
    }

    ret = qed_aio_write_main(acb);
    if (ret < 0) {
        goto out;
    }

    if (s->bs->backing) {
        /*
         * Flush new data clusters before updating the L2 table
         *
         * This flush is necessary when a backing file is in use. A crash
         * during an allocating write could result in empty clusters in the
         * image. If the write only touched a subregion of the cluster,
         * then backing image sectors have been lost in the untouched
         * region. The solution is to flush after writing a new data
         * cluster and before updating the L2 table.
         */
        ret = bdrv_co_flush(s->bs->file->bs);
    }

out:
    qemu_co_mutex_lock(&s->table_lock);
    return ret;
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

/**
 * Write new data cluster
 *
 * @acb: Write request
 * @len: Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int ret;

    /* Cancel timer when the first allocating request comes in */
    if (s->allocating_acb == NULL) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (s->allocating_acb != acb || s->allocating_write_reqs_plugged) {
        if (s->allocating_acb != NULL) {
            qemu_co_queue_wait(&s->allocating_write_reqs, &s->table_lock);
            assert(s->allocating_acb == NULL);
        }
        s->allocating_acb = acb;
        return -EAGAIN; /* start over with looking up table entries */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            return 0;
        }
        acb->cur_cluster = 1;
    } else {
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        ret = qed_write_header(s);
        if (ret < 0) {
            return ret;
        }
    }

    if (!(acb->flags & QED_AIOCB_ZERO)) {
        ret = qed_aio_write_cow(acb);
        if (ret < 0) {
            return ret;
        }
    }

    return qed_aio_write_l2_update(acb, acb->cur_cluster);
}

/**
 * Write data cluster in place
 *
 * @acb: Write request
 * @offset: Cluster offset in bytes
 * @len: Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset,
                                              size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                r = -ENOMEM;
                goto out;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write. */
    r = qed_aio_write_main(acb);
out:
    qemu_co_mutex_lock(&s->table_lock);
    return r;
}

/**
 * Write data cluster
 *
 * @opaque: Write request
 * @ret: QED_CLUSTER_FOUND, QED_CLUSTER_ZERO, QED_CLUSTER_L2 or QED_CLUSTER_L1
 * @offset: Cluster offset in bytes
 * @len: Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_data(void *opaque, int ret,
                                           uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        return qed_aio_write_inplace(acb, offset, len);

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        return qed_aio_write_alloc(acb, len);

    default:
        g_assert_not_reached();
    }
}

/**
 * Read data cluster
 *
 * @opaque: Read request
 * @ret: QED_CLUSTER_FOUND, QED_CLUSTER_ZERO, QED_CLUSTER_L2 or QED_CLUSTER_L1
 * @offset: Cluster offset in bytes
 * @len: Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_read_data(void *opaque, int ret,
                                          uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->bs;
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads, otherwise read
     * data cluster directly.
     */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        r = 0;
    } else if (ret != QED_CLUSTER_FOUND) {
        r = qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                                  &acb->backing_qiov);
    } else {
        BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
        r = bdrv_co_preadv(bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
    }

    qemu_co_mutex_lock(&s->table_lock);
    return r;
}

/**
 * Begin next I/O or complete the request
 */
static int coroutine_fn qed_aio_next_io(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset;
    size_t len;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    while (1) {
        trace_qed_aio_next_io(s, acb, 0, acb->cur_pos + acb->cur_qiov.size);

        if (acb->backing_qiov) {
            qemu_iovec_destroy(acb->backing_qiov);
            g_free(acb->backing_qiov);
            acb->backing_qiov = NULL;
        }

        acb->qiov_offset += acb->cur_qiov.size;
        acb->cur_pos += acb->cur_qiov.size;
        qemu_iovec_reset(&acb->cur_qiov);

        /* Complete request */
        if (acb->cur_pos >= acb->end_pos) {
            ret = 0;
            break;
        }

        /* Find next cluster and start I/O */
        len = acb->end_pos - acb->cur_pos;
        ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset);
        if (ret < 0) {
            break;
        }

        if (acb->flags & QED_AIOCB_WRITE) {
            ret = qed_aio_write_data(acb, ret, offset, len);
        } else {
            ret = qed_aio_read_data(acb, ret, offset, len);
        }

        if (ret < 0 && ret != -EAGAIN) {
            break;
        }
    }

    trace_qed_aio_complete(s, acb, ret);
    qed_aio_complete(acb);
    qemu_co_mutex_unlock(&s->table_lock);
    return ret;
}

static int coroutine_fn qed_co_request(BlockDriverState *bs, int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       int flags)
{
    QEDAIOCB acb = {
        .bs = bs,
        .cur_pos = (uint64_t) sector_num * BDRV_SECTOR_SIZE,
        .end_pos = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE,
        .qiov = qiov,
        .flags = flags,
    };
    qemu_iovec_init(&acb.cur_qiov, qiov->niov);

    trace_qed_aio_setup(bs->opaque, &acb, sector_num, nb_sectors, NULL, flags);

    /* Start request */
    return qed_aio_next_io(&acb);
}

static int coroutine_fn bdrv_qed_co_readv(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, 0);
}

static int coroutine_fn bdrv_qed_co_writev(BlockDriverState *bs,
                                           int64_t sector_num, int nb_sectors,
                                           QEMUIOVector *qiov, int flags)
{
    assert(!flags);
    return qed_co_request(bs, sector_num, qiov, nb_sectors, QED_AIOCB_WRITE);
}

static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset,
                                                  int bytes,
                                                  BdrvRequestFlags flags)
{
    BDRVQEDState *s = bs->opaque;

    /*
     * Zero writes start without an I/O buffer. If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);

    /* Fall back if the request is not aligned */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, bytes)) {
        return -ENOTSUP;
    }

    return qed_co_request(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                          bytes >> BDRV_SECTOR_BITS,
                          QED_AIOCB_WRITE | QED_AIOCB_ZERO);
}
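
/* Illustration: with 64 KiB clusters, a zero-write of 128 KiB at offset
 * 0x20000 is cluster aligned and handled here, while one at offset 0x20200
 * returns -ENOTSUP so the generic block layer falls back to writing explicit
 * zero buffers.
 */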

static int coroutine_fn bdrv_qed_co_truncate(BlockDriverState *bs,
                                             int64_t offset,
                                             PreallocMode prealloc,
                                             Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Unsupported preallocation mode '%s'",
                   PreallocMode_str(prealloc));
        return -ENOTSUP;
    }

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "Invalid image size specified");
        return -EINVAL;
    }

    if ((uint64_t)offset < s->header.image_size) {
        error_setg(errp, "Shrinking images is currently not supported");
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
        error_setg_errno(errp, -ret, "Failed to update the image size");
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active. If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void coroutine_fn bdrv_qed_co_invalidate_cache(BlockDriverState *bs,
                                                      Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    bdrv_qed_init_state(bs);
    qemu_co_mutex_lock(&s->table_lock);
    ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, &local_err);
    qemu_co_mutex_unlock(&s->table_lock);
    if (local_err) {
        error_propagate_prepend(errp, local_err,
                                "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}

static int coroutine_fn bdrv_qed_co_check(BlockDriverState *bs,
                                          BdrvCheckResult *result,
                                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    ret = qed_check(s, result, !!fix);
    qemu_co_mutex_unlock(&s->table_lock);

    return ret;
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_qed = {
    .format_name = "qed",
    .instance_size = sizeof(BDRVQEDState),
    .create_opts = &qed_create_opts,
    .supports_backing = true,

    .bdrv_probe = bdrv_qed_probe,
    .bdrv_open = bdrv_qed_open,
    .bdrv_close = bdrv_qed_close,
    .bdrv_reopen_prepare = bdrv_qed_reopen_prepare,
    .bdrv_child_perm = bdrv_format_default_perms,
    .bdrv_co_create = bdrv_qed_co_create,
    .bdrv_co_create_opts = bdrv_qed_co_create_opts,
    .bdrv_has_zero_init = bdrv_has_zero_init_1,
    .bdrv_co_block_status = bdrv_qed_co_block_status,
    .bdrv_co_readv = bdrv_qed_co_readv,
    .bdrv_co_writev = bdrv_qed_co_writev,
    .bdrv_co_pwrite_zeroes = bdrv_qed_co_pwrite_zeroes,
    .bdrv_co_truncate = bdrv_qed_co_truncate,
    .bdrv_getlength = bdrv_qed_getlength,
    .bdrv_get_info = bdrv_qed_get_info,
    .bdrv_refresh_limits = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_co_invalidate_cache = bdrv_qed_co_invalidate_cache,
    .bdrv_co_check = bdrv_qed_co_check,
    .bdrv_detach_aio_context = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context = bdrv_qed_attach_aio_context,
    .bdrv_co_drain_begin = bdrv_qed_co_drain_begin,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);