/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "migration/migration.h"
#include "sysemu/block-backend.h"

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
};

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

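/* Helpers to convert the on-disk header, which is little-endian, to and from
 * CPU byte order.  Each field is swapped individually so the structs can be
 * used directly on either side.
 */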
static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}

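/* Maximum image size for a given cluster and table size.  A table holds
 * (table_size * cluster_size) / sizeof(uint64_t) entries, so one L2 table
 * maps table_entries * cluster_size bytes and the L1 table references
 * table_entries L2 tables.  With the default 64 KB clusters and table_size
 * of 4 from qed.h this works out to 32768 entries per table, 2 GB per L2
 * table, and a 64 TB maximum image size.
 */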
static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(QEDAIOCB *acb, int ret);

static void qed_aio_start_io(QEDAIOCB *acb)
{
    qed_aio_next_io(acb, 0);
}

static void qed_aio_next_io_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    qed_aio_next_io(acb, ret);
}

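/* Plugging temporarily stops allocating write requests from being dispatched;
 * they queue on allocating_write_reqs until the matching unplug call restarts
 * the first queued request.  This is used while the need-check timer rewrites
 * the header.
 */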
static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_start_io(acb);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs->file->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static void bdrv_qed_drain(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        qed_cancel_need_check_timer(s);
        qed_need_check_timer_cb(s);
    }
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

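    /* Precompute lookup helpers: each table holds table_nelems entries, the
     * low l2_shift bits of a position are the offset into a cluster, the next
     * ctz32(table_nelems) bits index the L2 table, and the bits above
     * l1_shift index the L1 table.
     */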
    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
}

/* We have nothing to do for QED reopen, stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

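/**
 * Create a new image file
 *
 * The layout written here is a header cluster (which also carries the backing
 * filename string right after the header struct) followed by the zeroed L1
 * table at l1_table_offset.
 */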
static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockBackend *blk;

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    blk = blk_new_open(filename, NULL, NULL,
                       BDRV_O_RDWR | BDRV_O_PROTOCOL, &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        return -EIO;
    }

    blk_set_allow_write_beyond_eof(blk, true);

    /* File must start empty and grow, check truncate is supported */
    ret = blk_truncate(blk, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), backing_file,
                     header.backing_filename_size, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    return ret;
}

static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}

typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
    BlockDriverState **file;
} QEDIsAllocatedCB;

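/* qed_find_cluster() callback: translate the lookup result into BDRV_BLOCK_*
 * status flags and re-enter the coroutine if it has already yielded.
 */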
static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        *cb->file = cb->bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        qemu_coroutine_enter(cb->co);
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                         int64_t sector_num,
                                                         int nb_sectors,
                                                         int *pnum,
                                                         BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
        .file = file,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 * @cb:             Completion function
 * @opaque:         User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  QEMUIOVector **backing_qiov,
                                  BlockCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing, pos / BDRV_SECTOR_SIZE,
                   *backing_qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (copy_cb->backing_qiov) {
        qemu_iovec_destroy(copy_cb->backing_qiov);
        g_free(copy_cb->backing_qiov);
        copy_cb->backing_qiov = NULL;
    }

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->backing_qiov = NULL;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov, &copy_cb->backing_qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Number of contiguous clusters
 * @cluster:        First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}

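/* Request completion runs in a bottom half so that the user's completion
 * callback is never invoked reentrantly from the request processing code that
 * scheduled it.
 */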
static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;

    qemu_aio_unref(acb);

    /* Invoke callback */
    cb(user_opaque, ret);
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
                            qed_aio_complete_bh, acb);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_start_io(acb);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(acb, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io_cb, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file->bs, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io_cb;
    } else {
        if (s->bs->backing) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}

/**
 * Populate back untouched region of new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate front untouched region of new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_start_io(acb);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->common.bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                qed_aio_complete(acb, -ENOMEM);
                return;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_start_io(acb);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              &acb->backing_qiov, qed_aio_next_io_cb, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io_cb, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    if (acb->backing_qiov) {
        qemu_iovec_destroy(acb->backing_qiov);
        g_free(acb->backing_qiov);
        acb->backing_qiov = NULL;
    }

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}

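/**
 * Set up and start a new read or write request
 *
 * @flags:      0 for reads, QED_AIOCB_WRITE for writes, optionally combined
 *              with QED_AIOCB_ZERO for zero writes
 */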
static BlockAIOCB *qed_aio_setup(BlockDriverState *bs,
                                 int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockCompletionFunc *cb,
                                 void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->backing_qiov = NULL;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_start_io(acb);
    return &acb->common;
}

static BlockAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                      int64_t sector_num,
                                      QEMUIOVector *qiov, int nb_sectors,
                                      BlockCompletionFunc *cb,
                                      void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}

typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_pwrite_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co);
    }
}

static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset,
                                                  int count,
                                                  BdrvRequestFlags flags)
{
    BlockAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Fall back if the request is not aligned */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, count)) {
        return -ENOTSUP;
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = count;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                             count >> BDRV_SECTOR_BITS,
                             qed_co_pwrite_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    memset(s, 0, sizeof(BDRVQEDState));
    ret = bdrv_qed_open(bs, NULL, bs->open_flags, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        error_prepend(errp, "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_drain               = bdrv_qed_drain,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);