/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "block/qdict.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/memalign.h"
#include "trace.h"
#include "qed.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"

static QemuOptsList qed_create_opts;

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}
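
/*
 * Only the fixed-width header fields are byte-swapped here; the backing
 * filename string that may follow the header is raw bytes and needs no
 * conversion.  Assuming the padding-free QEDHeader layout declared in qed.h,
 * the fields above make up the 64-byte header: four 32-bit words, five
 * 64-bit words, then the two optional 32-bit backing filename fields.
 */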

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;

    qed_header_cpu_to_le(&s->header, &le);
    return bdrv_pwrite(s->bs->file, 0, sizeof(le), &le, 0);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 *
 * No new allocating reqs can start while this function runs.
 */
static int coroutine_fn qed_write_header(BDRVQEDState *s)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    uint8_t *buf;
    int ret;

    assert(s->allocating_acb || s->allocating_write_reqs_plugged);

    buf = qemu_blockalign(s->bs, len);

    ret = bdrv_co_pread(s->bs->file, 0, len, buf, 0);
    if (ret < 0) {
        goto out;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *) buf);

    ret = bdrv_co_pwrite(s->bs->file, 0, len, buf, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(buf);
    return ret;
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
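
/*
 * Worked example, assuming the defaults from qed.h (64 KB clusters,
 * table_size = 4): each table holds (4 * 65536) / 8 = 32768 entries, one
 * fully populated L2 table maps 32768 * 64 KB = 2 GB, and the two-level
 * tree therefore tops out at 32768 * 2 GB = 64 TB.
 */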

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, n, buf, 0);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 *
 * Called with table_lock held.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}
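
/*
 * Allocation is a two-step pattern: callers reserve space here (which only
 * bumps the in-memory end-of-file position), write the new clusters, and
 * then link them into an L2 table with qed_update_l2_table().  A crash in
 * between leaves the clusters unreferenced, so an interrupted allocating
 * write never corrupts existing data.
 */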

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 *
 * Called with table_lock held.
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static bool coroutine_fn qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);

    /* No reentrancy is allowed.  */
    assert(!s->allocating_write_reqs_plugged);
    if (s->allocating_acb != NULL) {
        /* Another allocating write came concurrently.  This cannot happen
         * from bdrv_qed_co_drain_begin, but it can happen when the timer runs.
         */
        qemu_co_mutex_unlock(&s->table_lock);
        return false;
    }

    s->allocating_write_reqs_plugged = true;
    qemu_co_mutex_unlock(&s->table_lock);
    return true;
}

static void coroutine_fn qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);
    assert(s->allocating_write_reqs_plugged);
    s->allocating_write_reqs_plugged = false;
    qemu_co_queue_next(&s->allocating_write_reqs);
    qemu_co_mutex_unlock(&s->table_lock);
}

static void coroutine_fn qed_need_check_timer_entry(void *opaque)
{
    BDRVQEDState *s = opaque;
    int ret;

    trace_qed_need_check_timer_cb(s);

    if (!qed_plug_allocating_write_reqs(s)) {
        return;
    }

    /* Ensure writes are on disk before clearing flag */
    ret = bdrv_co_flush(s->bs->file->bs);
    if (ret < 0) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    ret = qed_write_header(s);
    (void) ret;

    qed_unplug_allocating_write_reqs(s);

    ret = bdrv_co_flush(s->bs);
    (void) ret;
}
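
/*
 * QED_F_NEED_CHECK lifecycle: the first allocating write sets the flag (see
 * qed_should_set_need_check()) so that a crash leaves the image marked
 * dirty.  Once the image has been idle for QED_NEED_CHECK_TIMEOUT seconds,
 * the timer runs the entry point above, which flushes pending writes and
 * clears the flag again.  A clean bdrv_qed_close() clears it synchronously.
 */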

static void qed_need_check_timer_cb(void *opaque)
{
    Coroutine *co = qemu_coroutine_create(qed_need_check_timer_entry, opaque);
    qemu_coroutine_enter(co);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static void coroutine_fn bdrv_qed_co_drain_begin(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        qed_cancel_need_check_timer(s);
        qed_need_check_timer_entry(s);
    }
}

static void bdrv_qed_init_state(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    memset(s, 0, sizeof(BDRVQEDState));
    s->bs = bs;
    qemu_co_mutex_init(&s->table_lock);
    qemu_co_queue_init(&s->allocating_write_reqs);
}

/* Called with table_lock held.  */
static int coroutine_fn bdrv_qed_do_open(BlockDriverState *bs, QDict *options,
                                         int flags, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    ret = bdrv_co_pread(bs->file, 0, sizeof(le_header), &le_header, 0);
    if (ret < 0) {
        error_setg(errp, "Failed to read QED header");
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        error_setg(errp, "QED cluster size is invalid");
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        error_setg(errp, "Failed to get file length");
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        error_setg(errp, "QED table size is invalid");
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "QED image size is invalid");
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        error_setg(errp, "QED table offset is invalid");
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        error_setg(errp, "QED header size is too large");
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        g_autofree char *backing_file_str = NULL;

        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            error_setg(errp, "QED backing filename offset is invalid");
            return -EINVAL;
        }

        backing_file_str = g_malloc(sizeof(bs->backing_file));
        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size,
                              backing_file_str, sizeof(bs->backing_file));
        if (ret < 0) {
            error_setg(errp, "Failed to read backing filename");
            return ret;
        }

        if (!g_str_equal(backing_file_str, bs->backing_file)) {
            pstrcpy(bs->backing_file, sizeof(bs->backing_file),
                    backing_file_str);
            pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file),
                    backing_file_str);
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            error_setg(errp, "Failed to update header");
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_co_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        error_setg(errp, "Failed to read L1 table");
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                error_setg(errp, "Image corrupted");
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

typedef struct QEDOpenCo {
    BlockDriverState *bs;
    QDict *options;
    int flags;
    Error **errp;
    int ret;
} QEDOpenCo;

static void coroutine_fn bdrv_qed_open_entry(void *opaque)
{
    QEDOpenCo *qoc = opaque;
    BDRVQEDState *s = qoc->bs->opaque;

    qemu_co_mutex_lock(&s->table_lock);
    qoc->ret = bdrv_qed_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp);
    qemu_co_mutex_unlock(&s->table_lock);
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    QEDOpenCo qoc = {
        .bs = bs,
        .options = options,
        .flags = flags,
        .errp = errp,
        .ret = -EINPROGRESS
    };
    int ret;

    ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
    if (ret < 0) {
        return ret;
    }

    bdrv_qed_init_state(bs);
    if (qemu_in_coroutine()) {
        bdrv_qed_open_entry(&qoc);
    } else {
        assert(qemu_get_current_aio_context() == qemu_get_aio_context());
        qemu_coroutine_enter(qemu_coroutine_create(bdrv_qed_open_entry, &qoc));
        BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
    }
    BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
    return qoc.ret;
}
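
/*
 * bdrv_qed_open() can be reached both inside and outside coroutine context,
 * hence the split above: in coroutine context the entry function is called
 * directly, otherwise it is spawned as a coroutine and BDRV_POLL_WHILE()
 * drives the main loop until qoc.ret leaves its -EINPROGRESS sentinel.
 */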

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
    bs->bl.max_pwrite_zeroes = QEMU_ALIGN_DOWN(INT_MAX, s->header.cluster_size);
}

/* We have nothing to do for QED reopen, stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts,
                                           Error **errp)
{
    BlockdevCreateOptionsQed *qed_opts;
    BlockBackend *blk = NULL;
    BlockDriverState *bs = NULL;

    QEDHeader header;
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size;
    int ret = 0;

    assert(opts->driver == BLOCKDEV_DRIVER_QED);
    qed_opts = &opts->u.qed;

    /* Validate options and set default values */
    if (!qed_opts->has_cluster_size) {
        qed_opts->cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    }
    if (!qed_opts->has_table_size) {
        qed_opts->table_size = QED_DEFAULT_TABLE_SIZE;
    }

    if (!qed_is_cluster_size_valid(qed_opts->cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(qed_opts->table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(qed_opts->size, qed_opts->cluster_size,
                                 qed_opts->table_size))
    {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(qed_opts->cluster_size,
                                      qed_opts->table_size));
        return -EINVAL;
    }

    /* Create BlockBackend to write to the image */
    bs = bdrv_open_blockdev_ref(qed_opts->file, errp);
    if (bs == NULL) {
        return -EIO;
    }

    blk = blk_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL,
                          errp);
    if (!blk) {
        ret = -EPERM;
        goto out;
    }
    blk_set_allow_write_beyond_eof(blk, true);

    /* Prepare image format */
    header = (QEDHeader) {
        .magic = QED_MAGIC,
        .cluster_size = qed_opts->cluster_size,
        .table_size = qed_opts->table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = qed_opts->cluster_size,
        .image_size = qed_opts->size,
    };

    l1_size = header.cluster_size * header.table_size;

    /*
     * The QED format associates file length with allocation status,
     * so a new file (which is empty) must have a length of 0.
     */
    ret = blk_co_truncate(blk, 0, true, PREALLOC_MODE_OFF, 0, errp);
    if (ret < 0) {
        goto out;
    }

    if (qed_opts->backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(qed_opts->backing_file);

        if (qed_opts->has_backing_fmt) {
            const char *backing_fmt = BlockdevDriver_str(qed_opts->backing_fmt);
            if (qed_fmt_is_raw(backing_fmt)) {
                header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
            }
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_co_pwrite(blk, 0, sizeof(le_header), &le_header, 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_co_pwrite(blk, sizeof(le_header), header.backing_filename_size,
                        qed_opts->backing_file, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_co_pwrite(blk, header.l1_table_offset, l1_size, l1_table, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    bdrv_unref(bs);
    return ret;
}

static int coroutine_fn bdrv_qed_co_create_opts(BlockDriver *drv,
                                                const char *filename,
                                                QemuOpts *opts,
                                                Error **errp)
{
    BlockdevCreateOptions *create_options = NULL;
    QDict *qdict;
    Visitor *v;
    BlockDriverState *bs = NULL;
    int ret;

    static const QDictRenames opt_renames[] = {
        { BLOCK_OPT_BACKING_FILE,       "backing-file" },
        { BLOCK_OPT_BACKING_FMT,        "backing-fmt" },
        { BLOCK_OPT_CLUSTER_SIZE,       "cluster-size" },
        { BLOCK_OPT_TABLE_SIZE,         "table-size" },
        { NULL, NULL },
    };

    /* Parse options and convert legacy syntax */
    qdict = qemu_opts_to_qdict_filtered(opts, NULL, &qed_create_opts, true);

    if (!qdict_rename_keys(qdict, opt_renames, errp)) {
        ret = -EINVAL;
        goto fail;
    }

    /* Create and open the file (protocol layer) */
    ret = bdrv_create_file(filename, opts, errp);
    if (ret < 0) {
        goto fail;
    }

    bs = bdrv_open(filename, NULL, NULL,
                   BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
    if (bs == NULL) {
        ret = -EIO;
        goto fail;
    }

    /* Now get the QAPI type BlockdevCreateOptions */
    qdict_put_str(qdict, "driver", "qed");
    qdict_put_str(qdict, "file", bs->node_name);

    v = qobject_input_visitor_new_flat_confused(qdict, errp);
    if (!v) {
        ret = -EINVAL;
        goto fail;
    }

    visit_type_BlockdevCreateOptions(v, NULL, &create_options, errp);
    visit_free(v);
    if (!create_options) {
        ret = -EINVAL;
        goto fail;
    }

    /* Silently round up size */
    assert(create_options->driver == BLOCKDEV_DRIVER_QED);
    create_options->u.qed.size =
        ROUND_UP(create_options->u.qed.size, BDRV_SECTOR_SIZE);

    /* Create the qed image (format layer) */
    ret = bdrv_qed_co_create(create_options, errp);

fail:
    qobject_unref(qdict);
    bdrv_unref(bs);
    qapi_free_BlockdevCreateOptions(create_options);
    return ret;
}

static int coroutine_fn bdrv_qed_co_block_status(BlockDriverState *bs,
                                                 bool want_zero,
                                                 int64_t pos, int64_t bytes,
                                                 int64_t *pnum, int64_t *map,
                                                 BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = MIN(bytes, SIZE_MAX);
    int status;
    QEDRequest request = { .l2_table = NULL };
    uint64_t offset;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    ret = qed_find_cluster(s, &request, pos, &len, &offset);

    *pnum = len;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        *map = offset | qed_offset_into_cluster(s, pos);
        status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
        *file = bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        status = 0;
        break;
    default:
        assert(ret < 0);
        status = ret;
        break;
    }

    qed_unref_l2_cache_entry(request.l2_table);
    qemu_co_mutex_unlock(&s->table_lock);

    return status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static int coroutine_fn qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                              QEMUIOVector *qiov)
{
    if (s->bs->backing) {
        BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
        return bdrv_co_preadv(s->bs->backing, pos, qiov->size, qiov, 0);
    }
    qemu_iovec_memset(qiov, 0, 0, qiov->size);
    return 0;
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 */
static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s,
                                                   uint64_t pos, uint64_t len,
                                                   uint64_t offset)
{
    QEMUIOVector qiov;
    int ret;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        return 0;
    }

    qemu_iovec_init_buf(&qiov, qemu_blockalign(s->bs, len), len);

    ret = qed_read_backing_file(s, pos, &qiov);

    if (ret) {
        goto out;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->bs->file, offset, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
out:
    qemu_vfree(qemu_iovec_buf(&qiov));
    return ret;
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 *
 * Called with table_lock held.
 */
static void coroutine_fn qed_update_l2_table(BDRVQEDState *s, QEDTable *table,
                                             int index, unsigned int n,
                                             uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
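
/*
 * Note the asymmetry in the loop above: a real byte offset advances by
 * cluster_size from one entry to the next, while the zero and unallocated
 * cluster markers are stored unchanged in all n entries.
 */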

/* Called with table_lock held.  */
static void coroutine_fn qed_aio_complete(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == s->allocating_acb) {
        s->allocating_acb = NULL;
        if (!qemu_co_queue_empty(&s->allocating_write_reqs)) {
            qemu_co_queue_next(&s->allocating_write_reqs);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Update L1 table with new L2 table offset and write it out
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_l1_update(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;
    int index, ret;

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = l2_table->offset;

    ret = qed_write_l1_table(s, index, 1);

    /* Commit the current L2 table to the cache */
    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    return ret;
}


/**
 * Update L2 table with new cluster offsets and write them out
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_l2_update(QEDAIOCB *acb, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index, ret;

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                        offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        ret = qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true);
        if (ret) {
            return ret;
        }
        return qed_aio_write_l1_update(acb);
    } else {
        /* Write out only the updated part of the L2 table */
        ret = qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters,
                                 false);
        if (ret) {
            return ret;
        }
    }
    return 0;
}

/**
 * Write data to the image file
 *
 * Called with table_lock *not* held.
 */
static int coroutine_fn qed_aio_write_main(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    return bdrv_co_pwritev(s->bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
}

/**
 * Populate untouched regions of new data cluster
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_cow(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start, len, offset;
    int ret;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Populate front untouched region of new data cluster */
    start = qed_start_of_cluster(s, acb->cur_pos);
    len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    ret = qed_copy_from_backing_file(s, start, len, acb->cur_cluster);
    if (ret < 0) {
        goto out;
    }

    /* Populate back untouched region of new data cluster */
    start = acb->cur_pos + acb->cur_qiov.size;
    len = qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    offset = acb->cur_cluster +
             qed_offset_into_cluster(s, acb->cur_pos) +
             acb->cur_qiov.size;

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    ret = qed_copy_from_backing_file(s, start, len, offset);
    if (ret < 0) {
        goto out;
    }

    ret = qed_aio_write_main(acb);
    if (ret < 0) {
        goto out;
    }

    if (s->bs->backing) {
        /*
         * Flush new data clusters before updating the L2 table
         *
         * This flush is necessary when a backing file is in use.  A crash
         * during an allocating write could result in empty clusters in the
         * image.  If the write only touched a subregion of the cluster,
         * then backing image sectors have been lost in the untouched
         * region.  The solution is to flush after writing a new data
         * cluster and before updating the L2 table.
         */
        ret = bdrv_co_flush(s->bs->file->bs);
    }

out:
    qemu_co_mutex_lock(&s->table_lock);
    return ret;
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}
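
/*
 * Put differently: with a backing file, qed_aio_write_cow() already flushes
 * new data clusters before the L2 update, so a crash cannot expose stale
 * backing data and the dirty flag would add nothing.  Without a backing
 * file the cheaper lazy strategy applies: mark the image dirty on the first
 * allocating write and let qed_check() repair any half-finished allocation
 * at the next open.
 */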

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int ret;

    /* Cancel timer when the first allocating request comes in */
    if (s->allocating_acb == NULL) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (s->allocating_acb != acb || s->allocating_write_reqs_plugged) {
        if (s->allocating_acb != NULL) {
            qemu_co_queue_wait(&s->allocating_write_reqs, &s->table_lock);
            assert(s->allocating_acb == NULL);
        }
        s->allocating_acb = acb;
        return -EAGAIN; /* start over with looking up table entries */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            return 0;
        }
        acb->cur_cluster = 1;
    } else {
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        ret = qed_write_header(s);
        if (ret < 0) {
            return ret;
        }
    }

    if (!(acb->flags & QED_AIOCB_ZERO)) {
        ret = qed_aio_write_cow(acb);
        if (ret < 0) {
            return ret;
        }
    }

    return qed_aio_write_l2_update(acb, acb->cur_cluster);
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset,
                                              size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                r = -ENOMEM;
                goto out;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write.  */
    r = qed_aio_write_main(acb);
out:
    qemu_co_mutex_lock(&s->table_lock);
    return r;
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_ZERO, QED_CLUSTER_L2 or
 *              QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_data(void *opaque, int ret,
                                           uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        return qed_aio_write_inplace(acb, offset, len);

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        return qed_aio_write_alloc(acb, len);

    default:
        g_assert_not_reached();
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_ZERO, QED_CLUSTER_L2 or
 *              QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_read_data(void *opaque, int ret,
                                          uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->bs;
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads, otherwise read
     * data cluster directly.
     */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        r = 0;
    } else if (ret != QED_CLUSTER_FOUND) {
        r = qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov);
    } else {
        BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
        r = bdrv_co_preadv(bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
    }

    qemu_co_mutex_lock(&s->table_lock);
    return r;
}

/**
 * Begin next I/O or complete the request
 */
static int coroutine_fn qed_aio_next_io(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset;
    size_t len;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    while (1) {
        trace_qed_aio_next_io(s, acb, 0, acb->cur_pos + acb->cur_qiov.size);

        acb->qiov_offset += acb->cur_qiov.size;
        acb->cur_pos += acb->cur_qiov.size;
        qemu_iovec_reset(&acb->cur_qiov);

        /* Complete request */
        if (acb->cur_pos >= acb->end_pos) {
            ret = 0;
            break;
        }

        /* Find next cluster and start I/O */
        len = acb->end_pos - acb->cur_pos;
        ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset);
        if (ret < 0) {
            break;
        }

        if (acb->flags & QED_AIOCB_WRITE) {
            ret = qed_aio_write_data(acb, ret, offset, len);
        } else {
            ret = qed_aio_read_data(acb, ret, offset, len);
        }

        if (ret < 0 && ret != -EAGAIN) {
            break;
        }
    }

    trace_qed_aio_complete(s, acb, ret);
    qed_aio_complete(acb);
    qemu_co_mutex_unlock(&s->table_lock);
    return ret;
}

static int coroutine_fn qed_co_request(BlockDriverState *bs, int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       int flags)
{
    QEDAIOCB acb = {
        .bs = bs,
        .cur_pos = (uint64_t) sector_num * BDRV_SECTOR_SIZE,
        .end_pos = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE,
        .qiov = qiov,
        .flags = flags,
    };
    qemu_iovec_init(&acb.cur_qiov, qiov->niov);

    trace_qed_aio_setup(bs->opaque, &acb, sector_num, nb_sectors, NULL, flags);

    /* Start request */
    return qed_aio_next_io(&acb);
}
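
/*
 * The QEDAIOCB lives on this coroutine's stack for the whole request:
 * qed_aio_next_io() only returns after qed_aio_complete() has run, so no
 * heap allocation or reference counting is needed for request tracking.
 */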

static int coroutine_fn bdrv_qed_co_readv(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, 0);
}

static int coroutine_fn bdrv_qed_co_writev(BlockDriverState *bs,
                                           int64_t sector_num, int nb_sectors,
                                           QEMUIOVector *qiov, int flags)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, QED_AIOCB_WRITE);
}

static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset,
                                                  int64_t bytes,
                                                  BdrvRequestFlags flags)
{
    BDRVQEDState *s = bs->opaque;

    /*
     * Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);

    /*
     * QED is not prepared for 63bit write-zero requests, so rely on
     * max_pwrite_zeroes.
     */
    assert(bytes <= INT_MAX);

    /* Fall back if the request is not aligned */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, bytes)) {
        return -ENOTSUP;
    }

    return qed_co_request(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                          bytes >> BDRV_SECTOR_BITS,
                          QED_AIOCB_WRITE | QED_AIOCB_ZERO);
}

static int coroutine_fn bdrv_qed_co_truncate(BlockDriverState *bs,
                                             int64_t offset,
                                             bool exact,
                                             PreallocMode prealloc,
                                             BdrvRequestFlags flags,
                                             Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Unsupported preallocation mode '%s'",
                   PreallocMode_str(prealloc));
        return -ENOTSUP;
    }

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "Invalid image size specified");
        return -EINVAL;
    }

    if ((uint64_t)offset < s->header.image_size) {
        error_setg(errp, "Shrinking images is currently not supported");
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
        error_setg_errno(errp, -ret, "Failed to update the image size");
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer_len, buffer, 0);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void coroutine_fn bdrv_qed_co_invalidate_cache(BlockDriverState *bs,
                                                      Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    int ret;

    bdrv_qed_close(bs);

    bdrv_qed_init_state(bs);
    qemu_co_mutex_lock(&s->table_lock);
    ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, errp);
    qemu_co_mutex_unlock(&s->table_lock);
    if (ret < 0) {
        error_prepend(errp, "Could not reopen qed layer: ");
    }
}

static int coroutine_fn bdrv_qed_co_check(BlockDriverState *bs,
                                          BdrvCheckResult *result,
                                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    ret = qed_check(s, result, !!fix);
    qemu_co_mutex_unlock(&s->table_lock);

    return ret;
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .is_format                = true,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_child_perm          = bdrv_default_perms,
    .bdrv_co_create           = bdrv_qed_co_create,
    .bdrv_co_create_opts      = bdrv_qed_co_create_opts,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_block_status     = bdrv_qed_co_block_status,
    .bdrv_co_readv            = bdrv_qed_co_readv,
    .bdrv_co_writev           = bdrv_qed_co_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_co_truncate         = bdrv_qed_co_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_co_invalidate_cache = bdrv_qed_co_invalidate_cache,
    .bdrv_co_check            = bdrv_qed_co_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_co_drain_begin      = bdrv_qed_co_drain_begin,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);