/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "block/qdict.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/memalign.h"
#include "trace.h"
#include "qed.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"

static QemuOptsList qed_create_opts;

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;

    qed_header_cpu_to_le(&s->header, &le);
    return bdrv_pwrite(s->bs->file, 0, sizeof(le), &le, 0);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 *
 * No new allocating requests can start while this function runs.
 */
static int coroutine_fn GRAPH_RDLOCK qed_write_header(BDRVQEDState *s)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active. Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    uint8_t *buf;
    int ret;

    assert(s->allocating_acb || s->allocating_write_reqs_plugged);

    buf = qemu_blockalign(s->bs, len);

    ret = bdrv_co_pread(s->bs->file, 0, len, buf, 0);
    if (ret < 0) {
        goto out;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *) buf);

    ret = bdrv_co_pwrite(s->bs->file, 0, len, buf, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(buf);
    return ret;
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}

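/* Worked example, assuming the QED defaults (64 KiB clusters, table_size 4):
 * table_entries = (4 * 65536) / 8 = 32768, l2_size = 32768 * 64 KiB = 2 GiB,
 * so the maximum image size is 2 GiB * 32768 = 64 TiB.
 */
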
static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int coroutine_fn GRAPH_RDLOCK
qed_read_string(BdrvChild *file, uint64_t offset,
                size_t n, char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_co_pread(file, offset, n, buf, 0);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written. It updates BDRVQEDState but does not make any changes to the image
 * file.
 *
 * Called with table_lock held.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

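/* Note that QED allocates clusters simply by appending to the image file, so
 * file length doubles as allocation state (see the comment in
 * bdrv_qed_co_create() below). A crash between allocation and the table
 * update at worst leaks clusters; QED_F_NEED_CHECK then forces a consistency
 * check on the next open.
 */
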
QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 *
 * Called with table_lock held.
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static bool coroutine_fn qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);

    /* No reentrancy is allowed. */
    assert(!s->allocating_write_reqs_plugged);
    if (s->allocating_acb != NULL) {
        /* Another allocating write came concurrently. This cannot happen
         * from bdrv_qed_drain_begin, but it can happen when the timer runs.
         */
        qemu_co_mutex_unlock(&s->table_lock);
        return false;
    }

    s->allocating_write_reqs_plugged = true;
    qemu_co_mutex_unlock(&s->table_lock);
    return true;
}

static void coroutine_fn qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);
    assert(s->allocating_write_reqs_plugged);
    s->allocating_write_reqs_plugged = false;
    qemu_co_queue_next(&s->allocating_write_reqs);
    qemu_co_mutex_unlock(&s->table_lock);
}

static void coroutine_fn GRAPH_RDLOCK qed_need_check_timer(BDRVQEDState *s)
{
    int ret;

    trace_qed_need_check_timer_cb(s);
    assert_bdrv_graph_readable();

    if (!qed_plug_allocating_write_reqs(s)) {
        return;
    }

    /* Ensure writes are on disk before clearing flag */
    ret = bdrv_co_flush(s->bs->file->bs);
    if (ret < 0) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    ret = qed_write_header(s);
    (void) ret;

    qed_unplug_allocating_write_reqs(s);

    ret = bdrv_co_flush(s->bs);
    (void) ret;
}

static void coroutine_fn qed_need_check_timer_entry(void *opaque)
{
    BDRVQEDState *s = opaque;
    GRAPH_RDLOCK_GUARD();

    qed_need_check_timer(opaque);
    bdrv_dec_in_flight(s->bs);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;
    Coroutine *co = qemu_coroutine_create(qed_need_check_timer_entry, opaque);

    bdrv_inc_in_flight(s->bs);
    qemu_coroutine_enter(co);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static void bdrv_qed_drain_begin(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        Coroutine *co;

        qed_cancel_need_check_timer(s);
        co = qemu_coroutine_create(qed_need_check_timer_entry, s);
        bdrv_inc_in_flight(bs);
        aio_co_enter(bdrv_get_aio_context(bs), co);
    }
}

static void bdrv_qed_init_state(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    memset(s, 0, sizeof(BDRVQEDState));
    s->bs = bs;
    qemu_co_mutex_init(&s->table_lock);
    qemu_co_queue_init(&s->allocating_write_reqs);
}

/* Called with table_lock held. */
static int coroutine_fn GRAPH_RDLOCK
bdrv_qed_do_open(BlockDriverState *bs, QDict *options, int flags, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    ret = bdrv_co_pread(bs->file, 0, sizeof(le_header), &le_header, 0);
    if (ret < 0) {
        error_setg(errp, "Failed to read QED header");
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        error_setg(errp, "QED cluster size is invalid");
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_co_getlength(bs->file->bs);
    if (file_size < 0) {
        error_setg(errp, "Failed to get file length");
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        error_setg(errp, "QED table size is invalid");
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "QED image size is invalid");
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        error_setg(errp, "QED table offset is invalid");
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);

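    /* Illustrative example (assumes the QED defaults, cluster_size = 64 KiB
     * and table_size = 4): table_nelems = (65536 * 4) / 8 = 32768, so
     * l2_shift = 16, l2_mask = 0x7fff and l1_shift = 16 + 15 = 31. A virtual
     * disk offset then decomposes as:
     *
     *   l1 index            = pos >> 31
     *   l2 index            = (pos >> 16) & 0x7fff
     *   byte within cluster = pos & 0xffff
     */
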
    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        error_setg(errp, "QED header size is too large");
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        g_autofree char *backing_file_str = NULL;

        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            error_setg(errp, "QED backing filename offset is invalid");
            return -EINVAL;
        }

        backing_file_str = g_malloc(sizeof(bs->backing_file));
        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size,
                              backing_file_str, sizeof(bs->backing_file));
        if (ret < 0) {
            error_setg(errp, "Failed to read backing filename");
            return ret;
        }

        if (!g_str_equal(backing_file_str, bs->backing_file)) {
            pstrcpy(bs->backing_file, sizeof(bs->backing_file),
                    backing_file_str);
            pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file),
                    backing_file_str);
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits. This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits. When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            error_setg(errp, "Failed to update header");
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_co_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        error_setg(errp, "Failed to read L1 table");
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed. There is no risk of corruption
         * since write operations are not possible. Therefore, allow
         * potentially inconsistent images to be opened read-only. This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                error_setg(errp, "Image corrupted");
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

typedef struct QEDOpenCo {
    BlockDriverState *bs;
    QDict *options;
    int flags;
    Error **errp;
    int ret;
} QEDOpenCo;

static void coroutine_fn bdrv_qed_open_entry(void *opaque)
{
    QEDOpenCo *qoc = opaque;
    BDRVQEDState *s = qoc->bs->opaque;

    GRAPH_RDLOCK_GUARD();

    qemu_co_mutex_lock(&s->table_lock);
    qoc->ret = bdrv_qed_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp);
    qemu_co_mutex_unlock(&s->table_lock);
}

static int coroutine_mixed_fn bdrv_qed_open(BlockDriverState *bs, QDict *options,
                                            int flags, Error **errp)
{
    QEDOpenCo qoc = {
        .bs = bs,
        .options = options,
        .flags = flags,
        .errp = errp,
        .ret = -EINPROGRESS
    };
    int ret;

    ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
    if (ret < 0) {
        return ret;
    }

    bdrv_qed_init_state(bs);
    assert(!qemu_in_coroutine());
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    qemu_coroutine_enter(qemu_coroutine_create(bdrv_qed_open_entry, &qoc));
    BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);

    return qoc.ret;
}

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
    bs->bl.max_pwrite_zeroes = QEMU_ALIGN_DOWN(INT_MAX, s->header.cluster_size);
}

/* We have nothing to do for QED reopen, the stub just returns
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int coroutine_fn GRAPH_UNLOCKED
bdrv_qed_co_create(BlockdevCreateOptions *opts, Error **errp)
{
    BlockdevCreateOptionsQed *qed_opts;
    BlockBackend *blk = NULL;
    BlockDriverState *bs = NULL;

    QEDHeader header;
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size;
    int ret = 0;

    assert(opts->driver == BLOCKDEV_DRIVER_QED);
    qed_opts = &opts->u.qed;

    /* Validate options and set default values */
    if (!qed_opts->has_cluster_size) {
        qed_opts->cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    }
    if (!qed_opts->has_table_size) {
        qed_opts->table_size = QED_DEFAULT_TABLE_SIZE;
    }

    if (!qed_is_cluster_size_valid(qed_opts->cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(qed_opts->table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(qed_opts->size, qed_opts->cluster_size,
                                 qed_opts->table_size))
    {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(qed_opts->cluster_size,
                                      qed_opts->table_size));
        return -EINVAL;
    }

    /* Create BlockBackend to write to the image */
    bs = bdrv_co_open_blockdev_ref(qed_opts->file, errp);
    if (bs == NULL) {
        return -EIO;
    }

    blk = blk_co_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL,
                             errp);
    if (!blk) {
        ret = -EPERM;
        goto out;
    }
    blk_set_allow_write_beyond_eof(blk, true);

    /* Prepare image format */
    header = (QEDHeader) {
        .magic = QED_MAGIC,
        .cluster_size = qed_opts->cluster_size,
        .table_size = qed_opts->table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = qed_opts->cluster_size,
        .image_size = qed_opts->size,
    };

    l1_size = header.cluster_size * header.table_size;

    /*
     * The QED format associates file length with allocation status,
     * so a new file (which is empty) must have a length of 0.
     */
    ret = blk_co_truncate(blk, 0, true, PREALLOC_MODE_OFF, 0, errp);
    if (ret < 0) {
        goto out;
    }

    if (qed_opts->backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(qed_opts->backing_file);

        if (qed_opts->has_backing_fmt) {
            const char *backing_fmt = BlockdevDriver_str(qed_opts->backing_fmt);
            if (qed_fmt_is_raw(backing_fmt)) {
                header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
            }
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_co_pwrite(blk, 0, sizeof(le_header), &le_header, 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_co_pwrite(blk, sizeof(le_header), header.backing_filename_size,
                        qed_opts->backing_file, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_co_pwrite(blk, header.l1_table_offset, l1_size, l1_table, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_co_unref(blk);
    bdrv_co_unref(bs);
    return ret;
}

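/*
 * Layout of a freshly created image, as produced by the writes above
 * (header_size is hard-coded to 1 cluster):
 *
 *   cluster 0:                QED header, followed by the backing filename
 *   clusters 1..table_size:   zeroed L1 table
 *   further clusters:         L2 tables and data, allocated on demand
 */
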
static int coroutine_fn GRAPH_UNLOCKED
bdrv_qed_co_create_opts(BlockDriver *drv, const char *filename,
                        QemuOpts *opts, Error **errp)
{
    BlockdevCreateOptions *create_options = NULL;
    QDict *qdict;
    Visitor *v;
    BlockDriverState *bs = NULL;
    int ret;

    static const QDictRenames opt_renames[] = {
        { BLOCK_OPT_BACKING_FILE,       "backing-file" },
        { BLOCK_OPT_BACKING_FMT,        "backing-fmt" },
        { BLOCK_OPT_CLUSTER_SIZE,       "cluster-size" },
        { BLOCK_OPT_TABLE_SIZE,         "table-size" },
        { NULL, NULL },
    };

    /* Parse options and convert legacy syntax */
    qdict = qemu_opts_to_qdict_filtered(opts, NULL, &qed_create_opts, true);

    if (!qdict_rename_keys(qdict, opt_renames, errp)) {
        ret = -EINVAL;
        goto fail;
    }

    /* Create and open the file (protocol layer) */
    ret = bdrv_co_create_file(filename, opts, errp);
    if (ret < 0) {
        goto fail;
    }

    bs = bdrv_co_open(filename, NULL, NULL,
                      BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
    if (bs == NULL) {
        ret = -EIO;
        goto fail;
    }

    /* Now get the QAPI type BlockdevCreateOptions */
    qdict_put_str(qdict, "driver", "qed");
    qdict_put_str(qdict, "file", bs->node_name);

    v = qobject_input_visitor_new_flat_confused(qdict, errp);
    if (!v) {
        ret = -EINVAL;
        goto fail;
    }

    visit_type_BlockdevCreateOptions(v, NULL, &create_options, errp);
    visit_free(v);
    if (!create_options) {
        ret = -EINVAL;
        goto fail;
    }

    /* Silently round up size */
    assert(create_options->driver == BLOCKDEV_DRIVER_QED);
    create_options->u.qed.size =
        ROUND_UP(create_options->u.qed.size, BDRV_SECTOR_SIZE);

    /* Create the qed image (format layer) */
    ret = bdrv_qed_co_create(create_options, errp);

fail:
    qobject_unref(qdict);
    bdrv_co_unref(bs);
    qapi_free_BlockdevCreateOptions(create_options);
    return ret;
}

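/*
 * Illustrative invocation (standard qemu-img syntax; the option names are the
 * legacy spellings renamed by opt_renames above):
 *
 *   qemu-img create -f qed -o cluster_size=65536,backing_file=base.qed \
 *       overlay.qed 16G
 */
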
static int coroutine_fn GRAPH_RDLOCK
bdrv_qed_co_block_status(BlockDriverState *bs, bool want_zero, int64_t pos,
                         int64_t bytes, int64_t *pnum, int64_t *map,
                         BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = MIN(bytes, SIZE_MAX);
    int status;
    QEDRequest request = { .l2_table = NULL };
    uint64_t offset;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    ret = qed_find_cluster(s, &request, pos, &len, &offset);

    *pnum = len;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        *map = offset | qed_offset_into_cluster(s, pos);
        status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
        *file = bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        status = 0;
        break;
    default:
        assert(ret < 0);
        status = ret;
        break;
    }

    qed_unref_l2_cache_entry(request.l2_table);
    qemu_co_mutex_unlock(&s->table_lock);

    return status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @qiov:       Destination I/O vector
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static int coroutine_fn GRAPH_RDLOCK
qed_read_backing_file(BDRVQEDState *s, uint64_t pos, QEMUIOVector *qiov)
{
    if (s->bs->backing) {
        BLKDBG_CO_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
        return bdrv_co_preadv(s->bs->backing, pos, qiov->size, qiov, 0);
    }
    qemu_iovec_memset(qiov, 0, 0, qiov->size);
    return 0;
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 */
static int coroutine_fn GRAPH_RDLOCK
qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos, uint64_t len,
                           uint64_t offset)
{
    QEMUIOVector qiov;
    int ret;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        return 0;
    }

    qemu_iovec_init_buf(&qiov, qemu_blockalign(s->bs, len), len);

    ret = qed_read_backing_file(s, pos, &qiov);

    if (ret) {
        goto out;
    }

    BLKDBG_CO_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->bs->file, offset, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
out:
    qemu_vfree(qemu_iovec_buf(&qiov));
    return ret;
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 *
 * Called with table_lock held.
 */
static void coroutine_fn qed_update_l2_table(BDRVQEDState *s, QEDTable *table,
                                             int index, unsigned int n,
                                             uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}

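/* Illustrative example: linking n = 3 allocated clusters starting at byte
 * offset X stores X, X + cluster_size and X + 2 * cluster_size into
 * table->offsets[]; the special zero/unallocated markers are instead
 * replicated into every entry unchanged.
 */
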
/* Called with table_lock held. */
static void coroutine_fn qed_aio_complete(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Start next allocating write request waiting behind this one. Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue. This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == s->allocating_acb) {
        s->allocating_acb = NULL;
        if (!qemu_co_queue_empty(&s->allocating_write_reqs)) {
            qemu_co_queue_next(&s->allocating_write_reqs);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Update L1 table with new L2 table offset and write it out
 *
 * Called with table_lock held.
 */
static int coroutine_fn GRAPH_RDLOCK qed_aio_write_l1_update(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;
    int index, ret;

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = l2_table->offset;

    ret = qed_write_l1_table(s, index, 1);

    /* Commit the current L2 table to the cache */
    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    return ret;
}

/**
 * Update L2 table with new cluster offsets and write them out
 *
 * Called with table_lock held.
 */
static int coroutine_fn GRAPH_RDLOCK
qed_aio_write_l2_update(QEDAIOCB *acb, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index, ret;

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                        offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        ret = qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true);
        if (ret) {
            return ret;
        }
        return qed_aio_write_l1_update(acb);
    } else {
        /* Write out only the updated part of the L2 table */
        ret = qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters,
                                 false);
        if (ret) {
            return ret;
        }
    }
    return 0;
}

/**
 * Write data to the image file
 *
 * Called with table_lock *not* held.
 */
static int coroutine_fn GRAPH_RDLOCK qed_aio_write_main(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size);

    BLKDBG_CO_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    return bdrv_co_pwritev(s->bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
}

/**
 * Populate untouched regions of new data cluster
 *
 * Called with table_lock held.
 */
static int coroutine_fn GRAPH_RDLOCK qed_aio_write_cow(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start, len, offset;
    int ret;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Populate front untouched region of new data cluster */
    start = qed_start_of_cluster(s, acb->cur_pos);
    len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    ret = qed_copy_from_backing_file(s, start, len, acb->cur_cluster);
    if (ret < 0) {
        goto out;
    }

    /* Populate back untouched region of new data cluster */
    start = acb->cur_pos + acb->cur_qiov.size;
    len = qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    offset = acb->cur_cluster +
             qed_offset_into_cluster(s, acb->cur_pos) +
             acb->cur_qiov.size;

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    ret = qed_copy_from_backing_file(s, start, len, offset);
    if (ret < 0) {
        goto out;
    }

    ret = qed_aio_write_main(acb);
    if (ret < 0) {
        goto out;
    }

    if (s->bs->backing) {
        /*
         * Flush new data clusters before updating the L2 table
         *
         * This flush is necessary when a backing file is in use. A crash
         * during an allocating write could result in empty clusters in the
         * image. If the write only touched a subregion of the cluster,
         * then backing image sectors have been lost in the untouched
         * region. The solution is to flush after writing a new data
         * cluster and before updating the L2 table.
         */
        ret = bdrv_co_flush(s->bs->file->bs);
    }

out:
    qemu_co_mutex_lock(&s->table_lock);
    return ret;
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

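/* Lifecycle of QED_F_NEED_CHECK in this driver: qed_aio_write_alloc() sets it
 * before the first allocating write after a clean period, the need-check
 * timer clears it once allocating writes have been quiesced and flushed, and
 * bdrv_qed_close() clears it on clean shutdown.
 */
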
/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn GRAPH_RDLOCK
qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int ret;

    /* Cancel timer when the first allocating request comes in */
    if (s->allocating_acb == NULL) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (s->allocating_acb != acb || s->allocating_write_reqs_plugged) {
        if (s->allocating_acb != NULL) {
            qemu_co_queue_wait(&s->allocating_write_reqs, &s->table_lock);
            assert(s->allocating_acb == NULL);
        }
        s->allocating_acb = acb;
        return -EAGAIN; /* start over with looking up table entries */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            return 0;
        }
        acb->cur_cluster = 1;
    } else {
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        ret = qed_write_header(s);
        if (ret < 0) {
            return ret;
        }
    }

    if (!(acb->flags & QED_AIOCB_ZERO)) {
        ret = qed_aio_write_cow(acb);
        if (ret < 0) {
            return ret;
        }
    }

    return qed_aio_write_l2_update(acb, acb->cur_cluster);
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn GRAPH_RDLOCK
qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                r = -ENOMEM;
                goto out;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write. */
    r = qed_aio_write_main(acb);
out:
    qemu_co_mutex_lock(&s->table_lock);
    return r;
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2 or QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn GRAPH_RDLOCK
qed_aio_write_data(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        return qed_aio_write_inplace(acb, offset, len);

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        return qed_aio_write_alloc(acb, len);

    default:
        g_assert_not_reached();
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2 or QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn GRAPH_RDLOCK
qed_aio_read_data(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->bs;
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads, otherwise read
     * data cluster directly.
     */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        r = 0;
    } else if (ret != QED_CLUSTER_FOUND) {
        r = qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov);
    } else {
        BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);
        r = bdrv_co_preadv(bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
    }

    qemu_co_mutex_lock(&s->table_lock);
    return r;
}

/**
 * Begin next I/O or complete the request
 */
static int coroutine_fn GRAPH_RDLOCK qed_aio_next_io(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset;
    size_t len;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    while (1) {
        trace_qed_aio_next_io(s, acb, 0, acb->cur_pos + acb->cur_qiov.size);

        acb->qiov_offset += acb->cur_qiov.size;
        acb->cur_pos += acb->cur_qiov.size;
        qemu_iovec_reset(&acb->cur_qiov);

        /* Complete request */
        if (acb->cur_pos >= acb->end_pos) {
            ret = 0;
            break;
        }

        /* Find next cluster and start I/O */
        len = acb->end_pos - acb->cur_pos;
        ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset);
        if (ret < 0) {
            break;
        }

        if (acb->flags & QED_AIOCB_WRITE) {
            ret = qed_aio_write_data(acb, ret, offset, len);
        } else {
            ret = qed_aio_read_data(acb, ret, offset, len);
        }

        if (ret < 0 && ret != -EAGAIN) {
            break;
        }
    }

    trace_qed_aio_complete(s, acb, ret);
    qed_aio_complete(acb);
    qemu_co_mutex_unlock(&s->table_lock);
    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
qed_co_request(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov,
               int nb_sectors, int flags)
{
    QEDAIOCB acb = {
        .bs = bs,
        .cur_pos = (uint64_t) sector_num * BDRV_SECTOR_SIZE,
        .end_pos = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE,
        .qiov = qiov,
        .flags = flags,
    };
    qemu_iovec_init(&acb.cur_qiov, qiov->niov);

    trace_qed_aio_setup(bs->opaque, &acb, sector_num, nb_sectors, NULL, flags);

    /* Start request */
    return qed_aio_next_io(&acb);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_qed_co_readv(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
                  QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, 0);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_qed_co_writev(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
                   QEMUIOVector *qiov, int flags)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, QED_AIOCB_WRITE);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
                          BdrvRequestFlags flags)
{
    BDRVQEDState *s = bs->opaque;

    /*
     * Zero writes start without an I/O buffer. If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);

    /*
     * QED is not prepared for 63-bit write-zero requests, so rely on
     * max_pwrite_zeroes.
     */
    assert(bytes <= INT_MAX);

    /* Fall back if the request is not aligned */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, bytes)) {
        return -ENOTSUP;
    }

    return qed_co_request(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                          bytes >> BDRV_SECTOR_BITS,
                          QED_AIOCB_WRITE | QED_AIOCB_ZERO);
}

static int coroutine_fn bdrv_qed_co_truncate(BlockDriverState *bs,
                                             int64_t offset,
                                             bool exact,
                                             PreallocMode prealloc,
                                             BdrvRequestFlags flags,
                                             Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Unsupported preallocation mode '%s'",
                   PreallocMode_str(prealloc));
        return -ENOTSUP;
    }

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "Invalid image size specified");
        return -EINVAL;
    }

    if ((uint64_t)offset < s->header.image_size) {
        error_setg(errp, "Shrinking images is currently not supported");
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
        error_setg_errno(errp, -ret, "Failed to update the image size");
    }
    return ret;
}

static int64_t coroutine_fn bdrv_qed_co_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int coroutine_fn
bdrv_qed_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active. If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer_len, buffer, 0);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void coroutine_fn GRAPH_RDLOCK
bdrv_qed_co_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    int ret;

    bdrv_qed_close(bs);

    bdrv_qed_init_state(bs);
    qemu_co_mutex_lock(&s->table_lock);
    ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, errp);
    qemu_co_mutex_unlock(&s->table_lock);
    if (ret < 0) {
        error_prepend(errp, "Could not reopen qed layer: ");
    }
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_qed_co_check(BlockDriverState *bs, BdrvCheckResult *result,
                  BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    ret = qed_check(s, result, !!fix);
    qemu_co_mutex_unlock(&s->table_lock);

    return ret;
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .is_format                = true,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_child_perm          = bdrv_default_perms,
    .bdrv_co_create           = bdrv_qed_co_create,
    .bdrv_co_create_opts      = bdrv_qed_co_create_opts,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_block_status     = bdrv_qed_co_block_status,
    .bdrv_co_readv            = bdrv_qed_co_readv,
    .bdrv_co_writev           = bdrv_qed_co_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_co_truncate         = bdrv_qed_co_truncate,
    .bdrv_co_getlength        = bdrv_qed_co_getlength,
    .bdrv_co_get_info         = bdrv_qed_co_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_co_invalidate_cache = bdrv_qed_co_invalidate_cache,
    .bdrv_co_check            = bdrv_qed_co_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_drain_begin         = bdrv_qed_drain_begin,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);