/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "trace.h"
#include "qed.h"
#include "qerror.h"

static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QEDAIOCB *acb = (QEDAIOCB *)blockacb;
    bool finished = false;

    /* Wait for the request to finish */
    acb->finished = &finished;
    while (!finished) {
        qemu_aio_wait();
    }
}

static AIOPool qed_aio_pool = {
    .aiocb_size = sizeof(QEDAIOCB),
    .cancel = qed_aio_cancel,
};

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
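    /* A probe score of 100 marks a definitive match (0 means no match) */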
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt: Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

static int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;
    BlockDriverAIOCB *acb;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    acb = bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                          write_header_cb->nsectors, qed_write_header_cb,
                          write_header_cb);
    if (!acb) {
        qed_write_header_cb(write_header_cb, -EIO);
    }
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockDriverCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    BlockDriverAIOCB *acb;
    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    acb = bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                         qed_write_header_read_cb, write_header_cb);
    if (!acb) {
        qed_write_header_cb(write_header_cb, -EIO);
    }
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
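
/* Worked example (illustrative): with the default 64 KB cluster size and a
 * table size of 4 clusters, table_entries = (4 * 65536) / 8 = 32768 and
 * l2_size = 32768 * 65536 = 2 GB, giving a maximum image size of
 * 32768 * 2 GB = 64 TB.
 */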

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file: Image file
 * @offset: File offset to start of string, in bytes
 * @n: String length in bytes
 * @buf: Destination buffer
 * @buflen: Destination buffer length in bytes
 * @ret: 0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s: QED state
 * @n: Number of contiguous clusters to allocate
 * @ret: Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the
 * image file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);

static int bdrv_qed_open(BlockDriverState *bs, int flags)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    ret = 0; /* ret should always be 0 or -errno */
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        char buf[64];
        snprintf(buf, sizeof(buf), "%" PRIx64,
                 s->header.features & ~QED_FEATURE_MASK);
        qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
                      bs->device_name, "QED", buf);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ffs(s->header.cluster_size) - 1;
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1;
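    /* Example (illustrative, with the default 64 KB clusters and table_size
     * of 4): table_nelems = 32768, l2_shift = 16, l2_mask = 0x7fff and
     * l1_shift = 31, so a byte position decomposes as
     * l1 index = pos >> 31 and l2 index = (pos >> 16) & 0x7fff.
     */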

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (s->header.features & QED_F_NEED_CHECK) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (!ret && !result.corruptions && !result.check_errors) {
                /* Ensure fixes reach storage before clearing check bit */
                bdrv_flush(s->bs);

                s->header.features &= ~QED_F_NEED_CHECK;
                qed_write_header_sync(s);
            }
        }
    }

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int bdrv_qed_flush(BlockDriverState *bs)
{
    return bdrv_flush(bs->file);
}

static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    int ret = 0;
    BlockDriverState *bs = NULL;

    ret = bdrv_create_file(filename, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_file_open(&bs, filename, BDRV_O_RDWR | BDRV_O_CACHE_WB);
    if (ret < 0) {
        return ret;
    }

    /* File must start empty and grow, check truncate is supported */
    ret = bdrv_truncate(bs, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
                      header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = qemu_mallocz(l1_size);
    ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    qemu_free(l1_table);
    bdrv_delete(bs);
    return ret;
}

static int bdrv_qed_create(const char *filename, QEMUOptionParameter *options)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            image_size = options->value.n;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_TABLE_SIZE)) {
            if (options->value.n) {
                table_size = options->value.n;
            }
        }
        options++;
    }

    if (!qed_is_cluster_size_valid(cluster_size)) {
        fprintf(stderr, "QED cluster size must be within range [%u, %u] "
                        "and a power of 2\n",
                QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(table_size)) {
        fprintf(stderr, "QED table size must be within range [%u, %u] "
                        "and a power of 2\n",
                QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        fprintf(stderr, "QED image size must be a non-zero multiple of "
                        "cluster size and less than %" PRIu64 " bytes\n",
                qed_max_image_size(cluster_size, table_size));
        return -EINVAL;
    }

    return qed_create(filename, cluster_size, image_size, table_size,
                      backing_file, backing_fmt);
}
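
/* Typical invocation (illustrative), exercising the create options defined
 * for this driver below:
 *
 *   qemu-img create -f qed -o cluster_size=65536 image.qed 10G
 */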

typedef struct {
    int is_allocated;
    int *pnum;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    cb->is_allocated = (ret == QED_CLUSTER_FOUND || ret == QED_CLUSTER_ZERO);
}

static int bdrv_qed_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors, int *pnum)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .is_allocated = -1,
        .pnum = pnum,
    };
    QEDRequest request = { .l2_table = NULL };

    async_context_push();

    qed_find_cluster(s, &request, pos, len, qed_is_allocated_cb, &cb);

    while (cb.is_allocated == -1) {
        qemu_aio_wait();
    }

    async_context_pop();

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.is_allocated;
}

static int bdrv_qed_make_empty(BlockDriverState *bs)
{
    return -ENOTSUP;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s: QED state
 * @pos: Byte position in device
 * @qiov: Destination I/O vector
 * @cb: Completion function
 * @opaque: User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriverAIOCB *aiocb;
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing_hd) {
        int64_t l = bdrv_getlength(s->bs->backing_hd);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING);
    aiocb = bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
                           qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
    if (!aiocb) {
        cb(opaque, -EIO);
    }
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;
    BlockDriverAIOCB *aiocb;

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    aiocb = bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                            &copy_cb->qiov,
                            copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                            qed_copy_from_backing_file_cb, copy_cb);
    if (!aiocb) {
        qed_copy_from_backing_file_cb(copy_cb, -EIO);
    }
}

/**
 * Copy data from backing file into the image
 *
 * @s: QED state
 * @pos: Byte position in device
 * @len: Number of bytes
 * @offset: Byte offset in image file
 * @cb: Completion function
 * @opaque: User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s: QED state
 * @table: L2 table
 * @index: First cluster index
 * @n: Number of contiguous clusters
 * @cluster: First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
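
/* Example (illustrative): with 64 KB clusters, linking 2 clusters starting at
 * image file offset 0x100000 stores 0x100000 and 0x110000 in consecutive L2
 * entries, while a zero or unallocated marker is stored unchanged in every
 * entry.
 */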

static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockDriverCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;
    bool *finished = acb->finished;

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);

    /* Invoke callback */
    cb(user_opaque, ret);

    /* Signal cancel completion */
    if (finished) {
        *finished = true;
    }
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = qemu_bh_new(qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache,
                                                    l2_table->offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, acb->cur_cluster);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file, qed_aio_write_l2_update, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockDriverCompletionFunc *next_fn;
    BlockDriverAIOCB *file_acb;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing_hd) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    file_acb = bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                               &acb->cur_qiov,
                               acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                               next_fn, acb);
    if (!file_acb) {
        qed_aio_complete(acb, -EIO);
    }
}

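/* When a write allocates a new cluster but only touches part of it, the
 * untouched head and tail of the cluster are populated from the backing file
 * (or zeroes) before the L2 table is updated:
 *
 *   new cluster: [ prefill ... | write data | ... postfill ]
 *                ^cluster start                            ^cluster end
 *
 * (Illustrative sketch of the prefill/postfill steps implemented below.)
 */
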
/**
 * Populate the untouched region at the back of a new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate the untouched region at the front of a new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing_hd) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

/**
 * Write new data cluster
 *
 * @acb: Write request
 * @len: Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, qed_aio_write_prefill, acb);
    } else {
        qed_aio_write_prefill(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb: Write request
 * @offset: Cluster offset in bytes
 * @len: Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque: Write request
 * @ret: QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *       or -errno
 * @offset: Cluster offset in bytes
 * @len: Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque: Read request
 * @ret: QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *       or -errno
 * @offset: Cluster offset in bytes
 * @len: Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;
    BlockDriverAIOCB *file_acb;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    file_acb = bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                              &acb->cur_qiov,
                              acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                              qed_aio_next_io, acb);
    if (!file_acb) {
        ret = -EIO;
        goto err;
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn =
        acb->is_write ? qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}

static BlockDriverAIOCB *qed_aio_setup(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque, bool is_write)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aio_pool, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, is_write);

    acb->is_write = is_write;
    acb->finished = NULL;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockDriverAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov, int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, false);
}

static BlockDriverAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                             int64_t sector_num,
                                             QEMUIOVector *qiov, int nb_sectors,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, true);
}

static BlockDriverAIOCB *bdrv_qed_aio_flush(BlockDriverState *bs,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return bdrv_aio_flush(bs->file, cb, opaque);
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    return -ENOTSUP;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = qemu_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    memcpy(buffer + buffer_len, backing_file, backing_file_len);
    buffer_len += backing_file_len;

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    qemu_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, false);
}

static QEMUOptionParameter qed_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size (in bytes)"
    }, {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    }, {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    }, {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "Cluster size (in bytes)"
    }, {
        .name = BLOCK_OPT_TABLE_SIZE,
        .type = OPT_SIZE,
        .help = "L1/L2 table size (in clusters)"
    },
    { /* end of list */ }
};

static BlockDriver bdrv_qed = {
    .format_name = "qed",
    .instance_size = sizeof(BDRVQEDState),
    .create_options = qed_create_options,

    .bdrv_probe = bdrv_qed_probe,
    .bdrv_open = bdrv_qed_open,
    .bdrv_close = bdrv_qed_close,
    .bdrv_create = bdrv_qed_create,
    .bdrv_flush = bdrv_qed_flush,
    .bdrv_is_allocated = bdrv_qed_is_allocated,
    .bdrv_make_empty = bdrv_qed_make_empty,
    .bdrv_aio_readv = bdrv_qed_aio_readv,
    .bdrv_aio_writev = bdrv_qed_aio_writev,
    .bdrv_aio_flush = bdrv_qed_aio_flush,
    .bdrv_truncate = bdrv_qed_truncate,
    .bdrv_getlength = bdrv_qed_getlength,
    .bdrv_get_info = bdrv_qed_get_info,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_check = bdrv_qed_check,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);