/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/block-backend.h"
static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
};
static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}
/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}
static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}
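/* Note: all multi-byte QED header fields are stored little-endian on disk;
 * the two helpers above convert between the on-disk and host representations
 * whenever the header is loaded or stored.
 */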
int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}
/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static int qed_write_header(BDRVQEDState *s)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active. Therefore, first read the sectors containing the header, update
     * them, and write back.
     */
    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    uint8_t *buf;
    struct iovec iov;
    QEMUIOVector qiov;
    int ret;

    buf = qemu_blockalign(s->bs, len);
    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = len,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = bdrv_preadv(s->bs->file, 0, &qiov);
    if (ret < 0) {
        goto out;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *) buf);

    ret = bdrv_pwritev(s->bs->file, 0, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(buf);
    return ret;
}
static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
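/* Worked example (a sketch, assuming the QED defaults of 64 KiB clusters and
 * table_size = 4): table_entries = (4 * 65536) / 8 = 32768, so one L2 table
 * maps 32768 * 65536 bytes = 2 GiB, and a full L1 table covers
 * 32768 * 2 GiB = 64 TiB of virtual disk.
 */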
static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}
/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}
/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written. It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}
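/* Note: cluster allocation is a simple bump of s->file_size with no free
 * list. If we crash after allocating but before the table update reaches
 * disk, the clusters are merely leaked in the image file; this is one reason
 * allocating writes set QED_F_NEED_CHECK so the image is checked on the next
 * open.
 */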
QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}
static void qed_aio_next_io(QEDAIOCB *acb);

static void qed_aio_start_io(QEDAIOCB *acb)
{
    qed_aio_next_io(acb);
}
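/* The plug/unplug pair below serializes allocating write requests while the
 * need-check timer clears QED_F_NEED_CHECK: while plugged, new allocating
 * writes queue up in s->allocating_write_reqs, and the first queued request
 * is restarted on unplug.
 */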
static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}
static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_start_io(acb);
    }
}
static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    ret = qed_write_header(s);
    (void) ret;

    qed_unplug_allocating_write_reqs(s);

    ret = bdrv_flush(s->bs);
    (void) ret;
}
static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_acquire(s);
    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs->file->bs, qed_clear_need_check, s);
    qed_release(s);
}
void qed_acquire(BDRVQEDState *s)
{
    aio_context_acquire(bdrv_get_aio_context(s->bs));
}

void qed_release(BDRVQEDState *s)
{
    aio_context_release(bdrv_get_aio_context(s->bs));
}
static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static void bdrv_qed_drain(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        qed_cancel_need_check_timer(s);
        qed_need_check_timer_cb(s);
    }
}
static int bdrv_qed_do_open(BlockDriverState *bs, QDict *options, int flags,
                            Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits. This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits. When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed. There is no risk of corruption
         * since write operations are not possible. Therefore, allow
         * potentially inconsistent images to be opened read-only. This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}
static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    return bdrv_qed_do_open(bs, options, flags, errp);
}
static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
}

/* We have nothing to do for QED reopen, stubs just return success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}
static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockBackend *blk;

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    blk = blk_new_open(filename, NULL, NULL,
                       BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
                       &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        return -EIO;
    }

    blk_set_allow_write_beyond_eof(blk, true);

    /* File must start empty and grow, check truncate is supported */
    ret = blk_truncate(blk, 0, errp);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), backing_file,
                     header.backing_filename_size, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    return ret;
}
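/* Resulting image layout (as written above): the fixed QED header at offset
 * 0, immediately followed by the backing filename (if any), with the zeroed
 * L1 table starting at l1_table_offset == cluster_size.
 */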
static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}
typedef struct {
    BlockDriverState *bs;
    uint64_t pos;
    int64_t status;
    int *pnum;
    BlockDriverState **file;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        *cb->file = cb->bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                         int64_t sector_num,
                                                         int nb_sectors, int *pnum,
                                                         BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
        .file = file,
    };
    QEDRequest request = { .l2_table = NULL };
    uint64_t offset;
    int ret;

    ret = qed_find_cluster(s, &request, cb.pos, &len, &offset);
    qed_is_allocated_cb(&cb, ret, offset, len);

    /* The callback was invoked immediately */
    assert(cb.status != BDRV_BLOCK_OFFSET_MASK);

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}
static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}
/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 * @cb:             Completion function
 * @opaque:         User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static int qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                 QEMUIOVector *qiov,
                                 QEMUIOVector **backing_qiov)
{
    uint64_t backing_length = 0;
    size_t size;
    int ret;

    /* If there is a backing file, get its length. Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            return l;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        return 0;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    ret = bdrv_preadv(s->bs->backing, pos, *backing_qiov);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 */
static int qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                      uint64_t len, uint64_t offset)
{
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov = NULL;
    struct iovec iov;
    int ret;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        return 0;
    }

    iov = (struct iovec) {
        .iov_base = qemu_blockalign(s->bs, len),
        .iov_len = len,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = qed_read_backing_file(s, pos, &qiov, &backing_qiov);

    if (backing_qiov) {
        qemu_iovec_destroy(backing_qiov);
        g_free(backing_qiov);
        backing_qiov = NULL;
    }

    if (ret) {
        goto out;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_pwritev(s->bs->file, offset, &qiov);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}
/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;

    qemu_aio_unref(acb);

    /* Invoke callback */
    qed_acquire(s);
    cb(user_opaque, ret);
    qed_release(s);
}

static void qed_resume_alloc_bh(void *opaque)
{
    qed_aio_start_io(opaque);
}
static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
                            qed_aio_complete_bh, acb);

    /* Start next allocating write request waiting behind this one. Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue. This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QEDAIOCB *next_acb;
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        next_acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (next_acb) {
            aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
                                    qed_resume_alloc_bh, next_acb);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}
/**
 * Update L1 table with new L2 table offset and write it out
 */
static int qed_aio_write_l1_update(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;
    int index, ret;

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = l2_table->offset;

    ret = qed_write_l1_table(s, index, 1);

    /* Commit the current L2 table to the cache */
    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    return ret;
}
/**
 * Update L2 table with new cluster offsets and write them out
 */
static int qed_aio_write_l2_update(QEDAIOCB *acb, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index, ret;

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                        offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        ret = qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true);
        if (ret) {
            return ret;
        }
        return qed_aio_write_l1_update(acb);
    } else {
        /* Write out only the updated part of the L2 table */
        ret = qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters,
                                 false);
        if (ret) {
            return ret;
        }
    }
    return 0;
}
/**
 * Write data to the image file
 */
static int qed_aio_write_main(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    int ret;

    trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    ret = bdrv_pwritev(s->bs->file, offset, &acb->cur_qiov);
    if (ret < 0) {
        return ret;
    }

    if (acb->find_cluster_ret != QED_CLUSTER_FOUND) {
        if (s->bs->backing) {
            /*
             * Flush new data clusters before updating the L2 table
             *
             * This flush is necessary when a backing file is in use. A crash
             * during an allocating write could result in empty clusters in the
             * image. If the write only touched a subregion of the cluster,
             * then backing image sectors have been lost in the untouched
             * region. The solution is to flush after writing a new data
             * cluster and before updating the L2 table.
             */
            ret = bdrv_flush(s->bs->file->bs);
            if (ret < 0) {
                return ret;
            }
        }
        ret = qed_aio_write_l2_update(acb, acb->cur_cluster);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}

/**
 * Populate untouched regions of new data cluster
 */
static int qed_aio_write_cow(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start, len, offset;
    int ret;

    /* Populate front untouched region of new data cluster */
    start = qed_start_of_cluster(s, acb->cur_pos);
    len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    ret = qed_copy_from_backing_file(s, start, len, acb->cur_cluster);
    if (ret < 0) {
        return ret;
    }

    /* Populate back untouched region of new data cluster */
    start = acb->cur_pos + acb->cur_qiov.size;
    len = qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    offset = acb->cur_cluster +
             qed_offset_into_cluster(s, acb->cur_pos) +
             acb->cur_qiov.size;

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    ret = qed_copy_from_backing_file(s, start, len, offset);
    if (ret < 0) {
        return ret;
    }

    return qed_aio_write_main(acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}
/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static int qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int ret;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return -EINPROGRESS; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            return 0;
        }
    } else {
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        ret = qed_write_header(s);
        if (ret < 0) {
            return ret;
        }
    }

    if (acb->flags & QED_AIOCB_ZERO) {
        ret = qed_aio_write_l2_update(acb, 1);
    } else {
        ret = qed_aio_write_cow(acb);
    }
    if (ret < 0) {
        return ret;
    }
    return 0;
}
/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static int qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->common.bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                return -ENOMEM;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    return qed_aio_write_main(acb);
}
/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2 or QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 */
static int qed_aio_write_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        return qed_aio_write_inplace(acb, offset, len);

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        return qed_aio_write_alloc(acb, len);

    default:
        g_assert_not_reached();
    }
}
/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2 or QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 */
static int qed_aio_read_data(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        return 0;
    } else if (ret != QED_CLUSTER_FOUND) {
        return qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                                     &acb->backing_qiov);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    ret = bdrv_preadv(bs->file, offset, &acb->cur_qiov);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
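/* Note on -EINPROGRESS: qed_aio_write_alloc() returns -EINPROGRESS when a
 * request is queued behind another allocating write. Such a request is
 * resumed later via qed_resume_alloc_bh(), so qed_aio_next_io() below must
 * not complete it.
 */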
/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset;
    size_t len;
    int ret;

    while (1) {
        trace_qed_aio_next_io(s, acb, 0, acb->cur_pos + acb->cur_qiov.size);

        if (acb->backing_qiov) {
            qemu_iovec_destroy(acb->backing_qiov);
            g_free(acb->backing_qiov);
            acb->backing_qiov = NULL;
        }

        acb->qiov_offset += acb->cur_qiov.size;
        acb->cur_pos += acb->cur_qiov.size;
        qemu_iovec_reset(&acb->cur_qiov);

        /* Complete request */
        if (acb->cur_pos >= acb->end_pos) {
            qed_aio_complete(acb, 0);
            return;
        }

        /* Find next cluster and start I/O */
        len = acb->end_pos - acb->cur_pos;
        ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset);
        if (ret < 0) {
            qed_aio_complete(acb, ret);
            return;
        }

        if (acb->flags & QED_AIOCB_WRITE) {
            ret = qed_aio_write_data(acb, ret, offset, len);
        } else {
            ret = qed_aio_read_data(acb, ret, offset, len);
        }

        if (ret < 0) {
            if (ret != -EINPROGRESS) {
                qed_aio_complete(acb, ret);
            }
            return;
        }
    }
}
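/* The callback-based state machine above is bridged to the coroutine-based
 * block layer below: qed_co_request() starts a request and yields if it did
 * not complete synchronously; qed_co_request_cb() records the result and
 * re-enters the coroutine.
 */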
typedef struct QEDRequestCo {
    Coroutine *co;
    bool done;
    int ret;
} QEDRequestCo;

static void qed_co_request_cb(void *opaque, int ret)
{
    QEDRequestCo *co = opaque;

    co->done = true;
    co->ret = ret;
    qemu_coroutine_enter_if_inactive(co->co);
}

static int coroutine_fn qed_co_request(BlockDriverState *bs, int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       int flags)
{
    QEDRequestCo co = {
        .co = qemu_coroutine_self(),
        .done = false,
    };
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, qed_co_request_cb, &co);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors, &co, flags);

    acb->flags = flags;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->backing_qiov = NULL;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_start_io(acb);

    if (!co.done) {
        qemu_coroutine_yield();
    }

    return co.ret;
}
static int coroutine_fn bdrv_qed_co_readv(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, 0);
}

static int coroutine_fn bdrv_qed_co_writev(BlockDriverState *bs,
                                           int64_t sector_num, int nb_sectors,
                                           QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, QED_AIOCB_WRITE);
}
static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset,
                                                  int count,
                                                  BdrvRequestFlags flags)
{
    BDRVQEDState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;

    /* Fall back if the request is not aligned */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, count)) {
        return -ENOTSUP;
    }

    /* Zero writes start without an I/O buffer. If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = count;

    qemu_iovec_init_external(&qiov, &iov, 1);
    return qed_co_request(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                          count >> BDRV_SECTOR_BITS,
                          QED_AIOCB_WRITE | QED_AIOCB_ZERO);
}
static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "Invalid image size specified");
        return -EINVAL;
    }

    if ((uint64_t)offset < s->header.image_size) {
        error_setg(errp, "Shrinking images is currently not supported");
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
        error_setg_errno(errp, -ret, "Failed to update the image size");
    }
    return ret;
}
static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}
static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    uint8_t *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active. If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}
static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    memset(s, 0, sizeof(BDRVQEDState));
    ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        error_prepend(errp, "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}
static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}
static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};
static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_child_perm          = bdrv_format_default_perms,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_co_readv            = bdrv_qed_co_readv,
    .bdrv_co_writev           = bdrv_qed_co_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_drain               = bdrv_qed_drain,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);