/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/block-backend.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "block/qapi.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
struct BdrvDirtyBitmap {
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;
#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif
/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}
/* this function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}
void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}
static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}
/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  bdrv_get_aio_context(bs),
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}
/* This function makes an I/O wait if needed
 *
 * @bytes:    the byte count of the I/O
 * @is_write: is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* must this I/O wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is already queued,
     * throttle this I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}
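
/*
 * Illustrative sketch (not from the original source): a coroutine request
 * path is expected to call the intercept helper before submitting I/O,
 * roughly as bdrv_co_do_preadv() does when limits are enabled:
 *
 *     if (bs->io_limits_enabled) {
 *         bdrv_io_limits_intercept(bs, bytes, false);
 *     }
 *
 * The coroutine either proceeds immediately or yields on
 * bs->throttled_reqs[is_write] until a throttle timer callback wakes it.
 */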
size_t bdrv_opt_mem_align(BlockDriverState *bs)
{
    if (!bs || !bs->drv) {
        /* 4k should be on the safe side */
        return 4096;
    }

    return bs->bl.opt_mem_alignment;
}
/* check if the path starts with "<protocol>:" */
int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}

int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}
/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
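
/*
 * Example usage (illustrative; paths are hypothetical):
 *
 *     char dest[PATH_MAX];
 *     path_combine(dest, sizeof(dest), "/img/base.qcow2", "backing.qcow2");
 *         -> dest is "/img/backing.qcow2"
 *     path_combine(dest, sizeof(dest), "/img/base.qcow2", "/abs/other.raw");
 *         -> dest is "/abs/other.raw" (absolute names are copied unchanged)
 */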
void bdrv_get_full_backing_filename_from_filename(const char *backed,
                                                  const char *backing,
                                                  char *dest, size_t sz,
                                                  Error **errp)
{
    if (backing[0] == '\0' || path_has_protocol(backing) ||
        path_is_absolute(backing))
    {
        pstrcpy(dest, sz, backing);
    } else if (backed[0] == '\0' || strstart(backed, "json:", NULL)) {
        error_setg(errp, "Cannot use relative backing file names for '%s'",
                   backed);
    } else {
        path_combine(dest, sz, backed, backing);
    }
}
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz,
                                    Error **errp)
{
    char *backed = bs->exact_filename[0] ? bs->exact_filename : bs->filename;

    bdrv_get_full_backing_filename_from_filename(backed, bs->backing_file,
                                                 dest, sz, errp);
}
void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
BlockDriverState *bdrv_new_root(void)
{
    BlockDriverState *bs = bdrv_new();

    QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    return bs;
}
BlockDriverState *bdrv_new(void)
{
    BlockDriverState *bs;
    int i;

    bs = g_new0(BlockDriverState, 1);
    QLIST_INIT(&bs->dirty_bitmaps);
    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        QLIST_INIT(&bs->op_blockers[i]);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;
    bs->aio_context = qemu_get_aio_context();

    return bs;
}
void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}
BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;

    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}
static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}
BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}
typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QemuOpts *opts;
    int ret;
    Error *err;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->opts, &local_err);
    if (local_err) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}
int bdrv_create(BlockDriver *drv, const char *filename,
                QemuOpts *opts, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .opts = opts,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation",
                   drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            aio_poll(qemu_get_aio_context(), true);
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (cco.err) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}
int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, opts, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}
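
/*
 * Illustrative sketch (not from the original source): creating a 1 GiB
 * qcow2 image, mirroring what bdrv_append_temp_snapshot() does below for
 * its temporary overlay. The filename is hypothetical:
 *
 *     QemuOpts *opts = qemu_opts_create(bdrv_qcow2.create_opts, NULL, 0,
 *                                       &error_abort);
 *     qemu_opt_set_number(opts, BLOCK_OPT_SIZE, 1073741824);
 *     ret = bdrv_create(&bdrv_qcow2, "/tmp/test.qcow2", opts, &local_err);
 *     qemu_opts_del(opts);
 */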
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.max_transfer_length = bs->file->bl.max_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.opt_mem_alignment = 512;
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.max_transfer_length =
            MIN_NON_ZERO(bs->bl.max_transfer_length,
                         bs->backing_hd->bl.max_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater.  */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir) {
        tmpdir = "/var/tmp";
    }
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}
BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return &bdrv_file;
    }

    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}
/*
 * Guess image format by probing its contents.
 * This is not a good idea when your image is raw (CVE-2008-2004), but
 * we do it anyway for backward compatibility.
 *
 * @buf         contains the image's first @buf_size bytes.
 * @buf_size    is the buffer size in bytes (generally BLOCK_PROBE_BUF_SIZE,
 *              but can be smaller if the image file is smaller)
 * @filename    is its filename.
 *
 * For all block drivers, call the bdrv_probe() method to get its
 * probing score.
 * Return the first block driver with the highest probing score.
 */
BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
                            const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe) {
            score = d->bdrv_probe(buf, buf_size, filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}
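
/*
 * Illustrative sketch (not from the original source) of a driver-side
 * .bdrv_probe callback as consumed by bdrv_probe_all(): match magic bytes
 * and return a score, 0 meaning "not this format". Driver and magic are
 * hypothetical:
 *
 *     static int example_probe(const uint8_t *buf, int buf_size,
 *                              const char *filename)
 *     {
 *         if (buf_size >= 4 && !memcmp(buf, "EXMP", 4)) {
 *             return 100;
 *         }
 *         return 0;
 *     }
 */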
static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    BlockDriver *drv;
    uint8_t buf[BLOCK_PROBE_BUF_SIZE];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        *pdrv = &bdrv_raw;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    drv = bdrv_probe_all(buf, ret, filename);
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOTSUP;
    }
    *pdrv = drv;
    return ret;
}
/**
 * Set the current 'total_sectors' value
 * Return 0 on success, -errno on error.
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}
/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}
/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
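
/*
 * Example usage (illustrative): translating a -drive cache= value into
 * open flags before bdrv_open():
 *
 *     int flags = BDRV_O_RDWR;
 *     if (bdrv_parse_cache_flags("none", &flags) < 0) {
 *         ... report an invalid cache mode to the user ...
 *     }
 *
 * Afterwards flags has BDRV_O_NOCACHE | BDRV_O_CACHE_WB set in addition
 * to BDRV_O_RDWR.
 */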
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
/*
 * Returns the flags that a temporary snapshot should get, based on the
 * originally requested flags (the originally requested image will have flags
 * like a backing file)
 */
static int bdrv_temp_snapshot_flags(int flags)
{
    return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY;
}
/*
 * Returns the flags that bs->file should get, based on the given flags for
 * the parent BDS
 */
static int bdrv_inherited_flags(int flags)
{
    /* Enable protocol handling, disable format probing for bs->file */
    flags |= BDRV_O_PROTOCOL;

    /* Our block drivers take care to send flushes and respect unmap policy,
     * so we can enable both unconditionally on lower layers. */
    flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;

    /* Clear flags that only apply to the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);

    return flags;
}
/*
 * Returns the flags that bs->backing_hd should get, based on the given flags
 * for the parent BDS
 */
static int bdrv_backing_flags(int flags)
{
    /* backing files always opened read-only */
    flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ);

    /* snapshot=on is handled on the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY);

    return flags;
}
static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_PROTOCOL);

    /*
     * Snapshots should be writable.
     */
    if (flags & BDRV_O_TEMPORARY) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}
static void bdrv_assign_node_name(BlockDriverState *bs,
                                  const char *node_name,
                                  Error **errp)
{
    if (!node_name) {
        return;
    }

    /* Check for empty string or invalid characters */
    if (!id_wellformed(node_name)) {
        error_setg(errp, "Invalid node name");
        return;
    }

    /* takes care of avoiding namespace collisions */
    if (blk_by_name(node_name)) {
        error_setg(errp, "node-name=%s is conflicting with a device id",
                   node_name);
        return;
    }

    /* takes care of avoiding duplicate node names */
    if (bdrv_find_node(node_name)) {
        error_setg(errp, "Duplicate node name");
        return;
    }

    /* copy node name into the bs and insert it into the graph list */
    pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
    QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
}
/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    const char *node_name = NULL;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        return -EINVAL;
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    node_name = qdict_get_try_str(options, "node-name");
    bdrv_assign_node_name(bs, node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EINVAL;
    }
    qdict_del(options, "node-name");

    /* bdrv_open() with directly using a protocol as drv. This layer is already
     * opened, so assign it to bs (while file becomes a closed BlockDriverState)
     * and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->guest_block_size = 512;
    bs->request_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);
    bs->growable = !!(flags & BDRV_O_PROTOCOL);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                        ? "Driver '%s' can only be used for read-only devices"
                        : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), bs->filename);

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (local_err) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

    bdrv_refresh_limits(bs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto free_and_fail;
    }

    assert(bdrv_opt_mem_align(bs) != 0);
    assert((bs->request_alignment != 0) || bs->sg);
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}
static QDict *parse_json_filename(const char *filename, Error **errp)
{
    QObject *options_obj;
    QDict *options;
    int ret;

    ret = strstart(filename, "json:", &filename);
    assert(ret);

    options_obj = qobject_from_json(filename);
    if (!options_obj) {
        error_setg(errp, "Could not parse the JSON options");
        return NULL;
    }

    if (qobject_type(options_obj) != QTYPE_QDICT) {
        qobject_decref(options_obj);
        error_setg(errp, "Invalid JSON object given");
        return NULL;
    }

    options = qobject_to_qdict(options_obj);
    qdict_flatten(options);

    return options;
}
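
/*
 * Example (illustrative): the json: pseudo-protocol carries the options
 * QDict in the filename itself, so a filename such as
 *
 *     json:{"driver": "qcow2",
 *           "file": {"driver": "file", "filename": "disk.qcow2"}}
 *
 * is parsed and flattened by this function into the entries
 * driver=qcow2, file.driver=file and file.filename=disk.qcow2.
 */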
/*
 * Fills in default options for opening images and converts the legacy
 * filename/flags pair to option QDict entries.
 */
static int bdrv_fill_options(QDict **options, const char **pfilename, int flags,
                             BlockDriver *drv, Error **errp)
{
    const char *filename = *pfilename;
    const char *drvname;
    bool protocol = flags & BDRV_O_PROTOCOL;
    bool parse_filename = false;
    Error *local_err = NULL;

    /* Parse json: pseudo-protocol */
    if (filename && g_str_has_prefix(filename, "json:")) {
        QDict *json_options = parse_json_filename(filename, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        /* Options given in the filename have lower priority than options
         * specified directly */
        qdict_join(*options, json_options, false);
        QDECREF(json_options);
        *pfilename = filename = NULL;
    }

    /* Fetch the file name from the options QDict if necessary */
    if (protocol && filename) {
        if (!qdict_haskey(*options, "filename")) {
            qdict_put(*options, "filename", qstring_from_str(filename));
            parse_filename = true;
        } else {
            error_setg(errp, "Can't specify 'file' and 'filename' options at "
                             "the same time");
            return -EINVAL;
        }
    }

    /* Find the right block driver */
    filename = qdict_get_try_str(*options, "filename");
    drvname = qdict_get_try_str(*options, "driver");

    if (drv) {
        if (drvname) {
            error_setg(errp, "Driver specified twice");
            return -EINVAL;
        }
        drvname = drv->format_name;
        qdict_put(*options, "driver", qstring_from_str(drvname));
    } else {
        if (!drvname && protocol) {
            if (filename) {
                drv = bdrv_find_protocol(filename, parse_filename);
                if (!drv) {
                    error_setg(errp, "Unknown protocol");
                    return -EINVAL;
                }

                drvname = drv->format_name;
                qdict_put(*options, "driver", qstring_from_str(drvname));
            } else {
                error_setg(errp, "Must specify either driver or file");
                return -EINVAL;
            }
        } else if (drvname) {
            drv = bdrv_find_format(drvname);
            if (!drv) {
                error_setg(errp, "Unknown driver '%s'", drvname);
                return -ENOENT;
            }
        }
    }

    assert(drv || !protocol);

    /* Driver-specific filename parsing */
    if (drv && drv->bdrv_parse_filename && parse_filename) {
        drv->bdrv_parse_filename(filename, *options, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        if (!drv->bdrv_needs_filename) {
            qdict_del(*options, "filename");
        }
    }

    return 0;
}
void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd)
{
    if (bs->backing_hd) {
        assert(bs->backing_blocker);
        bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker);
    } else if (backing_hd) {
        error_setg(&bs->backing_blocker,
                   "device is used as backing hd of '%s'",
                   bdrv_get_device_name(bs));
    }

    bs->backing_hd = backing_hd;
    if (!backing_hd) {
        error_free(bs->backing_blocker);
        bs->backing_blocker = NULL;
        goto out;
    }
    bs->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_hd->filename);
    pstrcpy(bs->backing_format, sizeof(bs->backing_format),
            backing_hd->drv ? backing_hd->drv->format_name : "");

    bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
    /* Otherwise we won't be able to commit due to check in bdrv_commit */
    bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT,
                    bs->backing_blocker);
out:
    bdrv_refresh_limits(bs, NULL);
}
/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the dictionary,
 * it needs to use QINCREF() before calling bdrv_file_open.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char *backing_filename = g_malloc0(PATH_MAX);
    int ret = 0;
    BlockDriverState *backing_hd;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        goto free_exit;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        goto free_exit;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX,
                                       &local_err);
        if (local_err) {
            ret = -EINVAL;
            error_propagate(errp, local_err);
            QDECREF(options);
            goto free_exit;
        }
    }

    if (!bs->drv || !bs->drv->supports_backing) {
        ret = -EINVAL;
        error_setg(errp, "Driver doesn't support backing files");
        QDECREF(options);
        goto free_exit;
    }

    backing_hd = bdrv_new();

    if (bs->backing_format[0] != '\0' && !qdict_haskey(options, "driver")) {
        qdict_put(options, "driver", qstring_from_str(bs->backing_format));
    }

    assert(bs->backing_hd == NULL);
    ret = bdrv_open(&backing_hd,
                    *backing_filename ? backing_filename : NULL, NULL, options,
                    bdrv_backing_flags(bs->open_flags), NULL, &local_err);
    if (ret < 0) {
        bdrv_unref(backing_hd);
        backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        goto free_exit;
    }

    bdrv_set_backing_hd(bs, backing_hd);

free_exit:
    g_free(backing_filename);
    return ret;
}
/*
 * Opens a disk image whose options are given as BlockdevRef in another block
 * device's options.
 *
 * If allow_none is true, no image will be opened if filename is false and no
 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
 *
 * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
 * itself, all options starting with "${bdref_key}." are considered part of the
 * BlockdevRef.
 *
 * The BlockdevRef will be removed from the options QDict.
 *
 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
 */
int bdrv_open_image(BlockDriverState **pbs, const char *filename,
                    QDict *options, const char *bdref_key, int flags,
                    bool allow_none, Error **errp)
{
    QDict *image_options;
    int ret;
    char *bdref_key_dot;
    const char *reference;

    assert(pbs);
    assert(*pbs == NULL);

    bdref_key_dot = g_strdup_printf("%s.", bdref_key);
    qdict_extract_subqdict(options, &image_options, bdref_key_dot);
    g_free(bdref_key_dot);

    reference = qdict_get_try_str(options, bdref_key);
    if (!filename && !reference && !qdict_size(image_options)) {
        if (allow_none) {
            ret = 0;
        } else {
            error_setg(errp, "A block device must be specified for \"%s\"",
                       bdref_key);
            ret = -EINVAL;
        }
        QDECREF(image_options);
        goto done;
    }

    ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);

done:
    qdict_del(options, bdref_key);
    return ret;
}
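
/*
 * Example (illustrative): with bdref_key "file" and a flattened options
 * QDict containing
 *
 *     file.driver=file, file.filename=disk.img
 *
 * the "file." entries are extracted into image_options (with the prefix
 * stripped) and the image is opened from them. Alternatively an entry
 * file=<node-name> would reference an already existing block device.
 */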
int bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)
{
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char *tmp_filename = g_malloc0(PATH_MAX + 1);
    int64_t total_size;
    QemuOpts *opts = NULL;
    QDict *snapshot_options;
    BlockDriverState *bs_snapshot;
    Error *local_err = NULL;
    int ret;

    /* if snapshot, we create a temporary backing file and open it
       instead of opening 'filename' directly */

    /* Get the required size from the image */
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        ret = total_size;
        error_setg_errno(errp, -total_size, "Could not get image size");
        goto out;
    }

    /* Create the temporary image */
    ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not get temporary filename");
        goto out;
    }

    opts = qemu_opts_create(bdrv_qcow2.create_opts, NULL, 0,
                            &error_abort);
    qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_size);
    ret = bdrv_create(&bdrv_qcow2, tmp_filename, opts, &local_err);
    qemu_opts_del(opts);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not create temporary overlay "
                         "'%s': %s", tmp_filename,
                         error_get_pretty(local_err));
        error_free(local_err);
        goto out;
    }

    /* Prepare a new options QDict for the temporary file */
    snapshot_options = qdict_new();
    qdict_put(snapshot_options, "file.driver",
              qstring_from_str("file"));
    qdict_put(snapshot_options, "file.filename",
              qstring_from_str(tmp_filename));

    bs_snapshot = bdrv_new();

    ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
                    flags, &bdrv_qcow2, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto out;
    }

    bdrv_append(bs_snapshot, bs);

out:
    g_free(tmp_filename);
    return ret;
}
/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 *
 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there.
 * If it is not NULL, the referenced BDS will be reused.
 *
 * The reference parameter may be used to specify an existing block device which
 * should be opened. If specified, neither options nor a filename may be given,
 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
 */
int bdrv_open(BlockDriverState **pbs, const char *filename,
              const char *reference, QDict *options, int flags,
              BlockDriver *drv, Error **errp)
{
    int ret;
    BlockDriverState *file = NULL, *bs;
    const char *drvname;
    Error *local_err = NULL;
    int snapshot_flags = 0;

    assert(pbs);

    if (reference) {
        bool options_non_empty = options ? qdict_size(options) : false;
        QDECREF(options);

        if (*pbs) {
            error_setg(errp, "Cannot reuse an existing BDS when referencing "
                       "another block device");
            return -EINVAL;
        }

        if (filename || options_non_empty) {
            error_setg(errp, "Cannot reference an existing block device with "
                       "additional options or a new filename");
            return -EINVAL;
        }

        bs = bdrv_lookup_bs(reference, reference, errp);
        if (!bs) {
            return -ENODEV;
        }
        bdrv_ref(bs);
        *pbs = bs;
        return 0;
    }

    if (*pbs) {
        bs = *pbs;
    } else {
        bs = bdrv_new();
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    ret = bdrv_fill_options(&options, &filename, flags, drv, &local_err);
    if (local_err) {
        goto fail;
    }

    /* Find the right image format driver */
    drv = NULL;
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
        if (!drv) {
            error_setg(errp, "Unknown driver: '%s'", drvname);
            ret = -EINVAL;
            goto fail;
        }
    }

    assert(drvname || !(flags & BDRV_O_PROTOCOL));
    if (drv && !drv->bdrv_file_open) {
        /* If the user explicitly wants a format driver here, we'll need to add
         * another layer for the protocol in bs->file */
        flags &= ~BDRV_O_PROTOCOL;
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    /* Open image file without format layer */
    if ((flags & BDRV_O_PROTOCOL) == 0) {
        if (flags & BDRV_O_RDWR) {
            flags |= BDRV_O_ALLOW_RDWR;
        }
        if (flags & BDRV_O_SNAPSHOT) {
            snapshot_flags = bdrv_temp_snapshot_flags(flags);
            flags = bdrv_backing_flags(flags);
        }

        assert(file == NULL);
        ret = bdrv_open_image(&file, filename, options, "file",
                              bdrv_inherited_flags(flags),
                              true, &local_err);
        if (ret < 0) {
            goto fail;
        }
    }

    /* Image format probing */
    bs->probed = !drv;
    if (!drv && file) {
        ret = find_image_format(file, filename, &drv, &local_err);
        if (ret < 0) {
            goto fail;
        }
    } else if (!drv) {
        error_setg(errp, "Must specify either driver or file");
        ret = -EINVAL;
        goto fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto fail;
    }

    if (file && (bs->file != file)) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    bdrv_refresh_filename(bs);

    /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
     * temporary snapshot afterwards. */
    if (snapshot_flags) {
        ret = bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err);
        if (local_err) {
            goto close_and_fail;
        }
    }

    /* Check if any unknown options were used */
    if (options && (qdict_size(options) != 0)) {
        const QDictEntry *entry = qdict_first(options);
        if (flags & BDRV_O_PROTOCOL) {
            error_setg(errp, "Block protocol '%s' doesn't support the option "
                       "'%s'", drv->format_name, entry->key);
        } else {
            error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                       "support the option '%s'", drv->format_name,
                       bdrv_get_device_name(bs), entry->key);
        }

        ret = -EINVAL;
        goto close_and_fail;
    }

    if (!bdrv_key_required(bs)) {
        if (bs->blk) {
            blk_dev_change_media_cb(bs->blk, true);
        }
    } else if (!runstate_check(RUN_STATE_PRELAUNCH)
               && !runstate_check(RUN_STATE_INMIGRATE)
               && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
        error_setg(errp,
                   "Guest must be stopped for opening of encrypted image");
        ret = -EBUSY;
        goto close_and_fail;
    }

    QDECREF(options);
    *pbs = bs;
    return 0;

fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (!*pbs) {
        /* If *pbs is NULL, a new BDS has been created in this function and
           needs to be freed now. Otherwise, it does not need to be closed,
           since it has not really been opened yet. */
        bdrv_unref(bs);
    }
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    /* See fail path, but now the BDS has to be always closed */
    if (*pbs) {
        bdrv_close(bs);
    } else {
        bdrv_unref(bs);
    }
    QDECREF(options);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}
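
/*
 * Illustrative sketch (not from the original source): opening a qcow2
 * image with an explicit driver through the options QDict; the filename
 * is hypothetical:
 *
 *     BlockDriverState *bs = NULL;
 *     QDict *opts = qdict_new();
 *     qdict_put(opts, "driver", qstring_from_str("qcow2"));
 *     ret = bdrv_open(&bs, "disk.qcow2", NULL, opts,
 *                     BDRV_O_RDWR | BDRV_O_CACHE_WB, NULL, &local_err);
 *
 * The QDict reference is consumed even on failure, so a caller that wants
 * to reuse it must QINCREF() it first.
 */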
typedef struct BlockReopenQueueEntry {
    bool prepared;
    BDRVReopenState state;
    QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;
/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had QSIMPLE_INIT
 * already performed, or alternatively may be NULL, in which case a new
 * BlockReopenQueue will be created and initialized. This newly created
 * BlockReopenQueue should be passed back in for subsequent calls that are
 * intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    /* bdrv_open() masks this flag out */
    flags &= ~BDRV_O_PROTOCOL;

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags));
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}
/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags.  All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 *
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    bdrv_drain_all();

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    ret = 0;
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

cleanup:
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}
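
/*
 * Illustrative sketch (not from the original source): reopening two
 * devices in one transaction; either both are reopened or neither is:
 *
 *     BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs_a, flags_a);
 *     queue = bdrv_reopen_queue(queue, bs_b, flags_b);
 *     ret = bdrv_reopen_multiple(queue, &local_err);
 *
 * bdrv_reopen() below is the single-device convenience wrapper around
 * exactly this pattern.
 */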
/* Reopen a single BlockDriverState with the specified flags. */
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);

    ret = bdrv_reopen_multiple(queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
    return ret;
}
/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver layer .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error.  On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state
 *
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  bdrv_get_device_name(reopen_state->bs));
        goto error;
    }

    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_setg(errp, "failed while preparing to reopen image '%s'",
                           reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, bdrv_get_device_name(reopen_state->bs),
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}
/*
 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
 * makes them final by swapping the staging BlockDriverState contents into
 * the active BlockDriverState contents.
 */
void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    /* If there are any driver level actions to take */
    if (drv->bdrv_reopen_commit) {
        drv->bdrv_reopen_commit(reopen_state);
    }

    /* set BDS specific flags now */
    reopen_state->bs->open_flags         = reopen_state->flags;
    reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
                                              BDRV_O_CACHE_WB);
    reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);

    bdrv_refresh_limits(reopen_state->bs, NULL);
}
/*
 * Abort the reopen, and delete and free the staged changes in
 * reopen_state
 */
void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    if (drv->bdrv_reopen_abort) {
        drv->bdrv_reopen_abort(reopen_state);
    }
}
void bdrv_close(BlockDriverState *bs)
{
    BdrvAioNotifier *ban, *ban_next;

    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all(); /* complete I/O */
    bdrv_flush(bs);
    bdrv_drain_all(); /* in case flush left pending I/O */
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        if (bs->backing_hd) {
            BlockDriverState *backing_hd = bs->backing_hd;
            bdrv_set_backing_hd(bs, NULL);
            bdrv_unref(backing_hd);
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->zero_beyond_eof = false;
        QDECREF(bs->options);
        bs->options = NULL;
        QDECREF(bs->full_open_options);
        bs->full_open_options = NULL;

        if (bs->file != NULL) {
            bdrv_unref(bs->file);
            bs->file = NULL;
        }
    }

    if (bs->blk) {
        blk_dev_change_media_cb(bs->blk, false);
    }

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }

    QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) {
        g_free(ban);
    }
    QLIST_INIT(&bs->aio_notifiers);
}
void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_close(bs);
        aio_context_release(aio_context);
    }
}
/* Check if any requests are in-flight (including throttled requests) */
static bool bdrv_requests_pending(BlockDriverState *bs)
{
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }
    if (bs->file && bdrv_requests_pending(bs->file)) {
        return true;
    }
    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
        return true;
    }
    return false;
}
static bool bdrv_drain_one(BlockDriverState *bs)
{
    bool bs_busy;

    bdrv_flush_io_queue(bs);
    bdrv_start_throttled_reqs(bs);
    bs_busy = bdrv_requests_pending(bs);
    bs_busy |= aio_poll(bdrv_get_aio_context(bs), bs_busy);
    return bs_busy;
}
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree
 *
 * See the warning in bdrv_drain_all(). This function can only be called if
 * you are sure nothing can generate I/O because you have op blockers
 * installed.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 */
void bdrv_drain(BlockDriverState *bs)
{
    while (bdrv_drain_one(bs)) {
        /* Keep iterating */
    }
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete.  Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;

    while (busy) {
        busy = false;

        QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
            AioContext *aio_context = bdrv_get_aio_context(bs);

            aio_context_acquire(aio_context);
            busy |= bdrv_drain_one(bs);
            aio_context_release(aio_context);
        }
    }
}
/* Make a BlockDriverState anonymous by removing it from the bdrv_states and
 * graph_bdrv_states lists.  Also, NUL-terminate the node name to prevent a
 * double remove. */
void bdrv_make_anon(BlockDriverState *bs)
{
    /*
     * Take care to remove bs from bdrv_states only when it's actually
     * in it.  Note that bs->device_list.tqe_prev is initially null,
     * and gets set to non-null by QTAILQ_INSERT_TAIL().  Establish
     * the useful invariant "bs in bdrv_states iff bs->tqe_prev" by
     * resetting it to null on remove.
     */
    if (bs->device_list.tqe_prev) {
        QTAILQ_REMOVE(&bdrv_states, bs, device_list);
        bs->device_list.tqe_prev = NULL;
    }
    if (bs->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
    }
    bs->node_name[0] = '\0';
}
static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}
static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */

    /* dev info */
    bs_dest->guest_block_size   = bs_src->guest_block_size;
    bs_dest->copy_on_read       = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o throttled req */
    memcpy(&bs_dest->throttle_state,
           &bs_src->throttle_state,
           sizeof(ThrottleState));
    bs_dest->throttled_reqs[0]  = bs_src->throttled_reqs[0];
    bs_dest->throttled_reqs[1]  = bs_src->throttled_reqs[1];
    bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error      = bs_src->on_read_error;
    bs_dest->on_write_error     = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
    bs_dest->iostatus           = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_bitmaps      = bs_src->dirty_bitmaps;

    /* reference count */
    bs_dest->refcnt             = bs_src->refcnt;

    /* job */
    bs_dest->job                = bs_src->job;

    /* keep the same entry in bdrv_states */
    bs_dest->device_list = bs_src->device_list;
    bs_dest->blk = bs_src->blk;

    memcpy(bs_dest->op_blockers, bs_src->op_blockers,
           sizeof(bs_dest->op_blockers));
}
/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old. Both bs_new and bs_old are modified.
 *
 * bs_new must not be attached to a BlockBackend.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* The code needs to swap the node_name but simply swapping node_list won't
     * work so first remove the nodes from the graph list, do the swap then
     * insert them back if needed.
     */
    if (bs_new->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list);
    }
    if (bs_old->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list);
    }

    /* bs_new must be unattached and shouldn't have anything fancy enabled */
    assert(!bs_new->blk);
    assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
    assert(bs_new->job == NULL);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new must remain unattached */
    assert(!bs_new->blk);

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->job == NULL);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    /* insert the nodes back into the graph node list if needed */
    if (bs_new->node_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list);
    }
    if (bs_old->node_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list);
    }

    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}
/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new must not be attached to a BlockBackend.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    bdrv_swap(bs_new, bs_top);

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    bdrv_set_backing_hd(bs_top, bs_new);
}
static void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->job);
    assert(bdrv_op_blocker_is_empty(bs));
    assert(!bs->refcnt);
    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    bdrv_close(bs);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    g_free(bs);
}
/*
 * Run consistency checks on an image
 *
 * Returns 0 if the check could be completed (it doesn't mean that the image is
 * free of errors) or -errno when an internal error occurred. The results of the
 * check are stored in res.
 */
int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
{
    if (bs->drv == NULL) {
        return -ENOMEDIUM;
    }
    if (bs->drv->bdrv_check == NULL) {
        return -ENOTSUP;
    }

    memset(res, 0, sizeof(*res));
    return bs->drv->bdrv_check(bs, res, fix);
}
#define COMMIT_BUF_SECTORS 2048

/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    int64_t sector, total_sectors, length, backing_length;
    int n, ro, open_flags;
    int ret = 0;
    uint8_t *buf = NULL;
    char filename[PATH_MAX];

    if (!drv)
        return -ENOMEDIUM;

    if (!bs->backing_hd) {
        return -ENOTSUP;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT, NULL) ||
        bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT, NULL)) {
        return -EBUSY;
    }

    ro = bs->backing_hd->read_only;
    /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
    pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
    open_flags = bs->backing_hd->open_flags;

    if (ro) {
        if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
            return -EACCES;
        }
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        ret = length;
        goto ro_cleanup;
    }

    backing_length = bdrv_getlength(bs->backing_hd);
    if (backing_length < 0) {
        ret = backing_length;
        goto ro_cleanup;
    }

    /* If our top snapshot is larger than the backing file image,
     * grow the backing file image if possible.  If not possible,
     * we must return an error */
    if (length > backing_length) {
        ret = bdrv_truncate(bs->backing_hd, length);
        if (ret < 0) {
            goto ro_cleanup;
        }
    }

    total_sectors = length >> BDRV_SECTOR_BITS;

    /* qemu_try_blockalign() for bs will choose an alignment that works for
     * bs->backing_hd as well, so no need to compare the alignment manually. */
    buf = qemu_try_blockalign(bs, COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
    if (buf == NULL) {
        ret = -ENOMEM;
        goto ro_cleanup;
    }

    for (sector = 0; sector < total_sectors; sector += n) {
        ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
        if (ret < 0) {
            goto ro_cleanup;
        }
        if (ret) {
            ret = bdrv_read(bs, sector, buf, n);
            if (ret < 0) {
                goto ro_cleanup;
            }

            ret = bdrv_write(bs->backing_hd, sector, buf, n);
            if (ret < 0) {
                goto ro_cleanup;
            }
        }
    }

    if (drv->bdrv_make_empty) {
        ret = drv->bdrv_make_empty(bs);
        if (ret < 0) {
            goto ro_cleanup;
        }
        bdrv_flush(bs);
    }

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    if (bs->backing_hd) {
        bdrv_flush(bs->backing_hd);
    }

    ret = 0;
ro_cleanup:
    qemu_vfree(buf);

    if (ro) {
        /* ignoring error return here */
        bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
    }

    return ret;
}
int bdrv_commit_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->drv && bs->backing_hd) {
            int ret = bdrv_commit(bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}
/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .is_write       = is_write,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
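
/*
 * Worked example (illustrative): with a 64 KiB cluster size,
 * c = 65536 / 512 = 128 sectors per cluster. A request covering sectors
 * [130, 134) is rounded to *cluster_sector_num = 128 and
 * *cluster_nb_sectors = 128, i.e. the whole cluster containing it.
 */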
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->request_alignment;
    } else {
        return bdi.cluster_size;
    }
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
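
/*
 * Example (illustrative): a request with overlap_offset = 0 and
 * overlap_bytes = 4096 overlaps the range [2048, 6144) but not
 * [4096, 8192): the ranges are half-open, so merely touching ends do
 * not count as an overlap.
 */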
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}
/*
 * Return values:
 * 0        - success
 * -EINVAL  - backing format specified, but no file
 * -ENOSPC  - can't update the backing file because no space is left in the
 *            image file
 * -ENOTSUP - format driver doesn't support changing the backing file
 */
int bdrv_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    BlockDriver *drv = bs->drv;
    int ret;

    /* Backing file format doesn't make sense without a backing file */
    if (backing_fmt && !backing_file) {
        return -EINVAL;
    }

    if (drv->bdrv_change_backing_file != NULL) {
        ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
    } else {
        ret = -ENOTSUP;
    }

    if (ret == 0) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
        pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
    }
    return ret;
}
/*
 * Finds the image layer in the chain that has 'bs' as its backing file.
 *
 * active is the current topmost image.
 *
 * Returns NULL if bs is not found in active's image chain,
 * or if active == bs.
 *
 * Returns the bottommost base image if bs == NULL.
 */
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs)
{
    while (active && bs != active->backing_hd) {
        active = active->backing_hd;
    }

    return active;
}

/* Given a BDS, searches for the base layer. */
BlockDriverState *bdrv_find_base(BlockDriverState *bs)
{
    return bdrv_find_overlay(bs, NULL);
}
typedef struct BlkIntermediateStates {
    BlockDriverState *bs;
    QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
} BlkIntermediateStates;


/*
 * Drops images above 'base' up to and including 'top', and sets the image
 * above 'top' to have base as its backing file.
 *
 * Requires that the overlay to 'top' is opened r/w, so that the backing file
 * information in 'bs' can be properly updated.
 *
 * E.g., this will convert the following chain:
 * bottom <- base <- intermediate <- top <- active
 *
 * to
 *
 * bottom <- base <- active
 *
 * It is allowed for bottom==base, in which case it converts:
 *
 * base <- intermediate <- top <- active
 *
 * to
 *
 * base <- active
 *
 * If backing_file_str is non-NULL, it will be used when modifying top's
 * overlay image metadata.
 *
 * Error conditions:
 *  if active == top, that is considered an error
 */
int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
                           BlockDriverState *base, const char *backing_file_str)
{
    BlockDriverState *intermediate;
    BlockDriverState *base_bs = NULL;
    BlockDriverState *new_top_bs = NULL;
    BlkIntermediateStates *intermediate_state, *next;
    int ret = -EIO;

    QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
    QSIMPLEQ_INIT(&states_to_delete);

    if (!top->drv || !base->drv) {
        goto exit;
    }

    new_top_bs = bdrv_find_overlay(active, top);

    if (new_top_bs == NULL) {
        /* we could not find the image above 'top', this is an error */
        goto exit;
    }

    /* special case of new_top_bs->backing_hd already pointing to base - nothing
     * to do, no intermediate images */
    if (new_top_bs->backing_hd == base) {
        ret = 0;
        goto exit;
    }

    intermediate = top;

    /* now we will go down through the list, and add each BDS we find
     * into our deletion queue, until we hit the 'base'
     */
    while (intermediate) {
        intermediate_state = g_new0(BlkIntermediateStates, 1);
        intermediate_state->bs = intermediate;
        QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);

        if (intermediate->backing_hd == base) {
            base_bs = intermediate->backing_hd;
            break;
        }
        intermediate = intermediate->backing_hd;
    }
    if (base_bs == NULL) {
        /* something went wrong, we did not end at the base. safely
         * unravel everything, and exit with error */
        goto exit;
    }

    /* success - we can delete the intermediate states, and link top->base */
    backing_file_str = backing_file_str ? backing_file_str : base_bs->filename;
    ret = bdrv_change_backing_file(new_top_bs, backing_file_str,
                                   base_bs->drv ? base_bs->drv->format_name : "");
    if (ret) {
        goto exit;
    }
    bdrv_set_backing_hd(new_top_bs, base_bs);

    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        /* so that bdrv_close() does not recursively close the chain */
        bdrv_set_backing_hd(intermediate_state->bs, NULL);
        bdrv_unref(intermediate_state->bs);
    }
    ret = 0;

exit:
    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        g_free(intermediate_state);
    }
    return ret;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs))
        return -ENOMEDIUM;

    if (bs->growable)
        return 0;

    len = bdrv_getlength(bs);

    if (offset < 0)
        return -EIO;

    if ((offset > len) || (len - offset < size))
        return -EIO;

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}
typedef struct RwCo {
    BlockDriverState *bs;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
                                      rwco->qiov->size, rwco->qiov,
                                      rwco->flags);
    } else {
        rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
                                       rwco->qiov->size, rwco->qiov,
                                       rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    /**
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }
    return rwco.ret;
}
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}
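/*
 * Example (sketch): reading the first sector of an image into a
 * 512-byte buffer, assuming "bs" is an opened BlockDriverState:
 *
 *     uint8_t buf[BDRV_SECTOR_SIZE];
 *
 *     if (bdrv_read(bs, 0, buf, 1) < 0) {
 *         -ENOMEDIUM, -EIO, ... - see bdrv_write() for the codes
 *     }
 */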
/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors)
{
    bool enabled;
    int ret;

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}
/*
 * Completely zero out a block device with the help of bdrv_write_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
    int n;

    target_sectors = bdrv_nb_sectors(bs);
    if (target_sectors < 0) {
        return target_sectors;
    }

    for (;;) {
        nb_sectors = target_sectors - sector_num;
        if (nb_sectors <= 0) {
            return 0;
        }
        if (nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
            nb_sectors = INT_MAX / BDRV_SECTOR_SIZE;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_write_zeroes(bs, sector_num, n, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}
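/*
 * Example (sketch): fully zero an image, allowing the driver to unmap
 * instead of writing explicit zeroes where possible:
 *
 *     int ret = bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         error_report("zeroing failed: %s", strerror(-ret));
 *     }
 */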
int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };
    int ret;

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return bytes;
}

int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) buf,
        .iov_len = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that already do it */
    if (bs->enable_write_cache) {
        bdrv_flush(bs);
    }

    return 0;
}
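/*
 * Example (sketch): persisting a hypothetical metadata header before any
 * later write may land.  "header" is illustrative only:
 *
 *     ret = bdrv_pwrite_sync(bs, 0, &header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;    header is not guaranteed to be on disk
 *     }
 */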
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
    if (bounce_buffer == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    wait_serialising_requests(req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    if (!(bs->zero_beyond_eof && bs->growable)) {
        ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        /* Read zeros after EOF of growable BDSes */
        int64_t total_sectors, max_nb_sectors;

        total_sectors = bdrv_nb_sectors(bs);
        if (total_sectors < 0) {
            ret = total_sectors;
            goto out;
        }

        max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
                                  align >> BDRV_SECTOR_BITS);
        if (nb_sectors < max_nb_sectors) {
            ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
        } else if (max_nb_sectors > 0) {
            QEMUIOVector local_qiov;

            qemu_iovec_init(&local_qiov, qiov->niov);
            qemu_iovec_concat(&local_qiov, qiov, 0,
                              max_nb_sectors * BDRV_SECTOR_SIZE);

            ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
                                     &local_qiov);

            qemu_iovec_destroy(&local_qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                              BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    return ret;
}
/*
 * Handle a read request in coroutine context
 */
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (bdrv_check_byte_request(bs, offset, bytes)) {
        return -EIO;
    }

    if (bs->copy_on_read) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, bytes, false);
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, false);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}
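/*
 * Worked example (sketch): with request_alignment = 4096, a 1024-byte
 * read at offset 4000 is padded as follows: the head padding is 4000
 * bytes (offset & 4095), making offset 0 and bytes 5024; the tail
 * padding is 4096 - (5024 & 4095) = 3168 bytes, and bytes rounds up to
 * 8192.  The driver therefore sees one aligned read covering [0, 8192)
 * while the caller's qiov only receives bytes [4000, 5024).
 */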
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
        return -EINVAL;
    }

    return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
                             nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}
/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_WRITE_ZEROES_DEFAULT 32768

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;

    int max_write_zeroes = bs->bl.max_write_zeroes ?
                           bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned.
         */
        if (bs->bl.write_zeroes_alignment
            && num > bs->bl.write_zeroes_alignment) {
            if (sector_num % bs->bl.write_zeroes_alignment != 0) {
                /* Make a small request up to the first aligned sector. */
                num = bs->bl.write_zeroes_alignment;
                num -= sector_num % bs->bl.write_zeroes_alignment;
            } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
                /* Shorten the request to the last aligned sector.  num cannot
                 * underflow because num > bs->bl.write_zeroes_alignment.
                 */
                num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
            }
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
                if (iov.iov_base == NULL) {
                    ret = -ENOMEM;
                    goto fail;
                }
                memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_write_zeroes) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        sector_num += num;
        nb_sectors -= num;
    }

fail:
    qemu_vfree(iov.iov_base);
    return ret;
}
/*
 * Forwards an already correctly aligned write request to the BlockDriver.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert(!qiov || bytes == qiov->size);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
        qemu_iovec_is_zero(qiov)) {
        flags |= BDRV_REQ_ZERO_WRITE;
        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
            flags |= BDRV_REQ_MAY_UNMAP;
        }
    }

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
    } else {
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    }
    BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);

    if (ret == 0 && !bs->enable_write_cache) {
        ret = bdrv_co_flush(bs);
    }

    bdrv_set_dirty(bs, sector_num, nb_sectors);

    block_acct_highest_sector(&bs->stats, sector_num, nb_sectors);

    if (bs->growable && ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
    }

    return ret;
}
/*
 * Handle a write request in coroutine context
 */
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BdrvTrackedRequest req;
    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EACCES;
    }
    if (bdrv_check_byte_request(bs, offset, bytes)) {
        return -EIO;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, bytes, true);
    }

    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, true);

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
                                  align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);

    return ret;
}
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
        return -EINVAL;
    }

    return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
                              nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      BdrvRequestFlags flags)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);

    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE | flags);
}
/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 */
int bdrv_truncate(BlockDriverState *bs, int64_t offset)
{
    BlockDriver *drv = bs->drv;
    int ret;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_truncate)
        return -ENOTSUP;
    if (bs->read_only)
        return -EACCES;

    ret = drv->bdrv_truncate(bs, offset);
    if (ret == 0) {
        ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
        if (bs->blk) {
            blk_dev_resize_cb(bs->blk);
        }
    }
    return ret;
}
/**
 * Length of an allocated file in bytes. Sparse files are counted by actual
 * allocated space. Return < 0 if error or unknown.
 */
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_get_allocated_file_size) {
        return drv->bdrv_get_allocated_file_size(bs);
    }
    if (bs->file) {
        return bdrv_get_allocated_file_size(bs->file);
    }
    return -ENOTSUP;
}
/**
 * Return number of sectors on success, -errno on error.
 */
int64_t bdrv_nb_sectors(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (!drv)
        return -ENOMEDIUM;

    if (drv->has_variable_length) {
        int ret = refresh_total_sectors(bs, bs->total_sectors);
        if (ret < 0) {
            return ret;
        }
    }
    return bs->total_sectors;
}

/**
 * Return length in bytes on success, -errno on error.
 * The length is always a multiple of BDRV_SECTOR_SIZE.
 */
int64_t bdrv_getlength(BlockDriverState *bs)
{
    int64_t ret = bdrv_nb_sectors(bs);

    return ret < 0 ? ret : ret * BDRV_SECTOR_SIZE;
}

/* return 0 as number of sectors if no device present or error */
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
{
    int64_t nb_sectors = bdrv_nb_sectors(bs);

    *nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors;
}
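/*
 * Example (sketch): querying an image's size in both units; for a 1 GiB
 * image, bdrv_nb_sectors() would return 2097152 and bdrv_getlength()
 * 2097152 * BDRV_SECTOR_SIZE bytes:
 *
 *     int64_t sectors = bdrv_nb_sectors(bs);
 *     int64_t length  = bdrv_getlength(bs);
 */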
void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
                       BlockdevOnError on_write_error)
{
    bs->on_read_error = on_read_error;
    bs->on_write_error = on_write_error;
}

BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
{
    return is_read ? bs->on_read_error : bs->on_write_error;
}

BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
{
    BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    default:
        abort();
    }
}
static void send_qmp_error_event(BlockDriverState *bs,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(bdrv_get_device_name(bs), optype, action,
                                   bdrv_iostatus_is_enabled(bs),
                                   error == ENOSPC, strerror(error),
                                   &error_abort);
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
                       bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        bdrv_iostatus_set_err(bs, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects.  First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event.  Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop.  In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(bs, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(bs, action, is_read, error);
    }
}
int bdrv_is_read_only(BlockDriverState *bs)
{
    return bs->read_only;
}

int bdrv_is_sg(BlockDriverState *bs)
{
    return bs->sg;
}

int bdrv_enable_write_cache(BlockDriverState *bs)
{
    return bs->enable_write_cache;
}

void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
{
    bs->enable_write_cache = wce;

    /* so a reopen() will preserve wce */
    if (wce) {
        bs->open_flags |= BDRV_O_CACHE_WB;
    } else {
        bs->open_flags &= ~BDRV_O_CACHE_WB;
    }
}

int bdrv_is_encrypted(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return 1;
    return bs->encrypted;
}

int bdrv_key_required(BlockDriverState *bs)
{
    BlockDriverState *backing_hd = bs->backing_hd;

    if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
        return 1;
    return (bs->encrypted && !bs->valid_key);
}

int bdrv_set_key(BlockDriverState *bs, const char *key)
{
    int ret;
    if (bs->backing_hd && bs->backing_hd->encrypted) {
        ret = bdrv_set_key(bs->backing_hd, key);
        if (ret < 0)
            return ret;
        if (!bs->encrypted)
            return 0;
    }
    if (!bs->encrypted) {
        return -EINVAL;
    } else if (!bs->drv || !bs->drv->bdrv_set_key) {
        return -ENOMEDIUM;
    }
    ret = bs->drv->bdrv_set_key(bs, key);
    if (ret < 0) {
        bs->valid_key = 0;
    } else if (!bs->valid_key) {
        bs->valid_key = 1;
        if (bs->blk) {
            /* call the change callback now, we skipped it on open */
            blk_dev_change_media_cb(bs->blk, true);
        }
    }
    return ret;
}

const char *bdrv_get_format_name(BlockDriverState *bs)
{
    return bs->drv ? bs->drv->format_name : NULL;
}
static int qsort_strcmp(const void *a, const void *b)
{
    /* a and b point at "const char *" elements of the formats array */
    return strcmp(*(const char *const *)a, *(const char *const *)b);
}

void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque)
{
    BlockDriver *drv;
    int count = 0;
    int i;
    const char **formats = NULL;

    QLIST_FOREACH(drv, &bdrv_drivers, list) {
        if (drv->format_name) {
            bool found = false;
            int i = count;
            while (formats && i && !found) {
                found = !strcmp(formats[--i], drv->format_name);
            }

            if (!found) {
                formats = g_renew(const char *, formats, count + 1);
                formats[count++] = drv->format_name;
            }
        }
    }

    qsort(formats, count, sizeof(formats[0]), qsort_strcmp);

    for (i = 0; i < count; i++) {
        it(opaque, formats[i]);
    }

    g_free(formats);
}
/* Find the BlockDriverState owned by the named block backend */
/* TODO convert callers to blk_by_name(), then remove */
BlockDriverState *bdrv_find(const char *name)
{
    BlockBackend *blk = blk_by_name(name);

    return blk ? blk_bs(blk) : NULL;
}

/* This function is to find a node in the bs graph */
BlockDriverState *bdrv_find_node(const char *node_name)
{
    BlockDriverState *bs;

    assert(node_name);

    QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
        if (!strcmp(node_name, bs->node_name)) {
            return bs;
        }
    }
    return NULL;
}

/* Put this QMP function here so it can access the static graph_bdrv_states. */
BlockDeviceInfoList *bdrv_named_nodes_list(void)
{
    BlockDeviceInfoList *list, *entry;
    BlockDriverState *bs;

    list = NULL;
    QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = bdrv_block_device_info(bs);
        entry->next = list;
        list = entry;
    }

    return list;
}

BlockDriverState *bdrv_lookup_bs(const char *device,
                                 const char *node_name,
                                 Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    if (device) {
        blk = blk_by_name(device);

        if (blk) {
            return blk_bs(blk);
        }
    }

    if (node_name) {
        bs = bdrv_find_node(node_name);

        if (bs) {
            return bs;
        }
    }

    error_setg(errp, "Cannot find device=%s nor node_name=%s",
                     device ? device : "",
                     node_name ? node_name : "");
    return NULL;
}
/* If 'base' is in the same chain as 'top', return true. Otherwise,
 * return false.  If either argument is NULL, return false. */
bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base)
{
    while (top && top != base) {
        top = top->backing_hd;
    }

    return top != NULL;
}

BlockDriverState *bdrv_next_node(BlockDriverState *bs)
{
    if (!bs) {
        return QTAILQ_FIRST(&graph_bdrv_states);
    }
    return QTAILQ_NEXT(bs, node_list);
}

BlockDriverState *bdrv_next(BlockDriverState *bs)
{
    if (!bs) {
        return QTAILQ_FIRST(&bdrv_states);
    }
    return QTAILQ_NEXT(bs, device_list);
}

const char *bdrv_get_node_name(const BlockDriverState *bs)
{
    return bs->node_name;
}

/* TODO check what callers really want: bs->node_name or blk_name() */
const char *bdrv_get_device_name(const BlockDriverState *bs)
{
    return bs->blk ? blk_name(bs->blk) : "";
}

int bdrv_get_flags(BlockDriverState *bs)
{
    return bs->open_flags;
}
int bdrv_flush_all(void)
{
    BlockDriverState *bs;
    int result = 0;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
}
int bdrv_has_zero_init_1(BlockDriverState *bs)
{
    return 1;
}

int bdrv_has_zero_init(BlockDriverState *bs)
{
    assert(bs->drv);

    /* If BS is a copy on write image, it is initialized to
       the contents of the base image, which may not be zeroes.  */
    if (bs->backing_hd) {
        return 0;
    }
    if (bs->drv->bdrv_has_zero_init) {
        return bs->drv->bdrv_has_zero_init(bs);
    }

    /* safe default */
    return 0;
}

bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
{
    BlockDriverInfo bdi;

    if (bs->backing_hd) {
        return false;
    }

    if (bdrv_get_info(bs, &bdi) == 0) {
        return bdi.unallocated_blocks_are_zero;
    }

    return false;
}

bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
{
    BlockDriverInfo bdi;

    if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
        return false;
    }

    if (bdrv_get_info(bs, &bdi) == 0) {
        return bdi.can_write_zeroes_with_unmap;
    }

    return false;
}
typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;

/*
 * Returns the allocation status of the specified sectors.
 * Drivers not implementing the functionality are assumed to not support
 * backing files, hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum)
{
    int64_t total_sectors;
    int64_t n;
    int64_t ret, ret2;

    total_sectors = bdrv_nb_sectors(bs);
    if (total_sectors < 0) {
        return total_sectors;
    }

    if (sector_num >= total_sectors) {
        *pnum = 0;
        return 0;
    }

    n = total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum);
    }

    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
        ret |= BDRV_BLOCK_ALLOCATED;
    }

    if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing_hd) {
            BlockDriverState *bs2 = bs->backing_hd;
            int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
            if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (bs->file &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        int file_pnum;

        ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, &file_pnum);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            if (!file_pnum) {
                /* !file_pnum indicates an offset at or beyond the EOF; it is
                 * perfectly valid for the format block driver to point to such
                 * offsets, so catch it and mark everything as zero */
                ret |= BDRV_BLOCK_ZERO;
            } else {
                /* Limit request to the range reported by the protocol driver */
                *pnum = file_pnum;
                ret |= (ret2 & BDRV_BLOCK_ZERO);
            }
        }
    }

    return ret;
}
/* Coroutine wrapper for bdrv_get_block_status() */
static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;
    BlockDriverState *bs = data->bs;

    data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
                                         data->pnum);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_get_block_status().
 *
 * See bdrv_co_get_block_status() for details.
 */
int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors, int *pnum)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_co_entry(&data);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
        qemu_coroutine_enter(co, &data);
        while (!data.done) {
            aio_poll(aio_context, true);
        }
    }
    return data.ret;
}
int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        return ret;
    }
    return !!(ret & BDRV_BLOCK_ALLOCATED);
}
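/*
 * Example (sketch): walking an image's allocation map in chunks:
 *
 *     int64_t sector = 0;
 *     int n;
 *
 *     while (sector < bdrv_nb_sectors(bs)) {
 *         int ret = bdrv_is_allocated(bs, sector, 1 << 16, &n);
 *         if (ret < 0) {
 *             break;
 *         }
 *         the 'n' sectors starting at 'sector' share the state 'ret'
 *         sector += n;
 *     }
 */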
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 *  the specified sector) that are known to be in the same
 *  allocated/unallocated state.
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = intermediate->backing_hd;
    }

    *pnum = n;
    return 0;
}
const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return bs->backing_file;
    else if (bs->encrypted)
        return bs->filename;
    else
        return NULL;
}

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size)
{
    pstrcpy(filename, filename_size, bs->backing_file);
}
int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_write_compressed)
        return -ENOTSUP;
    if (bdrv_check_request(bs, sector_num, nb_sectors))
        return -EIO;

    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_get_info)
        return -ENOTSUP;
    memset(bdi, 0, sizeof(*bdi));
    return drv->bdrv_get_info(bs, bdi);
}

ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_get_specific_info) {
        return drv->bdrv_get_specific_info(bs);
    }
    return NULL;
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_writev_vmstate(bs, &qiov, pos);
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_save_vmstate) {
        return drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_writev_vmstate(bs->file, qiov, pos);
    }

    return -ENOTSUP;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_load_vmstate)
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    if (bs->file)
        return bdrv_load_vmstate(bs->file, buf, pos, size);
    return -ENOTSUP;
}
void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
{
    if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
        return;
    }

    bs->drv->bdrv_debug_event(bs, event);
}

int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
        return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
    }

    return -ENOTSUP;
}

int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
        return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
    }

    return -ENOTSUP;
}

int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
{
    while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
        return bs->drv->bdrv_debug_resume(bs, tag);
    }

    return -ENOTSUP;
}

bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
        return bs->drv->bdrv_debug_is_suspended(bs, tag);
    }

    return false;
}

int bdrv_is_snapshot(BlockDriverState *bs)
{
    return !!(bs->open_flags & BDRV_O_SNAPSHOT);
}
/* backing_file can either be relative, or absolute, or a protocol.  If it is
 * relative, it must be relative to the chain.  So, passing in bs->filename
 * from a BDS as backing_file should not be done, as that may be relative to
 * the CWD rather than the chain. */
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
        const char *backing_file)
{
    char *filename_full = NULL;
    char *backing_file_full = NULL;
    char *filename_tmp = NULL;
    int is_protocol = 0;
    BlockDriverState *curr_bs = NULL;
    BlockDriverState *retval = NULL;

    if (!bs || !bs->drv || !backing_file) {
        return NULL;
    }

    filename_full     = g_malloc(PATH_MAX);
    backing_file_full = g_malloc(PATH_MAX);
    filename_tmp      = g_malloc(PATH_MAX);

    is_protocol = path_has_protocol(backing_file);

    for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {

        /* If either of the filename paths is actually a protocol, then
         * compare unmodified paths; otherwise make paths relative */
        if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
            if (strcmp(backing_file, curr_bs->backing_file) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        } else {
            /* If not an absolute filename path, make it relative to the current
             * image's filename path */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         backing_file);

            /* We are going to compare absolute pathnames */
            if (!realpath(filename_tmp, filename_full)) {
                continue;
            }

            /* We need to make sure the backing filename we are comparing against
             * is relative to the current image filename (or absolute) */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         curr_bs->backing_file);

            if (!realpath(filename_tmp, backing_file_full)) {
                continue;
            }

            if (strcmp(backing_file_full, filename_full) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        }
    }

    g_free(filename_full);
    g_free(backing_file_full);
    g_free(filename_tmp);
    return retval;
}
int bdrv_get_backing_file_depth(BlockDriverState *bs)
{
    if (!bs->drv) {
        return 0;
    }

    if (!bs->backing_hd) {
        return 0;
    }

    return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
}
/**************************************************************/
/* async I/Os */

BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                           QEMUIOVector *qiov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, false);
}

BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                            QEMUIOVector *qiov, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, true);
}

BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
        BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
                                 BDRV_REQ_ZERO_WRITE | flags,
                                 cb, opaque, true);
}
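/*
 * Example (sketch): submitting an asynchronous read.  "my_read_done" and
 * "my_state" are hypothetical; the callback is invoked with ret == 0 on
 * success or a negative errno on failure:
 *
 *     static void my_read_done(void *opaque, int ret)
 *     {
 *         runs in the AioContext of the BDS once the read completes
 *     }
 *
 *     bdrv_aio_readv(bs, sector_num, &qiov, nb_sectors,
 *                    my_read_done, my_state);
 */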
typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}
static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}
static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}
/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
    int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
            reqs[i].nb_sectors > bs->bl.max_transfer_length) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We should not need to add any zeros between the two requests
            assert (reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            // Add tail of first request, if necessary
            if (qiov->size < reqs[outidx].qiov->size) {
                qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
                                  reqs[outidx].qiov->size - qiov->size);
            }

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov = reqs[i].qiov;
        }
    }

    return outidx + 1;
}
/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. In error case this function returns -1, and any of the
 * requests may or may not be submitted yet. In particular, this means that the
 * callback will be called for some of the requests, for others it won't. The
 * caller must check the error field of the BlockRequest to wait for the right
 * callbacks (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
                              reqs[i].nb_sectors, reqs[i].flags,
                              multiwrite_cb, mcb,
                              true);
    }

    return 0;
}
void bdrv_aio_cancel(BlockAIOCB *acb)
{
    qemu_aio_ref(acb);
    bdrv_aio_cancel_async(acb);
    while (acb->refcnt > 1) {
        if (acb->aiocb_info->get_aio_context) {
            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
        } else if (acb->bs) {
            aio_poll(bdrv_get_aio_context(acb->bs), true);
        } else {
            abort();
        }
    }
    qemu_aio_unref(acb);
}

/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request normally complete.
 * In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
    if (acb->aiocb_info->cancel_async) {
        acb->aiocb_info->cancel_async(acb);
    }
}
/**************************************************************/
/* async block device emulation */

typedef struct BlockAIOCBSync {
    BlockAIOCB common;
    QEMUBH *bh;
    int ret;
    /* vector translation state */
    QEMUIOVector *qiov;
    uint8_t *bounce;
    int is_write;
} BlockAIOCBSync;

static const AIOCBInfo bdrv_em_aiocb_info = {
    .aiocb_size         = sizeof(BlockAIOCBSync),
};

static void bdrv_aio_bh_cb(void *opaque)
{
    BlockAIOCBSync *acb = opaque;

    if (!acb->is_write && acb->ret >= 0) {
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    }
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_unref(acb);
}

static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                      int64_t sector_num,
                                      QEMUIOVector *qiov,
                                      int nb_sectors,
                                      BlockCompletionFunc *cb,
                                      void *opaque,
                                      int is_write)

{
    BlockAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    acb->bounce = qemu_try_blockalign(bs, qiov->size);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);

    if (acb->bounce == NULL) {
        acb->ret = -ENOMEM;
    } else if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    qemu_bh_schedule(acb->bh);

    return &acb->common;
}
static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}
typedef struct BlockAIOCBCoroutine {
    BlockAIOCB common;
    BlockRequest req;
    bool is_write;
    bool *done;
    QEMUBH *bh;
} BlockAIOCBCoroutine;

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockAIOCBCoroutine),
};

static void bdrv_co_em_bh(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;

    acb->common.cb(acb->common.opaque, acb->req.error);

    qemu_bh_delete(acb->bh);
    qemu_aio_unref(acb);
}

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    }

    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}
BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
void bdrv_init(void)
{
    module_call_init(MODULE_INIT_BLOCK);
}

void bdrv_init_with_whitelist(void)
{
    use_bdrv_whitelist = 1;
    bdrv_init();
}
void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *acb;

    acb = g_slice_alloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    acb->refcnt = 1;
    return acb;
}

void qemu_aio_ref(void *p)
{
    BlockAIOCB *acb = p;
    acb->refcnt++;
}

void qemu_aio_unref(void *p)
{
    BlockAIOCB *acb = p;
    assert(acb->refcnt > 0);
    if (--acb->refcnt == 0) {
        g_slice_free1(acb->aiocb_info->aiocb_size, acb);
    }
}
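/* Lifecycle sketch for the AIOCB helpers above: qemu_aio_get() hands out an
 * ACB with refcnt == 1, and the emulation code drops that reference from its
 * completion path.  A caller that must keep the ACB alive longer, e.g. to
 * cancel it later, takes its own reference (callback names hypothetical):
 *
 *     BlockAIOCB *acb = bdrv_aio_flush(bs, my_cb, my_state);
 *     qemu_aio_ref(acb);      // acb stays valid after my_cb has run
 *     ...
 *     qemu_aio_unref(acb);    // dropping the last ref frees the slice
 */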
/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}
static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}
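/* bdrv_co_io_em() shows the standard idiom for waiting on an AIO callback
 * from coroutine context: issue the request with bdrv_co_io_em_complete() as
 * its completion function, yield, and let the callback store the result and
 * re-enter the coroutine.  The same pattern reappears in bdrv_co_flush() and
 * bdrv_co_discard() below; in sketch form:
 *
 *     CoroutineIOCompletion co = { .coroutine = qemu_coroutine_self() };
 *     acb = drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
 *     if (acb) {
 *         qemu_coroutine_yield();   // resumed by bdrv_co_io_em_complete()
 *         ret = co.ret;
 *     }
 */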
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}
static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what they're doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        return ret;
    }

flush_parent:
    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
    return bdrv_co_flush(bs->file);
}
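/* Sketch of how a driver opts into the flush cascade above (driver and
 * handler names are hypothetical; only the flush-related hooks are shown).
 * bdrv_co_flush() prefers .bdrv_co_flush_to_disk and falls back to
 * .bdrv_aio_flush:
 *
 *     static BlockDriver bdrv_mydrv = {
 *         .bdrv_co_flush_to_os   = mydrv_flush_metadata, // runs even with
 *                                                        // cache=unsafe
 *         .bdrv_co_flush_to_disk = mydrv_fdatasync,      // skipped under
 *                                                        // BDRV_O_NO_FLUSH
 *     };
 */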
void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    Error *local_err = NULL;
    int ret;

    if (!bs->drv) {
        return;
    }

    if (!(bs->open_flags & BDRV_O_INCOMING)) {
        return;
    }
    bs->open_flags &= ~BDRV_O_INCOMING;

    if (bs->drv->bdrv_invalidate_cache) {
        bs->drv->bdrv_invalidate_cache(bs, &local_err);
    } else if (bs->file) {
        bdrv_invalidate_cache(bs->file, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        return;
    }
}
void bdrv_invalidate_cache_all(Error **errp)
{
    BlockDriverState *bs;
    Error *local_err = NULL;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_invalidate_cache(bs, &local_err);
        aio_context_release(aio_context);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}
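/* bdrv_flush() is an instance of the generic synchronous wrapper pattern used
 * throughout this file: call the coroutine entry point directly when already
 * in coroutine context, otherwise spawn a coroutine and spin aio_poll() until
 * the NOT_DONE sentinel is replaced by the real return value.  Hypothetical
 * caller:
 *
 *     int ret = bdrv_flush(bs);   // safe inside and outside coroutines
 *     if (ret < 0) {
 *         error_report("flush failed: %s", strerror(-ret));
 *     }
 */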
typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}
/* If no limit is specified in the BlockLimits, use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_DISCARD_DEFAULT 32768

int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    int max_discard;

    if (!bs->drv) {
        return -ENOMEDIUM;
    } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    } else if (bs->read_only) {
        return -EROFS;
    }

    bdrv_reset_dirty(bs, sector_num, nb_sectors);

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
    while (nb_sectors > 0) {
        int ret;
        int num = nb_sectors;

        /* align request */
        if (bs->bl.discard_alignment &&
            num >= bs->bl.discard_alignment &&
            sector_num % bs->bl.discard_alignment) {
            if (num > bs->bl.discard_alignment) {
                num = bs->bl.discard_alignment;
            }
            num -= sector_num % bs->bl.discard_alignment;
        }

        /* limit request size */
        if (num > max_discard) {
            num = max_discard;
        }

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
        } else {
            BlockAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                return -EIO;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            return ret;
        }

        sector_num += num;
        nb_sectors -= num;
    }
    return 0;
}
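/* Worked example for the alignment carving above, assuming
 * bs->bl.discard_alignment == 128 sectors: a request for 1000 sectors
 * starting at sector 200 first issues 128 - (200 % 128) = 56 sectors
 * (200..255), so that the next iteration starts at the aligned sector 256;
 * the remaining 944 sectors then proceed in aligned, max_discard-limited
 * chunks.
 */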
int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            aio_poll(aio_context, true);
        }
    }

    return rwco.ret;
}
/**************************************************************/
/* removable device support */

/**
 * Return TRUE if the media is present
 */
int bdrv_is_inserted(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return 0;
    }
    if (!drv->bdrv_is_inserted) {
        return 1;
    }
    return drv->bdrv_is_inserted(bs);
}
/**
 * Return whether the media changed since the last call to this
 * function, or -ENOTSUP if we don't know.  Most drivers don't know.
 */
int bdrv_media_changed(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_media_changed) {
        return drv->bdrv_media_changed(bs);
    }
    return -ENOTSUP;
}
/**
 * If eject_flag is TRUE, eject the media. Otherwise, close the tray.
 */
void bdrv_eject(BlockDriverState *bs, bool eject_flag)
{
    BlockDriver *drv = bs->drv;
    const char *device_name;

    if (drv && drv->bdrv_eject) {
        drv->bdrv_eject(bs, eject_flag);
    }

    device_name = bdrv_get_device_name(bs);
    if (device_name[0] != '\0') {
        qapi_event_send_device_tray_moved(device_name,
                                          eject_flag, &error_abort);
    }
}
/**
 * Lock or unlock the media (if it is locked, the user won't be able
 * to eject it manually).
 */
void bdrv_lock_medium(BlockDriverState *bs, bool locked)
{
    BlockDriver *drv = bs->drv;

    trace_bdrv_lock_medium(bs, locked);

    if (drv && drv->bdrv_lock_medium) {
        drv->bdrv_lock_medium(bs, locked);
    }
}
/* needed for generic scsi interface */

int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_ioctl) {
        return drv->bdrv_ioctl(bs, req, buf);
    }
    return -ENOTSUP;
}
BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockCompletionFunc *cb, void *opaque)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_aio_ioctl) {
        return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
    }
    return NULL;
}
void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
{
    bs->guest_block_size = align;
}
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    return memset(qemu_blockalign(bs, size), 0, size);
}
void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}
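/* Usage sketch for the alignment helpers above (buffer use is hypothetical):
 *
 *     void *buf = qemu_try_blockalign(bs, len);
 *     if (buf == NULL) {
 *         return -ENOMEM;         // qemu_blockalign() would abort() instead
 *     }
 *     ...
 *     qemu_vfree(buf);
 */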
/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_opt_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}
BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity,
                                          Error **errp)
{
    int64_t bitmap_size;
    BdrvDirtyBitmap *bitmap;

    assert((granularity & (granularity - 1)) == 0);

    granularity >>= BDRV_SECTOR_BITS;
    assert(granularity);
    bitmap_size = bdrv_nb_sectors(bs);
    if (bitmap_size < 0) {
        error_setg_errno(errp, -bitmap_size, "could not get length of device");
        errno = -bitmap_size;
        return NULL;
    }
    bitmap = g_new0(BdrvDirtyBitmap, 1);
    bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
    QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
    return bitmap;
}
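/* Granularity example for bdrv_create_dirty_bitmap(): a caller passing
 * granularity = 65536 bytes gets 65536 >> BDRV_SECTOR_BITS = 128 sectors,
 * and hbitmap_alloc() is told ffs(128) - 1 = 7, i.e. each bitmap bit covers
 * 2^7 = 128 sectors (64 KiB) of the device.
 */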
void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    BdrvDirtyBitmap *bm, *next;
    QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
        if (bm == bitmap) {
            QLIST_REMOVE(bitmap, list);
            hbitmap_free(bitmap->bitmap);
            g_free(bitmap);
            return;
        }
    }
}
BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
{
    BdrvDirtyBitmap *bm;
    BlockDirtyInfoList *list = NULL;
    BlockDirtyInfoList **plist = &list;

    QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
        BlockDirtyInfo *info = g_new0(BlockDirtyInfo, 1);
        BlockDirtyInfoList *entry = g_new0(BlockDirtyInfoList, 1);
        info->count = bdrv_get_dirty_count(bs, bm);
        info->granularity =
            ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
        entry->value = info;
        *plist = entry;
        plist = &entry->next;
    }

    return list;
}
int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
{
    if (bitmap) {
        return hbitmap_get(bitmap->bitmap, sector);
    } else {
        return 0;
    }
}

void bdrv_dirty_iter_init(BlockDriverState *bs,
                          BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
{
    hbitmap_iter_init(hbi, bitmap->bitmap, 0);
}
void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
                    int nr_sectors)
{
    BdrvDirtyBitmap *bitmap;
    QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
        hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
    }
}

void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
{
    BdrvDirtyBitmap *bitmap;
    QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
        hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
    }
}
int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    return hbitmap_count(bitmap->bitmap);
}
/* Get a reference to bs */
void bdrv_ref(BlockDriverState *bs)
{
    bs->refcnt++;
}

/* Release a previously grabbed reference to bs.
 * If after releasing, reference count is zero, the BlockDriverState is
 * deleted. */
void bdrv_unref(BlockDriverState *bs)
{
    if (!bs) {
        return;
    }
    assert(bs->refcnt > 0);
    if (--bs->refcnt == 0) {
        bdrv_delete(bs);
    }
}
struct BdrvOpBlocker {
    Error *reason;
    QLIST_ENTRY(BdrvOpBlocker) list;
};

bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp)
{
    BdrvOpBlocker *blocker;
    assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
    if (!QLIST_EMPTY(&bs->op_blockers[op])) {
        blocker = QLIST_FIRST(&bs->op_blockers[op]);
        if (errp) {
            error_setg(errp, "Device '%s' is busy: %s",
                       bdrv_get_device_name(bs),
                       error_get_pretty(blocker->reason));
        }
        return true;
    }
    return false;
}
void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason)
{
    BdrvOpBlocker *blocker;
    assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);

    blocker = g_new0(BdrvOpBlocker, 1);
    blocker->reason = reason;
    QLIST_INSERT_HEAD(&bs->op_blockers[op], blocker, list);
}

void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason)
{
    BdrvOpBlocker *blocker, *next;
    assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
    QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) {
        if (blocker->reason == reason) {
            QLIST_REMOVE(blocker, list);
            g_free(blocker);
        }
    }
}
void bdrv_op_block_all(BlockDriverState *bs, Error *reason)
{
    int i;
    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        bdrv_op_block(bs, i, reason);
    }
}

void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason)
{
    int i;
    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        bdrv_op_unblock(bs, i, reason);
    }
}

bool bdrv_op_blocker_is_empty(BlockDriverState *bs)
{
    int i;

    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        if (!QLIST_EMPTY(&bs->op_blockers[i])) {
            return false;
        }
    }
    return true;
}
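/* Usage sketch for the op blocker API above, mirroring how background jobs
 * protect a node (variable names are hypothetical):
 *
 *     Error *blocker = NULL;
 *     error_setg(&blocker, "Node is in use by my job");
 *     bdrv_op_block_all(bs, blocker);
 *     ...
 *     bdrv_op_unblock_all(bs, blocker);   // the Error pointer is the key
 *     error_free(blocker);
 */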
void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
{
    return (bs->iostatus_enabled &&
           (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            bs->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}
void bdrv_iostatus_reset(BlockDriverState *bs)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
{
    assert(bdrv_iostatus_is_enabled(bs));
    if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                         BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     Error **errp, bool quiet)
{
    QemuOptsList *create_opts = NULL;
    QemuOpts *opts = NULL;
    const char *backing_fmt, *backing_file;
    int64_t size;
    BlockDriver *drv, *proto_drv;
    BlockDriver *backing_drv = NULL;
    Error *local_err = NULL;
    int ret = 0;

    /* Find driver and parse its options */
    drv = bdrv_find_format(fmt);
    if (!drv) {
        error_setg(errp, "Unknown file format '%s'", fmt);
        return;
    }

    proto_drv = bdrv_find_protocol(filename, true);
    if (!proto_drv) {
        error_setg(errp, "Unknown protocol '%s'", filename);
        return;
    }

    if (!drv->create_opts) {
        error_setg(errp, "Format driver '%s' does not support image creation",
                   drv->format_name);
        return;
    }

    if (!proto_drv->create_opts) {
        error_setg(errp, "Protocol driver '%s' does not support image creation",
                   proto_drv->format_name);
        return;
    }

    create_opts = qemu_opts_append(create_opts, drv->create_opts);
    create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);

    /* Create parameter list with default values */
    opts = qemu_opts_create(create_opts, NULL, 0, &error_abort);
    qemu_opt_set_number(opts, BLOCK_OPT_SIZE, img_size);

    /* Parse -o options */
    if (options) {
        if (qemu_opts_do_parse(opts, options, NULL) != 0) {
            error_setg(errp, "Invalid options for file format '%s'", fmt);
            goto out;
        }
    }

    if (base_filename) {
        if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, base_filename)) {
            error_setg(errp, "Backing file not supported for file format '%s'",
                       fmt);
            goto out;
        }
    }

    if (base_fmt) {
        if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt)) {
            error_setg(errp, "Backing file format not supported for file "
                             "format '%s'", fmt);
            goto out;
        }
    }

    backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
    if (backing_file) {
        if (!strcmp(filename, backing_file)) {
            error_setg(errp, "Error: Trying to create an image with the "
                             "same filename as the backing file");
            goto out;
        }
    }

    backing_fmt = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
    if (backing_fmt) {
        backing_drv = bdrv_find_format(backing_fmt);
        if (!backing_drv) {
            error_setg(errp, "Unknown backing file format '%s'",
                       backing_fmt);
            goto out;
        }
    }

    // The size for the image must always be specified, with one exception:
    // If we are using a backing file, we can obtain the size from there
    size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
    if (size == -1) {
        if (backing_file) {
            BlockDriverState *bs;
            int back_flags;

            /* backing files always opened read-only */
            back_flags =
                flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

            bs = NULL;
            ret = bdrv_open(&bs, backing_file, NULL, NULL, back_flags,
                            backing_drv, &local_err);
            if (ret < 0) {
                goto out;
            }
            size = bdrv_getlength(bs);
            if (size < 0) {
                error_setg_errno(errp, -size, "Could not get size of '%s'",
                                 backing_file);
                bdrv_unref(bs);
                goto out;
            }

            qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size);

            bdrv_unref(bs);
        } else {
            error_setg(errp, "Image creation needs a size parameter");
            goto out;
        }
    }

    if (!quiet) {
        printf("Formatting '%s', fmt=%s", filename, fmt);
        qemu_opts_print(opts, " ");
        puts("");
    }

    ret = bdrv_create(drv, filename, opts, &local_err);

    if (ret == -EFBIG) {
        /* This is generally a better message than whatever the driver would
         * deliver (especially because of the cluster_size_hint), since that
         * is most probably not much different from "image too large". */
        const char *cluster_size_hint = "";
        if (qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 0)) {
            cluster_size_hint = " (try using a larger cluster size)";
        }
        error_setg(errp, "The image size is too large for file format '%s'"
                   "%s", fmt, cluster_size_hint);
        error_free(local_err);
        local_err = NULL;
    }

out:
    qemu_opts_del(opts);
    qemu_opts_free(create_opts);
    if (local_err) {
        error_propagate(errp, local_err);
    }
}
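/* Usage sketch for bdrv_img_create(), roughly what "qemu-img create -f qcow2
 * test.qcow2 64M" boils down to (values hypothetical):
 *
 *     Error *err = NULL;
 *     bdrv_img_create("test.qcow2", "qcow2", NULL, NULL, NULL,
 *                     64 * 1024 * 1024, 0, &err, true);
 *     if (err) {
 *         error_report("%s", error_get_pretty(err));
 *         error_free(err);
 *     }
 */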
AioContext *bdrv_get_aio_context(BlockDriverState *bs)
{
    return bs->aio_context;
}
void bdrv_detach_aio_context(BlockDriverState *bs)
{
    BdrvAioNotifier *baf;

    if (!bs->drv) {
        return;
    }

    QLIST_FOREACH(baf, &bs->aio_notifiers, list) {
        baf->detach_aio_context(baf->opaque);
    }

    if (bs->io_limits_enabled) {
        throttle_detach_aio_context(&bs->throttle_state);
    }
    if (bs->drv->bdrv_detach_aio_context) {
        bs->drv->bdrv_detach_aio_context(bs);
    }
    if (bs->file) {
        bdrv_detach_aio_context(bs->file);
    }
    if (bs->backing_hd) {
        bdrv_detach_aio_context(bs->backing_hd);
    }

    bs->aio_context = NULL;
}
void bdrv_attach_aio_context(BlockDriverState *bs,
                             AioContext *new_context)
{
    BdrvAioNotifier *ban;

    if (!bs->drv) {
        return;
    }

    bs->aio_context = new_context;

    if (bs->backing_hd) {
        bdrv_attach_aio_context(bs->backing_hd, new_context);
    }
    if (bs->file) {
        bdrv_attach_aio_context(bs->file, new_context);
    }
    if (bs->drv->bdrv_attach_aio_context) {
        bs->drv->bdrv_attach_aio_context(bs, new_context);
    }
    if (bs->io_limits_enabled) {
        throttle_attach_aio_context(&bs->throttle_state, new_context);
    }

    QLIST_FOREACH(ban, &bs->aio_notifiers, list) {
        ban->attached_aio_context(new_context, ban->opaque);
    }
}
void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
{
    bdrv_drain_all(); /* ensure there are no in-flight requests */

    bdrv_detach_aio_context(bs);

    /* This function executes in the old AioContext so acquire the new one in
     * case it runs in a different thread.
     */
    aio_context_acquire(new_context);
    bdrv_attach_aio_context(bs, new_context);
    aio_context_release(new_context);
}
void bdrv_add_aio_context_notifier(BlockDriverState *bs,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    BdrvAioNotifier *ban = g_new(BdrvAioNotifier, 1);
    *ban = (BdrvAioNotifier){
        .attached_aio_context = attached_aio_context,
        .detach_aio_context   = detach_aio_context,
        .opaque               = opaque
    };

    QLIST_INSERT_HEAD(&bs->aio_notifiers, ban, list);
}
void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
                                      void (*attached_aio_context)(AioContext *,
                                                                   void *),
                                      void (*detach_aio_context)(void *),
                                      void *opaque)
{
    BdrvAioNotifier *ban, *ban_next;

    QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) {
        if (ban->attached_aio_context == attached_aio_context &&
            ban->detach_aio_context   == detach_aio_context   &&
            ban->opaque               == opaque)
        {
            QLIST_REMOVE(ban, list);
            g_free(ban);

            return;
        }
    }

    abort();
}
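/* Usage sketch for the notifier pair above: a user that caches the BDS's
 * AioContext registers callbacks so the cache is torn down and rebuilt
 * across bdrv_set_aio_context() (handler names are hypothetical):
 *
 *     bdrv_add_aio_context_notifier(bs, my_attached_ctx, my_detach_ctx, dev);
 *     ...
 *     bdrv_remove_aio_context_notifier(bs, my_attached_ctx, my_detach_ctx,
 *                                      dev);
 *     // removal must pass the exact same function/opaque triple, otherwise
 *     // the abort() above fires
 */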
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts,
                       BlockDriverAmendStatusCB *status_cb)
{
    if (!bs->drv->bdrv_amend_options) {
        return -ENOTSUP;
    }
    return bs->drv->bdrv_amend_options(bs, opts, status_cb);
}
/* This function will be called by the bdrv_recurse_is_first_non_filter method
 * of block filters and by bdrv_is_first_non_filter.
 * It is used to test if the given bs is the candidate or to recurse further
 * into the node graph.
 */
bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
                                      BlockDriverState *candidate)
{
    /* return false if basic checks fail */
    if (!bs || !bs->drv) {
        return false;
    }

    /* the code reached a non block filter driver -> check if the bs is
     * the same as the candidate. It's the recursion termination condition.
     */
    if (!bs->drv->is_filter) {
        return bs == candidate;
    }
    /* Down this path the driver is a block filter driver */

    /* If the block filter recursion method is defined use it to recurse down
     * the node graph.
     */
    if (bs->drv->bdrv_recurse_is_first_non_filter) {
        return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
    }

    /* the driver is a block filter but doesn't allow recursion -> return
     * false
     */
    return false;
}
/* This function checks whether the candidate is the first non-filter bs down
 * its bs chain. Since we don't have pointers to parents, it explores all bs
 * chains from the top. Some filters can choose not to pass down the recursion.
 */
bool bdrv_is_first_non_filter(BlockDriverState *candidate)
{
    BlockDriverState *bs;

    /* walk down the bs forest recursively */
    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bool perm;

        /* try to recurse in this top level bs */
        perm = bdrv_recurse_is_first_non_filter(bs, candidate);

        /* candidate is the first non filter */
        if (perm) {
            return true;
        }
    }

    return false;
}
BlockDriverState *check_to_replace_node(const char *node_name, Error **errp)
{
    BlockDriverState *to_replace_bs = bdrv_find_node(node_name);
    AioContext *aio_context;

    if (!to_replace_bs) {
        error_setg(errp, "Node name '%s' not found", node_name);
        return NULL;
    }

    aio_context = bdrv_get_aio_context(to_replace_bs);
    aio_context_acquire(aio_context);

    if (bdrv_op_is_blocked(to_replace_bs, BLOCK_OP_TYPE_REPLACE, errp)) {
        to_replace_bs = NULL;
        goto out;
    }

    /* We don't want an arbitrary node of the BDS chain to be replaced, only
     * the topmost non-filter, in order to prevent data corruption.
     * Another benefit is that this test excludes backing files which are
     * blocked by the backing blockers.
     */
    if (!bdrv_is_first_non_filter(to_replace_bs)) {
        error_setg(errp, "Only top most non filter can be replaced");
        to_replace_bs = NULL;
        goto out;
    }

out:
    aio_context_release(aio_context);
    return to_replace_bs;
}
void bdrv_io_plug(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_io_plug) {
        drv->bdrv_io_plug(bs);
    } else if (bs->file) {
        bdrv_io_plug(bs->file);
    }
}

void bdrv_io_unplug(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_io_unplug) {
        drv->bdrv_io_unplug(bs);
    } else if (bs->file) {
        bdrv_io_unplug(bs->file);
    }
}

void bdrv_flush_io_queue(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_flush_io_queue) {
        drv->bdrv_flush_io_queue(bs);
    } else if (bs->file) {
        bdrv_flush_io_queue(bs->file);
    }
}
static bool append_open_options(QDict *d, BlockDriverState *bs)
{
    const QDictEntry *entry;
    bool found_any = false;

    for (entry = qdict_first(bs->options); entry;
         entry = qdict_next(bs->options, entry))
    {
        /* Only take options for this level and exclude all non-driver-specific
         * options */
        if (!strchr(qdict_entry_key(entry), '.') &&
            strcmp(qdict_entry_key(entry), "node-name"))
        {
            qobject_incref(qdict_entry_value(entry));
            qdict_put_obj(d, qdict_entry_key(entry), qdict_entry_value(entry));
            found_any = true;
        }
    }

    return found_any;
}
/* Updates the following BDS fields:
 *  - exact_filename: A filename which may be used for opening a block device
 *                    which (mostly) equals the given BDS (even without any
 *                    other options; so reading and writing must return the same
 *                    results, but caching etc. may be different)
 *  - full_open_options: Options which, when given when opening a block device
 *                       (without a filename), result in a BDS (mostly)
 *                       equalling the given one
 *  - filename: If exact_filename is set, it is copied here. Otherwise,
 *              full_open_options is converted to a JSON object, prefixed with
 *              "json:" (for use through the JSON pseudo protocol) and put here.
 */
void bdrv_refresh_filename(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    QDict *opts;

    if (!drv) {
        return;
    }

    /* This BDS's file name will most probably depend on its file's name, so
     * refresh that first */
    if (bs->file) {
        bdrv_refresh_filename(bs->file);
    }

    if (drv->bdrv_refresh_filename) {
        /* Obsolete information is of no use here, so drop the old file name
         * information before refreshing it */
        bs->exact_filename[0] = '\0';
        if (bs->full_open_options) {
            QDECREF(bs->full_open_options);
            bs->full_open_options = NULL;
        }

        drv->bdrv_refresh_filename(bs);
    } else if (bs->file) {
        /* Try to reconstruct valid information from the underlying file */
        bool has_open_options;

        bs->exact_filename[0] = '\0';
        if (bs->full_open_options) {
            QDECREF(bs->full_open_options);
            bs->full_open_options = NULL;
        }

        opts = qdict_new();
        has_open_options = append_open_options(opts, bs);

        /* If no specific options have been given for this BDS, the filename of
         * the underlying file should suffice for this one as well */
        if (bs->file->exact_filename[0] && !has_open_options) {
            strcpy(bs->exact_filename, bs->file->exact_filename);
        }
        /* Reconstructing the full options QDict is simple for most format block
         * drivers, as long as the full options are known for the underlying
         * file BDS. The full options QDict of that file BDS should somehow
         * contain a representation of the filename, therefore the following
         * suffices without querying the (exact_)filename of this BDS. */
        if (bs->file->full_open_options) {
            qdict_put_obj(opts, "driver",
                          QOBJECT(qstring_from_str(drv->format_name)));
            QINCREF(bs->file->full_open_options);
            qdict_put_obj(opts, "file", QOBJECT(bs->file->full_open_options));

            bs->full_open_options = opts;
        } else {
            QDECREF(opts);
        }
    } else if (!bs->full_open_options && qdict_size(bs->options)) {
        /* There is no underlying file BDS (at least referenced by BDS.file),
         * so the full options QDict should be equal to the options given
         * specifically for this block device when it was opened (plus the
         * driver specification).
         * Because those options don't change, there is no need to update
         * full_open_options when it's already set. */

        opts = qdict_new();
        append_open_options(opts, bs);
        qdict_put_obj(opts, "driver",
                      QOBJECT(qstring_from_str(drv->format_name)));

        if (bs->exact_filename[0]) {
            /* This may not work for all block protocol drivers (some may
             * require this filename to be parsed), but we have to find some
             * default solution here, so just include it. If some block driver
             * does not support pure options without any filename at all or
             * needs some special format of the options QDict, it needs to
             * implement the driver-specific bdrv_refresh_filename() function.
             */
            qdict_put_obj(opts, "filename",
                          QOBJECT(qstring_from_str(bs->exact_filename)));
        }

        bs->full_open_options = opts;
    }

    if (bs->exact_filename[0]) {
        pstrcpy(bs->filename, sizeof(bs->filename), bs->exact_filename);
    } else if (bs->full_open_options) {
        QString *json = qobject_to_json(QOBJECT(bs->full_open_options));
        snprintf(bs->filename, sizeof(bs->filename), "json:%s",
                 qstring_get_str(json));
        QDECREF(json);
    }
}
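/* Example of the json: fallback above: a format BDS on top of a protocol
 * that has no plain filename ends up with something like (layout
 * approximate):
 *
 *     json:{"driver": "qcow2", "file": {"driver": "nbd", ...}}
 *
 * in bs->filename, which bdrv_open() can consume again through the JSON
 * pseudo protocol.
 */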
/* This accessor function's purpose is to allow the device models to access the
 * BlockAcctStats structure embedded inside a BlockDriverState without being
 * aware of the BlockDriverState structure layout.
 * It will go away when the BlockAcctStats structure is moved inside
 * the device models.
 */
BlockAcctStats *bdrv_get_stats(BlockDriverState *bs)
{
    return &bs->stats;
}