/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor/monitor.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "qmp-commands.h"
#include "qemu/timer.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#endif

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;
#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif
/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}
/* This function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}
void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}
static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}
/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}
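
/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * caller configuring throttling enables limits first, then applies a
 * ThrottleConfig (bucket/field names are assumptions about qemu/throttle.h):
 *
 *     ThrottleConfig cfg;
 *     memset(&cfg, 0, sizeof(cfg));
 *     cfg.buckets[THROTTLE_OPS_TOTAL].avg = 100;   // assumed field names
 *     bdrv_io_limits_enable(bs);
 *     bdrv_set_io_limits(bs, &cfg);
 */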
/* This function makes an I/O wait if needed
 *
 * @nb_sectors: the number of sectors of the I/O
 * @is_write:   is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     int nb_sectors,
                                     bool is_write)
{
    /* must this I/O wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is already throttled,
     * queue the I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state,
                     is_write,
                     nb_sectors * BDRV_SECTOR_SIZE);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}
/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}
int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}
/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
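
/*
 * Illustrative example (not in the original source), following directly
 * from the function's semantics:
 *
 *     char dest[PATH_MAX];
 *     path_combine(dest, sizeof(dest), "/vm/images/disk.qcow2", "base.qcow2");
 *     // dest is now "/vm/images/base.qcow2"; an absolute filename such as
 *     // "/tmp/base.qcow2" would have been copied to dest unchanged
 */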
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}
void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name)
{
    BlockDriverState *bs;

    bs = g_malloc0(sizeof(BlockDriverState));
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;

    return bs;
}
void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}
BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}
static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}
BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}
typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QEMUOptionParameter *options;
    int ret;
    Error *err;
} CreateCo;
static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->options, &local_err);
    if (error_is_set(&local_err)) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}
int bdrv_create(BlockDriver *drv, const char* filename,
                QEMUOptionParameter *options, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .options = options,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (error_is_set(&cco.err)) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}
int bdrv_create_file(const char* filename, QEMUOptionParameter *options,
                     Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, options, &local_err);
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
    return ret;
}
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater.  */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir)
        tmpdir = "/tmp";
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
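
/*
 * Illustrative usage (not in the original source):
 *
 *     char tmp[PATH_MAX + 1];
 *     if (get_tmp_filename(tmp, sizeof(tmp)) < 0) {
 *         // handle failure; the return value is a negative errno
 *     }
 *     // tmp now names an empty, uniquely-named temporary file
 */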
/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}
BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return bdrv_find_format("file");
    }

    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}
static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    int score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        drv = bdrv_find_format("raw");
        if (!drv) {
            error_setg(errp, "Could not find raw image format");
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}
/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}
/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}
/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
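
/*
 * Summary of the mode-to-flags mapping implemented above (derived directly
 * from the code, shown here for reference):
 *
 *   mode           NOCACHE  CACHE_WB  NO_FLUSH
 *   off/none         yes      yes       no
 *   directsync       yes      no        no
 *   writeback        no       yes       no
 *   unsafe           no       yes       yes
 *   writethrough     no       no        no    (the default)
 */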
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
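
/*
 * Illustrative sketch (not in the original source): because the flag is a
 * reference count, independent users compose safely:
 *
 *     bdrv_enable_copy_on_read(bs);   // user A
 *     bdrv_enable_copy_on_read(bs);   // user B
 *     bdrv_disable_copy_on_read(bs);  // user A done; still enabled
 *     bdrv_disable_copy_on_read(bs);  // user B done; now disabled
 */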
static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (bs->is_temporary) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}
/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    /* bdrv_open() was called with a protocol used directly as drv. This layer
     * is already opened, so assign it to bs (while file becomes a closed
     * BlockDriverState) and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->buffer_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                        ? "Driver '%s' can only be used for read-only devices"
                        : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (error_is_set(&local_err)) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

#ifndef _WIN32
    if (bs->is_temporary) {
        assert(bs->filename[0] != '\0');
        unlink(bs->filename);
    }
#endif
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}
/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_file_open.
 */
int bdrv_file_open(BlockDriverState **pbs, const char *filename,
                   QDict *options, int flags, Error **errp)
{
    BlockDriverState *bs;
    BlockDriver *drv;
    const char *drvname;
    bool allow_protocol_prefix = false;
    Error *local_err = NULL;
    int ret;

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs = bdrv_new("");
    bs->options = options;
    options = qdict_clone_shallow(options);

    /* Fetch the file name from the options QDict if necessary */
    if (!filename) {
        filename = qdict_get_try_str(options, "filename");
    } else if (filename && !qdict_haskey(options, "filename")) {
        qdict_put(options, "filename", qstring_from_str(filename));
        allow_protocol_prefix = true;
    } else {
        error_setg(errp, "Can't specify 'file' and 'filename' options at the "
                   "same time");
        ret = -EINVAL;
        goto fail;
    }

    /* Find the right block driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        if (!drv) {
            error_setg(errp, "Unknown driver '%s'", drvname);
        }
        qdict_del(options, "driver");
    } else if (filename) {
        drv = bdrv_find_protocol(filename, allow_protocol_prefix);
        if (!drv) {
            error_setg(errp, "Unknown protocol");
        }
    } else {
        error_setg(errp, "Must specify either driver or file");
        drv = NULL;
    }

    if (!drv) {
        /* errp has been set already */
        ret = -ENOENT;
        goto fail;
    }

    /* Parse the filename and open it */
    if (drv->bdrv_parse_filename && filename) {
        drv->bdrv_parse_filename(filename, options, &local_err);
        if (error_is_set(&local_err)) {
            error_propagate(errp, local_err);
            ret = -EINVAL;
            goto fail;
        }
        qdict_del(options, "filename");
    } else if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        ret = -EINVAL;
        goto fail;
    }

    ret = bdrv_open_common(bs, NULL, options, flags, drv, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto fail;
    }

    /* Check if any unknown options were used */
    if (qdict_size(options) != 0) {
        const QDictEntry *entry = qdict_first(options);
        error_setg(errp, "Block protocol '%s' doesn't support the option '%s'",
                   drv->format_name, entry->key);
        ret = -EINVAL;
        goto fail;
    }
    QDECREF(options);

    bs->growable = 1;
    *pbs = bs;
    return 0;

fail:
    QDECREF(options);
    if (!bs->drv) {
        QDECREF(bs->options);
    }
    bdrv_unref(bs);
    return ret;
}
/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the dictionary,
 * it needs to use QINCREF() before calling bdrv_open_backing_file.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char backing_filename[PATH_MAX];
    int back_flags, ret;
    BlockDriver *back_drv = NULL;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        return 0;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        return 0;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename,
                                       sizeof(backing_filename));
    }

    bs->backing_hd = bdrv_new("");

    if (bs->backing_format[0] != '\0') {
        back_drv = bdrv_find_format(bs->backing_format);
    }

    /* backing files always opened read-only */
    back_flags = bs->open_flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT |
                                    BDRV_O_COPY_ON_READ);

    ret = bdrv_open(bs->backing_hd,
                    *backing_filename ? backing_filename : NULL, options,
                    back_flags, back_drv, &local_err);
    if (ret < 0) {
        bdrv_unref(bs->backing_hd);
        bs->backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        return ret;
    }
    pstrcpy(bs->backing_file, sizeof(bs->backing_file),
            bs->backing_hd->file->filename);
    return 0;
}
/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 */
int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options,
              int flags, BlockDriver *drv, Error **errp)
{
    int ret;
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char tmp_filename[PATH_MAX + 1];
    BlockDriverState *file = NULL;
    QDict *file_options = NULL;
    const char *drvname;
    Error *local_err = NULL;

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    /* For snapshot=on, create a temporary qcow2 overlay */
    if (flags & BDRV_O_SNAPSHOT) {
        BlockDriverState *bs1;
        int64_t total_size;
        BlockDriver *bdrv_qcow2;
        QEMUOptionParameter *create_options;
        char backing_filename[PATH_MAX];

        if (qdict_size(options) != 0) {
            error_setg(errp, "Can't use snapshot=on with driver-specific options");
            ret = -EINVAL;
            goto fail;
        }
        assert(filename != NULL);

        /* if snapshot, we create a temporary backing file and open it
           instead of opening 'filename' directly */

        /* if there is a backing file, use it */
        bs1 = bdrv_new("");
        ret = bdrv_open(bs1, filename, NULL, 0, drv, &local_err);
        if (ret < 0) {
            bdrv_unref(bs1);
            goto fail;
        }
        total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;

        bdrv_unref(bs1);

        ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not get temporary filename");
            goto fail;
        }

        /* Real path is meaningless for protocols */
        if (path_has_protocol(filename)) {
            snprintf(backing_filename, sizeof(backing_filename),
                     "%s", filename);
        } else if (!realpath(filename, backing_filename)) {
            ret = -errno;
            error_setg_errno(errp, errno, "Could not resolve path '%s'", filename);
            goto fail;
        }

        bdrv_qcow2 = bdrv_find_format("qcow2");
        create_options = parse_option_parameters("", bdrv_qcow2->create_options,
                                                 NULL);

        set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size);
        set_option_parameter(create_options, BLOCK_OPT_BACKING_FILE,
                             backing_filename);
        if (drv) {
            set_option_parameter(create_options, BLOCK_OPT_BACKING_FMT,
                                 drv->format_name);
        }

        ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options, &local_err);
        free_option_parameters(create_options);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not create temporary overlay "
                             "'%s': %s", tmp_filename,
                             error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
            goto fail;
        }

        filename = tmp_filename;
        drv = bdrv_qcow2;
        bs->is_temporary = 1;
    }

    /* Open image file without format layer */
    if (flags & BDRV_O_RDWR) {
        flags |= BDRV_O_ALLOW_RDWR;
    }

    qdict_extract_subqdict(options, &file_options, "file.");

    ret = bdrv_file_open(&file, filename, file_options,
                         bdrv_open_flags(bs, flags | BDRV_O_UNMAP), &local_err);
    if (ret < 0) {
        goto fail;
    }

    /* Find the right image format driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
        if (!drv) {
            error_setg(errp, "Invalid driver: '%s'", drvname);
            ret = -EINVAL;
            goto unlink_and_fail;
        }
    }

    if (!drv) {
        ret = find_image_format(file, filename, &drv, &local_err);
    }

    if (!drv) {
        goto unlink_and_fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto unlink_and_fail;
    }

    if (bs->file != file) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    /* Check if any unknown options were used */
    if (qdict_size(options) != 0) {
        const QDictEntry *entry = qdict_first(options);
        error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                   "support the option '%s'", drv->format_name, bs->device_name,
                   entry->key);
        ret = -EINVAL;
        goto close_and_fail;
    }
    QDECREF(options);

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    }

    return 0;

unlink_and_fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    if (bs->is_temporary) {
        unlink(filename);
    }
fail:
    QDECREF(bs->options);
    bs->options = NULL;
    QDECREF(options);
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    bdrv_close(bs);
    QDECREF(options);
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
    return ret;
}
typedef struct BlockReopenQueueEntry {
     bool prepared;
     BDRVReopenState state;
     QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;
/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had QSIMPLE_INIT
 * already performed, or alternatively may be NULL, in which case a new
 * BlockReopenQueue will be created and initialized. This newly created
 * BlockReopenQueue should be passed back in for subsequent calls that are
 * intended to be of the same atomic set.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, flags);
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}
/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags.  All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 *
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    bdrv_drain_all();

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

    ret = 0;

cleanup:
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}
/* Reopen a single BlockDriverState with the specified flags. */
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);

    ret = bdrv_reopen_multiple(queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
    return ret;
}
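
/*
 * Illustrative usage (not in the original source): toggling a device to
 * read-write and back with the transactional queue API described above.
 * If any device in the queue fails to prepare, all prepared entries are
 * rolled back:
 *
 *     Error *err = NULL;
 *     BlockReopenQueue *q = bdrv_reopen_queue(NULL, bs,
 *                                             bs->open_flags | BDRV_O_RDWR);
 *     if (bdrv_reopen_multiple(q, &err) < 0) {
 *         // nothing was committed; report err
 *     }
 */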
/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver layer .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error.  On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state
 *
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  reopen_state->bs->device_name);
        goto error;
    }

    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_setg(errp, "failed while preparing to reopen image '%s'",
                           reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, reopen_state->bs->device_name,
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}
/*
 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
 * makes them final by swapping the staging BlockDriverState contents into
 * the active BlockDriverState contents.
 */
void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    /* If there are any driver level actions to take */
    if (drv->bdrv_reopen_commit) {
        drv->bdrv_reopen_commit(reopen_state);
    }

    /* set BDS specific flags now */
    reopen_state->bs->open_flags         = reopen_state->flags;
    reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
                                              BDRV_O_CACHE_WB);
    reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
}
/*
 * Abort the reopen, and delete and free the staged changes in
 * reopen_state
 */
void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    if (drv->bdrv_reopen_abort) {
        drv->bdrv_reopen_abort(reopen_state);
    }
}
void bdrv_close(BlockDriverState *bs)
{
    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all(); /* complete I/O */
    bdrv_flush(bs);
    bdrv_drain_all(); /* in case flush left pending I/O */
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        if (bs->backing_hd) {
            bdrv_unref(bs->backing_hd);
            bs->backing_hd = NULL;
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
#ifdef _WIN32
        if (bs->is_temporary) {
            unlink(bs->filename);
        }
#endif
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->growable = 0;
        bs->zero_beyond_eof = false;
        QDECREF(bs->options);
        bs->options = NULL;

        if (bs->file != NULL) {
            bdrv_unref(bs->file);
            bs->file = NULL;
        }
    }

    bdrv_dev_change_media_cb(bs, false);

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}
void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_close(bs);
    }
}
/* Check if any requests are in-flight (including throttled requests) */
static bool bdrv_requests_pending(BlockDriverState *bs)
{
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }
    if (bs->file && bdrv_requests_pending(bs->file)) {
        return true;
    }
    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
        return true;
    }
    return false;
}
static bool bdrv_requests_pending_all(void)
{
    BlockDriverState *bs;
    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        if (bdrv_requests_pending(bs)) {
            return true;
        }
    }
    return false;
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete.  Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;

    while (busy) {
        /* FIXME: We do not have timer support here, so this is effectively
         * a busy wait.
         */
        QTAILQ_FOREACH(bs, &bdrv_states, list) {
            if (bdrv_start_throttled_reqs(bs)) {
                busy = true;
            }
        }

        busy = bdrv_requests_pending_all();
        busy |= aio_poll(qemu_get_aio_context(), busy);
    }
}
/* make a BlockDriverState anonymous by removing it from the bdrv_states list.
   Also, NUL-terminate the device_name to prevent double remove */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, list);
    }
    bs->device_name[0] = '\0';
}
static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}
static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */
    bs_dest->open_flags         = bs_src->open_flags;

    /* dev info */
    bs_dest->dev_ops            = bs_src->dev_ops;
    bs_dest->dev_opaque         = bs_src->dev_opaque;
    bs_dest->dev                = bs_src->dev;
    bs_dest->buffer_alignment   = bs_src->buffer_alignment;
    bs_dest->copy_on_read       = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o throttled req */
    memcpy(&bs_dest->throttle_state,
           &bs_src->throttle_state,
           sizeof(ThrottleState));
    bs_dest->throttled_reqs[0]  = bs_src->throttled_reqs[0];
    bs_dest->throttled_reqs[1]  = bs_src->throttled_reqs[1];
    bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error      = bs_src->on_read_error;
    bs_dest->on_write_error     = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
    bs_dest->iostatus           = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_bitmap       = bs_src->dirty_bitmap;

    /* reference count */
    bs_dest->refcnt             = bs_src->refcnt;

    /* job */
    bs_dest->in_use             = bs_src->in_use;
    bs_dest->job                = bs_src->job;

    /* keep the same entry in bdrv_states */
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->list = bs_src->list;
}
/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old. Both bs_new and bs_old are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* bs_new must be anonymous and shouldn't have anything fancy enabled */
    assert(bs_new->device_name[0] == '\0');
    assert(bs_new->dirty_bitmap == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->dev == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new shouldn't be in bdrv_states even after the swap! */
    assert(bs_new->device_name[0] == '\0');

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->dev == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}
/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    bdrv_swap(bs_new, bs_top);

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    bs_top->backing_hd = bs_new;
    bs_top->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file),
            bs_new->filename);
    pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format),
            bs_new->drv ? bs_new->drv->format_name : "");
}
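
/*
 * Illustrative sketch (not in the original source): a hypothetical caller
 * installing a freshly created overlay on top of the active layer, in the
 * spirit of a live external snapshot:
 *
 *     BlockDriverState *overlay = bdrv_new("");   // anonymous, as required
 *     // ... open 'overlay' with the active image as its backing file ...
 *     bdrv_append(overlay, active);
 *     // 'active' now presents the overlay's contents, and its backing_hd
 *     // points at the previous top of the chain
 */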
static void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->dev);
    assert(!bs->job);
    assert(!bs->in_use);
    assert(!bs->refcnt);

    bdrv_close(bs);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    g_free(bs);
}
int bdrv_attach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (bs->dev) {
        return -EBUSY;
    }
    bs->dev = dev;
    bdrv_iostatus_reset(bs);
    return 0;
}
/* TODO qdevified devices don't use this, remove when devices are qdevified */
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
{
    if (bdrv_attach_dev(bs, dev) < 0) {
        abort();
    }
}
void bdrv_detach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(bs->dev == dev);
    bs->dev = NULL;
    bs->dev_ops = NULL;
    bs->dev_opaque = NULL;
    bs->buffer_alignment = 512;
}
/* TODO change to return DeviceState * when all users are qdevified */
void *bdrv_get_attached_dev(BlockDriverState *bs)
{
    return bs->dev;
}
void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque)
{
    bs->dev_ops = ops;
    bs->dev_opaque = opaque;
}
void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
                               enum MonitorEvent ev,
                               BlockErrorAction action, bool is_read)
{
    QObject *data;
    const char *action_str;

    switch (action) {
    case BDRV_ACTION_REPORT:
        action_str = "report";
        break;
    case BDRV_ACTION_IGNORE:
        action_str = "ignore";
        break;
    case BDRV_ACTION_STOP:
        action_str = "stop";
        break;
    default:
        abort();
    }

    data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
                              bdrv->device_name,
                              action_str,
                              is_read ? "read" : "write");
    monitor_protocol_event(ev, data);

    qobject_decref(data);
}
static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
{
    QObject *data;

    data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
                              bdrv_get_device_name(bs), ejected);
    monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);

    qobject_decref(data);
}
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
{
    if (bs->dev_ops && bs->dev_ops->change_media_cb) {
        bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
        bs->dev_ops->change_media_cb(bs->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            bdrv_emit_qmp_eject_event(bs, true);
        }
        if (load) {
            /* tray close */
            bdrv_emit_qmp_eject_event(bs, false);
        }
    }
}
bool bdrv_dev_has_removable_media(BlockDriverState *bs)
{
    return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
}
void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
{
    if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
        bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
    }
}
bool bdrv_dev_is_tray_open(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_tray_open) {
        return bs->dev_ops->is_tray_open(bs->dev_opaque);
    }
    return false;
}
static void bdrv_dev_resize_cb(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->resize_cb) {
        bs->dev_ops->resize_cb(bs->dev_opaque);
    }
}
bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
        return bs->dev_ops->is_medium_locked(bs->dev_opaque);
    }
    return false;
}
/*
 * Run consistency checks on an image
 *
 * Returns 0 if the check could be completed (it doesn't mean that the image is
 * free of errors) or -errno when an internal error occurred. The results of the
 * check are stored in res.
 */
int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
{
    if (bs->drv->bdrv_check == NULL) {
        return -ENOTSUP;
    }

    memset(res, 0, sizeof(*res));
    return bs->drv->bdrv_check(bs, res, fix);
}
#define COMMIT_BUF_SECTORS 2048

/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    int64_t sector, total_sectors;
    int n, ro, open_flags;
    int ret = 0;
    uint8_t *buf;
    char filename[PATH_MAX];

    if (!drv)
        return -ENOMEDIUM;

    if (!bs->backing_hd) {
        return -ENOTSUP;
    }

    if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
        return -EBUSY;
    }

    ro = bs->backing_hd->read_only;
    /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
    pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
    open_flags = bs->backing_hd->open_flags;

    if (ro) {
        if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
            return -EACCES;
        }
    }

    total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);

    for (sector = 0; sector < total_sectors; sector += n) {
        ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
        if (ret < 0) {
            goto ro_cleanup;
        }
        if (ret) {
            if (bdrv_read(bs, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }

            if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }
        }
    }

    if (drv->bdrv_make_empty) {
        ret = drv->bdrv_make_empty(bs);
        bdrv_flush(bs);
    }

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    if (bs->backing_hd)
        bdrv_flush(bs->backing_hd);

ro_cleanup:
    g_free(buf);

    if (ro) {
        /* ignoring error return here */
        bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
    }

    return ret;
}
int bdrv_commit_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        if (bs->drv && bs->backing_hd) {
            int ret = bdrv_commit(bs);
            if (ret < 0) {
                return ret;
            }
        }
    }
    return 0;
}
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}
/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t sector_num,
                                  int nb_sectors, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .is_write = is_write,
        .co = qemu_coroutine_self(),
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
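
/*
 * Worked example (illustrative, not in the original source): with a 64 KiB
 * cluster size, c = 65536 / 512 = 128 sectors.  A request covering sectors
 * [130, 134) rounds down to cluster_sector_num = 128 and up to
 * cluster_nb_sectors = 128, i.e. the whole cluster [128, 256).
 */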
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t sector_num, int nb_sectors) {
    /*        aaaa   bbbb */
    if (sector_num >= req->sector_num + req->nb_sectors) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->sector_num >= sector_num + nb_sectors) {
        return false;
    }
    return true;
}
static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    BdrvTrackedRequest *req;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    bool retry;

    /* If we touch the same cluster it counts as an overlap.  This guarantees
     * that allocating writes will be serialized and not race with each other
     * for the same cluster.  For example, in copy-on-read it ensures that the
     * CoR read and write operations are atomic and guest writes cannot
     * interleave between them.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (tracked_request_overlaps(req, cluster_sector_num,
                                         cluster_nb_sectors)) {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}
/*
 * Return values:
 * 0        - success
 * -EINVAL  - backing format specified, but no file
 * -ENOSPC  - can't update the backing file because no space is left in the
 *            image file header
 * -ENOTSUP - format driver doesn't support changing the backing file
 */
int bdrv_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    BlockDriver *drv = bs->drv;
    int ret;

    /* Backing file format doesn't make sense without a backing file */
    if (backing_fmt && !backing_file) {
        return -EINVAL;
    }

    if (drv->bdrv_change_backing_file != NULL) {
        ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
    } else {
        ret = -ENOTSUP;
    }

    if (ret == 0) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
        pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
    }
    return ret;
}
/*
 * Finds the image layer in the chain that has 'bs' as its backing file.
 *
 * active is the current topmost image.
 *
 * Returns NULL if bs is not found in active's image chain,
 * or if active == bs.
 */
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs)
{
    BlockDriverState *overlay = NULL;
    BlockDriverState *intermediate;

    assert(active != NULL);
    assert(bs != NULL);

    /* if bs is the same as active, then by definition it has no overlay
     */
    if (active == bs) {
        return NULL;
    }

    intermediate = active;
    while (intermediate->backing_hd) {
        if (intermediate->backing_hd == bs) {
            overlay = intermediate;
            break;
        }
        intermediate = intermediate->backing_hd;
    }

    return overlay;
}
typedef struct BlkIntermediateStates {
    BlockDriverState *bs;
    QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
} BlkIntermediateStates;
/*
 * Drops images above 'base' up to and including 'top', and sets the image
 * above 'top' to have base as its backing file.
 *
 * Requires that the overlay to 'top' is opened r/w, so that the backing file
 * information in 'bs' can be properly updated.
 *
 * E.g., this will convert the following chain:
 * bottom <- base <- intermediate <- top <- active
 *
 * to
 *
 * bottom <- base <- active
 *
 * It is allowed for bottom==base, in which case it converts:
 *
 * base <- intermediate <- top <- active
 *
 * to
 *
 * base <- active
 *
 * Error conditions:
 *  if active == top, that is considered an error
 *
 */
int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
                           BlockDriverState *base)
{
    BlockDriverState *intermediate;
    BlockDriverState *base_bs = NULL;
    BlockDriverState *new_top_bs = NULL;
    BlkIntermediateStates *intermediate_state, *next;
    int ret = -EIO;

    QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
    QSIMPLEQ_INIT(&states_to_delete);

    if (!top->drv || !base->drv) {
        goto exit;
    }

    new_top_bs = bdrv_find_overlay(active, top);

    if (new_top_bs == NULL) {
        /* we could not find the image above 'top', this is an error */
        goto exit;
    }

    /* special case of new_top_bs->backing_hd already pointing to base - nothing
     * to do, no intermediate images */
    if (new_top_bs->backing_hd == base) {
        ret = 0;
        goto exit;
    }

    intermediate = top;

    /* now we will go down through the list, and add each BDS we find
     * into our deletion queue, until we hit the 'base'
     */
    while (intermediate) {
        intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
        intermediate_state->bs = intermediate;
        QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);

        if (intermediate->backing_hd == base) {
            base_bs = intermediate->backing_hd;
            break;
        }
        intermediate = intermediate->backing_hd;
    }
    if (base_bs == NULL) {
        /* something went wrong, we did not end at the base. safely
         * unravel everything, and exit with error */
        goto exit;
    }

    /* success - we can delete the intermediate states, and link top->base */
    ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
                                   base_bs->drv ? base_bs->drv->format_name : "");
    if (ret) {
        goto exit;
    }
    new_top_bs->backing_hd = base_bs;

    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        /* so that bdrv_close() does not recursively close the chain */
        intermediate_state->bs->backing_hd = NULL;
        bdrv_unref(intermediate_state->bs);
    }
    ret = 0;

exit:
    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        g_free(intermediate_state);
    }
    return ret;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    int64_t len;

    if (!bdrv_is_inserted(bs))
        return -ENOMEDIUM;

    if (bs->growable)
        return 0;

    len = bdrv_getlength(bs);

    if (offset < 0)
        return -EIO;

    if ((offset > len) || (len - offset < size))
        return -EIO;

    return 0;
}
static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}
typedef struct RwCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;
static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
                                     rwco->nb_sectors, rwco->qiov,
                                     rwco->flags);
    } else {
        rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
                                      rwco->nb_sectors, rwco->qiov,
                                      rwco->flags);
    }
}
/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_rwv_co(BlockDriverState *bs, int64_t sector_num,
                       QEMUIOVector *qiov, bool is_write,
                       BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = qiov->size >> BDRV_SECTOR_BITS,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };
    assert((qiov->size & (BDRV_SECTOR_SIZE - 1)) == 0);

    /**
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }
    return rwco.ret;
}
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_rwv_co(bs, sector_num, &qiov, is_write, flags);
}
/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}
/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors)
{
    bool enabled;
    int ret;

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}
/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}
int bdrv_writev(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov)
{
    return bdrv_rwv_co(bs, sector_num, qiov, true, 0);
}
int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}
/*
 * Completely zero out a block device with the help of bdrv_write_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_size = bdrv_getlength(bs) / BDRV_SECTOR_SIZE;
    int64_t ret, nb_sectors, sector_num = 0;
    int n;

    for (;;) {
        nb_sectors = target_size - sector_num;
        if (nb_sectors <= 0) {
            return 0;
        }
        if (nb_sectors > INT_MAX) {
            nb_sectors = INT_MAX;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_write_zeroes(bs, sector_num, n, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}
int bdrv_pread(BlockDriverState *bs, int64_t offset,
               void *buf, int count1)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = count1;
    /* first read to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
        count -= len;
        if (count == 0)
            return count1;
        sector_num++;
        buf += len;
    }

    /* read the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
            return ret;
        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        buf += len;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf, count);
    }
    return count1;
}
int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = qiov->size;

    /* first write to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        qemu_iovec_to_buf(qiov, 0, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)),
                          len);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        count -= len;
        if (count == 0)
            return qiov->size;
        sector_num++;
    }

    /* write the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        QEMUIOVector qiov_inplace;

        qemu_iovec_init(&qiov_inplace, qiov->niov);
        qemu_iovec_concat(&qiov_inplace, qiov, len,
                          nb_sectors << BDRV_SECTOR_BITS);
        ret = bdrv_writev(bs, sector_num, &qiov_inplace);
        qemu_iovec_destroy(&qiov_inplace);
        if (ret < 0) {
            return ret;
        }

        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        qemu_iovec_to_buf(qiov, qiov->size - count, tmp_buf, count);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
    }

    return qiov->size;
}
int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int count1)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = count1,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}
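
/*
 * Usage sketch (illustrative; the header layout is an assumption for the
 * example): update a small on-disk structure at a byte offset.
 * bdrv_pwrite() wraps the buffer in a QEMUIOVector and lets
 * bdrv_pwritev() do the read-modify-write for any unaligned edges.
 *
 *     struct {
 *         uint32_t magic;
 *         uint32_t version;
 *     } header = { 0xcafe0001, 2 };
 *
 *     int ret = bdrv_pwrite(bs, 0, &header, sizeof(header));
 */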
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that already do it */
    if (bs->enable_write_cache) {
        bdrv_flush(bs);
    }

    return 0;
}
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
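
/*
 * Worked example for the cluster rounding above (illustrative): with a
 * 64k cluster (128 sectors), a guest read of sectors [130, 140) expands
 * to the cluster-aligned range [128, 256); the whole cluster is read into
 * the bounce buffer and written back, and skip_bytes = (130 - 128) * 512
 * selects the 10 requested sectors to copy into the caller's qiov.
 */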
/*
 * Handle a read request in coroutine context
 */
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    }

    if (bs->copy_on_read) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }
    if (flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight++;
    }

    if (bs->copy_on_read_in_flight) {
        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, nb_sectors, false);
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors, false);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    if (!(bs->zero_beyond_eof && bs->growable)) {
        ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        /* Read zeros after EOF of growable BDSes */
        int64_t len, total_sectors, max_nb_sectors;

        len = bdrv_getlength(bs);
        if (len < 0) {
            ret = len;
            goto out;
        }

        total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE);
        max_nb_sectors = MAX(0, total_sectors - sector_num);
        if (max_nb_sectors > 0) {
            ret = drv->bdrv_co_readv(bs, sector_num,
                                     MIN(nb_sectors, max_nb_sectors), qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                              BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    tracked_request_end(&req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight--;
    }

    return ret;
}
int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}
/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_WRITE_ZEROES_DEFAULT 32768

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;

    int max_write_zeroes = bs->bl.max_write_zeroes ?
                           bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* align request */
        if (bs->bl.write_zeroes_alignment &&
            num >= bs->bl.write_zeroes_alignment &&
            sector_num % bs->bl.write_zeroes_alignment) {
            if (num > bs->bl.write_zeroes_alignment) {
                num = bs->bl.write_zeroes_alignment;
            }
            num -= sector_num % bs->bl.write_zeroes_alignment;
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                /* allocate bounce buffer only once and ensure that it
                 * is big enough for this and all future requests.
                 */
                size_t bufsize = num <= nb_sectors ? num : max_write_zeroes;
                iov.iov_base = qemu_blockalign(bs, bufsize * BDRV_SECTOR_SIZE);
                memset(iov.iov_base, 0, bufsize * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
        }

        sector_num += num;
        nb_sectors -= num;
    }

    qemu_vfree(iov.iov_base);
    return ret;
}
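
/*
 * Worked example for the alignment step above (illustrative): with
 * write_zeroes_alignment = 8, a request for sectors [5, 100) first issues
 * a short request of num = 8 - (5 % 8) = 3 sectors so that the next
 * iteration starts at the aligned sector 8; later iterations are then
 * only clamped by max_write_zeroes.
 */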
/*
 * Handle a write request in coroutine context
 */
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EACCES;
    }
    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    }

    if (bs->copy_on_read_in_flight) {
        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, nb_sectors, true);
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors, true);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
    } else {
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    }

    if (ret == 0 && !bs->enable_write_cache) {
        ret = bdrv_co_flush(bs);
    }

    if (bs->dirty_bitmap) {
        bdrv_set_dirty(bs, sector_num, nb_sectors);
    }

    if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
        bs->wr_highest_sector = sector_num + nb_sectors - 1;
    }
    if (bs->growable && ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
    }

    tracked_request_end(&req);

    return ret;
}
int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      BdrvRequestFlags flags)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors);

    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE | flags);
}
/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 */
int bdrv_truncate(BlockDriverState *bs, int64_t offset)
{
    BlockDriver *drv = bs->drv;
    int ret;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_truncate)
        return -ENOTSUP;
    if (bs->read_only)
        return -EACCES;
    if (bdrv_in_use(bs))
        return -EBUSY;
    ret = drv->bdrv_truncate(bs, offset);
    if (ret == 0) {
        ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
        bdrv_dev_resize_cb(bs);
    }
    return ret;
}
/**
 * Length of an allocated file in bytes. Sparse files are counted by actual
 * allocated space. Return < 0 if error or unknown.
 */
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_get_allocated_file_size) {
        return drv->bdrv_get_allocated_file_size(bs);
    }
    if (bs->file) {
        return bdrv_get_allocated_file_size(bs->file);
    }
    return -ENOTSUP;
}

/**
 * Length of a file in bytes. Return < 0 if error or unknown.
 */
int64_t bdrv_getlength(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;

    if (drv->has_variable_length) {
        int ret = refresh_total_sectors(bs, bs->total_sectors);
        if (ret < 0) {
            return ret;
        }
    }
    return bs->total_sectors * BDRV_SECTOR_SIZE;
}
/* return 0 as number of sectors if no device present or error */
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
{
    int64_t length;
    length = bdrv_getlength(bs);
    if (length < 0)
        length = 0;
    else
        length = length >> BDRV_SECTOR_BITS;
    *nb_sectors_ptr = length;
}
void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
                       BlockdevOnError on_write_error)
{
    bs->on_read_error = on_read_error;
    bs->on_write_error = on_write_error;
}

BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
{
    return is_read ? bs->on_read_error : bs->on_write_error;
}

BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
{
    BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BDRV_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BDRV_ACTION_IGNORE;
    default:
        abort();
    }
}
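
/*
 * Usage sketch (illustrative): how a device model typically consumes the
 * mapping above when a request fails with a negative errno in 'ret':
 *
 *     BlockErrorAction action = bdrv_get_error_action(bs, is_read, -ret);
 *     if (action == BDRV_ACTION_STOP) {
 *         ... queue the request for retry after the VM is resumed ...
 *     }
 *     bdrv_error_action(bs, action, is_read, -ret);
 */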
/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
                       bool is_read, int error)
{
    assert(error >= 0);
    bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
    if (action == BDRV_ACTION_STOP) {
        vm_stop(RUN_STATE_IO_ERROR);
        bdrv_iostatus_set_err(bs, error);
    }
}
int bdrv_is_read_only(BlockDriverState *bs)
{
    return bs->read_only;
}

int bdrv_is_sg(BlockDriverState *bs)
{
    return bs->sg;
}

int bdrv_enable_write_cache(BlockDriverState *bs)
{
    return bs->enable_write_cache;
}

void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
{
    bs->enable_write_cache = wce;

    /* so a reopen() will preserve wce */
    if (wce) {
        bs->open_flags |= BDRV_O_CACHE_WB;
    } else {
        bs->open_flags &= ~BDRV_O_CACHE_WB;
    }
}
int bdrv_is_encrypted(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return 1;
    return bs->encrypted;
}

int bdrv_key_required(BlockDriverState *bs)
{
    BlockDriverState *backing_hd = bs->backing_hd;

    if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
        return 1;
    return (bs->encrypted && !bs->valid_key);
}

int bdrv_set_key(BlockDriverState *bs, const char *key)
{
    int ret;
    if (bs->backing_hd && bs->backing_hd->encrypted) {
        ret = bdrv_set_key(bs->backing_hd, key);
        if (ret < 0)
            return ret;
        if (!bs->encrypted)
            return 0;
    }
    if (!bs->encrypted) {
        return -EINVAL;
    } else if (!bs->drv || !bs->drv->bdrv_set_key) {
        return -ENOMEDIUM;
    }
    ret = bs->drv->bdrv_set_key(bs, key);
    if (ret < 0) {
        bs->valid_key = 0;
    } else if (!bs->valid_key) {
        bs->valid_key = 1;
        /* call the change callback now, we skipped it on open */
        bdrv_dev_change_media_cb(bs, true);
    }
    return ret;
}
const char *bdrv_get_format_name(BlockDriverState *bs)
{
    return bs->drv ? bs->drv->format_name : NULL;
}

void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque)
{
    BlockDriver *drv;

    QLIST_FOREACH(drv, &bdrv_drivers, list) {
        it(opaque, drv->format_name);
    }
}

BlockDriverState *bdrv_find(const char *name)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        if (!strcmp(name, bs->device_name)) {
            return bs;
        }
    }
    return NULL;
}

BlockDriverState *bdrv_next(BlockDriverState *bs)
{
    if (!bs) {
        return QTAILQ_FIRST(&bdrv_states);
    }
    return QTAILQ_NEXT(bs, list);
}

void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        it(opaque, bs);
    }
}

const char *bdrv_get_device_name(BlockDriverState *bs)
{
    return bs->device_name;
}
int bdrv_get_flags(BlockDriverState *bs)
{
    return bs->open_flags;
}

int bdrv_flush_all(void)
{
    BlockDriverState *bs;
    int result = 0;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        int ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
    }

    return result;
}
int bdrv_has_zero_init_1(BlockDriverState *bs)
{
    return 1;
}

int bdrv_has_zero_init(BlockDriverState *bs)
{
    assert(bs->drv);

    /* If BS is a copy on write image, it is initialized to
       the contents of the base image, which may not be zeroes.  */
    if (bs->backing_hd) {
        return 0;
    }
    if (bs->drv->bdrv_has_zero_init) {
        return bs->drv->bdrv_has_zero_init(bs);
    }

    /* safe default */
    return 0;
}

bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
{
    BlockDriverInfo bdi;

    if (bs->backing_hd) {
        return false;
    }

    if (bdrv_get_info(bs, &bdi) == 0) {
        return bdi.unallocated_blocks_are_zero;
    }

    return false;
}

bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
{
    BlockDriverInfo bdi;

    if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
        return false;
    }

    if (bdrv_get_info(bs, &bdi) == 0) {
        return bdi.can_write_zeroes_with_unmap;
    }

    return false;
}
typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;

/*
 * Returns true iff the specified sector is present in the disk image. Drivers
 * not implementing the functionality are assumed to not support backing files,
 * hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum)
{
    int64_t length;
    int64_t n;
    int64_t ret, ret2;

    length = bdrv_getlength(bs);
    if (length < 0) {
        return length;
    }

    if (sector_num >= (length >> BDRV_SECTOR_BITS)) {
        *pnum = 0;
        return 0;
    }

    n = bs->total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum);
    }

    if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing_hd) {
            BlockDriverState *bs2 = bs->backing_hd;
            int64_t length2 = bdrv_getlength(bs2);
            if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (bs->file &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, pnum);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            ret |= (ret2 & BDRV_BLOCK_ZERO);
        }
    }

    return ret;
}
/* Coroutine wrapper for bdrv_get_block_status() */
static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;
    BlockDriverState *bs = data->bs;

    data->ret = bdrv_co_get_block_status(bs, data->sector_num,
                                         data->nb_sectors, data->pnum);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_get_block_status().
 *
 * See bdrv_co_get_block_status() for details.
 */
int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors, int *pnum)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_co_entry(&data);
    } else {
        co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
        qemu_coroutine_enter(co, &data);
        while (!data.done) {
            qemu_aio_wait();
        }
    }
    return data.ret;
}

int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        return ret;
    }
    return
        (ret & BDRV_BLOCK_DATA) ||
        ((ret & BDRV_BLOCK_ZERO) && !bdrv_has_zero_init(bs));
}
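
/*
 * Usage sketch (illustrative; 'total_sectors' is assumed to come from
 * bdrv_getlength(bs) / BDRV_SECTOR_SIZE): walk the allocation map of an
 * image in chunks, using 'pnum' to skip over runs in the same state.
 *
 *     int64_t sector_num = 0;
 *     while (sector_num < total_sectors) {
 *         int pnum;
 *         int ret = bdrv_is_allocated(bs, sector_num,
 *                                     (int)MIN(total_sectors - sector_num,
 *                                              INT_MAX), &pnum);
 *         if (ret < 0) {
 *             break;
 *         }
 *         ... ret != 0: [sector_num, sector_num + pnum) is allocated ...
 *         sector_num += pnum;
 *     }
 */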
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 *  the specified sector) that are known to be in the same
 *  allocated/unallocated state.
 *
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = intermediate->backing_hd;
    }

    *pnum = n;
    return 0;
}
const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return bs->backing_file;
    else if (bs->encrypted)
        return bs->filename;
    else
        return NULL;
}

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size)
{
    pstrcpy(filename, filename_size, bs->backing_file);
}
int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_write_compressed)
        return -ENOTSUP;
    if (bdrv_check_request(bs, sector_num, nb_sectors))
        return -EIO;

    assert(!bs->dirty_bitmap);

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_get_info)
        return -ENOTSUP;
    memset(bdi, 0, sizeof(*bdi));
    return drv->bdrv_get_info(bs, bdi);
}

ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_get_specific_info) {
        return drv->bdrv_get_specific_info(bs);
    }
    return NULL;
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_writev_vmstate(bs, &qiov, pos);
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_save_vmstate) {
        return drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_writev_vmstate(bs->file, qiov, pos);
    }

    return -ENOTSUP;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_load_vmstate)
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    if (bs->file)
        return bdrv_load_vmstate(bs->file, buf, pos, size);
    return -ENOTSUP;
}
void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
{
    if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
        return;
    }

    bs->drv->bdrv_debug_event(bs, event);
}

int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
        return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
    }

    return -ENOTSUP;
}

int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_resume) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
        return bs->drv->bdrv_debug_resume(bs, tag);
    }

    return -ENOTSUP;
}

bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
        return bs->drv->bdrv_debug_is_suspended(bs, tag);
    }

    return false;
}

int bdrv_is_snapshot(BlockDriverState *bs)
{
    return !!(bs->open_flags & BDRV_O_SNAPSHOT);
}
/* backing_file can either be relative, or absolute, or a protocol.  If it is
 * relative, it must be relative to the chain.  So, passing in bs->filename
 * from a BDS as backing_file should not be done, as that may be relative to
 * the CWD rather than the chain. */
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
        const char *backing_file)
{
    char *filename_full = NULL;
    char *backing_file_full = NULL;
    char *filename_tmp = NULL;
    int is_protocol = 0;
    BlockDriverState *curr_bs = NULL;
    BlockDriverState *retval = NULL;

    if (!bs || !bs->drv || !backing_file) {
        return NULL;
    }

    filename_full     = g_malloc(PATH_MAX);
    backing_file_full = g_malloc(PATH_MAX);
    filename_tmp      = g_malloc(PATH_MAX);

    is_protocol = path_has_protocol(backing_file);

    for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {

        /* If either of the filename paths is actually a protocol, then
         * compare unmodified paths; otherwise make paths relative */
        if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
            if (strcmp(backing_file, curr_bs->backing_file) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        } else {
            /* If not an absolute filename path, make it relative to the current
             * image's filename path */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         backing_file);

            /* We are going to compare absolute pathnames */
            if (!realpath(filename_tmp, filename_full)) {
                continue;
            }

            /* We need to make sure the backing filename we are comparing against
             * is relative to the current image filename (or absolute) */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         curr_bs->backing_file);

            if (!realpath(filename_tmp, backing_file_full)) {
                continue;
            }

            if (strcmp(backing_file_full, filename_full) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        }
    }

    g_free(filename_full);
    g_free(backing_file_full);
    g_free(filename_tmp);
    return retval;
}

int bdrv_get_backing_file_depth(BlockDriverState *bs)
{
    if (!bs->drv) {
        return 0;
    }

    if (!bs->backing_hd) {
        return 0;
    }

    return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
}

BlockDriverState *bdrv_find_base(BlockDriverState *bs)
{
    BlockDriverState *curr_bs = NULL;

    if (!bs) {
        return NULL;
    }

    curr_bs = bs;

    while (curr_bs->backing_hd) {
        curr_bs = curr_bs->backing_hd;
    }
    return curr_bs;
}
/**************************************************************/
/* async I/Os */

BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
                                 cb, opaque, false);
}

BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                  QEMUIOVector *qiov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
                                 cb, opaque, true);
}
typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockDriverCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}

static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}

static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}
/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
    int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We shouldn't need to add any zeros between the two requests
            assert (reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector     = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov       = reqs[i].qiov;
        }
    }

    return outidx + 1;
}
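
/*
 * Worked example for the merge above (illustrative): two queued writes,
 * one covering sectors [0, 8) and one covering [6, 14), sort adjacently
 * and satisfy reqs[i].sector <= oldreq_last (6 <= 8). The merged request
 * keeps (6 - 0) << 9 bytes of the first qiov, appends the whole second
 * qiov, and covers sectors [0, 14) in a single bdrv_aio_writev() call.
 */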
/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. In error case this function returns -1, and any of the
 * requests may or may not be submitted yet. In particular, this means that the
 * callback will be called for some of the requests, for others it won't. The
 * caller must check the error field of the BlockRequest to wait for the right
 * callbacks (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergeable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
                        reqs[i].nb_sectors, multiwrite_cb, mcb);
    }

    return 0;
}

void bdrv_aio_cancel(BlockDriverAIOCB *acb)
{
    acb->aiocb_info->cancel(acb);
}
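
/*
 * Usage sketch (illustrative): submit an asynchronous read and complete
 * it from the callback. The 'read_done' callback and 'done' flag are
 * assumptions for the example, not part of this file.
 *
 *     static void read_done(void *opaque, int ret)
 *     {
 *         bool *done = opaque;
 *         *done = (ret == 0);
 *     }
 *
 *     QEMUIOVector qiov;
 *     struct iovec iov = { .iov_base = buf, .iov_len = 512 };
 *     bool done = false;
 *
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     bdrv_aio_readv(bs, 0, &qiov, 1, read_done, &done);
 */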
/**************************************************************/
/* async block device emulation */

typedef struct BlockDriverAIOCBSync {
    BlockDriverAIOCB common;
    QEMUBH *bh;
    int ret;
    /* vector translation state */
    QEMUIOVector *qiov;
    uint8_t *bounce;
    int is_write;
} BlockDriverAIOCBSync;

static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBSync *acb =
        container_of(blockacb, BlockDriverAIOCBSync, common);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static const AIOCBInfo bdrv_em_aiocb_info = {
    .aiocb_size         = sizeof(BlockDriverAIOCBSync),
    .cancel             = bdrv_aio_cancel_em,
};

static void bdrv_aio_bh_cb(void *opaque)
{
    BlockDriverAIOCBSync *acb = opaque;

    if (!acb->is_write)
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov,
                                            int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque,
                                            int is_write)

{
    BlockDriverAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    acb->bounce = qemu_blockalign(bs, qiov->size);
    acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);

    if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    qemu_bh_schedule(acb->bh);

    return &acb->common;
}

static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}
typedef struct BlockDriverAIOCBCoroutine {
    BlockDriverAIOCB common;
    BlockRequest req;
    bool is_write;
    bool *done;
    QEMUBH* bh;
} BlockDriverAIOCBCoroutine;

static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBCoroutine *acb =
        container_of(blockacb, BlockDriverAIOCBCoroutine, common);
    bool done = false;

    acb->done = &done;
    while (!done) {
        qemu_aio_wait();
    }

    qemu_aio_release(acb);
}

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockDriverAIOCBCoroutine),
    .cancel             = bdrv_aio_co_cancel_em,
};

static void bdrv_co_em_bh(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;

    acb->common.cb(acb->common.opaque, acb->req.error);

    if (acb->done) {
        *acb->done = true;
    }

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);
}

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, 0);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, 0);
    }

    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->is_write = is_write;
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}

static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
void bdrv_init(void)
{
    module_call_init(MODULE_INIT_BLOCK);
}

void bdrv_init_with_whitelist(void)
{
    use_bdrv_whitelist = 1;
    bdrv_init();
}

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriverAIOCB *acb;

    acb = g_slice_alloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    return acb;
}

void qemu_aio_release(void *p)
{
    BlockDriverAIOCB *acb = p;
    g_slice_free1(acb->aiocb_info->aiocb_size, acb);
}
/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}

static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockDriverAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}

static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}
static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        return ret;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    return bdrv_co_flush(bs->file);
}
void bdrv_invalidate_cache(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_invalidate_cache) {
        bs->drv->bdrv_invalidate_cache(bs);
    }
}

void bdrv_invalidate_cache_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_invalidate_cache(bs);
    }
}

void bdrv_clear_incoming_migration_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
    }
}
int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
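
/*
 * Usage note (illustrative): bdrv_flush() is safe to call from both
 * worlds. Inside a coroutine it runs the flush directly; outside, it
 * spawns a coroutine and spins in qemu_aio_wait() until rwco.ret leaves
 * NOT_DONE, e.g.:
 *
 *     if (bdrv_flush(bs) < 0) {
 *         error_report("flush failed");
 *     }
 */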
static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}

/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_DISCARD_DEFAULT 32768

int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    if (!bs->drv) {
        return -ENOMEDIUM;
    } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    } else if (bs->read_only) {
        return -EROFS;
    }

    if (bs->dirty_bitmap) {
        bdrv_reset_dirty(bs, sector_num, nb_sectors);
    }

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (bs->drv->bdrv_co_discard) {
        int max_discard = bs->bl.max_discard ?
                          bs->bl.max_discard : MAX_DISCARD_DEFAULT;

        while (nb_sectors > 0) {
            int ret;
            int num = nb_sectors;

            /* align request */
            if (bs->bl.discard_alignment &&
                num >= bs->bl.discard_alignment &&
                sector_num % bs->bl.discard_alignment) {
                if (num > bs->bl.discard_alignment) {
                    num = bs->bl.discard_alignment;
                }
                num -= sector_num % bs->bl.discard_alignment;
            }

            /* limit request size */
            if (num > max_discard) {
                num = max_discard;
            }

            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
            if (ret) {
                return ret;
            }

            sector_num += num;
            nb_sectors -= num;
        }
        return 0;
    } else if (bs->drv->bdrv_aio_discard) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
                                        bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    } else {
        return 0;
    }
}

int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
/**************************************************************/
/* removable device support */

/**
 * Return TRUE if the media is present
 */
int bdrv_is_inserted(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (!drv)
        return 0;
    if (!drv->bdrv_is_inserted)
        return 1;
    return drv->bdrv_is_inserted(bs);
}

/**
 * Return whether the media changed since the last call to this
 * function, or -ENOTSUP if we don't know.  Most drivers don't know.
 */
int bdrv_media_changed(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_media_changed) {
        return drv->bdrv_media_changed(bs);
    }
    return -ENOTSUP;
}

/**
 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
 */
void bdrv_eject(BlockDriverState *bs, bool eject_flag)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_eject) {
        drv->bdrv_eject(bs, eject_flag);
    }

    if (bs->device_name[0] != '\0') {
        bdrv_emit_qmp_eject_event(bs, eject_flag);
    }
}

/**
 * Lock or unlock the media (if it is locked, the user won't be able
 * to eject it manually).
 */
void bdrv_lock_medium(BlockDriverState *bs, bool locked)
{
    BlockDriver *drv = bs->drv;

    trace_bdrv_lock_medium(bs, locked);

    if (drv && drv->bdrv_lock_medium) {
        drv->bdrv_lock_medium(bs, locked);
    }
}
/* needed for generic scsi interface */

int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_ioctl)
        return drv->bdrv_ioctl(bs, req, buf);
    return -ENOTSUP;
}

BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_aio_ioctl)
        return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
    return NULL;
}
void bdrv_set_buffer_alignment(BlockDriverState *bs, int align)
{
    bs->buffer_alignment = align;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign((bs && bs->buffer_alignment) ?
                         bs->buffer_alignment : 512, size);
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % bs->buffer_alignment) {
            return false;
        }
    }

    return true;
}
void bdrv_set_dirty_tracking(BlockDriverState *bs, int granularity)
{
    int64_t bitmap_size;

    assert((granularity & (granularity - 1)) == 0);

    if (granularity) {
        granularity >>= BDRV_SECTOR_BITS;
        assert(!bs->dirty_bitmap);
        bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS);
        bs->dirty_bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
    } else {
        if (bs->dirty_bitmap) {
            hbitmap_free(bs->dirty_bitmap);
            bs->dirty_bitmap = NULL;
        }
    }
}

int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
{
    if (bs->dirty_bitmap) {
        return hbitmap_get(bs->dirty_bitmap, sector);
    } else {
        return 0;
    }
}

void bdrv_dirty_iter_init(BlockDriverState *bs, HBitmapIter *hbi)
{
    hbitmap_iter_init(hbi, bs->dirty_bitmap, 0);
}

void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
                    int nr_sectors)
{
    hbitmap_set(bs->dirty_bitmap, cur_sector, nr_sectors);
}

void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
                      int nr_sectors)
{
    hbitmap_reset(bs->dirty_bitmap, cur_sector, nr_sectors);
}

int64_t bdrv_get_dirty_count(BlockDriverState *bs)
{
    if (bs->dirty_bitmap) {
        return hbitmap_count(bs->dirty_bitmap);
    } else {
        return 0;
    }
}
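
/*
 * Usage sketch (illustrative): enable dirty tracking at 64k granularity
 * and iterate over sectors dirtied by subsequent writes, as block jobs
 * such as mirroring do. Disabling tracking again frees the bitmap.
 *
 *     HBitmapIter hbi;
 *     int64_t sector;
 *
 *     bdrv_set_dirty_tracking(bs, 65536);
 *     ... guest writes happen ...
 *     bdrv_dirty_iter_init(bs, &hbi);
 *     while ((sector = hbitmap_iter_next(&hbi)) >= 0) {
 *         ... copy out the dirty chunk containing 'sector' ...
 *     }
 *     bdrv_set_dirty_tracking(bs, 0);
 */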
/* Get a reference to bs */
void bdrv_ref(BlockDriverState *bs)
{
    bs->refcnt++;
}

/* Release a previously grabbed reference to bs.
 * If after releasing, reference count is zero, the BlockDriverState is
 * deleted. */
void bdrv_unref(BlockDriverState *bs)
{
    assert(bs->refcnt > 0);
    if (--bs->refcnt == 0) {
        bdrv_delete(bs);
    }
}

void bdrv_set_in_use(BlockDriverState *bs, int in_use)
{
    assert(bs->in_use != in_use);
    bs->in_use = in_use;
}

int bdrv_in_use(BlockDriverState *bs)
{
    return bs->in_use;
}
void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
{
    return (bs->iostatus_enabled &&
           (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            bs->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}

void bdrv_iostatus_reset(BlockDriverState *bs)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
{
    assert(bdrv_iostatus_is_enabled(bs));
    if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                         BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
void
bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
                enum BlockAcctType type)
{
    assert(type < BDRV_MAX_IOTYPE);

    cookie->bytes = bytes;
    cookie->start_time_ns = get_clock();
    cookie->type = type;
}

void
bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
{
    assert(cookie->type < BDRV_MAX_IOTYPE);

    bs->nr_bytes[cookie->type] += cookie->bytes;
    bs->nr_ops[cookie->type]++;
    bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
}
void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     Error **errp, bool quiet)
{
    QEMUOptionParameter *param = NULL, *create_options = NULL;
    QEMUOptionParameter *backing_fmt, *backing_file, *size;
    BlockDriverState *bs = NULL;
    BlockDriver *drv, *proto_drv;
    BlockDriver *backing_drv = NULL;
    Error *local_err = NULL;
    int ret = 0;

    /* Find driver and parse its options */
    drv = bdrv_find_format(fmt);
    if (!drv) {
        error_setg(errp, "Unknown file format '%s'", fmt);
        return;
    }

    proto_drv = bdrv_find_protocol(filename, true);
    if (!proto_drv) {
        error_setg(errp, "Unknown protocol '%s'", filename);
        return;
    }

    create_options = append_option_parameters(create_options,
                                              drv->create_options);
    create_options = append_option_parameters(create_options,
                                              proto_drv->create_options);

    /* Create parameter list with default values */
    param = parse_option_parameters("", create_options, param);

    set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);

    /* Parse -o options */
    if (options) {
        param = parse_option_parameters(options, create_options, param);
        if (param == NULL) {
            error_setg(errp, "Invalid options for file format '%s'.", fmt);
            goto out;
        }
    }

    if (base_filename) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
                                 base_filename)) {
            error_setg(errp, "Backing file not supported for file format '%s'",
                       fmt);
            goto out;
        }
    }

    if (base_fmt) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
            error_setg(errp, "Backing file format not supported for file "
                             "format '%s'", fmt);
            goto out;
        }
    }

    backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
    if (backing_file && backing_file->value.s) {
        if (!strcmp(filename, backing_file->value.s)) {
            error_setg(errp, "Error: Trying to create an image with the "
                             "same filename as the backing file");
            goto out;
        }
    }

    backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
    if (backing_fmt && backing_fmt->value.s) {
        backing_drv = bdrv_find_format(backing_fmt->value.s);
        if (!backing_drv) {
            error_setg(errp, "Unknown backing file format '%s'",
                       backing_fmt->value.s);
            goto out;
        }
    }

    // The size for the image must always be specified, with one exception:
    // If we are using a backing file, we can obtain the size from there
    size = get_option_parameter(param, BLOCK_OPT_SIZE);
    if (size && size->value.n == -1) {
        if (backing_file && backing_file->value.s) {
            uint64_t size;
            char buf[32];
            int back_flags;

            /* backing files always opened read-only */
            back_flags =
                flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

            bs = bdrv_new("");

            ret = bdrv_open(bs, backing_file->value.s, NULL, back_flags,
                            backing_drv, &local_err);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Could not open '%s': %s",
                                 backing_file->value.s,
                                 error_get_pretty(local_err));
                error_free(local_err);
                local_err = NULL;
                goto out;
            }
            bdrv_get_geometry(bs, &size);
            size *= 512;

            snprintf(buf, sizeof(buf), "%" PRId64, size);
            set_option_parameter(param, BLOCK_OPT_SIZE, buf);
        } else {
            error_setg(errp, "Image creation needs a size parameter");
            goto out;
        }
    }

    if (!quiet) {
        printf("Formatting '%s', fmt=%s ", filename, fmt);
        print_option_parameters(param);
        puts("");
    }
    ret = bdrv_create(drv, filename, param, &local_err);
    if (ret == -EFBIG) {
        /* This is generally a better message than whatever the driver would
         * deliver (especially because of the cluster_size_hint), since that
         * is most probably not much different from "image too large". */
        const char *cluster_size_hint = "";
        if (get_option_parameter(create_options, BLOCK_OPT_CLUSTER_SIZE)) {
            cluster_size_hint = " (try using a larger cluster size)";
        }
        error_setg(errp, "The image size is too large for file format '%s'"
                   "%s", fmt, cluster_size_hint);
        error_free(local_err);
        local_err = NULL;
    }

out:
    free_option_parameters(create_options);
    free_option_parameters(param);

    if (bs) {
        bdrv_unref(bs);
    }
    if (error_is_set(&local_err)) {
        error_propagate(errp, local_err);
    }
}
*bdrv_get_aio_context(BlockDriverState
*bs
)
4787 /* Currently BlockDriverState always uses the main loop AioContext */
4788 return qemu_get_aio_context();
4791 void bdrv_add_before_write_notifier(BlockDriverState
*bs
,
4792 NotifierWithReturn
*notifier
)
4794 notifier_with_return_list_add(&bs
->before_write_notifiers
, notifier
);
4797 int bdrv_amend_options(BlockDriverState
*bs
, QEMUOptionParameter
*options
)
4799 if (bs
->drv
->bdrv_amend_options
== NULL
) {
4802 return bs
->drv
->bdrv_amend_options(bs
, options
);
4805 ExtSnapshotPerm
bdrv_check_ext_snapshot(BlockDriverState
*bs
)
4807 if (bs
->drv
->bdrv_check_ext_snapshot
) {
4808 return bs
->drv
->bdrv_check_ext_snapshot(bs
);
4811 if (bs
->file
&& bs
->file
->drv
&& bs
->file
->drv
->bdrv_check_ext_snapshot
) {
4812 return bs
->file
->drv
->bdrv_check_ext_snapshot(bs
);
4815 /* external snapshots are allowed by default */
4816 return EXT_SNAPSHOT_ALLOWED
;
4819 ExtSnapshotPerm
bdrv_check_ext_snapshot_forbidden(BlockDriverState
*bs
)
4821 return EXT_SNAPSHOT_FORBIDDEN
;