/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "monitor/monitor.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "qmp-commands.h"
#include "qemu/timer.h"

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

typedef enum {
    BDRV_REQ_COPY_ON_READ = 0x1,
    BDRV_REQ_ZERO_WRITE   = 0x2,
} BdrvRequestFlags;
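/* BDRV_REQ_COPY_ON_READ makes the read path write the data it has read back
 * into this image (see bdrv_co_do_copy_on_readv() below); BDRV_REQ_ZERO_WRITE
 * routes the request through bdrv_co_do_write_zeroes() and ignores the
 * caller's buffer. */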
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors);

static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
        bool is_write, double elapsed_time, uint64_t *wait);
static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
        double elapsed_time, uint64_t *wait);
static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
        bool is_write, int64_t *wait);
static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* The device to use for VM snapshots */
static BlockDriverState *bs_snapshots;

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;
#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif
/* throttling disk I/O limits */
void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    while (qemu_co_queue_next(&bs->throttled_reqs));

    if (bs->block_timer) {
        qemu_del_timer(bs->block_timer);
        qemu_free_timer(bs->block_timer);
        bs->block_timer = NULL;
    }

    memset(&bs->io_base, 0, sizeof(bs->io_base));
}

static void bdrv_block_timer(void *opaque)
{
    BlockDriverState *bs = opaque;

    qemu_co_queue_next(&bs->throttled_reqs);
}

void bdrv_io_limits_enable(BlockDriverState *bs)
{
    qemu_co_queue_init(&bs->throttled_reqs);
    bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
    bs->io_limits_enabled = true;
}
bool bdrv_io_limits_enabled(BlockDriverState *bs)
{
    BlockIOLimit *io_limits = &bs->io_limits;
    return io_limits->bps[BLOCK_IO_LIMIT_READ]
        || io_limits->bps[BLOCK_IO_LIMIT_WRITE]
        || io_limits->bps[BLOCK_IO_LIMIT_TOTAL]
        || io_limits->iops[BLOCK_IO_LIMIT_READ]
        || io_limits->iops[BLOCK_IO_LIMIT_WRITE]
        || io_limits->iops[BLOCK_IO_LIMIT_TOTAL];
}
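/* Called from the read/write coroutine paths when throttling is enabled:
 * queue the current coroutine behind earlier throttled requests and keep it
 * waiting (re-arming the block timer) until the request fits within the
 * configured bps/iops limits. */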
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     bool is_write, int nb_sectors)
{
    int64_t wait_time = -1;

    if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
        qemu_co_queue_wait(&bs->throttled_reqs);
    }

    /* Requests are kept in FIFO order: the next throttled request is not
     * dequeued until the current request has been allowed to be serviced.
     * So if the current request still exceeds the limits, it is re-inserted
     * at the head of the queue and every request behind it stays in
     * throttled_reqs.
     */
    while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
        qemu_mod_timer(bs->block_timer,
                       wait_time + qemu_get_clock_ns(vm_clock));
        qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
    }

    qemu_co_queue_next(&bs->throttled_reqs);
}
/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}

int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}
/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}

void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name)
{
    BlockDriverState *bs;

    bs = g_malloc0(sizeof(BlockDriverState));
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);

    return bs;
}

void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}

static int bdrv_is_whitelisted(BlockDriver *drv)
{
    static const char *whitelist[] = {
        CONFIG_BDRV_WHITELIST
    };
    const char **p;

    if (!whitelist[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv) ? drv : NULL;
}
typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QEMUOptionParameter *options;
    int ret;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    CreateCo *cco = opaque;

    cco->ret = cco->drv->bdrv_create(cco->filename, cco->options);
}

int bdrv_create(BlockDriver *drv, const char* filename,
    QEMUOptionParameter *options)
{
    int ret;
    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .options = options,
        .ret = NOT_DONE,
    };

    if (!drv->bdrv_create) {
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;

out:
    g_free(cco.filename);
    return ret;
}

int bdrv_create_file(const char* filename, QEMUOptionParameter *options)
{
    BlockDriver *drv;

    drv = bdrv_find_protocol(filename);
    if (drv == NULL) {
        return -ENOENT;
    }

    return bdrv_create(drv, filename, options);
}
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir)
        tmpdir = "/tmp";
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}
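/* Return the protocol driver for @filename: host devices are probed first,
 * a name without a "<protocol>:" prefix falls back to the plain "file"
 * driver, otherwise the prefix is matched against each driver's
 * protocol_name (e.g. "nbd:"). */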
BlockDriver *bdrv_find_protocol(const char *filename)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename)) {
        return bdrv_find_format("file");
    }
    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}
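/* Guess the image format: scsi-generic devices and empty drives are handed
 * to the "raw" driver; otherwise the first bytes of the image are read and
 * every registered driver's bdrv_probe() scores them, the best score wins. */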
static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv)
{
    int score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    int ret;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        drv = bdrv_find_format("raw");
        *pdrv = drv;
        return drv ? 0 : -ENOENT;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    *pdrv = drv;
    return drv ? 0 : -ENOENT;
}
/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = length >> BDRV_SECTOR_BITS;
    }

    bs->total_sectors = hint;
    return 0;
}

/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}

/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
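/* Derive the flags that are handed to the driver's open function: the driver
 * always sees BDRV_O_CACHE_WB (the block layer emulates writethrough itself
 * by flushing after writes), block-layer-internal flags are stripped, and
 * temporary snapshot images are forced writable. */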
static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (bs->is_temporary) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}
/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    const char *filename, QDict *options,
    int flags, BlockDriver *drv)
{
    int ret, open_flags;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    trace_bdrv_open_common(bs, filename, flags, drv->format_name);

    bs->open_flags = flags;
    bs->buffer_alignment = 512;

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if ((flags & BDRV_O_RDWR) && (flags & BDRV_O_COPY_ON_READ)) {
        bdrv_enable_copy_on_read(bs);
    }

    pstrcpy(bs->filename, sizeof(bs->filename), filename);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv)) {
        return -ENOTSUP;
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);
    open_flags = bdrv_open_flags(bs, flags);

    bs->read_only = !(open_flags & BDRV_O_RDWR);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        ret = drv->bdrv_file_open(bs, filename, options, open_flags);
    } else {
        assert(file != NULL);
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags);
    }

    if (ret < 0) {
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        goto free_and_fail;
    }

    if (bs->is_temporary) {
        unlink(filename);
    }

    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}
/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_file_open.
 */
int bdrv_file_open(BlockDriverState **pbs, const char *filename,
                   QDict *options, int flags)
{
    BlockDriverState *bs;
    BlockDriver *drv;
    int ret;

    drv = bdrv_find_protocol(filename);
    if (!drv) {
        QDECREF(options);
        return -ENOENT;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs = bdrv_new("");
    bs->options = options;
    options = qdict_clone_shallow(options);

    if (drv->bdrv_parse_filename) {
        Error *local_err = NULL;
        drv->bdrv_parse_filename(filename, options, &local_err);
        if (error_is_set(&local_err)) {
            qerror_report_err(local_err);
            error_free(local_err);
            ret = -EINVAL;
            goto fail;
        }
    }

    ret = bdrv_open_common(bs, NULL, filename, options, flags, drv);
    if (ret < 0) {
        goto fail;
    }

    /* Check if any unknown options were used */
    if (qdict_size(options) != 0) {
        const QDictEntry *entry = qdict_first(options);
        qerror_report(ERROR_CLASS_GENERIC_ERROR, "Block protocol '%s' doesn't "
                      "support the option '%s'",
                      drv->format_name, entry->key);
        ret = -EINVAL;
        goto fail;
    }
    QDECREF(options);

    bs->growable = 1;
    *pbs = bs;
    return 0;

fail:
    QDECREF(options);
    QDECREF(bs->options);
    bdrv_delete(bs);
    return ret;
}
int bdrv_open_backing_file(BlockDriverState *bs)
{
    char backing_filename[PATH_MAX];
    int back_flags, ret;
    BlockDriver *back_drv = NULL;

    if (bs->backing_hd != NULL) {
        return 0;
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (bs->backing_file[0] == '\0') {
        return 0;
    }

    bs->backing_hd = bdrv_new("");
    bdrv_get_full_backing_filename(bs, backing_filename,
                                   sizeof(backing_filename));

    if (bs->backing_format[0] != '\0') {
        back_drv = bdrv_find_format(bs->backing_format);
    }

    /* backing files always opened read-only */
    back_flags = bs->open_flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT);

    ret = bdrv_open(bs->backing_hd, backing_filename, NULL,
                    back_flags, back_drv);
    if (ret < 0) {
        bdrv_delete(bs->backing_hd);
        bs->backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        return ret;
    }
    return 0;
}
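/* Move every option whose key starts with @start from @src into a freshly
 * allocated dictionary *dst, stripping the prefix from the key (used below
 * to collect the "file." sub-options for the protocol layer). */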
static void extract_subqdict(QDict *src, QDict **dst, const char *start)
{
    const QDictEntry *entry, *next;
    const char *p;

    *dst = qdict_new();
    entry = qdict_first(src);

    while (entry != NULL) {
        next = qdict_next(src, entry);
        if (strstart(entry->key, start, &p)) {
            qobject_incref(entry->value);
            qdict_put_obj(*dst, p, entry->value);
            qdict_del(src, entry->key);
        }
        entry = next;
    }
}
/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 */
int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options,
              int flags, BlockDriver *drv)
{
    int ret;
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char tmp_filename[PATH_MAX + 1];
    BlockDriverState *file = NULL;
    QDict *file_options = NULL;

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    /* For snapshot=on, create a temporary qcow2 overlay */
    if (flags & BDRV_O_SNAPSHOT) {
        BlockDriverState *bs1;
        int64_t total_size;
        BlockDriver *bdrv_qcow2;
        QEMUOptionParameter *create_options;
        char backing_filename[PATH_MAX];

        /* if snapshot, we create a temporary backing file and open it
           instead of opening 'filename' directly */

        /* if there is a backing file, use it */
        bs1 = bdrv_new("");
        ret = bdrv_open(bs1, filename, NULL, 0, drv);
        if (ret < 0) {
            bdrv_delete(bs1);
            goto fail;
        }
        total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;

        bdrv_delete(bs1);

        ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
        if (ret < 0) {
            goto fail;
        }

        /* Real path is meaningless for protocols */
        if (path_has_protocol(filename)) {
            snprintf(backing_filename, sizeof(backing_filename),
                     "%s", filename);
        } else if (!realpath(filename, backing_filename)) {
            ret = -errno;
            goto fail;
        }

        bdrv_qcow2 = bdrv_find_format("qcow2");
        create_options = parse_option_parameters("", bdrv_qcow2->create_options,
                                                 NULL);

        set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size);
        set_option_parameter(create_options, BLOCK_OPT_BACKING_FILE,
                             backing_filename);
        if (drv) {
            set_option_parameter(create_options, BLOCK_OPT_BACKING_FMT,
                                 drv->format_name);
        }

        ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options);
        free_option_parameters(create_options);
        if (ret < 0) {
            goto fail;
        }

        filename = tmp_filename;
        drv = bdrv_qcow2;
        bs->is_temporary = 1;
    }

    /* Open image file without format layer */
    if (flags & BDRV_O_RDWR) {
        flags |= BDRV_O_ALLOW_RDWR;
    }

    extract_subqdict(options, &file_options, "file.");

    ret = bdrv_file_open(&file, filename, file_options,
                         bdrv_open_flags(bs, flags));
    if (ret < 0) {
        goto fail;
    }

    /* Find the right image format driver */
    if (!drv) {
        ret = find_image_format(file, filename, &drv);
    }

    if (!drv) {
        goto unlink_and_fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, filename, options, flags, drv);
    if (ret < 0) {
        goto unlink_and_fail;
    }

    if (bs->file != file) {
        bdrv_delete(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        ret = bdrv_open_backing_file(bs);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    /* Check if any unknown options were used */
    if (qdict_size(options) != 0) {
        const QDictEntry *entry = qdict_first(options);
        qerror_report(ERROR_CLASS_GENERIC_ERROR, "Block format '%s' used by "
            "device '%s' doesn't support the option '%s'",
            drv->format_name, bs->device_name, entry->key);

        ret = -EINVAL;
        goto close_and_fail;
    }
    QDECREF(options);

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    }

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_enable(bs);
    }

    return 0;

unlink_and_fail:
    if (file != NULL) {
        bdrv_delete(file);
    }
    if (bs->is_temporary) {
        unlink(filename);
    }
fail:
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    return ret;

close_and_fail:
    bdrv_close(bs);
    QDECREF(options);
    return ret;
}
typedef struct BlockReopenQueueEntry {
     bool prepared;
     BDRVReopenState state;
     QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;

/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had
 * QSIMPLEQ_INIT already performed, or alternatively may be NULL, in which
 * case a new BlockReopenQueue will be created and initialized. This newly
 * created BlockReopenQueue should be passed back in for subsequent calls
 * that are intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, flags);
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}
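/* A caller that wants to change flags on a whole chain typically builds the
 * queue and then commits it in one step, e.g. (sketch):
 *
 *     queue = bdrv_reopen_queue(NULL, bs, new_flags);
 *     ret = bdrv_reopen_multiple(queue, &local_err);
 *
 * which is exactly what bdrv_reopen() below does for a single device. */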
/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags.  All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    bdrv_drain_all();

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

    ret = 0;

cleanup:
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}

/* Reopen a single BlockDriverState with the specified flags. */
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);

    ret = bdrv_reopen_multiple(queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
    return ret;
}
/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver layer .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error. On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  reopen_state->bs->device_name);
        goto error;
    }

    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_set(errp, QERR_OPEN_FILE_FAILED,
                          reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, reopen_state->bs->device_name,
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}
/*
 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
 * makes them final by swapping the staging BlockDriverState contents into
 * the active BlockDriverState contents.
 */
void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    /* If there are any driver level actions to take */
    if (drv->bdrv_reopen_commit) {
        drv->bdrv_reopen_commit(reopen_state);
    }

    /* set BDS specific flags now */
    reopen_state->bs->open_flags         = reopen_state->flags;
    reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
                                              BDRV_O_CACHE_WB);
    reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
}

/*
 * Abort the reopen, and delete and free the staged changes in
 * reopen_state
 */
void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    if (drv->bdrv_reopen_abort) {
        drv->bdrv_reopen_abort(reopen_state);
    }
}
void bdrv_close(BlockDriverState *bs)
{
    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all();
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        if (bs == bs_snapshots) {
            bs_snapshots = NULL;
        }
        if (bs->backing_hd) {
            bdrv_delete(bs->backing_hd);
            bs->backing_hd = NULL;
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
        if (bs->is_temporary) {
            unlink(bs->filename);
        }
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        QDECREF(bs->options);
        bs->options = NULL;

        if (bs->file != NULL) {
            bdrv_delete(bs->file);
            bs->file = NULL;
        }
    }

    bdrv_dev_change_media_cb(bs, false);

    /*throttling disk I/O limits*/
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}

void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_close(bs);
    }
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete.  Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    BlockDriverState *bs;
    bool busy;

    do {
        busy = qemu_aio_wait();

        /* FIXME: We do not have timer support here, so this is effectively
         * a busy wait.
         */
        QTAILQ_FOREACH(bs, &bdrv_states, list) {
            if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
                qemu_co_queue_restart_all(&bs->throttled_reqs);
                busy = true;
            }
        }
    } while (busy);

    /* If requests are still pending there is a bug somewhere */
    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        assert(QLIST_EMPTY(&bs->tracked_requests));
        assert(qemu_co_queue_empty(&bs->throttled_reqs));
    }
}

/* make a BlockDriverState anonymous by removing from bdrv_state list.
   Also, NULL terminate the device_name to prevent double remove */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, list);
    }
    bs->device_name[0] = '\0';
}
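/* Let the driver update any pointers it keeps to the BlockDriverState after
 * the contents of @bs have been swapped (used by bdrv_swap() below). */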
static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}

static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */
    bs_dest->open_flags         = bs_src->open_flags;

    /* dev info */
    bs_dest->dev_ops            = bs_src->dev_ops;
    bs_dest->dev_opaque         = bs_src->dev_opaque;
    bs_dest->dev                = bs_src->dev;
    bs_dest->buffer_alignment   = bs_src->buffer_alignment;
    bs_dest->copy_on_read       = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o timing parameters */
    bs_dest->slice_time         = bs_src->slice_time;
    bs_dest->slice_start        = bs_src->slice_start;
    bs_dest->slice_end          = bs_src->slice_end;
    bs_dest->io_limits          = bs_src->io_limits;
    bs_dest->io_base            = bs_src->io_base;
    bs_dest->throttled_reqs     = bs_src->throttled_reqs;
    bs_dest->block_timer        = bs_src->block_timer;
    bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error      = bs_src->on_read_error;
    bs_dest->on_write_error     = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
    bs_dest->iostatus           = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_bitmap       = bs_src->dirty_bitmap;

    /* job */
    bs_dest->in_use             = bs_src->in_use;
    bs_dest->job                = bs_src->job;

    /* keep the same entry in bdrv_states */
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->list = bs_src->list;
}
/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old. Both bs_new and bs_old are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* bs_new must be anonymous and shouldn't have anything fancy enabled */
    assert(bs_new->device_name[0] == '\0');
    assert(bs_new->dirty_bitmap == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->dev == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(bs_new->block_timer == NULL);

    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new shouldn't be in bdrv_states even after the swap!  */
    assert(bs_new->device_name[0] == '\0');

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->dev == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(bs_new->block_timer == NULL);

    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}

/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    bdrv_swap(bs_new, bs_top);

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    bs_top->backing_hd = bs_new;
    bs_top->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file),
            bs_new->filename);
    pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format),
            bs_new->drv ? bs_new->drv->format_name : "");
}
void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->in_use);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    bdrv_close(bs);

    assert(bs != bs_snapshots);
    g_free(bs);
}

int bdrv_attach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (bs->dev) {
        return -EBUSY;
    }
    bs->dev = dev;
    bdrv_iostatus_reset(bs);
    return 0;
}

/* TODO qdevified devices don't use this, remove when devices are qdevified */
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
{
    if (bdrv_attach_dev(bs, dev) < 0) {
        abort();
    }
}

void bdrv_detach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(bs->dev == dev);
    bs->dev = NULL;
    bs->dev_ops = NULL;
    bs->dev_opaque = NULL;
    bs->buffer_alignment = 512;
}

/* TODO change to return DeviceState * when all users are qdevified */
void *bdrv_get_attached_dev(BlockDriverState *bs)
{
    return bs->dev;
}

void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque)
{
    bs->dev_ops = ops;
    bs->dev_opaque = opaque;
    if (bdrv_dev_has_removable_media(bs) && bs == bs_snapshots) {
        bs_snapshots = NULL;
    }
}
void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
                               enum MonitorEvent ev,
                               BlockErrorAction action, bool is_read)
{
    QObject *data;
    const char *action_str;

    switch (action) {
    case BDRV_ACTION_REPORT:
        action_str = "report";
        break;
    case BDRV_ACTION_IGNORE:
        action_str = "ignore";
        break;
    case BDRV_ACTION_STOP:
        action_str = "stop";
        break;
    default:
        abort();
    }

    data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
                              bdrv->device_name,
                              action_str,
                              is_read ? "read" : "write");
    monitor_protocol_event(ev, data);

    qobject_decref(data);
}

static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
{
    QObject *data;

    data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
                              bdrv_get_device_name(bs), ejected);
    monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);

    qobject_decref(data);
}

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
{
    if (bs->dev_ops && bs->dev_ops->change_media_cb) {
        bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
        bs->dev_ops->change_media_cb(bs->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            bdrv_emit_qmp_eject_event(bs, true);
        }
        if (load) {
            /* tray close */
            bdrv_emit_qmp_eject_event(bs, false);
        }
    }
}
bool bdrv_dev_has_removable_media(BlockDriverState *bs)
{
    return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
}

void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
{
    if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
        bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
    }
}

bool bdrv_dev_is_tray_open(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_tray_open) {
        return bs->dev_ops->is_tray_open(bs->dev_opaque);
    }
    return false;
}

static void bdrv_dev_resize_cb(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->resize_cb) {
        bs->dev_ops->resize_cb(bs->dev_opaque);
    }
}

bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
        return bs->dev_ops->is_medium_locked(bs->dev_opaque);
    }
    return false;
}
/*
 * Run consistency checks on an image
 *
 * Returns 0 if the check could be completed (it doesn't mean that the image is
 * free of errors) or -errno when an internal error occurred. The results of the
 * check are stored in res.
 */
int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
{
    if (bs->drv->bdrv_check == NULL) {
        return -ENOTSUP;
    }

    memset(res, 0, sizeof(*res));
    return bs->drv->bdrv_check(bs, res, fix);
}
#define COMMIT_BUF_SECTORS 2048

/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    int64_t sector, total_sectors;
    int n, ro, open_flags;
    int ret = 0;
    uint8_t *buf;
    char filename[PATH_MAX];

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!bs->backing_hd) {
        return -ENOTSUP;
    }

    if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
        return -EBUSY;
    }

    ro = bs->backing_hd->read_only;
    /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
    pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
    open_flags = bs->backing_hd->open_flags;

    if (ro) {
        if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
            return -EACCES;
        }
    }

    total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);

    for (sector = 0; sector < total_sectors; sector += n) {
        if (bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n)) {
            if (bdrv_read(bs, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }
            if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }
        }
    }

    if (drv->bdrv_make_empty) {
        ret = drv->bdrv_make_empty(bs);
        bdrv_flush(bs);
    }

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    if (bs->backing_hd) {
        bdrv_flush(bs->backing_hd);
    }

ro_cleanup:
    g_free(buf);

    if (ro) {
        /* ignoring error return here */
        bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
    }

    return ret;
}
int bdrv_commit_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        if (bs->drv && bs->backing_hd) {
            int ret = bdrv_commit(bs);
            if (ret < 0) {
                return ret;
            }
        }
    }
    return 0;
}
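/* Descriptor for an in-flight request.  Requests are tracked so that
 * overlapping requests can be serialised (see wait_for_overlapping_requests()
 * below) and so that reentrant requests from the same coroutine, which would
 * deadlock, can be detected. */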
struct BdrvTrackedRequest {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    bool is_write;
    QLIST_ENTRY(BdrvTrackedRequest) list;
    Coroutine *co; /* owner, used for deadlock detection */
    CoQueue wait_queue; /* coroutines blocked on this request */
};

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t sector_num,
                                  int nb_sectors, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .is_write = is_write,
        .co = qemu_coroutine_self(),
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t sector_num, int nb_sectors) {
    if (sector_num >= req->sector_num + req->nb_sectors) {
        return false;
    }
    if (req->sector_num >= sector_num + nb_sectors) {
        return false;
    }
    return true;
}
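/* Block the calling coroutine until no tracked request overlaps the
 * cluster-aligned region around [sector_num, sector_num + nb_sectors). */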
static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    BdrvTrackedRequest *req;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    bool retry;

    /* If we touch the same cluster it counts as an overlap.  This guarantees
     * that allocating writes will be serialized and not race with each other
     * for the same cluster.  For example, in copy-on-read it ensures that the
     * CoR read and write operations are atomic and guest writes cannot
     * interleave between them.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (tracked_request_overlaps(req, cluster_sector_num,
                                         cluster_nb_sectors)) {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}
/*
 * Return values:
 * 0        - success
 * -EINVAL  - backing format specified, but no file
 * -ENOSPC  - can't update the backing file because no space is left in the
 *            image file header
 * -ENOTSUP - format driver doesn't support changing the backing file
 */
int bdrv_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    BlockDriver *drv = bs->drv;
    int ret;

    /* Backing file format doesn't make sense without a backing file */
    if (backing_fmt && !backing_file) {
        return -EINVAL;
    }

    if (drv->bdrv_change_backing_file != NULL) {
        ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
    } else {
        ret = -ENOTSUP;
    }

    if (ret == 0) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
        pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
    }
    return ret;
}
/*
 * Finds the image layer in the chain that has 'bs' as its backing file.
 *
 * active is the current topmost image.
 *
 * Returns NULL if bs is not found in active's image chain,
 * or if active == bs.
 */
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs)
{
    BlockDriverState *overlay = NULL;
    BlockDriverState *intermediate;

    assert(active != NULL);
    assert(bs != NULL);

    /* if bs is the same as active, then by definition it has no overlay
     */
    if (active == bs) {
        return NULL;
    }

    intermediate = active;
    while (intermediate->backing_hd) {
        if (intermediate->backing_hd == bs) {
            overlay = intermediate;
            break;
        }
        intermediate = intermediate->backing_hd;
    }

    return overlay;
}

typedef struct BlkIntermediateStates {
    BlockDriverState *bs;
    QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
} BlkIntermediateStates;
/*
 * Drops images above 'base' up to and including 'top', and sets the image
 * above 'top' to have base as its backing file.
 *
 * Requires that the overlay to 'top' is opened r/w, so that the backing file
 * information in 'bs' can be properly updated.
 *
 * E.g., this will convert the following chain:
 * bottom <- base <- intermediate <- top <- active
 *
 * to
 *
 * bottom <- base <- active
 *
 * It is allowed for bottom==base, in which case it converts:
 *
 * base <- intermediate <- top <- active
 *
 * to
 *
 * base <- active
 *
 * Error conditions:
 *  if active == top, that is considered an error
 *
 */
int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
                           BlockDriverState *base)
{
    BlockDriverState *intermediate;
    BlockDriverState *base_bs = NULL;
    BlockDriverState *new_top_bs = NULL;
    BlkIntermediateStates *intermediate_state, *next;
    int ret = -EIO;

    QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
    QSIMPLEQ_INIT(&states_to_delete);

    if (!top->drv || !base->drv) {
        goto exit;
    }

    new_top_bs = bdrv_find_overlay(active, top);

    if (new_top_bs == NULL) {
        /* we could not find the image above 'top', this is an error */
        goto exit;
    }

    /* special case of new_top_bs->backing_hd already pointing to base - nothing
     * to do, no intermediate images */
    if (new_top_bs->backing_hd == base) {
        ret = 0;
        goto exit;
    }

    intermediate = top;

    /* now we will go down through the list, and add each BDS we find
     * into our deletion queue, until we hit the 'base'
     */
    while (intermediate) {
        intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
        intermediate_state->bs = intermediate;
        QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);

        if (intermediate->backing_hd == base) {
            base_bs = intermediate->backing_hd;
            break;
        }
        intermediate = intermediate->backing_hd;
    }
    if (base_bs == NULL) {
        /* something went wrong, we did not end at the base. safely
         * unravel everything, and exit with error */
        goto exit;
    }

    /* success - we can delete the intermediate states, and link top->base */
    ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
                                   base_bs->drv ? base_bs->drv->format_name : "");
    if (ret) {
        goto exit;
    }
    new_top_bs->backing_hd = base_bs;

    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        /* so that bdrv_close() does not recursively close the chain */
        intermediate_state->bs->backing_hd = NULL;
        bdrv_delete(intermediate_state->bs);
    }
    ret = 0;

exit:
    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        g_free(intermediate_state);
    }
    return ret;
}
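/* Validate a byte-granularity request: reject it when no medium is inserted
 * or when the range lies outside the current device length. */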
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   int size)
{
    int64_t len;

    if (!bdrv_is_inserted(bs))
        return -ENOMEDIUM;

    len = bdrv_getlength(bs);

    if (offset < 0)
        return -EIO;

    if ((offset > len) || (len - offset < size))
        return -EIO;

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}
typedef struct RwCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
                                     rwco->nb_sectors, rwco->qiov, 0);
    } else {
        rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
                                      rwco->nb_sectors, rwco->qiov, 0);
    }
}
/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .qiov = &qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);

    /*
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }
    return rwco.ret;
}
2147 int bdrv_read(BlockDriverState
*bs
, int64_t sector_num
,
2148 uint8_t *buf
, int nb_sectors
)
2150 return bdrv_rw_co(bs
, sector_num
, buf
, nb_sectors
, false);
2153 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */
2154 int bdrv_read_unthrottled(BlockDriverState
*bs
, int64_t sector_num
,
2155 uint8_t *buf
, int nb_sectors
)
2160 enabled
= bs
->io_limits_enabled
;
2161 bs
->io_limits_enabled
= false;
2162 ret
= bdrv_read(bs
, 0, buf
, 1);
2163 bs
->io_limits_enabled
= enabled
;
2167 /* Return < 0 if error. Important errors are:
2168 -EIO generic I/O error (may happen for all errors)
2169 -ENOMEDIUM No media inserted.
2170 -EINVAL Invalid sector number or nb_sectors
2171 -EACCES Trying to write a read-only device
2173 int bdrv_write(BlockDriverState
*bs
, int64_t sector_num
,
2174 const uint8_t *buf
, int nb_sectors
)
2176 return bdrv_rw_co(bs
, sector_num
, (uint8_t *)buf
, nb_sectors
, true);
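/* Byte-granularity read on top of the sector-based bdrv_read(): unaligned
 * head and tail bytes go through a one-sector bounce buffer, the aligned
 * middle part is read directly into the caller's buffer. */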
int bdrv_pread(BlockDriverState *bs, int64_t offset,
               void *buf, int count1)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = count1;
    /* first read to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
        count -= len;
        if (count == 0)
            return count1;
        sector_num++;
        buf += len;
    }

    /* read the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
            return ret;
        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        buf += len;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf, count);
    }
    return count1;
}
int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int count1)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = count1;
    /* first write to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), buf, len);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        count -= len;
        if (count == 0)
            return count1;
        sector_num++;
        buf += len;
    }

    /* write the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        if ((ret = bdrv_write(bs, sector_num, buf, nb_sectors)) < 0)
            return ret;
        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        buf += len;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(tmp_buf, buf, count);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
    }
    return count1;
}
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
    const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that already do it */
    if (bs->enable_write_cache) {
        bdrv_flush(bs);
    }

    return 0;
}
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Handle a read request in coroutine context
 */
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    }

    /* throttling disk read I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, false, nb_sectors);
    }

    if (bs->copy_on_read) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }
    if (flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight++;
    }

    if (bs->copy_on_read_in_flight) {
        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors, false);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_co_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    tracked_request_end(&req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight--;
    }

    return ret;
}
int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}
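/* Write zeroes, preferring the driver's dedicated bdrv_co_write_zeroes()
 * callback and falling back to an ordinary write from a zeroed bounce buffer
 * when the driver does not implement it or returns -ENOTSUP. */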
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov;
    int ret;

    /* TODO Emulate only part of misaligned requests instead of letting block
     * drivers return -ENOTSUP and emulate everything */

    /* First try the efficient write zeroes operation */
    if (drv->bdrv_co_write_zeroes) {
        ret = drv->bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
        if (ret != -ENOTSUP) {
            return ret;
        }
    }

    /* Fall back to bounce buffer if write zeroes is unsupported */
    iov.iov_len  = nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);
    memset(iov.iov_base, 0, iov.iov_len);
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, &qiov);

    qemu_vfree(iov.iov_base);
    return ret;
}
/*
 * Handle a write request in coroutine context
 */
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EACCES;
    }
    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    }

    /* throttling disk write I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, true, nb_sectors);
    }

    if (bs->copy_on_read_in_flight) {
        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors, true);

    if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
    } else {
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    }

    if (ret == 0 && !bs->enable_write_cache) {
        ret = bdrv_co_flush(bs);
    }

    if (bs->dirty_bitmap) {
        bdrv_set_dirty(bs, sector_num, nb_sectors);
    }

    if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
        bs->wr_highest_sector = sector_num + nb_sectors - 1;
    }

    tracked_request_end(&req);

    return ret;
}
int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE);
}
/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 */
int bdrv_truncate(BlockDriverState *bs, int64_t offset)
{
    BlockDriver *drv = bs->drv;
    int ret;

    if (!drv->bdrv_truncate)
        return -ENOTSUP;
    if (bdrv_in_use(bs))
        return -EBUSY;
    ret = drv->bdrv_truncate(bs, offset);
    if (ret == 0) {
        ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
        bdrv_dev_resize_cb(bs);
    }
    return ret;
}

/**
 * Length of an allocated file in bytes. Sparse files are counted by actual
 * allocated space. Return < 0 if error or unknown.
 */
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (drv->bdrv_get_allocated_file_size) {
        return drv->bdrv_get_allocated_file_size(bs);
    }
    return bdrv_get_allocated_file_size(bs->file);
}

/**
 * Length of a file in bytes. Return < 0 if error or unknown.
 */
int64_t bdrv_getlength(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (bs->growable || bdrv_dev_has_removable_media(bs)) {
        if (drv->bdrv_getlength) {
            return drv->bdrv_getlength(bs);
        }
    }
    return bs->total_sectors * BDRV_SECTOR_SIZE;
}

/* return 0 as number of sectors if no device present or error */
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
{
    int64_t length;

    length = bdrv_getlength(bs);
    if (length < 0)
        length = 0;
    else
        length = length >> BDRV_SECTOR_BITS;
    *nb_sectors_ptr = length;
}
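
/* Sketch of typical use (the helper name is illustrative, not from this
 * file): device models usually size their view of the disk in 512-byte
 * sectors, so they either call bdrv_get_geometry() or derive the sector
 * count from bdrv_getlength(), which reports bytes (or < 0 on error). */
static uint64_t example_nb_sectors(BlockDriverState *bs)
{
    uint64_t nb_sectors;

    bdrv_get_geometry(bs, &nb_sectors);   /* 0 if no medium or on error */
    return nb_sectors;                    /* == bdrv_getlength(bs) >> BDRV_SECTOR_BITS */
}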
/* throttling disk io limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        BlockIOLimit *io_limits)
{
    bs->io_limits = *io_limits;
    bs->io_limits_enabled = bdrv_io_limits_enabled(bs);
}

void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
                       BlockdevOnError on_write_error)
{
    bs->on_read_error = on_read_error;
    bs->on_write_error = on_write_error;
}

BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
{
    return is_read ? bs->on_read_error : bs->on_write_error;
}

BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
{
    BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BDRV_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BDRV_ACTION_IGNORE;
    default:
        abort();
    }
}
/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
                       bool is_read, int error)
{
    bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
    if (action == BDRV_ACTION_STOP) {
        vm_stop(RUN_STATE_IO_ERROR);
        bdrv_iostatus_set_err(bs, error);
    }
}
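
/* Sketch of the intended calling pattern from a device model's completion
 * path (the function name below is hypothetical): ask the block layer which
 * policy applies, react locally, then report through bdrv_error_action(). */
static void example_handle_rw_error(BlockDriverState *bs, int error,
                                    bool is_read)
{
    BlockErrorAction action = bdrv_get_error_action(bs, is_read, error);

    if (action == BDRV_ACTION_REPORT) {
        /* the device would complete the request with an error status here */
    }
    /* emits QEVENT_BLOCK_IO_ERROR and, for BDRV_ACTION_STOP, pauses the VM */
    bdrv_error_action(bs, action, is_read, error);
}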
int bdrv_is_read_only(BlockDriverState *bs)
{
    return bs->read_only;
}

int bdrv_is_sg(BlockDriverState *bs)
{
    return bs->sg;
}

int bdrv_enable_write_cache(BlockDriverState *bs)
{
    return bs->enable_write_cache;
}

void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
{
    bs->enable_write_cache = wce;

    /* so a reopen() will preserve wce */
    if (wce) {
        bs->open_flags |= BDRV_O_CACHE_WB;
    } else {
        bs->open_flags &= ~BDRV_O_CACHE_WB;
    }
}

int bdrv_is_encrypted(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return 1;
    return bs->encrypted;
}

int bdrv_key_required(BlockDriverState *bs)
{
    BlockDriverState *backing_hd = bs->backing_hd;

    if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
        return 1;
    return (bs->encrypted && !bs->valid_key);
}

int bdrv_set_key(BlockDriverState *bs, const char *key)
{
    int ret;

    if (bs->backing_hd && bs->backing_hd->encrypted) {
        ret = bdrv_set_key(bs->backing_hd, key);
        if (ret < 0)
            return ret;
        if (!bs->encrypted)
            return 0;
    }
    if (!bs->encrypted) {
        return 0;
    } else if (!bs->drv || !bs->drv->bdrv_set_key) {
        return -1;
    }
    ret = bs->drv->bdrv_set_key(bs, key);
    if (ret < 0) {
        bs->valid_key = 0;
    } else if (!bs->valid_key) {
        bs->valid_key = 1;
        /* call the change callback now, we skipped it on open */
        bdrv_dev_change_media_cb(bs, true);
    }
    return ret;
}

const char *bdrv_get_format_name(BlockDriverState *bs)
{
    return bs->drv ? bs->drv->format_name : NULL;
}

void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque)
{
    BlockDriver *drv;

    QLIST_FOREACH(drv, &bdrv_drivers, list) {
        it(opaque, drv->format_name);
    }
}
BlockDriverState *bdrv_find(const char *name)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        if (!strcmp(name, bs->device_name)) {
            return bs;
        }
    }
    return NULL;
}

BlockDriverState *bdrv_next(BlockDriverState *bs)
{
    if (!bs) {
        return QTAILQ_FIRST(&bdrv_states);
    }
    return QTAILQ_NEXT(bs, list);
}

void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        it(opaque, bs);
    }
}

const char *bdrv_get_device_name(BlockDriverState *bs)
{
    return bs->device_name;
}

int bdrv_get_flags(BlockDriverState *bs)
{
    return bs->open_flags;
}

void bdrv_flush_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_flush(bs);
    }
}

int bdrv_has_zero_init(BlockDriverState *bs)
{
    if (bs->drv->bdrv_has_zero_init) {
        return bs->drv->bdrv_has_zero_init(bs);
    }

    return 0;
}

typedef struct BdrvCoIsAllocatedData {
    BlockDriverState *bs;
    BlockDriverState *base;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int ret;
    bool done;
} BdrvCoIsAllocatedData;
/*
 * Returns true iff the specified sector is present in the disk image. Drivers
 * not implementing the functionality are assumed to not support backing files,
 * hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 */
int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, int *pnum)
{
    int64_t n;

    if (sector_num >= bs->total_sectors) {
        *pnum = 0;
        return 0;
    }

    n = bs->total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_is_allocated) {
        *pnum = nb_sectors;
        return 1;
    }

    return bs->drv->bdrv_co_is_allocated(bs, sector_num, nb_sectors, pnum);
}

/* Coroutine wrapper for bdrv_is_allocated() */
static void coroutine_fn bdrv_is_allocated_co_entry(void *opaque)
{
    BdrvCoIsAllocatedData *data = opaque;
    BlockDriverState *bs = data->bs;

    data->ret = bdrv_co_is_allocated(bs, data->sector_num, data->nb_sectors,
                                     data->pnum);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_is_allocated().
 *
 * See bdrv_co_is_allocated() for details.
 */
int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
                      int *pnum)
{
    Coroutine *co;
    BdrvCoIsAllocatedData data = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    co = qemu_coroutine_create(bdrv_is_allocated_co_entry);
    qemu_coroutine_enter(co, &data);
    while (!data.done) {
        qemu_aio_wait();
    }
    return data.ret;
}
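
/* Sketch of a typical caller (illustrative only): walk an image and report
 * which sector ranges are allocated, letting 'pnum' drive the stride. */
static void example_dump_allocation(BlockDriverState *bs)
{
    int64_t sector = 0;
    int64_t total = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;

    while (sector < total) {
        int pnum;
        int nb = (total - sector > 65536) ? 65536 : (int)(total - sector);
        int allocated = bdrv_is_allocated(bs, sector, nb, &pnum);

        printf("%" PRId64 "..%" PRId64 ": %s\n",
               sector, sector + pnum - 1,
               allocated ? "allocated" : "unallocated");
        sector += pnum;
    }
}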
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive). BASE can be NULL to check if the given
 * sector is allocated in any image of the chain. Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 */
int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
                                            BlockDriverState *base,
                                            int64_t sector_num,
                                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;

        ret = bdrv_co_is_allocated(intermediate, sector_num, nb_sectors,
                                   &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         * [sector_num+x, nr_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = intermediate->backing_hd;
    }

    *pnum = n;
    return 0;
}

/* Coroutine wrapper for bdrv_is_allocated_above() */
static void coroutine_fn bdrv_is_allocated_above_co_entry(void *opaque)
{
    BdrvCoIsAllocatedData *data = opaque;
    BlockDriverState *top = data->bs;
    BlockDriverState *base = data->base;

    data->ret = bdrv_co_is_allocated_above(top, base, data->sector_num,
                                           data->nb_sectors, data->pnum);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_is_allocated_above().
 *
 * See bdrv_co_is_allocated_above() for details.
 */
int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
                            int64_t sector_num, int nb_sectors, int *pnum)
{
    Coroutine *co;
    BdrvCoIsAllocatedData data = {
        .bs = top,
        .base = base,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    co = qemu_coroutine_create(bdrv_is_allocated_above_co_entry);
    qemu_coroutine_enter(co, &data);
    while (!data.done) {
        qemu_aio_wait();
    }
    return data.ret;
}
BlockInfo *bdrv_query_info(BlockDriverState *bs)
{
    BlockInfo *info = g_malloc0(sizeof(*info));

    info->device = g_strdup(bs->device_name);
    info->type = g_strdup("unknown");
    info->locked = bdrv_dev_is_medium_locked(bs);
    info->removable = bdrv_dev_has_removable_media(bs);

    if (bdrv_dev_has_removable_media(bs)) {
        info->has_tray_open = true;
        info->tray_open = bdrv_dev_is_tray_open(bs);
    }

    if (bdrv_iostatus_is_enabled(bs)) {
        info->has_io_status = true;
        info->io_status = bs->iostatus;
    }

    if (bs->dirty_bitmap) {
        info->has_dirty = true;
        info->dirty = g_malloc0(sizeof(*info->dirty));
        info->dirty->count = bdrv_get_dirty_count(bs) * BDRV_SECTOR_SIZE;
        info->dirty->granularity =
            ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bs->dirty_bitmap));
    }

    if (bs->drv) {
        info->has_inserted = true;
        info->inserted = g_malloc0(sizeof(*info->inserted));
        info->inserted->file = g_strdup(bs->filename);
        info->inserted->ro = bs->read_only;
        info->inserted->drv = g_strdup(bs->drv->format_name);
        info->inserted->encrypted = bs->encrypted;
        info->inserted->encryption_key_missing = bdrv_key_required(bs);

        if (bs->backing_file[0]) {
            info->inserted->has_backing_file = true;
            info->inserted->backing_file = g_strdup(bs->backing_file);
        }

        info->inserted->backing_file_depth = bdrv_get_backing_file_depth(bs);

        if (bs->io_limits_enabled) {
            info->inserted->bps     = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
            info->inserted->bps_rd  = bs->io_limits.bps[BLOCK_IO_LIMIT_READ];
            info->inserted->bps_wr  = bs->io_limits.bps[BLOCK_IO_LIMIT_WRITE];
            info->inserted->iops    = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
            info->inserted->iops_rd = bs->io_limits.iops[BLOCK_IO_LIMIT_READ];
            info->inserted->iops_wr = bs->io_limits.iops[BLOCK_IO_LIMIT_WRITE];
        }
    }

    return info;
}
BlockInfoList *qmp_query_block(Error **errp)
{
    BlockInfoList *head = NULL, **p_next = &head;
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        BlockInfoList *info = g_malloc0(sizeof(*info));
        info->value = bdrv_query_info(bs);

        *p_next = info;
        p_next = &info->next;
    }

    return head;
}

BlockStats *bdrv_query_stats(const BlockDriverState *bs)
{
    BlockStats *s;

    s = g_malloc0(sizeof(*s));

    if (bs->device_name[0]) {
        s->has_device = true;
        s->device = g_strdup(bs->device_name);
    }

    s->stats = g_malloc0(sizeof(*s->stats));
    s->stats->rd_bytes = bs->nr_bytes[BDRV_ACCT_READ];
    s->stats->wr_bytes = bs->nr_bytes[BDRV_ACCT_WRITE];
    s->stats->rd_operations = bs->nr_ops[BDRV_ACCT_READ];
    s->stats->wr_operations = bs->nr_ops[BDRV_ACCT_WRITE];
    s->stats->wr_highest_offset = bs->wr_highest_sector * BDRV_SECTOR_SIZE;
    s->stats->flush_operations = bs->nr_ops[BDRV_ACCT_FLUSH];
    s->stats->wr_total_time_ns = bs->total_time_ns[BDRV_ACCT_WRITE];
    s->stats->rd_total_time_ns = bs->total_time_ns[BDRV_ACCT_READ];
    s->stats->flush_total_time_ns = bs->total_time_ns[BDRV_ACCT_FLUSH];

    if (bs->file) {
        s->has_parent = true;
        s->parent = bdrv_query_stats(bs->file);
    }

    return s;
}

BlockStatsList *qmp_query_blockstats(Error **errp)
{
    BlockStatsList *head = NULL, **p_next = &head;
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        BlockStatsList *info = g_malloc0(sizeof(*info));
        info->value = bdrv_query_stats(bs);

        *p_next = info;
        p_next = &info->next;
    }

    return head;
}
const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return bs->backing_file;
    else if (bs->encrypted)
        return bs->filename;
    else
        return NULL;
}

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size)
{
    pstrcpy(filename, filename_size, bs->backing_file);
}

int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;

    if (!drv->bdrv_write_compressed)
        return -ENOTSUP;
    if (bdrv_check_request(bs, sector_num, nb_sectors))
        return -EIO;

    assert(!bs->dirty_bitmap);

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}

int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BlockDriver *drv = bs->drv;

    if (!drv->bdrv_get_info)
        return -ENOTSUP;
    memset(bdi, 0, sizeof(*bdi));
    return drv->bdrv_get_info(bs, bdi);
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;

    if (drv->bdrv_save_vmstate)
        return drv->bdrv_save_vmstate(bs, buf, pos, size);

    return bdrv_save_vmstate(bs->file, buf, pos, size);
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;

    if (drv->bdrv_load_vmstate)
        return drv->bdrv_load_vmstate(bs, buf, pos, size);

    return bdrv_load_vmstate(bs->file, buf, pos, size);
}
void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
{
    BlockDriver *drv = bs->drv;

    if (!drv || !drv->bdrv_debug_event) {
        return;
    }

    drv->bdrv_debug_event(bs, event);
}

int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
        return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
    }

    return -ENOTSUP;
}

int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_resume) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
        return bs->drv->bdrv_debug_resume(bs, tag);
    }

    return -ENOTSUP;
}

bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
        return bs->drv->bdrv_debug_is_suspended(bs, tag);
    }

    return false;
}
/**************************************************************/
/* handling of snapshots */

int bdrv_can_snapshot(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    if (!drv->bdrv_snapshot_create) {
        if (bs->file != NULL) {
            return bdrv_can_snapshot(bs->file);
        }
        return 0;
    }

    return 1;
}

int bdrv_is_snapshot(BlockDriverState *bs)
{
    return !!(bs->open_flags & BDRV_O_SNAPSHOT);
}

BlockDriverState *bdrv_snapshots(void)
{
    BlockDriverState *bs;

    if (bs_snapshots) {
        return bs_snapshots;
    }

    bs = NULL;
    while ((bs = bdrv_next(bs))) {
        if (bdrv_can_snapshot(bs)) {
            bs_snapshots = bs;
            return bs;
        }
    }
    return NULL;
}
int bdrv_snapshot_create(BlockDriverState *bs,
                         QEMUSnapshotInfo *sn_info)
{
    BlockDriver *drv = bs->drv;

    if (drv->bdrv_snapshot_create)
        return drv->bdrv_snapshot_create(bs, sn_info);
    if (bs->file)
        return bdrv_snapshot_create(bs->file, sn_info);
    return -ENOTSUP;
}

int bdrv_snapshot_goto(BlockDriverState *bs,
                       const char *snapshot_id)
{
    BlockDriver *drv = bs->drv;
    int ret, open_ret;

    if (drv->bdrv_snapshot_goto)
        return drv->bdrv_snapshot_goto(bs, snapshot_id);

    if (bs->file) {
        drv->bdrv_close(bs);
        ret = bdrv_snapshot_goto(bs->file, snapshot_id);
        open_ret = drv->bdrv_open(bs, NULL, bs->open_flags);
        if (open_ret < 0) {
            bdrv_delete(bs->file);
            bs->drv = NULL;
            return open_ret;
        }
        return ret;
    }

    return -ENOTSUP;
}

int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
{
    BlockDriver *drv = bs->drv;

    if (drv->bdrv_snapshot_delete)
        return drv->bdrv_snapshot_delete(bs, snapshot_id);
    if (bs->file)
        return bdrv_snapshot_delete(bs->file, snapshot_id);
    return -ENOTSUP;
}

int bdrv_snapshot_list(BlockDriverState *bs,
                       QEMUSnapshotInfo **psn_info)
{
    BlockDriver *drv = bs->drv;

    if (drv->bdrv_snapshot_list)
        return drv->bdrv_snapshot_list(bs, psn_info);
    if (bs->file)
        return bdrv_snapshot_list(bs->file, psn_info);
    return -ENOTSUP;
}

int bdrv_snapshot_load_tmp(BlockDriverState *bs,
                           const char *snapshot_name)
{
    BlockDriver *drv = bs->drv;

    if (!bs->read_only) {
        return -EINVAL;
    }

    if (drv->bdrv_snapshot_load_tmp) {
        return drv->bdrv_snapshot_load_tmp(bs, snapshot_name);
    }
    return -ENOTSUP;
}
/* backing_file can either be relative, or absolute, or a protocol. If it is
 * relative, it must be relative to the chain. So, passing in bs->filename
 * from a BDS as backing_file should not be done, as that may be relative to
 * the CWD rather than the chain. */
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
                                          const char *backing_file)
{
    char *filename_full = NULL;
    char *backing_file_full = NULL;
    char *filename_tmp = NULL;
    int is_protocol = 0;
    BlockDriverState *curr_bs = NULL;
    BlockDriverState *retval = NULL;

    if (!bs || !bs->drv || !backing_file) {
        return NULL;
    }

    filename_full     = g_malloc(PATH_MAX);
    backing_file_full = g_malloc(PATH_MAX);
    filename_tmp      = g_malloc(PATH_MAX);

    is_protocol = path_has_protocol(backing_file);

    for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {

        /* If either of the filename paths is actually a protocol, then
         * compare unmodified paths; otherwise make paths relative */
        if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
            if (strcmp(backing_file, curr_bs->backing_file) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        } else {
            /* If not an absolute filename path, make it relative to the current
             * image's filename path */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         backing_file);

            /* We are going to compare absolute pathnames */
            if (!realpath(filename_tmp, filename_full)) {
                continue;
            }

            /* We need to make sure the backing filename we are comparing against
             * is relative to the current image filename (or absolute) */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         curr_bs->backing_file);

            if (!realpath(filename_tmp, backing_file_full)) {
                continue;
            }

            if (strcmp(backing_file_full, filename_full) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        }
    }

    g_free(filename_full);
    g_free(backing_file_full);
    g_free(filename_tmp);
    return retval;
}
int bdrv_get_backing_file_depth(BlockDriverState *bs)
{
    if (!bs->backing_hd) {
        return 0;
    }

    return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
}

BlockDriverState *bdrv_find_base(BlockDriverState *bs)
{
    BlockDriverState *curr_bs = NULL;

    if (!bs) {
        return NULL;
    }

    curr_bs = bs;

    while (curr_bs->backing_hd) {
        curr_bs = curr_bs->backing_hd;
    }
    return curr_bs;
}
#define NB_SUFFIXES 4

char *get_human_readable_size(char *buf, int buf_size, int64_t size)
{
    static const char suffixes[NB_SUFFIXES] = "KMGT";
    int64_t base;
    int i;

    if (size <= 999) {
        snprintf(buf, buf_size, "%" PRId64, size);
    } else {
        base = 1024;
        for (i = 0; i < NB_SUFFIXES; i++) {
            if (size < (10 * base)) {
                snprintf(buf, buf_size, "%0.1f%c",
                         (double)size / base,
                         suffixes[i]);
                break;
            } else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) {
                snprintf(buf, buf_size, "%" PRId64 "%c",
                         ((size + (base >> 1)) / base),
                         suffixes[i]);
                break;
            }
            base = base * 1024;
        }
    }
    return buf;
}
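
/* Worked example (informal): with the scheme above, 512 stays "512",
 * 1536 takes the "< 10 * base" branch and prints as "1.5K", 500000 takes
 * the rounded integer branch and prints as "488K", and 1048576 prints as
 * "1.0M".  Typical use:
 *
 *     char buf[16];
 *     get_human_readable_size(buf, sizeof(buf), 1536);    // "1.5K"
 */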
char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
{
    char buf1[128], date_buf[128], clock_buf[128];
    struct tm tm;
    time_t ti;
    int64_t secs;

    if (!sn) {
        snprintf(buf, buf_size,
                 "%-10s%-20s%7s%20s%15s",
                 "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
    } else {
        ti = sn->date_sec;
        localtime_r(&ti, &tm);
        strftime(date_buf, sizeof(date_buf),
                 "%Y-%m-%d %H:%M:%S", &tm);
        secs = sn->vm_clock_nsec / 1000000000;
        snprintf(clock_buf, sizeof(clock_buf),
                 "%02d:%02d:%02d.%03d",
                 (int)(secs / 3600),
                 (int)((secs / 60) % 60),
                 (int)(secs % 60),
                 (int)((sn->vm_clock_nsec / 1000000) % 1000));
        snprintf(buf, buf_size,
                 "%-10s%-20s%7s%20s%15s",
                 sn->id_str, sn->name,
                 get_human_readable_size(buf1, sizeof(buf1), sn->vm_state_size),
                 date_buf,
                 clock_buf);
    }
    return buf;
}
/**************************************************************/
/* async I/Os */

BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
                                 cb, opaque, false);
}

BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                  QEMUIOVector *qiov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
                                 cb, opaque, true);
}
typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockDriverCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}

static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}

static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}
/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
    int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We shouldn't need to add any zeros between the two requests
            assert(reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov = reqs[i].qiov;
        }
    }

    return outidx + 1;
}
/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. On error, this function returns -1, and any of the
 * requests may or may not be submitted yet. In particular, this means that the
 * callback will be called for some of the requests, for others it won't. The
 * caller must check the error field of the BlockRequest to wait for the right
 * callbacks (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergeable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
                        reqs[i].nb_sectors, multiwrite_cb, mcb);
    }

    return 0;
}
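
/* Sketch of a caller (illustrative; virtio-blk is the real user of this API):
 * fill one BlockRequest per guest write, then hand the whole batch to
 * bdrv_aio_multiwrite() so adjacent requests can be merged.  The helper name
 * is hypothetical. */
static int example_submit_batch(BlockDriverState *bs,
                                QEMUIOVector *qiovs[], int64_t sectors[],
                                int nb_sectors[], int n,
                                BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockRequest reqs[n];
    int i;

    for (i = 0; i < n; i++) {
        reqs[i].sector     = sectors[i];
        reqs[i].nb_sectors = nb_sectors[i];
        reqs[i].qiov       = qiovs[i];
        reqs[i].cb         = cb;
        reqs[i].opaque     = opaque;
        reqs[i].error      = 0;
    }

    /* returns -1 if the batch could not be submitted; reqs[i].error then
     * tells the caller which requests will not get a callback */
    return bdrv_aio_multiwrite(bs, reqs, n);
}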
void bdrv_aio_cancel(BlockDriverAIOCB *acb)
{
    acb->aiocb_info->cancel(acb);
}
/* block I/O throttling */
static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
                 bool is_write, double elapsed_time, uint64_t *wait)
{
    uint64_t bps_limit = 0;
    double   bytes_limit, bytes_base, bytes_res;
    double   slice_time, wait_time;

    if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
        bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
    } else if (bs->io_limits.bps[is_write]) {
        bps_limit = bs->io_limits.bps[is_write];
    } else {
        if (wait) {
            *wait = 0;
        }

        return false;
    }

    slice_time = bs->slice_end - bs->slice_start;
    slice_time /= (NANOSECONDS_PER_SECOND);
    bytes_limit = bps_limit * slice_time;
    bytes_base  = bs->nr_bytes[is_write] - bs->io_base.bytes[is_write];
    if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
        bytes_base += bs->nr_bytes[!is_write] - bs->io_base.bytes[!is_write];
    }

    /* bytes_base: the bytes of data which have been read/written; and
     *             it is obtained from the history statistic info.
     * bytes_res: the remaining bytes of data which need to be read/written.
     * (bytes_base + bytes_res) / bps_limit: used to calculate
     *             the total time for completing reading/writing all data.
     */
    bytes_res = (unsigned) nb_sectors * BDRV_SECTOR_SIZE;

    if (bytes_base + bytes_res <= bytes_limit) {
        if (wait) {
            *wait = 0;
        }

        return false;
    }

    /* Calc approx time to dispatch */
    wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time;

    /* When the I/O rate at runtime exceeds the limits,
     * bs->slice_end needs to be extended in order that the current statistic
     * info can be kept until the timer fires, so it is increased and tuned
     * based on the result of experiment.
     */
    bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
    bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
    if (wait) {
        *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
    }

    return true;
}
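
/* Worked example (informal): with bps_limit = 10 MB/s and a 100 ms slice,
 * bytes_limit is 1 MB.  If 900 KB have already been accounted to this slice
 * (bytes_base) and the current request adds 200 KB (bytes_res), then
 * 1.1 MB > 1 MB, so the function reports "exceeded" and computes
 * wait_time = (1.1 MB / 10 MB/s) - elapsed_time before the request may run. */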
static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
                             double elapsed_time, uint64_t *wait)
{
    uint64_t iops_limit = 0;
    double   ios_limit, ios_base;
    double   slice_time, wait_time;

    if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
        iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
    } else if (bs->io_limits.iops[is_write]) {
        iops_limit = bs->io_limits.iops[is_write];
    } else {
        if (wait) {
            *wait = 0;
        }

        return false;
    }

    slice_time = bs->slice_end - bs->slice_start;
    slice_time /= (NANOSECONDS_PER_SECOND);
    ios_limit  = iops_limit * slice_time;
    ios_base   = bs->nr_ops[is_write] - bs->io_base.ios[is_write];
    if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
        ios_base += bs->nr_ops[!is_write] - bs->io_base.ios[!is_write];
    }

    if (ios_base + 1 <= ios_limit) {
        if (wait) {
            *wait = 0;
        }

        return false;
    }

    /* Calc approx time to dispatch */
    wait_time = (ios_base + 1) / iops_limit;
    if (wait_time > elapsed_time) {
        wait_time = wait_time - elapsed_time;
    } else {
        wait_time = 0;
    }

    bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
    bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
    if (wait) {
        *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
    }

    return true;
}
static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
                           bool is_write, int64_t *wait)
{
    int64_t  now, max_wait;
    uint64_t bps_wait = 0, iops_wait = 0;
    double   elapsed_time;
    int      bps_ret, iops_ret;

    now = qemu_get_clock_ns(vm_clock);
    if ((bs->slice_start < now)
        && (bs->slice_end > now)) {
        bs->slice_end = now + bs->slice_time;
    } else {
        bs->slice_time  = 5 * BLOCK_IO_SLICE_TIME;
        bs->slice_start = now;
        bs->slice_end   = now + bs->slice_time;

        bs->io_base.bytes[is_write]  = bs->nr_bytes[is_write];
        bs->io_base.bytes[!is_write] = bs->nr_bytes[!is_write];

        bs->io_base.ios[is_write]    = bs->nr_ops[is_write];
        bs->io_base.ios[!is_write]   = bs->nr_ops[!is_write];
    }

    elapsed_time  = now - bs->slice_start;
    elapsed_time  /= (NANOSECONDS_PER_SECOND);

    bps_ret  = bdrv_exceed_bps_limits(bs, nb_sectors,
                                      is_write, elapsed_time, &bps_wait);
    iops_ret = bdrv_exceed_iops_limits(bs, is_write,
                                       elapsed_time, &iops_wait);
    if (bps_ret || iops_ret) {
        max_wait = bps_wait > iops_wait ? bps_wait : iops_wait;
        if (wait) {
            *wait = max_wait;
        }

        now = qemu_get_clock_ns(vm_clock);
        if (bs->slice_end < now + max_wait) {
            bs->slice_end = now + max_wait;
        }

        return true;
    }

    if (wait) {
        *wait = 0;
    }

    return false;
}
/**************************************************************/
/* async block device emulation */

typedef struct BlockDriverAIOCBSync {
    BlockDriverAIOCB common;
    QEMUBH *bh;
    int ret;
    /* vector translation state */
    QEMUIOVector *qiov;
    uint8_t *bounce;
    int is_write;
} BlockDriverAIOCBSync;

static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBSync *acb =
        container_of(blockacb, BlockDriverAIOCBSync, common);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static const AIOCBInfo bdrv_em_aiocb_info = {
    .aiocb_size         = sizeof(BlockDriverAIOCBSync),
    .cancel             = bdrv_aio_cancel_em,
};

static void bdrv_aio_bh_cb(void *opaque)
{
    BlockDriverAIOCBSync *acb = opaque;

    if (!acb->is_write)
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov,
                                            int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque,
                                            int is_write)
{
    BlockDriverAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    acb->bounce = qemu_blockalign(bs, qiov->size);
    acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);

    if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    qemu_bh_schedule(acb->bh);

    return &acb->common;
}
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
    int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
    BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
    int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
    BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}

typedef struct BlockDriverAIOCBCoroutine {
    BlockDriverAIOCB common;
    BlockRequest req;
    bool is_write;
    QEMUBH *bh;
} BlockDriverAIOCBCoroutine;

static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBCoroutine *acb =
        container_of(blockacb, BlockDriverAIOCBCoroutine, common);
}

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockDriverAIOCBCoroutine),
    .cancel             = bdrv_aio_co_cancel_em,
};
static void bdrv_co_em_bh(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;

    acb->common.cb(acb->common.opaque, acb->req.error);

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);
}

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, 0);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, 0);
    }

    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->is_write = is_write;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}

static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;

    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
void bdrv_init(void)
{
    module_call_init(MODULE_INIT_BLOCK);
}

void bdrv_init_with_whitelist(void)
{
    use_bdrv_whitelist = 1;
    bdrv_init();
}

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriverAIOCB *acb;

    acb = g_slice_alloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    return acb;
}

void qemu_aio_release(void *p)
{
    BlockDriverAIOCB *acb = p;
    g_slice_free1(acb->aiocb_info->aiocb_size, acb);
}
/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}

static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockDriverAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}

static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}
static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and don't support bdrv_flush therefore. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        return ret;
    }

    /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    return bdrv_co_flush(bs->file);
}
void bdrv_invalidate_cache(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_invalidate_cache) {
        bs->drv->bdrv_invalidate_cache(bs);
    }
}

void bdrv_invalidate_cache_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_invalidate_cache(bs);
    }
}

void bdrv_clear_incoming_migration_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
    }
}

int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}

int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    if (!bs->drv) {
        return -ENOMEDIUM;
    } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    } else if (bs->read_only) {
        return -EROFS;
    }

    if (bs->dirty_bitmap) {
        bdrv_reset_dirty(bs, sector_num, nb_sectors);
    }

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (bs->drv->bdrv_co_discard) {
        return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors);
    } else if (bs->drv->bdrv_aio_discard) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
                                        bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    } else {
        return 0;
    }
}

int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
/**************************************************************/
/* removable device support */

/**
 * Return TRUE if the media is present
 */
int bdrv_is_inserted(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (!drv)
        return 0;
    if (!drv->bdrv_is_inserted)
        return 1;
    return drv->bdrv_is_inserted(bs);
}

/**
 * Return whether the media changed since the last call to this
 * function, or -ENOTSUP if we don't know. Most drivers don't know.
 */
int bdrv_media_changed(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_media_changed) {
        return drv->bdrv_media_changed(bs);
    }
    return -ENOTSUP;
}

/**
 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
 */
void bdrv_eject(BlockDriverState *bs, bool eject_flag)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_eject) {
        drv->bdrv_eject(bs, eject_flag);
    }

    if (bs->device_name[0] != '\0') {
        bdrv_emit_qmp_eject_event(bs, eject_flag);
    }
}

/**
 * Lock or unlock the media (if it is locked, the user won't be able
 * to eject it manually).
 */
void bdrv_lock_medium(BlockDriverState *bs, bool locked)
{
    BlockDriver *drv = bs->drv;

    trace_bdrv_lock_medium(bs, locked);

    if (drv && drv->bdrv_lock_medium) {
        drv->bdrv_lock_medium(bs, locked);
    }
}
/* needed for generic scsi interface */

int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_ioctl)
        return drv->bdrv_ioctl(bs, req, buf);
    return -ENOTSUP;
}

BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_aio_ioctl)
        return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
    return NULL;
}
void bdrv_set_buffer_alignment(BlockDriverState *bs, int align)
{
    bs->buffer_alignment = align;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign((bs && bs->buffer_alignment) ? bs->buffer_alignment : 512, size);
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % bs->buffer_alignment) {
            return false;
        }
    }

    return true;
}
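
/* Sketch of the intended usage (illustrative; the helper name is made up):
 * drivers opened with cache=none may require buffers aligned to
 * bs->buffer_alignment, so callers either allocate with qemu_blockalign()
 * or check an existing vector with bdrv_qiov_is_aligned() and bounce it. */
static void *example_get_io_buffer(BlockDriverState *bs, QEMUIOVector *qiov)
{
    if (bdrv_qiov_is_aligned(bs, qiov)) {
        return NULL;                     /* guest buffers can be used directly */
    }
    /* otherwise allocate a suitably aligned bounce buffer of the same size */
    return qemu_blockalign(bs, qiov->size);
}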
void bdrv_set_dirty_tracking(BlockDriverState *bs, int granularity)
{
    int64_t bitmap_size;

    assert((granularity & (granularity - 1)) == 0);

    if (granularity) {
        granularity >>= BDRV_SECTOR_BITS;
        assert(!bs->dirty_bitmap);
        bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS);
        bs->dirty_bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
    } else {
        if (bs->dirty_bitmap) {
            hbitmap_free(bs->dirty_bitmap);
            bs->dirty_bitmap = NULL;
        }
    }
}

int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
{
    if (bs->dirty_bitmap) {
        return hbitmap_get(bs->dirty_bitmap, sector);
    } else {
        return 0;
    }
}

void bdrv_dirty_iter_init(BlockDriverState *bs, HBitmapIter *hbi)
{
    hbitmap_iter_init(hbi, bs->dirty_bitmap, 0);
}

void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
                    int nr_sectors)
{
    hbitmap_set(bs->dirty_bitmap, cur_sector, nr_sectors);
}

void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
                      int nr_sectors)
{
    hbitmap_reset(bs->dirty_bitmap, cur_sector, nr_sectors);
}

int64_t bdrv_get_dirty_count(BlockDriverState *bs)
{
    if (bs->dirty_bitmap) {
        return hbitmap_count(bs->dirty_bitmap);
    } else {
        return 0;
    }
}
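
/* Sketch (illustrative, not from this file): how a block job such as mirror
 * consumes the dirty bitmap - enable tracking at a power-of-two granularity,
 * then walk dirty sectors with an HBitmapIter. */
static void example_walk_dirty(BlockDriverState *bs)
{
    HBitmapIter hbi;
    int64_t sector;

    bdrv_set_dirty_tracking(bs, 65536);          /* 64 KB granularity */
    /* ... guest writes mark sectors dirty via bdrv_set_dirty() ... */

    bdrv_dirty_iter_init(bs, &hbi);
    while ((sector = hbitmap_iter_next(&hbi)) >= 0) {
        /* copy out this sector, then clear its dirty bit */
        bdrv_reset_dirty(bs, sector, 1);
    }

    bdrv_set_dirty_tracking(bs, 0);              /* drop the bitmap */
}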
void bdrv_set_in_use(BlockDriverState *bs, int in_use)
{
    assert(bs->in_use != in_use);
    bs->in_use = in_use;
}

int bdrv_in_use(BlockDriverState *bs)
{
    return bs->in_use;
}

void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
{
    return (bs->iostatus_enabled &&
           (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            bs->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}

void bdrv_iostatus_reset(BlockDriverState *bs)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
{
    assert(bdrv_iostatus_is_enabled(bs));
    if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                         BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
void
bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
        enum BlockAcctType type)
{
    assert(type < BDRV_MAX_IOTYPE);

    cookie->bytes = bytes;
    cookie->start_time_ns = get_clock();
    cookie->type = type;
}

void
bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
{
    assert(cookie->type < BDRV_MAX_IOTYPE);

    bs->nr_bytes[cookie->type] += cookie->bytes;
    bs->nr_ops[cookie->type]++;
    bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
}
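
/* Sketch (illustrative): device models bracket each request with the
 * accounting helpers so that query-blockstats adds up.  The struct and
 * function names below are hypothetical. */
typedef struct ExampleReq {
    BlockDriverState *bs;
    BlockAcctCookie acct;
} ExampleReq;

static void example_write_done(void *opaque, int ret)
{
    ExampleReq *req = opaque;

    bdrv_acct_done(req->bs, &req->acct);   /* updates nr_bytes/nr_ops/time */
    g_free(req);
}

static void example_submit_write(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors)
{
    ExampleReq *req = g_new0(ExampleReq, 1);

    req->bs = bs;
    bdrv_acct_start(bs, &req->acct, nb_sectors * BDRV_SECTOR_SIZE,
                    BDRV_ACCT_WRITE);
    bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
                    example_write_done, req);
}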
void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     Error **errp, bool quiet)
{
    QEMUOptionParameter *param = NULL, *create_options = NULL;
    QEMUOptionParameter *backing_fmt, *backing_file, *size;
    BlockDriverState *bs = NULL;
    BlockDriver *drv, *proto_drv;
    BlockDriver *backing_drv = NULL;
    int ret = 0;

    /* Find driver and parse its options */
    drv = bdrv_find_format(fmt);
    if (!drv) {
        error_setg(errp, "Unknown file format '%s'", fmt);
        return;
    }

    proto_drv = bdrv_find_protocol(filename);
    if (!proto_drv) {
        error_setg(errp, "Unknown protocol '%s'", filename);
        return;
    }

    create_options = append_option_parameters(create_options,
                                              drv->create_options);
    create_options = append_option_parameters(create_options,
                                              proto_drv->create_options);

    /* Create parameter list with default values */
    param = parse_option_parameters("", create_options, param);

    set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);

    /* Parse -o options */
    if (options) {
        param = parse_option_parameters(options, create_options, param);
        if (param == NULL) {
            error_setg(errp, "Invalid options for file format '%s'.", fmt);
            goto out;
        }
    }

    if (base_filename) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
                                 base_filename)) {
            error_setg(errp, "Backing file not supported for file format '%s'",
                       fmt);
            goto out;
        }
    }

    if (base_fmt) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
            error_setg(errp, "Backing file format not supported for file "
                             "format '%s'", fmt);
            goto out;
        }
    }

    backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
    if (backing_file && backing_file->value.s) {
        if (!strcmp(filename, backing_file->value.s)) {
            error_setg(errp, "Error: Trying to create an image with the "
                             "same filename as the backing file");
            goto out;
        }
    }

    backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
    if (backing_fmt && backing_fmt->value.s) {
        backing_drv = bdrv_find_format(backing_fmt->value.s);
        if (!backing_drv) {
            error_setg(errp, "Unknown backing file format '%s'",
                       backing_fmt->value.s);
            goto out;
        }
    }

    // The size for the image must always be specified, with one exception:
    // If we are using a backing file, we can obtain the size from there
    size = get_option_parameter(param, BLOCK_OPT_SIZE);
    if (size && size->value.n == -1) {
        if (backing_file && backing_file->value.s) {
            uint64_t size;
            char buf[32];
            int back_flags;

            /* backing files always opened read-only */
            back_flags =
                flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

            bs = bdrv_new("");

            ret = bdrv_open(bs, backing_file->value.s, NULL, back_flags,
                            backing_drv);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Could not open '%s'",
                                 backing_file->value.s);
                goto out;
            }
            bdrv_get_geometry(bs, &size);
            size *= 512;

            snprintf(buf, sizeof(buf), "%" PRId64, size);
            set_option_parameter(param, BLOCK_OPT_SIZE, buf);
        } else {
            error_setg(errp, "Image creation needs a size parameter");
            goto out;
        }
    }

    if (!quiet) {
        printf("Formatting '%s', fmt=%s ", filename, fmt);
        print_option_parameters(param);
        puts("");
    }

    ret = bdrv_create(drv, filename, param);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            error_setg(errp, "Formatting or formatting option not supported "
                             "for file format '%s'", fmt);
        } else if (ret == -EFBIG) {
            error_setg(errp, "The image size is too large for file format '%s'",
                       fmt);
        } else {
            error_setg(errp, "%s: error while creating %s: %s", filename, fmt,
                       strerror(-ret));
        }
    }

out:
    free_option_parameters(create_options);
    free_option_parameters(param);

    if (bs) {
        bdrv_delete(bs);
    }
}
AioContext *bdrv_get_aio_context(BlockDriverState *bs)
{
    /* Currently BlockDriverState always uses the main loop AioContext */
    return qemu_get_aio_context();
}