/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor/monitor.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "block/qapi.h"
#include "qmp-commands.h"
#include "qemu/timer.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif
struct BdrvDirtyBitmap {
    HBitmap *bitmap;
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);
/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;

#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif
/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* this function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}
static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}
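/*
 * Illustrative sketch (hypothetical caller, not part of this file): per the
 * comment above, throttling is enabled before a configuration is installed.
 * The ThrottleConfig field names below are assumptions about the throttle
 * API, not something this file defines:
 *
 *     ThrottleConfig cfg;
 *     memset(&cfg, 0, sizeof(cfg));
 *     cfg.buckets[THROTTLE_BPS_TOTAL].avg = 1024 * 1024;   // assumed fields
 *     bdrv_io_limits_enable(bs);
 *     bdrv_set_io_limits(bs, &cfg);
 */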
/* This function makes an I/O wait if needed
 *
 * @bytes:      the number of bytes of this I/O
 * @is_write:   is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* must this I/O wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is already throttled,
     * queue this I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);

    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}
size_t bdrv_opt_mem_align(BlockDriverState *bs)
{
    if (!bs || !bs->drv) {
        /* 4k should be on the safe side */
        return 4096;
    }

    return bs->bl.opt_mem_alignment;
}

/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}

int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}
/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
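/*
 * Worked example for the function above: with base_path =
 * "/images/base.qcow2" and filename = "backing.raw", everything up to and
 * including the last '/' of base_path is kept, so dest becomes
 * "/images/backing.raw". An absolute filename such as "/tmp/backing.raw"
 * is copied to dest unchanged.
 */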
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}

void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name)
{
    BlockDriverState *bs;

    bs = g_malloc0(sizeof(BlockDriverState));
    QLIST_INIT(&bs->dirty_bitmaps);
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;

    return bs;
}

void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}
static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1;               /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}
typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QEMUOptionParameter *options;
    int ret;
    Error *err;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->options, &local_err);
    if (local_err) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}

int bdrv_create(BlockDriver *drv, const char* filename,
                QEMUOptionParameter *options, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .options = options,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (cco.err) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}
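/*
 * Illustrative sketch (hypothetical caller, not part of this file): creating
 * a 1 GiB qcow2 image with the option-parameter helpers used elsewhere in
 * this file (see the snapshot=on path in bdrv_open() below):
 *
 *     Error *err = NULL;
 *     BlockDriver *drv = bdrv_find_format("qcow2");
 *     QEMUOptionParameter *opts =
 *         parse_option_parameters("", drv->create_options, NULL);
 *     set_option_parameter_int(opts, BLOCK_OPT_SIZE, 1024 * 1024 * 1024);
 *     if (bdrv_create(drv, "/tmp/test.qcow2", opts, &err) < 0) {
 *         // report err
 *     }
 *     free_option_parameters(opts);
 */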
int bdrv_create_file(const char* filename, QEMUOptionParameter *options,
                     Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}
int bdrv_refresh_limits(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return 0;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file);
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.opt_mem_alignment = 512;
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd);
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        return drv->bdrv_refresh_limits(bs);
    }

    return 0;
}
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater.  */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir)
        tmpdir = "/tmp";
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return bdrv_find_format("file");
    }

    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}
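/*
 * Example of the lookup above: for filename "nbd://localhost:10809/disk",
 * the text before the first ':' is extracted as the protocol ("nbd") and
 * matched against each registered driver's protocol_name. A plain path
 * such as "/var/lib/img.raw" has no protocol prefix and resolves to the
 * "file" driver.
 */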
static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    int score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        drv = bdrv_find_format("raw");
        if (!drv) {
            error_setg(errp, "Could not find raw image format");
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "format found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}
/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}
/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}

/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
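/*
 * Summary of the mapping implemented above:
 *
 *   mode           BDRV_O_NOCACHE   BDRV_O_CACHE_WB   BDRV_O_NO_FLUSH
 *   off/none           set               set                -
 *   directsync         set                -                 -
 *   writeback           -                set                -
 *   unsafe              -                set               set
 *   writethrough        -                 -                 -   (default)
 */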
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (bs->is_temporary) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}

static int bdrv_assign_node_name(BlockDriverState *bs,
                                 const char *node_name,
                                 Error **errp)
{
    if (!node_name) {
        return 0;
    }

    /* empty string node name is invalid */
    if (node_name[0] == '\0') {
        error_setg(errp, "Empty node name");
        return -EINVAL;
    }

    /* takes care of avoiding namespace collisions */
    if (bdrv_find(node_name)) {
        error_setg(errp, "node-name=%s is conflicting with a device id",
                   node_name);
        return -EINVAL;
    }

    /* takes care of avoiding duplicate node names */
    if (bdrv_find_node(node_name)) {
        error_setg(errp, "Duplicate node name");
        return -EINVAL;
    }

    /* copy node name into the bs and insert it into the graph list */
    pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
    QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);

    return 0;
}
/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    QDict *options, int flags, BlockDriver *drv, Error **errp)
{
    int ret, open_flags;
    const char *filename;
    const char *node_name = NULL;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        return -EINVAL;
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    node_name = qdict_get_try_str(options, "node-name");
    ret = bdrv_assign_node_name(bs, node_name, errp);
    if (ret < 0) {
        return ret;
    }
    qdict_del(options, "node-name");

    /* bdrv_open() with directly using a protocol as drv. This layer is already
     * opened, so assign it to bs (while file becomes a closed BlockDriverState)
     * and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->guest_block_size = 512;
    bs->request_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                        ? "Driver '%s' can only be used for read-only devices"
                        : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (local_err) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

    bdrv_refresh_limits(bs);
    assert(bdrv_opt_mem_align(bs) != 0);
    assert(bs->request_alignment != 0);

#ifndef _WIN32
    if (bs->is_temporary) {
        assert(bs->filename[0] != '\0');
        unlink(bs->filename);
    }
#endif
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}
/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_file_open.
 */
static int bdrv_file_open(BlockDriverState *bs, const char *filename,
                          QDict *options, int flags, Error **errp)
{
    BlockDriver *drv;
    const char *drvname;
    bool allow_protocol_prefix = false;
    Error *local_err = NULL;
    int ret;

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    /* Fetch the file name from the options QDict if necessary */
    if (!filename) {
        filename = qdict_get_try_str(options, "filename");
    } else if (filename && !qdict_haskey(options, "filename")) {
        qdict_put(options, "filename", qstring_from_str(filename));
        allow_protocol_prefix = true;
    } else {
        error_setg(errp, "Can't specify 'file' and 'filename' options at the "
                   "same time");
        ret = -EINVAL;
        goto fail;
    }

    /* Find the right block driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        if (!drv) {
            error_setg(errp, "Unknown driver '%s'", drvname);
        }
        qdict_del(options, "driver");
    } else if (filename) {
        drv = bdrv_find_protocol(filename, allow_protocol_prefix);
        if (!drv) {
            error_setg(errp, "Unknown protocol");
        }
    } else {
        error_setg(errp, "Must specify either driver or file");
        drv = NULL;
    }

    if (!drv) {
        /* errp has been set already */
        ret = -ENOENT;
        goto fail;
    }

    /* Parse the filename and open it */
    if (drv->bdrv_parse_filename && filename) {
        drv->bdrv_parse_filename(filename, options, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            ret = -EINVAL;
            goto fail;
        }
        qdict_del(options, "filename");
    }

    if (!drv->bdrv_file_open) {
        ret = bdrv_open(&bs, filename, NULL, options, flags, drv, &local_err);
        options = NULL;
    } else {
        ret = bdrv_open_common(bs, NULL, options, flags, drv, &local_err);
    }
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto fail;
    }

    /* Check if any unknown options were used */
    if (options && (qdict_size(options) != 0)) {
        const QDictEntry *entry = qdict_first(options);
        error_setg(errp, "Block protocol '%s' doesn't support the option '%s'",
                   drv->format_name, entry->key);
        ret = -EINVAL;
        goto fail;
    }
    QDECREF(options);

    bs->growable = 1;
    return 0;

fail:
    QDECREF(options);
    if (!bs->drv) {
        QDECREF(bs->options);
    }
    return ret;
}
/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the dictionary,
 * it needs to use QINCREF() before calling bdrv_open_backing_file.
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char backing_filename[PATH_MAX];
    int back_flags, ret;
    BlockDriver *back_drv = NULL;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        return 0;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        return 0;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename,
                                       sizeof(backing_filename));
    }

    if (bs->backing_format[0] != '\0') {
        back_drv = bdrv_find_format(bs->backing_format);
    }

    /* backing files always opened read-only */
    back_flags = bs->open_flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT |
                                    BDRV_O_COPY_ON_READ);

    assert(bs->backing_hd == NULL);
    ret = bdrv_open(&bs->backing_hd,
                    *backing_filename ? backing_filename : NULL, NULL, options,
                    back_flags, back_drv, &local_err);
    if (ret < 0) {
        bs->backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        return ret;
    }

    if (bs->backing_hd->file) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file),
                bs->backing_hd->file->filename);
    }

    /* Recalculate the BlockLimits with the backing file */
    bdrv_refresh_limits(bs);

    return 0;
}
/*
 * Opens a disk image whose options are given as BlockdevRef in another block
 * device's options.
 *
 * If force_raw is true, bdrv_file_open() will be used, thereby preventing any
 * image format auto-detection. If it is false and a filename is given,
 * bdrv_open() will be used for auto-detection.
 *
 * If allow_none is true, no image will be opened if no filename and no
 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
 *
 * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
 * itself, all options starting with "${bdref_key}." are considered part of the
 * BlockdevRef.
 *
 * The BlockdevRef will be removed from the options QDict.
 *
 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
 */
int bdrv_open_image(BlockDriverState **pbs, const char *filename,
                    QDict *options, const char *bdref_key, int flags,
                    bool force_raw, bool allow_none, Error **errp)
{
    QDict *image_options;
    int ret;
    char *bdref_key_dot;
    const char *reference;

    assert(pbs);
    assert(*pbs == NULL);

    bdref_key_dot = g_strdup_printf("%s.", bdref_key);
    qdict_extract_subqdict(options, &image_options, bdref_key_dot);
    g_free(bdref_key_dot);

    reference = qdict_get_try_str(options, bdref_key);
    if (!filename && !reference && !qdict_size(image_options)) {
        if (allow_none) {
            ret = 0;
        } else {
            error_setg(errp, "A block device must be specified for \"%s\"",
                       bdref_key);
            ret = -EINVAL;
        }
        goto done;
    }

    if (filename && !force_raw) {
        /* If a filename is given and the block driver should be detected
           automatically (instead of using none), use bdrv_open() in order to do
           that auto-detection. */
        if (reference) {
            error_setg(errp, "Cannot reference an existing block device while "
                       "giving a filename");
            ret = -EINVAL;
            goto done;
        }

        ret = bdrv_open(pbs, filename, NULL, image_options, flags, NULL, errp);
    } else {
        ret = bdrv_open(pbs, filename, reference, image_options,
                        flags | BDRV_O_PROTOCOL, NULL, errp);
    }

done:
    qdict_del(options, bdref_key);
    return ret;
}
/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 *
 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there.
 * If it is not NULL, the referenced BDS will be reused.
 *
 * The reference parameter may be used to specify an existing block device which
 * should be opened. If specified, neither options nor a filename may be given,
 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
 */
int bdrv_open(BlockDriverState **pbs, const char *filename,
              const char *reference, QDict *options, int flags,
              BlockDriver *drv, Error **errp)
{
    int ret;
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char tmp_filename[PATH_MAX + 1];
    BlockDriverState *file = NULL, *bs;
    const char *drvname;
    Error *local_err = NULL;

    assert(pbs);

    if (reference) {
        bool options_non_empty = options ? qdict_size(options) : false;
        QDECREF(options);

        if (*pbs) {
            error_setg(errp, "Cannot reuse an existing BDS when referencing "
                       "another block device");
            return -EINVAL;
        }

        if (filename || options_non_empty) {
            error_setg(errp, "Cannot reference an existing block device with "
                       "additional options or a new filename");
            return -EINVAL;
        }

        bs = bdrv_lookup_bs(reference, reference, errp);
        if (!bs) {
            return -ENODEV;
        }
        bdrv_ref(bs);
        *pbs = bs;
        return 0;
    }

    if (*pbs) {
        bs = *pbs;
    } else {
        bs = bdrv_new("");
    }

    if (flags & BDRV_O_PROTOCOL) {
        assert(!drv);
        ret = bdrv_file_open(bs, filename, options, flags & ~BDRV_O_PROTOCOL,
                             &local_err);
        if (!ret) {
            *pbs = bs;
            return 0;
        } else if (bs->drv) {
            goto close_and_fail;
        } else {
            goto fail;
        }
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    /* For snapshot=on, create a temporary qcow2 overlay */
    if (flags & BDRV_O_SNAPSHOT) {
        BlockDriverState *bs1;
        int64_t total_size;
        BlockDriver *bdrv_qcow2;
        QEMUOptionParameter *create_options;
        QDict *snapshot_options;

        /* if snapshot, we create a temporary backing file and open it
           instead of opening 'filename' directly */

        /* Get the required size from the image */
        QINCREF(options);
        bs1 = NULL;
        ret = bdrv_open(&bs1, filename, NULL, options, BDRV_O_NO_BACKING,
                        drv, &local_err);
        if (ret < 0) {
            goto fail;
        }
        total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;

        bdrv_unref(bs1);

        /* Create the temporary image */
        ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not get temporary filename");
            goto fail;
        }

        bdrv_qcow2 = bdrv_find_format("qcow2");
        create_options = parse_option_parameters("", bdrv_qcow2->create_options,
                                                 NULL);

        set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size);

        ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options, &local_err);
        free_option_parameters(create_options);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not create temporary overlay "
                             "'%s': %s", tmp_filename,
                             error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
            goto fail;
        }

        /* Prepare a new options QDict for the temporary file, where user
         * options refer to the backing file */
        if (filename) {
            qdict_put(options, "file.filename", qstring_from_str(filename));
        }
        if (drv) {
            qdict_put(options, "driver", qstring_from_str(drv->format_name));
        }

        snapshot_options = qdict_new();
        qdict_put(snapshot_options, "backing", options);
        qdict_flatten(snapshot_options);

        bs->options = snapshot_options;
        options = qdict_clone_shallow(bs->options);

        filename = tmp_filename;
        drv = bdrv_qcow2;
        bs->is_temporary = 1;
    }

    /* Open image file without format layer */
    if (flags & BDRV_O_RDWR) {
        flags |= BDRV_O_ALLOW_RDWR;
    }

    assert(file == NULL);
    ret = bdrv_open_image(&file, filename, options, "file",
                          bdrv_open_flags(bs, flags | BDRV_O_UNMAP), true, true,
                          &local_err);
    if (ret < 0) {
        goto unlink_and_fail;
    }

    /* Find the right image format driver */
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
        if (!drv) {
            error_setg(errp, "Invalid driver: '%s'", drvname);
            ret = -EINVAL;
            goto unlink_and_fail;
        }
    }

    if (!drv) {
        if (file) {
            ret = find_image_format(file, filename, &drv, &local_err);
        } else {
            error_setg(errp, "Must specify either driver or file");
            ret = -EINVAL;
            goto unlink_and_fail;
        }
    }

    if (!drv) {
        goto unlink_and_fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto unlink_and_fail;
    }

    if (file && (bs->file != file)) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    /* Check if any unknown options were used */
    if (qdict_size(options) != 0) {
        const QDictEntry *entry = qdict_first(options);
        error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                   "support the option '%s'", drv->format_name, bs->device_name,
                   entry->key);

        ret = -EINVAL;
        goto close_and_fail;
    }
    QDECREF(options);

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    }

    *pbs = bs;
    return 0;

unlink_and_fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    if (bs->is_temporary) {
        unlink(filename);
    }
fail:
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (!*pbs) {
        /* If *pbs is NULL, a new BDS has been created in this function and
           needs to be freed now. Otherwise, it does not need to be closed,
           since it has not really been opened yet. */
        bdrv_unref(bs);
    }
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    /* See fail path, but now the BDS has to be always closed */
    if (*pbs) {
        bdrv_close(bs);
    } else {
        bdrv_unref(bs);
    }
    QDECREF(options);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}
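/*
 * Illustrative sketch (hypothetical caller, not part of this file): opening
 * an image through the function above with an options QDict. The "driver"
 * key is one of the options bdrv_open() itself parses:
 *
 *     BlockDriverState *bs = NULL;
 *     Error *err = NULL;
 *     QDict *opts = qdict_new();
 *     qdict_put(opts, "driver", qstring_from_str("qcow2"));
 *     if (bdrv_open(&bs, "/tmp/test.qcow2", NULL, opts, BDRV_O_RDWR,
 *                   NULL, &err) < 0) {
 *         // report err; the opts reference has been consumed either way
 *     }
 */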
typedef struct BlockReopenQueueEntry {
     bool prepared;
     BDRVReopenState state;
     QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;

/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had
 * QSIMPLEQ_INIT already performed, or alternatively may be NULL, in which case
 * a new BlockReopenQueue will be created and initialized. This newly created
 * BlockReopenQueue should be passed back in for subsequent calls that are
 * intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, flags);
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}
/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags.  All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 *
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    bdrv_drain_all();

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    ret = 0;
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

cleanup:
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}
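/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * prepare/commit/abort cycle above is normally driven through the two
 * helpers in this file:
 *
 *     Error *err = NULL;
 *     BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, new_flags);
 *     if (bdrv_reopen_multiple(queue, &err) < 0) {
 *         // every prepared entry was rolled back via bdrv_reopen_abort()
 *     }
 *
 * bdrv_reopen() below is exactly this pattern for a single device.
 */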
/* Reopen a single BlockDriverState with the specified flags. */
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);

    ret = bdrv_reopen_multiple(queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
    return ret;
}
/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver layer .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error.  On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state
 *
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  reopen_state->bs->device_name);
        goto error;
    }

    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_setg(errp, "failed while preparing to reopen image '%s'",
                           reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, reopen_state->bs->device_name,
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}
/*
 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
 * makes them final by swapping the staging BlockDriverState contents into
 * the active BlockDriverState contents.
 */
void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    /* If there are any driver level actions to take */
    if (drv->bdrv_reopen_commit) {
        drv->bdrv_reopen_commit(reopen_state);
    }

    /* set BDS specific flags now */
    reopen_state->bs->open_flags         = reopen_state->flags;
    reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
                                              BDRV_O_CACHE_WB);
    reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);

    bdrv_refresh_limits(reopen_state->bs);
}

/*
 * Abort the reopen, and delete and free the staged changes in
 * reopen_state
 */
void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    if (drv->bdrv_reopen_abort) {
        drv->bdrv_reopen_abort(reopen_state);
    }
}
void bdrv_close(BlockDriverState *bs)
{
    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all(); /* complete I/O */
    bdrv_flush(bs);
    bdrv_drain_all(); /* in case flush left pending I/O */
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        if (bs->backing_hd) {
            bdrv_unref(bs->backing_hd);
            bs->backing_hd = NULL;
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
#ifdef _WIN32
        if (bs->is_temporary) {
            unlink(bs->filename);
        }
#endif
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->growable = 0;
        bs->zero_beyond_eof = false;
        QDECREF(bs->options);
        bs->options = NULL;

        if (bs->file != NULL) {
            bdrv_unref(bs->file);
            bs->file = NULL;
        }
    }

    bdrv_dev_change_media_cb(bs, false);

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}

void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bdrv_close(bs);
    }
}
/* Check if any requests are in-flight (including throttled requests) */
static bool bdrv_requests_pending(BlockDriverState *bs)
{
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }
    if (bs->file && bdrv_requests_pending(bs->file)) {
        return true;
    }
    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
        return true;
    }
    return false;
}

static bool bdrv_requests_pending_all(void)
{
    BlockDriverState *bs;
    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        if (bdrv_requests_pending(bs)) {
            return true;
        }
    }
    return false;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete.  Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;

    while (busy) {
        QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
            bdrv_start_throttled_reqs(bs);
        }

        busy = bdrv_requests_pending_all();
        busy |= aio_poll(qemu_get_aio_context(), busy);
    }
}
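/*
 * Usage note (sketch, not enforced here): callers that want data on stable
 * storage rather than merely quiesced queues pair this with a flush, as the
 * comment above says:
 *
 *     bdrv_drain_all();   // no requests in flight anymore
 *     bdrv_flush_all();   // now push everything to disk
 */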
/* make a BlockDriverState anonymous by removing from bdrv_state and
 * graph_bdrv_state list.
   Also, NULL terminate the device_name to prevent double remove */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, device_list);
    }
    bs->device_name[0] = '\0';
    if (bs->node_name[0] != '\0') {
        QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
    }
    bs->node_name[0] = '\0';
}

static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}
static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */
    bs_dest->open_flags         = bs_src->open_flags;

    /* dev info */
    bs_dest->dev_ops            = bs_src->dev_ops;
    bs_dest->dev_opaque         = bs_src->dev_opaque;
    bs_dest->dev                = bs_src->dev;
    bs_dest->guest_block_size   = bs_src->guest_block_size;
    bs_dest->copy_on_read       = bs_src->copy_on_read;

    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o throttled req */
    memcpy(&bs_dest->throttle_state,
           &bs_src->throttle_state,
           sizeof(ThrottleState));
    bs_dest->throttled_reqs[0]  = bs_src->throttled_reqs[0];
    bs_dest->throttled_reqs[1]  = bs_src->throttled_reqs[1];
    bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error      = bs_src->on_read_error;
    bs_dest->on_write_error     = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
    bs_dest->iostatus           = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_bitmaps      = bs_src->dirty_bitmaps;

    /* reference count */
    bs_dest->refcnt             = bs_src->refcnt;

    /* job */
    bs_dest->in_use             = bs_src->in_use;
    bs_dest->job                = bs_src->job;

    /* keep the same entry in bdrv_states */
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->device_list = bs_src->device_list;

    /* keep the same entry in graph_bdrv_states
     * We do want to swap name but don't want to swap linked list entries
     */
    bs_dest->node_list   = bs_src->node_list;
}
/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old. Both bs_new and bs_old are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* bs_new must be anonymous and shouldn't have anything fancy enabled */
    assert(bs_new->device_name[0] == '\0');
    assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
    assert(bs_new->job == NULL);
    assert(bs_new->dev == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new shouldn't be in bdrv_states even after the swap!  */
    assert(bs_new->device_name[0] == '\0');

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->dev == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(!throttle_have_timer(&bs_new->throttle_state));

    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}
/*
 * Add new bs contents at the top of an image chain while the chain is
 * live, while keeping required fields on the top layer.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_top. Both bs_new and bs_top are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
{
    bdrv_swap(bs_new, bs_top);

    /* The contents of 'tmp' will become bs_top, as we are
     * swapping bs_new and bs_top contents. */
    bs_top->backing_hd = bs_new;
    bs_top->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file),
            bs_new->filename);
    pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format),
            bs_new->drv ? bs_new->drv->format_name : "");
}
static void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->dev);
    assert(!bs->job);
    assert(!bs->in_use);
    assert(!bs->refcnt);
    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    bdrv_close(bs);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    g_free(bs);
}

int bdrv_attach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (bs->dev) {
        return -EBUSY;
    }
    bs->dev = dev;
    bdrv_iostatus_reset(bs);
    return 0;
}

/* TODO qdevified devices don't use this, remove when devices are qdevified */
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
{
    if (bdrv_attach_dev(bs, dev) < 0) {
        abort();
    }
}
void bdrv_detach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(bs->dev == dev);
    bs->dev = NULL;
    bs->dev_ops = NULL;
    bs->dev_opaque = NULL;
    bs->guest_block_size = 512;
}

/* TODO change to return DeviceState * when all users are qdevified */
void *bdrv_get_attached_dev(BlockDriverState *bs)
{
    return bs->dev;
}

void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque)
{
    bs->dev_ops = ops;
    bs->dev_opaque = opaque;
}
void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
                               enum MonitorEvent ev,
                               BlockErrorAction action, bool is_read)
{
    QObject *data;
    const char *action_str;

    switch (action) {
    case BDRV_ACTION_REPORT:
        action_str = "report";
        break;
    case BDRV_ACTION_IGNORE:
        action_str = "ignore";
        break;
    case BDRV_ACTION_STOP:
        action_str = "stop";
        break;
    default:
        abort();
    }

    data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
                              bdrv->device_name,
                              action_str,
                              is_read ? "read" : "write");
    monitor_protocol_event(ev, data);

    qobject_decref(data);
}

static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
{
    QObject *data;

    data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
                              bdrv_get_device_name(bs), ejected);
    monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);

    qobject_decref(data);
}

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
{
    if (bs->dev_ops && bs->dev_ops->change_media_cb) {
        bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
        bs->dev_ops->change_media_cb(bs->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            bdrv_emit_qmp_eject_event(bs, true);
        }
        if (load) {
            /* tray close */
            bdrv_emit_qmp_eject_event(bs, false);
        }
    }
}

bool bdrv_dev_has_removable_media(BlockDriverState *bs)
{
    return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
}

void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
{
    if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
        bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
    }
}

bool bdrv_dev_is_tray_open(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_tray_open) {
        return bs->dev_ops->is_tray_open(bs->dev_opaque);
    }
    return false;
}

static void bdrv_dev_resize_cb(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->resize_cb) {
        bs->dev_ops->resize_cb(bs->dev_opaque);
    }
}

bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
        return bs->dev_ops->is_medium_locked(bs->dev_opaque);
    }
    return false;
}
/*
 * Run consistency checks on an image
 *
 * Returns 0 if the check could be completed (it doesn't mean that the image is
 * free of errors) or -errno when an internal error occurred. The results of the
 * check are stored in res.
 */
int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
{
    if (bs->drv->bdrv_check == NULL) {
        return -ENOTSUP;
    }

    memset(res, 0, sizeof(*res));
    return bs->drv->bdrv_check(bs, res, fix);
}
#define COMMIT_BUF_SECTORS 2048

/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    int64_t sector, total_sectors, length, backing_length;
    int n, ro, open_flags;
    int ret = 0;
    uint8_t *buf = NULL;
    char filename[PATH_MAX];

    if (!drv)
        return -ENOMEDIUM;

    if (!bs->backing_hd) {
        return -ENOTSUP;
    }

    if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
        return -EBUSY;
    }

    ro = bs->backing_hd->read_only;
    /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
    pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
    open_flags =  bs->backing_hd->open_flags;

    if (ro) {
        if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
            return -EACCES;
        }
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        ret = length;
        goto ro_cleanup;
    }

    backing_length = bdrv_getlength(bs->backing_hd);
    if (backing_length < 0) {
        ret = backing_length;
        goto ro_cleanup;
    }

    /* If our top snapshot is larger than the backing file image,
     * grow the backing file image if possible.  If not possible,
     * we must return an error */
    if (length > backing_length) {
        ret = bdrv_truncate(bs->backing_hd, length);
        if (ret < 0) {
            goto ro_cleanup;
        }
    }

    total_sectors = length >> BDRV_SECTOR_BITS;
    buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);

    for (sector = 0; sector < total_sectors; sector += n) {
        ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
        if (ret < 0) {
            goto ro_cleanup;
        }
        if (ret) {
            ret = bdrv_read(bs, sector, buf, n);
            if (ret < 0) {
                goto ro_cleanup;
            }

            ret = bdrv_write(bs->backing_hd, sector, buf, n);
            if (ret < 0) {
                goto ro_cleanup;
            }
        }
    }

    if (drv->bdrv_make_empty) {
        ret = drv->bdrv_make_empty(bs);
        if (ret < 0) {
            goto ro_cleanup;
        }
        bdrv_flush(bs);
    }

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    if (bs->backing_hd) {
        bdrv_flush(bs->backing_hd);
    }

    ret = 0;
ro_cleanup:
    g_free(buf);

    if (ro) {
        /* ignoring error return here */
        bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
    }

    return ret;
}
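/*
 * Effect of the loop above (sketch): for a chain "backing <- bs", every
 * sector range that bdrv_is_allocated() reports as present in bs is read
 * from bs and written into backing, so afterwards
 *
 *     backing <- bs   (bs now empty, if the driver supports bdrv_make_empty)
 *
 * holds the same guest-visible data as before, with bs's delta folded into
 * its backing file.
 */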
int bdrv_commit_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        if (bs->drv && bs->backing_hd) {
            int ret = bdrv_commit(bs);
            if (ret < 0) {
                return ret;
            }
        }
    }
    return 0;
}
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .is_write       = is_write,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
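/*
 * Worked example for the rounding above: with req->offset = 4608,
 * req->bytes = 512 and align = 4096, overlap_offset is rounded down to
 * 4096 and overlap_bytes up to 4096, so the serialising request guards
 * the whole 4 KiB block that its unaligned head and tail touch.
 */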
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
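/*
 * Worked example: with a 64 KiB cluster size, c = 128 sectors. A request
 * for sector_num = 130, nb_sectors = 10 yields cluster_sector_num = 128
 * and cluster_nb_sectors = 128, i.e. the surrounding whole cluster.
 */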
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->request_alignment;
    } else {
        return bdi.cluster_size;
    }
}
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}

static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}
/*
 * Return values:
 * 0        - success
 * -EINVAL  - backing format specified, but no file
 * -ENOSPC  - can't update the backing file because no space is left in the
 *            image file header
 * -ENOTSUP - format driver doesn't support changing the backing file
 */
int bdrv_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    BlockDriver *drv = bs->drv;
    int ret;

    /* Backing file format doesn't make sense without a backing file */
    if (backing_fmt && !backing_file) {
        return -EINVAL;
    }

    if (drv->bdrv_change_backing_file != NULL) {
        ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
    } else {
        ret = -ENOTSUP;
    }

    if (ret == 0) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
        pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
    }
    return ret;
}
/*
 * Finds the image layer in the chain that has 'bs' as its backing file.
 *
 * active is the current topmost image.
 *
 * Returns NULL if bs is not found in active's image chain,
 * or if active == bs.
 */
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs)
{
    BlockDriverState *overlay = NULL;
    BlockDriverState *intermediate;

    assert(active != NULL);
    assert(bs != NULL);

    /* if bs is the same as active, then by definition it has no overlay
     */
    if (active == bs) {
        return NULL;
    }

    intermediate = active;
    while (intermediate->backing_hd) {
        if (intermediate->backing_hd == bs) {
            overlay = intermediate;
            break;
        }
        intermediate = intermediate->backing_hd;
    }

    return overlay;
}

typedef struct BlkIntermediateStates {
    BlockDriverState *bs;
    QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
} BlkIntermediateStates;
/*
 * Drops images above 'base' up to and including 'top', and sets the image
 * above 'top' to have base as its backing file.
 *
 * Requires that the overlay to 'top' is opened r/w, so that the backing file
 * information in 'bs' can be properly updated.
 *
 * E.g., this will convert the following chain:
 * bottom <- base <- intermediate <- top <- active
 *
 * to
 *
 * bottom <- base <- active
 *
 * It is allowed for bottom==base, in which case it converts:
 *
 * base <- intermediate <- top <- active
 *
 * to
 *
 * base <- active
 *
 * Error conditions:
 *  if active == top, that is considered an error
 *
 */
int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
                           BlockDriverState *base)
{
    BlockDriverState *intermediate;
    BlockDriverState *base_bs = NULL;
    BlockDriverState *new_top_bs = NULL;
    BlkIntermediateStates *intermediate_state, *next;
    int ret = -EIO;

    QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
    QSIMPLEQ_INIT(&states_to_delete);

    if (!top->drv || !base->drv) {
        goto exit;
    }

    new_top_bs = bdrv_find_overlay(active, top);

    if (new_top_bs == NULL) {
        /* we could not find the image above 'top', this is an error */
        goto exit;
    }

    /* special case of new_top_bs->backing_hd already pointing to base - nothing
     * to do, no intermediate images */
    if (new_top_bs->backing_hd == base) {
        ret = 0;
        goto exit;
    }

    intermediate = top;

    /* now we will go down through the list, and add each BDS we find
     * into our deletion queue, until we hit the 'base'
     */
    while (intermediate) {
        intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
        intermediate_state->bs = intermediate;
        QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);

        if (intermediate->backing_hd == base) {
            base_bs = intermediate->backing_hd;
            break;
        }
        intermediate = intermediate->backing_hd;
    }
    if (base_bs == NULL) {
        /* something went wrong, we did not end at the base. safely
         * unravel everything, and exit with error */
        goto exit;
    }

    /* success - we can delete the intermediate states, and link top->base */
    ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
                                   base_bs->drv ? base_bs->drv->format_name : "");
    if (ret) {
        goto exit;
    }
    new_top_bs->backing_hd = base_bs;

    bdrv_refresh_limits(new_top_bs);

    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        /* so that bdrv_close() does not recursively close the chain */
        intermediate_state->bs->backing_hd = NULL;
        bdrv_unref(intermediate_state->bs);
    }
    ret = 0;

exit:
    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        g_free(intermediate_state);
    }
    return ret;
}
static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   int size)
{
    int64_t len;

    if (!bdrv_is_inserted(bs))
        return -ENOMEDIUM;

    if (bs->growable)
        return 0;

    len = bdrv_getlength(bs);

    if (offset < 0)
        return -EIO;

    if ((offset > len) || (len - offset < size))
        return -EIO;

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}

typedef struct RwCo {
    BlockDriverState *bs;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
                                      rwco->qiov->size, rwco->qiov,
                                      rwco->flags);
    } else {
        rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
                                       rwco->qiov->size, rwco->qiov,
                                       rwco->flags);
    }
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    /**
     * In sync call context, when the vcpu is blocked, this throttling timer
     * will not fire; so the I/O throttling function has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }
    return rwco.ret;
}

/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
                        &qiov, is_write, flags);
}
/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
}

/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors)
{
    bool enabled;
    int ret;

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}

/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}

int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                      int nb_sectors, BdrvRequestFlags flags)
{
    return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
                      BDRV_REQ_ZERO_WRITE | flags);
}
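
/* Illustrative sketch (not part of the original file): reading a sector,
 * patching it, and writing it back through the synchronous sector API.
 * The sector number and patched byte are hypothetical. */
#if 0
static int example_patch_first_sector(BlockDriverState *bs)
{
    uint8_t buf[BDRV_SECTOR_SIZE];
    int ret = bdrv_read(bs, 0, buf, 1);     /* read sector 0 */
    if (ret < 0) {
        return ret;
    }
    buf[0] = 0x55;                          /* modify one byte */
    return bdrv_write(bs, 0, buf, 1);       /* write sector 0 back */
}
#endif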
/*
 * Completely zero out a block device with the help of bdrv_write_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 */
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
    int64_t target_size = bdrv_getlength(bs) / BDRV_SECTOR_SIZE;
    int64_t ret, nb_sectors, sector_num = 0;
    int n;

    for (;;) {
        nb_sectors = target_size - sector_num;
        if (nb_sectors <= 0) {
            return 0;
        }
        if (nb_sectors > INT_MAX) {
            nb_sectors = INT_MAX;
        }
        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
        if (ret < 0) {
            error_report("error getting block status at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            sector_num += n;
            continue;
        }
        ret = bdrv_write_zeroes(bs, sector_num, n, flags);
        if (ret < 0) {
            error_report("error writing zeroes at sector %" PRId64 ": %s",
                         sector_num, strerror(-ret));
            return ret;
        }
        sector_num += n;
    }
}

int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = bytes,
    };
    int ret;

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return bytes;
}

int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = bytes,
    };

    if (bytes < 0) {
        return -EINVAL;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_pwritev(bs, offset, &qiov);
}
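
/* Illustrative sketch (not part of the original file): byte-granularity
 * access with bdrv_pread()/bdrv_pwrite().  Unlike the sector API, these take
 * a byte offset and length; the header size and marker value are
 * hypothetical. */
#if 0
static int example_update_header(BlockDriverState *bs)
{
    uint32_t marker = 0xdeadbeef;           /* hypothetical value */
    uint8_t header[512];
    int ret = bdrv_pread(bs, 0, header, sizeof(header));
    if (ret < 0) {
        return ret;
    }
    memcpy(header, &marker, sizeof(marker));
    return bdrv_pwrite(bs, 0, header, sizeof(header));
}
#endif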
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that already do it */
    if (bs->enable_write_cache) {
        bdrv_flush(bs);
    }

    return 0;
}

static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors, 0);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        mark_request_serialising(req, bdrv_get_cluster_size(bs));
    }

    wait_serialising_requests(req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    /* Forward the request to the BlockDriver */
    if (!(bs->zero_beyond_eof && bs->growable)) {
        ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
    } else {
        /* Read zeros after EOF of growable BDSes */
        int64_t len, total_sectors, max_nb_sectors;

        len = bdrv_getlength(bs);
        if (len < 0) {
            ret = len;
            goto out;
        }

        total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE);
        max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
                                  align >> BDRV_SECTOR_BITS);
        if (max_nb_sectors > 0) {
            ret = drv->bdrv_co_readv(bs, sector_num,
                                     MIN(nb_sectors, max_nb_sectors), qiov);
        } else {
            ret = 0;
        }

        /* Reading beyond end of file is supposed to produce zeroes */
        if (ret == 0 && total_sectors < sector_num + nb_sectors) {
            uint64_t offset = MAX(0, total_sectors - sector_num);
            uint64_t bytes = (sector_num + nb_sectors - offset) *
                              BDRV_SECTOR_SIZE;
            qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
        }
    }

out:
    return ret;
}
/*
 * Handle a read request in coroutine context
 */
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;

    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (bdrv_check_byte_request(bs, offset, bytes)) {
        return -EIO;
    }

    if (bs->copy_on_read) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, bytes, false);
    }

    /* Align read if necessary by padding qiov */
    if (offset & (align - 1)) {
        head_buf = qemu_blockalign(bs, align);
        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }
        tail_buf = qemu_blockalign(bs, align);
        qemu_iovec_add(&local_qiov, tail_buf,
                       align - ((offset + bytes) & (align - 1)));

        bytes = ROUND_UP(bytes, align);
    }

    tracked_request_begin(&req, bs, offset, bytes, false);
    ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
                              use_local_qiov ? &local_qiov : qiov,
                              flags);
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
        qemu_vfree(head_buf);
        qemu_vfree(tail_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
        return -EINVAL;
    }

    return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
                             nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}
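
/* Illustrative sketch (not part of the original file): issuing a vectored
 * read from coroutine context.  The two destination buffers and their sizes
 * are hypothetical; bdrv_co_readv() takes care of request tracking,
 * alignment padding and throttling. */
#if 0
static int coroutine_fn example_scattered_read(BlockDriverState *bs,
                                               uint8_t *hdr, uint8_t *data)
{
    QEMUIOVector qiov;
    struct iovec iov[2] = {
        { .iov_base = hdr,  .iov_len = 512 },
        { .iov_base = data, .iov_len = 4096 },
    };

    qemu_iovec_init_external(&qiov, iov, 2);
    /* 9 sectors == (512 + 4096) / BDRV_SECTOR_SIZE */
    return bdrv_co_readv(bs, 0, 9, &qiov);
}
#endif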
/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_WRITE_ZEROES_DEFAULT 32768

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov = {0};
    int ret = 0;

    int max_write_zeroes = bs->bl.max_write_zeroes ?
                           bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;

    while (nb_sectors > 0 && !ret) {
        int num = nb_sectors;

        /* Align request.  Block drivers can expect the "bulk" of the request
         * to be aligned.
         */
        if (bs->bl.write_zeroes_alignment
            && num > bs->bl.write_zeroes_alignment) {
            if (sector_num % bs->bl.write_zeroes_alignment != 0) {
                /* Make a small request up to the first aligned sector. */
                num = bs->bl.write_zeroes_alignment;
                num -= sector_num % bs->bl.write_zeroes_alignment;
            } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
                /* Shorten the request to the last aligned sector.  num cannot
                 * underflow because num > bs->bl.write_zeroes_alignment.
                 */
                num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
            }
        }

        /* limit request size */
        if (num > max_write_zeroes) {
            num = max_write_zeroes;
        }

        ret = -ENOTSUP;
        /* First try the efficient write zeroes operation */
        if (drv->bdrv_co_write_zeroes) {
            ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
        }

        if (ret == -ENOTSUP) {
            /* Fall back to bounce buffer if write zeroes is unsupported */
            iov.iov_len = num * BDRV_SECTOR_SIZE;
            if (iov.iov_base == NULL) {
                iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE);
                memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
            }
            qemu_iovec_init_external(&qiov, &iov, 1);

            ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);

            /* Keep the bounce buffer around if it is big enough for all
             * future requests.
             */
            if (num < max_write_zeroes) {
                qemu_vfree(iov.iov_base);
                iov.iov_base = NULL;
            }
        }

        sector_num += num;
        nb_sectors -= num;
    }

    qemu_vfree(iov.iov_base);
    return ret;
}
/*
 * Forwards an already correctly aligned write request to the BlockDriver.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;

    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
    unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    waited = wait_serialising_requests(req);
    assert(!waited || !req->serialising);
    assert(req->overlap_offset <= offset);
    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);

    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);

    if (ret < 0) {
        /* Do nothing, write notifier decided to fail this request */
    } else if (flags & BDRV_REQ_ZERO_WRITE) {
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
    } else {
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    }
    BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);

    if (ret == 0 && !bs->enable_write_cache) {
        ret = bdrv_co_flush(bs);
    }

    bdrv_set_dirty(bs, sector_num, nb_sectors);

    if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
        bs->wr_highest_sector = sector_num + nb_sectors - 1;
    }
    if (bs->growable && ret >= 0) {
        bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
    }

    return ret;
}
/*
 * Handle a write request in coroutine context
 */
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BdrvTrackedRequest req;
    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
    uint8_t *head_buf = NULL;
    uint8_t *tail_buf = NULL;
    QEMUIOVector local_qiov;
    bool use_local_qiov = false;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EACCES;
    }
    if (bdrv_check_byte_request(bs, offset, bytes)) {
        return -EIO;
    }

    /* throttling disk I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, bytes, true);
    }

    /*
     * Align write if necessary by performing a read-modify-write cycle.
     * Pad qiov with the read parts and be sure to have a tracked request not
     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
     */
    tracked_request_begin(&req, bs, offset, bytes, true);

    if (offset & (align - 1)) {
        QEMUIOVector head_qiov;
        struct iovec head_iov;

        mark_request_serialising(&req, align);
        wait_serialising_requests(&req);

        head_buf = qemu_blockalign(bs, align);
        head_iov = (struct iovec) {
            .iov_base   = head_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&head_qiov, &head_iov, 1);

        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
        ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
                                  align, &head_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);

        qemu_iovec_init(&local_qiov, qiov->niov + 2);
        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
        use_local_qiov = true;

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    if ((offset + bytes) & (align - 1)) {
        QEMUIOVector tail_qiov;
        struct iovec tail_iov;
        size_t tail_bytes;
        bool waited;

        mark_request_serialising(&req, align);
        waited = wait_serialising_requests(&req);
        assert(!waited || !use_local_qiov);

        tail_buf = qemu_blockalign(bs, align);
        tail_iov = (struct iovec) {
            .iov_base   = tail_buf,
            .iov_len    = align,
        };
        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);

        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1),
                                  align, align, &tail_qiov, 0);
        if (ret < 0) {
            goto fail;
        }
        BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);

        if (!use_local_qiov) {
            qemu_iovec_init(&local_qiov, qiov->niov + 1);
            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
            use_local_qiov = true;
        }

        tail_bytes = (offset + bytes) & (align - 1);
        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);

        bytes = ROUND_UP(bytes, align);
    }

    ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
                               use_local_qiov ? &local_qiov : qiov,
                               flags);

fail:
    tracked_request_end(&req);

    if (use_local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }
    qemu_vfree(head_buf);
    qemu_vfree(tail_buf);

    return ret;
}

static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
        return -EINVAL;
    }

    return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
                              nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}

int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      BdrvRequestFlags flags)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);

    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        flags &= ~BDRV_REQ_MAY_UNMAP;
    }

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE | flags);
}
/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 */
int bdrv_truncate(BlockDriverState *bs, int64_t offset)
{
    BlockDriver *drv = bs->drv;
    int ret;

    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_truncate)
        return -ENOTSUP;
    if (bs->read_only)
        return -EACCES;
    if (bdrv_in_use(bs))
        return -EBUSY;
    ret = drv->bdrv_truncate(bs, offset);
    if (ret == 0) {
        ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
        bdrv_dev_resize_cb(bs);
    }
    return ret;
}

/**
 * Length of an allocated file in bytes. Sparse files are counted by actual
 * allocated space. Return < 0 if error or unknown.
 */
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_get_allocated_file_size) {
        return drv->bdrv_get_allocated_file_size(bs);
    }
    if (bs->file) {
        return bdrv_get_allocated_file_size(bs->file);
    }
    return -ENOTSUP;
}

/**
 * Length of a file in bytes. Return < 0 if error or unknown.
 */
int64_t bdrv_getlength(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;

    if (drv->has_variable_length) {
        int ret = refresh_total_sectors(bs, bs->total_sectors);
        if (ret < 0) {
            return ret;
        }
    }
    return bs->total_sectors * BDRV_SECTOR_SIZE;
}

/* return 0 as number of sectors if no device present or error */
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
{
    int64_t length;
    length = bdrv_getlength(bs);
    if (length < 0)
        length = 0;
    else
        length = length >> BDRV_SECTOR_BITS;
    *nb_sectors_ptr = length;
}
void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
                       BlockdevOnError on_write_error)
{
    bs->on_read_error = on_read_error;
    bs->on_write_error = on_write_error;
}

BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
{
    return is_read ? bs->on_read_error : bs->on_write_error;
}

BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
{
    BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BDRV_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BDRV_ACTION_IGNORE;
    default:
        abort();
    }
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
                       bool is_read, int error)
{
    assert(error >= 0);
    bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
    if (action == BDRV_ACTION_STOP) {
        vm_stop(RUN_STATE_IO_ERROR);
        bdrv_iostatus_set_err(bs, error);
    }
}

int bdrv_is_read_only(BlockDriverState *bs)
{
    return bs->read_only;
}

int bdrv_is_sg(BlockDriverState *bs)
{
    return bs->sg;
}

int bdrv_enable_write_cache(BlockDriverState *bs)
{
    return bs->enable_write_cache;
}

void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
{
    bs->enable_write_cache = wce;

    /* so a reopen() will preserve wce */
    if (wce) {
        bs->open_flags |= BDRV_O_CACHE_WB;
    } else {
        bs->open_flags &= ~BDRV_O_CACHE_WB;
    }
}
int bdrv_is_encrypted(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return 1;
    return bs->encrypted;
}

int bdrv_key_required(BlockDriverState *bs)
{
    BlockDriverState *backing_hd = bs->backing_hd;

    if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
        return 1;
    return (bs->encrypted && !bs->valid_key);
}

int bdrv_set_key(BlockDriverState *bs, const char *key)
{
    int ret;

    if (bs->backing_hd && bs->backing_hd->encrypted) {
        ret = bdrv_set_key(bs->backing_hd, key);
        if (ret < 0)
            return ret;
        if (!bs->encrypted)
            return 0;
    }
    if (!bs->encrypted) {
        return -EINVAL;
    } else if (!bs->drv || !bs->drv->bdrv_set_key) {
        return -ENOMEDIUM;
    }
    ret = bs->drv->bdrv_set_key(bs, key);
    if (ret < 0) {
        bs->valid_key = 0;
    } else if (!bs->valid_key) {
        bs->valid_key = 1;
        /* call the change callback now, we skipped it on open */
        bdrv_dev_change_media_cb(bs, true);
    }
    return ret;
}

const char *bdrv_get_format_name(BlockDriverState *bs)
{
    return bs->drv ? bs->drv->format_name : NULL;
}

void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque)
{
    BlockDriver *drv;

    QLIST_FOREACH(drv, &bdrv_drivers, list) {
        it(opaque, drv->format_name);
    }
}
/* This function is used to find a block backend bs */
BlockDriverState *bdrv_find(const char *name)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        if (!strcmp(name, bs->device_name)) {
            return bs;
        }
    }
    return NULL;
}

/* This function is used to find a node in the bs graph */
BlockDriverState *bdrv_find_node(const char *node_name)
{
    BlockDriverState *bs;

    assert(node_name);

    QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
        if (!strcmp(node_name, bs->node_name)) {
            return bs;
        }
    }
    return NULL;
}

/* Put this QMP function here so it can access the static graph_bdrv_states. */
BlockDeviceInfoList *bdrv_named_nodes_list(void)
{
    BlockDeviceInfoList *list, *entry;
    BlockDriverState *bs;

    list = NULL;
    QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = bdrv_block_device_info(bs);
        entry->next = list;
        list = entry;
    }

    return list;
}

BlockDriverState *bdrv_lookup_bs(const char *device,
                                 const char *node_name,
                                 Error **errp)
{
    BlockDriverState *bs = NULL;

    if (device) {
        bs = bdrv_find(device);
        if (bs) {
            return bs;
        }
    }

    if (node_name) {
        bs = bdrv_find_node(node_name);
        if (bs) {
            return bs;
        }
    }

    error_setg(errp, "Cannot find device=%s nor node_name=%s",
                     device ? device : "",
                     node_name ? node_name : "");
    return NULL;
}

BlockDriverState *bdrv_next(BlockDriverState *bs)
{
    if (!bs) {
        return QTAILQ_FIRST(&bdrv_states);
    }
    return QTAILQ_NEXT(bs, device_list);
}

void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        it(opaque, bs);
    }
}

const char *bdrv_get_device_name(BlockDriverState *bs)
{
    return bs->device_name;
}

int bdrv_get_flags(BlockDriverState *bs)
{
    return bs->open_flags;
}

int bdrv_flush_all(void)
{
    BlockDriverState *bs;
    int result = 0;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        int ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
    }

    return result;
}
int bdrv_has_zero_init_1(BlockDriverState *bs)
{
    return 1;
}

int bdrv_has_zero_init(BlockDriverState *bs)
{
    assert(bs->drv);

    /* If BS is a copy on write image, it is initialized to
       the contents of the base image, which may not be zeroes.  */
    if (bs->backing_hd) {
        return 0;
    }
    if (bs->drv->bdrv_has_zero_init) {
        return bs->drv->bdrv_has_zero_init(bs);
    }

    /* safe default */
    return 0;
}

bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
{
    BlockDriverInfo bdi;

    if (bs->backing_hd) {
        return false;
    }

    if (bdrv_get_info(bs, &bdi) == 0) {
        return bdi.unallocated_blocks_are_zero;
    }

    return false;
}

bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
{
    BlockDriverInfo bdi;

    if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
        return false;
    }

    if (bdrv_get_info(bs, &bdi) == 0) {
        return bdi.can_write_zeroes_with_unmap;
    }

    return false;
}
typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;

/*
 * Returns true iff the specified sector is present in the disk image. Drivers
 * not implementing the functionality are assumed to not support backing files,
 * hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 */
static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum)
{
    int64_t length;
    int64_t n;
    int64_t ret, ret2;

    length = bdrv_getlength(bs);
    if (length < 0) {
        return length;
    }

    if (sector_num >= (length >> BDRV_SECTOR_BITS)) {
        *pnum = 0;
        return 0;
    }

    n = bs->total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_get_block_status) {
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA;
        if (bs->drv->protocol_name) {
            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
        }
        return ret;
    }

    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        *pnum = 0;
        return ret;
    }

    if (ret & BDRV_BLOCK_RAW) {
        assert(ret & BDRV_BLOCK_OFFSET_VALID);
        return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                     *pnum, pnum);
    }

    if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
        if (bdrv_unallocated_blocks_are_zero(bs)) {
            ret |= BDRV_BLOCK_ZERO;
        } else if (bs->backing_hd) {
            BlockDriverState *bs2 = bs->backing_hd;
            int64_t length2 = bdrv_getlength(bs2);
            if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) {
                ret |= BDRV_BLOCK_ZERO;
            }
        }
    }

    if (bs->file &&
        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
        (ret & BDRV_BLOCK_OFFSET_VALID)) {
        ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
                                        *pnum, pnum);
        if (ret2 >= 0) {
            /* Ignore errors.  This is just providing extra information, it
             * is useful but not necessary.
             */
            ret |= (ret2 & BDRV_BLOCK_ZERO);
        }
    }

    return ret;
}

/* Coroutine wrapper for bdrv_get_block_status() */
static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
{
    BdrvCoGetBlockStatusData *data = opaque;
    BlockDriverState *bs = data->bs;

    data->ret = bdrv_co_get_block_status(bs, data->sector_num,
                                         data->nb_sectors, data->pnum);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_get_block_status().
 *
 * See bdrv_co_get_block_status() for details.
 */
int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors, int *pnum)
{
    Coroutine *co;
    BdrvCoGetBlockStatusData data = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_co_entry(&data);
    } else {
        co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
        qemu_coroutine_enter(co, &data);
        while (!data.done) {
            qemu_aio_wait();
        }
    }
    return data.ret;
}

int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                   int nb_sectors, int *pnum)
{
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
    if (ret < 0) {
        return ret;
    }
    return
        (ret & BDRV_BLOCK_DATA) ||
        ((ret & BDRV_BLOCK_ZERO) && !bdrv_has_zero_init(bs));
}
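
/* Illustrative sketch (not part of the original file): walking an image and
 * printing which sector ranges are allocated, relying on the *pnum contract
 * of bdrv_is_allocated() to advance one same-state extent at a time. */
#if 0
static void example_dump_allocation(BlockDriverState *bs)
{
    int64_t sector_num = 0;
    int64_t total = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;

    while (sector_num < total) {
        int pnum;
        int ret = bdrv_is_allocated(bs, sector_num,
                                    MIN(total - sector_num, INT_MAX), &pnum);
        if (ret < 0) {
            break;
        }
        printf("%" PRId64 "+%d: %s\n", sector_num, pnum,
               ret ? "allocated" : "unallocated");
        sector_num += pnum;
    }
}
#endif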
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 */
int bdrv_is_allocated_above(BlockDriverState *top,
                            BlockDriverState *base,
                            int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
                                &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nr_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = intermediate->backing_hd;
    }

    *pnum = n;
    return 0;
}
const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return bs->backing_file;
    else if (bs->encrypted)
        return bs->filename;
    else
        return NULL;
}

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size)
{
    pstrcpy(filename, filename_size, bs->backing_file);
}

int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;

    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_write_compressed)
        return -ENOTSUP;
    if (bdrv_check_request(bs, sector_num, nb_sectors))
        return -EIO;

    assert(QLIST_EMPTY(&bs->dirty_bitmaps));

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}

int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BlockDriver *drv = bs->drv;

    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_get_info)
        return -ENOTSUP;
    memset(bdi, 0, sizeof(*bdi));
    return drv->bdrv_get_info(bs, bdi);
}

ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (drv && drv->bdrv_get_specific_info) {
        return drv->bdrv_get_specific_info(bs);
    }
    return NULL;
}
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = (void *) buf,
        .iov_len    = size,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    return bdrv_writev_vmstate(bs, &qiov, pos);
}

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return -ENOMEDIUM;
    } else if (drv->bdrv_save_vmstate) {
        return drv->bdrv_save_vmstate(bs, qiov, pos);
    } else if (bs->file) {
        return bdrv_writev_vmstate(bs->file, qiov, pos);
    }

    return -ENOTSUP;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;

    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_load_vmstate)
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    if (bs->file)
        return bdrv_load_vmstate(bs->file, buf, pos, size);
    return -ENOTSUP;
}

void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
{
    if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
        return;
    }

    bs->drv->bdrv_debug_event(bs, event);
}

int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
        return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
    }

    return -ENOTSUP;
}

int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
        return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
    }

    return -ENOTSUP;
}

int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_resume) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
        return bs->drv->bdrv_debug_resume(bs, tag);
    }

    return -ENOTSUP;
}

bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
        return bs->drv->bdrv_debug_is_suspended(bs, tag);
    }

    return false;
}

int bdrv_is_snapshot(BlockDriverState *bs)
{
    return !!(bs->open_flags & BDRV_O_SNAPSHOT);
}
/* backing_file can either be relative, or absolute, or a protocol.  If it is
 * relative, it must be relative to the chain.  So, passing in bs->filename
 * from a BDS as backing_file should not be done, as that may be relative to
 * the CWD rather than the chain. */
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
        const char *backing_file)
{
    char *filename_full = NULL;
    char *backing_file_full = NULL;
    char *filename_tmp = NULL;
    int is_protocol = 0;
    BlockDriverState *curr_bs = NULL;
    BlockDriverState *retval = NULL;

    if (!bs || !bs->drv || !backing_file) {
        return NULL;
    }

    filename_full     = g_malloc(PATH_MAX);
    backing_file_full = g_malloc(PATH_MAX);
    filename_tmp      = g_malloc(PATH_MAX);

    is_protocol = path_has_protocol(backing_file);

    for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {

        /* If either of the filename paths is actually a protocol, then
         * compare unmodified paths; otherwise make paths relative */
        if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
            if (strcmp(backing_file, curr_bs->backing_file) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        } else {
            /* If not an absolute filename path, make it relative to the current
             * image's filename path */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         backing_file);

            /* We are going to compare absolute pathnames */
            if (!realpath(filename_tmp, filename_full)) {
                continue;
            }

            /* We need to make sure the backing filename we are comparing against
             * is relative to the current image filename (or absolute) */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         curr_bs->backing_file);

            if (!realpath(filename_tmp, backing_file_full)) {
                continue;
            }

            if (strcmp(backing_file_full, filename_full) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        }
    }

    g_free(filename_full);
    g_free(backing_file_full);
    g_free(filename_tmp);
    return retval;
}

int bdrv_get_backing_file_depth(BlockDriverState *bs)
{
    if (!bs->drv) {
        return 0;
    }

    if (!bs->backing_hd) {
        return 0;
    }

    return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
}

BlockDriverState *bdrv_find_base(BlockDriverState *bs)
{
    BlockDriverState *curr_bs = NULL;

    if (!bs) {
        return NULL;
    }

    curr_bs = bs;

    while (curr_bs->backing_hd) {
        curr_bs = curr_bs->backing_hd;
    }
    return curr_bs;
}
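
/* Illustrative sketch (not part of the original file): resolving the base of
 * a backing chain and reporting its depth, e.g. for a hypothetical
 * diagnostic command. */
#if 0
static void example_report_chain(BlockDriverState *bs)
{
    BlockDriverState *base = bdrv_find_base(bs);
    int depth = bdrv_get_backing_file_depth(bs);

    printf("%s: %d backing file(s), base image '%s'\n",
           bdrv_get_device_name(bs), depth, base ? base->filename : "?");
}
#endif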
/**************************************************************/
/* async I/Os */

BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, false);
}

BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                  QEMUIOVector *qiov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
                                 cb, opaque, true);
}

BlockDriverAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
                                 BDRV_REQ_ZERO_WRITE | flags,
                                 cb, opaque, true);
}
typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockDriverCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}

static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}

static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}

/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
    int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We shouldn't need to add any zeros between the two requests
            assert (reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector     = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov       = reqs[i].qiov;
        }
    }

    return outidx + 1;
}

/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. In error case this function returns -1, and any of the
 * requests may or may not be submitted yet. In particular, this means that the
 * callback will be called for some of the requests, for others it won't. The
 * caller must check the error field of the BlockRequest to wait for the right
 * callbacks (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
                              reqs[i].nb_sectors, reqs[i].flags,
                              multiwrite_cb, mcb,
                              true);
    }

    return 0;
}

void bdrv_aio_cancel(BlockDriverAIOCB *acb)
{
    acb->aiocb_info->cancel(acb);
}
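
/* Illustrative sketch (not part of the original file): batching two adjacent
 * writes through bdrv_aio_multiwrite() so that the merge pass can combine
 * them into one driver request.  The callback, sector layout and vectors are
 * hypothetical. */
#if 0
static void example_write_done(void *opaque, int ret)
{
    if (ret < 0) {
        fprintf(stderr, "multiwrite part failed: %s\n", strerror(-ret));
    }
}

static void example_submit_batch(BlockDriverState *bs,
                                 QEMUIOVector *qiov0, QEMUIOVector *qiov1)
{
    BlockRequest reqs[2] = {
        { .sector = 0, .nb_sectors = 8, .qiov = qiov0,
          .cb = example_write_done, .opaque = NULL },
        { .sector = 8, .nb_sectors = 8, .qiov = qiov1,
          .cb = example_write_done, .opaque = NULL },
    };

    if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
        fprintf(stderr, "multiwrite submission failed\n");
    }
}
#endif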
/**************************************************************/
/* async block device emulation */

typedef struct BlockDriverAIOCBSync {
    BlockDriverAIOCB common;
    QEMUBH *bh;
    int ret;
    /* vector translation state */
    QEMUIOVector *qiov;
    uint8_t *bounce;
    int is_write;
} BlockDriverAIOCBSync;

static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBSync *acb =
        container_of(blockacb, BlockDriverAIOCBSync, common);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static const AIOCBInfo bdrv_em_aiocb_info = {
    .aiocb_size         = sizeof(BlockDriverAIOCBSync),
    .cancel             = bdrv_aio_cancel_em,
};

static void bdrv_aio_bh_cb(void *opaque)
{
    BlockDriverAIOCBSync *acb = opaque;

    if (!acb->is_write)
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov,
                                            int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque,
                                            int is_write)

{
    BlockDriverAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    acb->bounce = qemu_blockalign(bs, qiov->size);
    acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);

    if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    qemu_bh_schedule(acb->bh);

    return &acb->common;
}

static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}
typedef struct BlockDriverAIOCBCoroutine {
    BlockDriverAIOCB common;
    BlockRequest req;
    bool is_write;
    bool *done;
    QEMUBH *bh;
} BlockDriverAIOCBCoroutine;

static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBCoroutine *acb =
        container_of(blockacb, BlockDriverAIOCBCoroutine, common);
    bool done = false;

    acb->done = &done;
    while (!done) {
        qemu_aio_wait();
    }
}

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockDriverAIOCBCoroutine),
    .cancel             = bdrv_aio_co_cancel_em,
};

static void bdrv_co_em_bh(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;

    acb->common.cb(acb->common.opaque, acb->req.error);

    if (acb->done) {
        *acb->done = true;
    }

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);
}

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    }

    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->req.flags = flags;
    acb->is_write = is_write;
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}
bdrv_aio_flush_co_entry(void *opaque
)
4578 BlockDriverAIOCBCoroutine
*acb
= opaque
;
4579 BlockDriverState
*bs
= acb
->common
.bs
;
4581 acb
->req
.error
= bdrv_co_flush(bs
);
4582 acb
->bh
= qemu_bh_new(bdrv_co_em_bh
, acb
);
4583 qemu_bh_schedule(acb
->bh
);
4586 BlockDriverAIOCB
*bdrv_aio_flush(BlockDriverState
*bs
,
4587 BlockDriverCompletionFunc
*cb
, void *opaque
)
4589 trace_bdrv_aio_flush(bs
, opaque
);
4592 BlockDriverAIOCBCoroutine
*acb
;
4594 acb
= qemu_aio_get(&bdrv_em_co_aiocb_info
, bs
, cb
, opaque
);
4597 co
= qemu_coroutine_create(bdrv_aio_flush_co_entry
);
4598 qemu_coroutine_enter(co
, acb
);
4600 return &acb
->common
;
4603 static void coroutine_fn
bdrv_aio_discard_co_entry(void *opaque
)
4605 BlockDriverAIOCBCoroutine
*acb
= opaque
;
4606 BlockDriverState
*bs
= acb
->common
.bs
;
4608 acb
->req
.error
= bdrv_co_discard(bs
, acb
->req
.sector
, acb
->req
.nb_sectors
);
4609 acb
->bh
= qemu_bh_new(bdrv_co_em_bh
, acb
);
4610 qemu_bh_schedule(acb
->bh
);
4613 BlockDriverAIOCB
*bdrv_aio_discard(BlockDriverState
*bs
,
4614 int64_t sector_num
, int nb_sectors
,
4615 BlockDriverCompletionFunc
*cb
, void *opaque
)
4618 BlockDriverAIOCBCoroutine
*acb
;
4620 trace_bdrv_aio_discard(bs
, sector_num
, nb_sectors
, opaque
);
4622 acb
= qemu_aio_get(&bdrv_em_co_aiocb_info
, bs
, cb
, opaque
);
4623 acb
->req
.sector
= sector_num
;
4624 acb
->req
.nb_sectors
= nb_sectors
;
4626 co
= qemu_coroutine_create(bdrv_aio_discard_co_entry
);
4627 qemu_coroutine_enter(co
, acb
);
4629 return &acb
->common
;
4632 void bdrv_init(void)
4634 module_call_init(MODULE_INIT_BLOCK
);
4637 void bdrv_init_with_whitelist(void)
4639 use_bdrv_whitelist
= 1;
4643 void *qemu_aio_get(const AIOCBInfo
*aiocb_info
, BlockDriverState
*bs
,
4644 BlockDriverCompletionFunc
*cb
, void *opaque
)
4646 BlockDriverAIOCB
*acb
;
4648 acb
= g_slice_alloc(aiocb_info
->aiocb_size
);
4649 acb
->aiocb_info
= aiocb_info
;
4652 acb
->opaque
= opaque
;
4656 void qemu_aio_release(void *p
)
4658 BlockDriverAIOCB
*acb
= p
;
4659 g_slice_free1(acb
->aiocb_info
->aiocb_size
, acb
);
/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}

static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockDriverAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}

static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}

static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }
    if (ret < 0) {
        return ret;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    return bdrv_co_flush(bs->file);
}
void bdrv_invalidate_cache(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_invalidate_cache) {
        bs->drv->bdrv_invalidate_cache(bs);
    }
}

void bdrv_invalidate_cache_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bdrv_invalidate_cache(bs);
    }
}

void bdrv_clear_incoming_migration_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
    }
}

int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}

typedef struct DiscardCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int ret;
} DiscardCo;

static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    DiscardCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}
/* if no limit is specified in the BlockLimits use a default
 * of 32768 512-byte sectors (16 MiB) per request.
 */
#define MAX_DISCARD_DEFAULT 32768

int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    int max_discard;

    if (!bs->drv) {
        return -ENOMEDIUM;
    } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    } else if (bs->read_only) {
        return -EROFS;
    }

    bdrv_reset_dirty(bs, sector_num, nb_sectors);

    /* Do nothing if disabled. */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
        return 0;
    }

    max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
    while (nb_sectors > 0) {
        int ret;
        int num = nb_sectors;

        /* align request */
        if (bs->bl.discard_alignment &&
            num >= bs->bl.discard_alignment &&
            sector_num % bs->bl.discard_alignment) {
            if (num > bs->bl.discard_alignment) {
                num = bs->bl.discard_alignment;
            }
            num -= sector_num % bs->bl.discard_alignment;
        }

        /* limit request size */
        if (num > max_discard) {
            num = max_discard;
        }

        if (bs->drv->bdrv_co_discard) {
            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
        } else {
            BlockDriverAIOCB *acb;
            CoroutineIOCompletion co = {
                .coroutine = qemu_coroutine_self(),
            };

            acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
                                            bdrv_co_io_em_complete, &co);
            if (acb == NULL) {
                return -EIO;
            } else {
                qemu_coroutine_yield();
                ret = co.ret;
            }
        }
        if (ret && ret != -ENOTSUP) {
            return ret;
        }

        sector_num += num;
        nb_sectors -= num;
    }
    return 0;
}

int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    DiscardCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}
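
/* Illustrative sketch (not part of the original file): discarding a range
 * from synchronous context.  The range is hypothetical; note that
 * bdrv_co_discard() silently ignores the request when the image was not
 * opened with BDRV_O_UNMAP. */
#if 0
static int example_trim(BlockDriverState *bs)
{
    if (!(bdrv_get_flags(bs) & BDRV_O_UNMAP)) {
        return 0;   /* discard disabled; bdrv_discard() would be a no-op */
    }
    return bdrv_discard(bs, 0, 2048);   /* drop the first 1 MiB */
}
#endif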
4948 /**************************************************************/
4949 /* removable device support */
4952 * Return TRUE if the media is present
4954 int bdrv_is_inserted(BlockDriverState
*bs
)
4956 BlockDriver
*drv
= bs
->drv
;
4960 if (!drv
->bdrv_is_inserted
)
4962 return drv
->bdrv_is_inserted(bs
);
/**
 * Return whether the media changed since the last call to this
 * function, or -ENOTSUP if we don't know.  Most drivers don't know.
 */
int bdrv_media_changed(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_media_changed) {
        return drv->bdrv_media_changed(bs);
    }
    return -ENOTSUP;
}
/**
 * If eject_flag is TRUE, eject the media. Otherwise, close the tray.
 */
void bdrv_eject(BlockDriverState *bs, bool eject_flag)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_eject) {
        drv->bdrv_eject(bs, eject_flag);
    }

    if (bs->device_name[0] != '\0') {
        bdrv_emit_qmp_eject_event(bs, eject_flag);
    }
}
/**
 * Lock or unlock the media (if it is locked, the user won't be able
 * to eject it manually).
 */
void bdrv_lock_medium(BlockDriverState *bs, bool locked)
{
    BlockDriver *drv = bs->drv;

    trace_bdrv_lock_medium(bs, locked);

    if (drv && drv->bdrv_lock_medium) {
        drv->bdrv_lock_medium(bs, locked);
    }
}
/* needed for generic scsi interface */

int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_ioctl) {
        return drv->bdrv_ioctl(bs, req, buf);
    }
    return -ENOTSUP;
}
BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_aio_ioctl) {
        return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
    }
    return NULL;
}
void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
{
    bs->guest_block_size = align;
}
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}
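/* Usage sketch (illustrative): bounce buffers destined for O_DIRECT I/O
 * must honour the backend's memory alignment, so they are allocated with
 * this helper rather than g_malloc(), and freed with qemu_vfree():
 *
 *     uint8_t *bounce = qemu_blockalign(bs, BDRV_SECTOR_SIZE);
 *     ...
 *     qemu_vfree(bounce);
 */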
/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_opt_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}
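/* For example (illustrative), a one-element vector built from
 * qemu_blockalign()ed memory whose length is a multiple of the optimal
 * alignment passes the check:
 *
 *     QEMUIOVector qiov;
 *     size_t align = bdrv_opt_mem_align(bs);
 *     void *buf = qemu_blockalign(bs, align);
 *
 *     qemu_iovec_init(&qiov, 1);
 *     qemu_iovec_add(&qiov, buf, align);
 *     assert(bdrv_qiov_is_aligned(bs, &qiov));
 */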
BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity)
{
    int64_t bitmap_size;
    BdrvDirtyBitmap *bitmap;

    assert((granularity & (granularity - 1)) == 0);

    granularity >>= BDRV_SECTOR_BITS;
    assert(granularity);
    bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS);
    bitmap = g_malloc0(sizeof(BdrvDirtyBitmap));
    bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
    QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
    return bitmap;
}
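/* Worked example (illustrative numbers): for a 64 KiB granularity,
 * granularity >> BDRV_SECTOR_BITS is 65536 / 512 = 128 sectors, and
 * ffs(128) - 1 = 7, so the hbitmap tracks the image at 2^7 = 128 sectors
 * (one granularity unit) per bit.
 */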
void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    BdrvDirtyBitmap *bm, *next;
    QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
        if (bm == bitmap) {
            QLIST_REMOVE(bitmap, list);
            hbitmap_free(bitmap->bitmap);
            g_free(bitmap);
            return;
        }
    }
}
BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
{
    BdrvDirtyBitmap *bm;
    BlockDirtyInfoList *list = NULL;
    BlockDirtyInfoList **plist = &list;

    QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
        BlockDirtyInfo *info = g_malloc0(sizeof(BlockDirtyInfo));
        BlockDirtyInfoList *entry = g_malloc0(sizeof(BlockDirtyInfoList));
        info->count = bdrv_get_dirty_count(bs, bm);
        info->granularity =
            ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
        entry->value = info;
        *plist = entry;
        plist = &entry->next;
    }

    return list;
}
int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
{
    if (bitmap) {
        return hbitmap_get(bitmap->bitmap, sector);
    } else {
        return 0;
    }
}
void bdrv_dirty_iter_init(BlockDriverState *bs,
                          BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
{
    hbitmap_iter_init(hbi, bitmap->bitmap, 0);
}
void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
                    int nr_sectors)
{
    BdrvDirtyBitmap *bitmap;
    QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
        hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
    }
}
void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
{
    BdrvDirtyBitmap *bitmap;
    QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
        hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
    }
}
int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
{
    return hbitmap_count(bitmap->bitmap);
}
/* Get a reference to bs */
void bdrv_ref(BlockDriverState *bs)
{
    bs->refcnt++;
}
/* Release a previously grabbed reference to bs.
 * If after releasing, reference count is zero, the BlockDriverState is
 * deleted. */
void bdrv_unref(BlockDriverState *bs)
{
    assert(bs->refcnt > 0);
    if (--bs->refcnt == 0) {
        bdrv_delete(bs);
    }
}
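/* Usage sketch (illustrative): code that keeps a BlockDriverState pointer
 * beyond the current call chain takes a reference and drops it when done:
 *
 *     bdrv_ref(bs);      // keep bs alive while we hold the pointer
 *     ...
 *     bdrv_unref(bs);    // may delete bs if this was the last reference
 */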
void bdrv_set_in_use(BlockDriverState *bs, int in_use)
{
    assert(bs->in_use != in_use);
    bs->in_use = in_use;
}
int bdrv_in_use(BlockDriverState *bs)
{
    return bs->in_use;
}
void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
{
    return (bs->iostatus_enabled &&
           (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            bs->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}
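/* For instance (an illustrative configuration, assuming the usual -drive
 * error options): "-drive file=disk.img,werror=stop,rerror=report" maps to
 * on_write_error == BLOCKDEV_ON_ERROR_STOP and satisfies this check, while
 * werror=report,rerror=report would leave the I/O status disabled.
 */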
void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}
void bdrv_iostatus_reset(BlockDriverState *bs)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}
void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
{
    assert(bdrv_iostatus_is_enabled(bs));
    if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                         BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
void
bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
        enum BlockAcctType type)
{
    assert(type < BDRV_MAX_IOTYPE);

    cookie->bytes = bytes;
    cookie->start_time_ns = get_clock();
    cookie->type = type;
}
void
bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
{
    assert(cookie->type < BDRV_MAX_IOTYPE);

    bs->nr_bytes[cookie->type] += cookie->bytes;
    bs->nr_ops[cookie->type]++;
    bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
}
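/* Usage sketch (illustrative): device models bracket each guest request
 * with a cookie so query-blockstats can report bytes, operation counts and
 * total service time per I/O type:
 *
 *     BlockAcctCookie cookie;
 *
 *     bdrv_acct_start(bs, &cookie, nb_sectors * BDRV_SECTOR_SIZE,
 *                     BDRV_ACCT_READ);
 *     ... submit the read and wait for its completion ...
 *     bdrv_acct_done(bs, &cookie);
 */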
void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     Error **errp, bool quiet)
{
    QEMUOptionParameter *param = NULL, *create_options = NULL;
    QEMUOptionParameter *backing_fmt, *backing_file, *size;
    BlockDriver *drv, *proto_drv;
    BlockDriver *backing_drv = NULL;
    Error *local_err = NULL;
    int ret = 0;

    /* Find driver and parse its options */
    drv = bdrv_find_format(fmt);
    if (!drv) {
        error_setg(errp, "Unknown file format '%s'", fmt);
        return;
    }

    proto_drv = bdrv_find_protocol(filename, true);
    if (!proto_drv) {
        error_setg(errp, "Unknown protocol '%s'", filename);
        return;
    }

    create_options = append_option_parameters(create_options,
                                              drv->create_options);
    create_options = append_option_parameters(create_options,
                                              proto_drv->create_options);

    /* Create parameter list with default values */
    param = parse_option_parameters("", create_options, param);

    set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);

    /* Parse -o options */
    if (options) {
        param = parse_option_parameters(options, create_options, param);
        if (param == NULL) {
            error_setg(errp, "Invalid options for file format '%s'", fmt);
            goto out;
        }
    }

    if (base_filename) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
                                 base_filename)) {
            error_setg(errp, "Backing file not supported for file format '%s'",
                       fmt);
            goto out;
        }
    }

    if (base_fmt) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
            error_setg(errp, "Backing file format not supported for file "
                             "format '%s'", fmt);
            goto out;
        }
    }

    backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
    if (backing_file && backing_file->value.s) {
        if (!strcmp(filename, backing_file->value.s)) {
            error_setg(errp, "Trying to create an image with the "
                             "same filename as the backing file");
            goto out;
        }
    }

    backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
    if (backing_fmt && backing_fmt->value.s) {
        backing_drv = bdrv_find_format(backing_fmt->value.s);
        if (!backing_drv) {
            error_setg(errp, "Unknown backing file format '%s'",
                       backing_fmt->value.s);
            goto out;
        }
    }

    /* The size for the image must always be specified, with one exception:
     * If we are using a backing file, we can obtain the size from there */
    size = get_option_parameter(param, BLOCK_OPT_SIZE);
    if (size && size->value.n == -1) {
        if (backing_file && backing_file->value.s) {
            BlockDriverState *bs;
            uint64_t size;
            char buf[32];
            int back_flags;

            /* backing files always opened read-only */
            back_flags =
                flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

            bs = NULL;
            ret = bdrv_open(&bs, backing_file->value.s, NULL, NULL, back_flags,
                            backing_drv, &local_err);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Could not open '%s': %s",
                                 backing_file->value.s,
                                 error_get_pretty(local_err));
                error_free(local_err);
                local_err = NULL;
                goto out;
            }
            bdrv_get_geometry(bs, &size);
            size *= BDRV_SECTOR_SIZE;

            snprintf(buf, sizeof(buf), "%" PRId64, size);
            set_option_parameter(param, BLOCK_OPT_SIZE, buf);

            bdrv_unref(bs);
        } else {
            error_setg(errp, "Image creation needs a size parameter");
            goto out;
        }
    }

    if (!quiet) {
        printf("Formatting '%s', fmt=%s ", filename, fmt);
        print_option_parameters(param);
        puts("");
    }
    ret = bdrv_create(drv, filename, param, &local_err);
    if (ret == -EFBIG) {
        /* This is generally a better message than whatever the driver would
         * deliver (especially because of the cluster_size_hint), since that
         * is most probably not much different from "image too large". */
        const char *cluster_size_hint = "";
        if (get_option_parameter(create_options, BLOCK_OPT_CLUSTER_SIZE)) {
            cluster_size_hint = " (try using a larger cluster size)";
        }
        error_setg(errp, "The image size is too large for file format '%s'"
                   "%s", fmt, cluster_size_hint);
        error_free(local_err);
        local_err = NULL;
    }

out:
    free_option_parameters(create_options);
    free_option_parameters(param);

    if (local_err) {
        error_propagate(errp, local_err);
    }
}
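/* Usage sketch (illustrative; the file names and the 0 for open flags are
 * hypothetical): creating a qcow2 overlay whose size is inherited from its
 * backing file, roughly what "qemu-img create -f qcow2 -b base.qcow2
 * overlay.qcow2" boils down to. An img_size of (uint64_t)-1 triggers the
 * size-from-backing-file path above:
 *
 *     Error *err = NULL;
 *
 *     bdrv_img_create("overlay.qcow2", "qcow2", "base.qcow2", NULL,
 *                     NULL, (uint64_t)-1, 0, &err, false);
 *     if (err) {
 *         error_report("%s", error_get_pretty(err));
 *         error_free(err);
 *     }
 */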
AioContext *bdrv_get_aio_context(BlockDriverState *bs)
{
    /* Currently BlockDriverState always uses the main loop AioContext */
    return qemu_get_aio_context();
}
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
int bdrv_amend_options(BlockDriverState *bs, QEMUOptionParameter *options)
{
    if (bs->drv->bdrv_amend_options == NULL) {
        return -ENOTSUP;
    }
    return bs->drv->bdrv_amend_options(bs, options);
}
/* Used to recurse on single child block filters.
 * Single child block filters store their child in bs->file.
 */
bool bdrv_generic_is_first_non_filter(BlockDriverState *bs,
                                      BlockDriverState *candidate)
{
    /* return false if basic checks fail */
    if (!bs || !bs->drv) {
        return false;
    }

    /* a non-filter driver terminates the recursion: the candidate either
     * is this bs or is not reachable through this chain */
    if (!bs->drv->authorizations[BS_IS_A_FILTER]) {
        if (bs == candidate) {
            return true;
        } else {
            return false;
        }
    }

    /* the driver is a block filter but does not allow the recursion to
     * pass down to its child */
    if (!bs->drv->authorizations[BS_FILTER_PASS_DOWN]) {
        return false;
    }

    /* recurse on the (single) child, stored in bs->file */
    return bdrv_recurse_is_first_non_filter(bs->file, candidate);
}
bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
                                      BlockDriverState *candidate)
{
    if (bs->drv && bs->drv->bdrv_recurse_is_first_non_filter) {
        return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
    }

    return bdrv_generic_is_first_non_filter(bs, candidate);
}
/* This function checks if the candidate is the first non-filter bs down its
 * bs chain. Since we don't have pointers to parents it explores all bs chains
 * from the top. Some filters can choose not to pass down the recursion.
 */
bool bdrv_is_first_non_filter(BlockDriverState *candidate)
{
    BlockDriverState *bs;

    /* walk down the bs forest recursively */
    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        bool perm;

        perm = bdrv_recurse_is_first_non_filter(bs, candidate);

        /* candidate is the first non filter */
        if (perm) {
            return true;
        }
    }

    return false;
}
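/* Example (illustrative, with a hypothetical pass-down filter "F"): for a
 * chain F -> qcow2 -> file, the walk enters F, which authorizes recursion,
 * and reaches the qcow2 bs, a non-filter. bdrv_is_first_non_filter() is
 * therefore true for the qcow2 bs and false for the file bs below it,
 * which the recursion never compares against the candidate.
 */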