2 * QEMU System Emulator block driver
4 * Copyright (c) 2003 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24 #include "config-host.h"
25 #include "qemu-common.h"
28 #include "block_int.h"
31 #include "qemu-coroutine.h"
32 #include "qmp-commands.h"
33 #include "qemu-timer.h"
36 #include <sys/types.h>
38 #include <sys/ioctl.h>
39 #include <sys/queue.h>
49 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
52 BDRV_REQ_COPY_ON_READ
= 0x1,
53 BDRV_REQ_ZERO_WRITE
= 0x2,
56 static void bdrv_dev_change_media_cb(BlockDriverState
*bs
, bool load
);
57 static BlockDriverAIOCB
*bdrv_aio_readv_em(BlockDriverState
*bs
,
58 int64_t sector_num
, QEMUIOVector
*qiov
, int nb_sectors
,
59 BlockDriverCompletionFunc
*cb
, void *opaque
);
60 static BlockDriverAIOCB
*bdrv_aio_writev_em(BlockDriverState
*bs
,
61 int64_t sector_num
, QEMUIOVector
*qiov
, int nb_sectors
,
62 BlockDriverCompletionFunc
*cb
, void *opaque
);
63 static int coroutine_fn
bdrv_co_readv_em(BlockDriverState
*bs
,
64 int64_t sector_num
, int nb_sectors
,
66 static int coroutine_fn
bdrv_co_writev_em(BlockDriverState
*bs
,
67 int64_t sector_num
, int nb_sectors
,
69 static int coroutine_fn
bdrv_co_do_readv(BlockDriverState
*bs
,
70 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
,
71 BdrvRequestFlags flags
);
72 static int coroutine_fn
bdrv_co_do_writev(BlockDriverState
*bs
,
73 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
,
74 BdrvRequestFlags flags
);
75 static BlockDriverAIOCB
*bdrv_co_aio_rw_vector(BlockDriverState
*bs
,
79 BlockDriverCompletionFunc
*cb
,
82 static void coroutine_fn
bdrv_co_do_rw(void *opaque
);
83 static int coroutine_fn
bdrv_co_do_write_zeroes(BlockDriverState
*bs
,
84 int64_t sector_num
, int nb_sectors
);
86 static bool bdrv_exceed_bps_limits(BlockDriverState
*bs
, int nb_sectors
,
87 bool is_write
, double elapsed_time
, uint64_t *wait
);
88 static bool bdrv_exceed_iops_limits(BlockDriverState
*bs
, bool is_write
,
89 double elapsed_time
, uint64_t *wait
);
90 static bool bdrv_exceed_io_limits(BlockDriverState
*bs
, int nb_sectors
,
91 bool is_write
, int64_t *wait
);
93 static QTAILQ_HEAD(, BlockDriverState
) bdrv_states
=
94 QTAILQ_HEAD_INITIALIZER(bdrv_states
);
96 static QLIST_HEAD(, BlockDriver
) bdrv_drivers
=
97 QLIST_HEAD_INITIALIZER(bdrv_drivers
);
99 /* The device to use for VM snapshots */
100 static BlockDriverState
*bs_snapshots
;
102 /* If non-zero, use only whitelisted block drivers */
103 static int use_bdrv_whitelist
;
/* Return non-zero if 'filename' begins with a Windows drive-letter prefix
 * such as "c:" or "Z:" (one ASCII letter followed by a colon). */
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}
113 int is_windows_drive(const char *filename
)
115 if (is_windows_drive_prefix(filename
) &&
118 if (strstart(filename
, "\\\\.\\", NULL
) ||
119 strstart(filename
, "//./", NULL
))
125 /* throttling disk I/O limits */
126 void bdrv_io_limits_disable(BlockDriverState
*bs
)
128 bs
->io_limits_enabled
= false;
130 while (qemu_co_queue_next(&bs
->throttled_reqs
));
132 if (bs
->block_timer
) {
133 qemu_del_timer(bs
->block_timer
);
134 qemu_free_timer(bs
->block_timer
);
135 bs
->block_timer
= NULL
;
141 memset(&bs
->io_base
, 0, sizeof(bs
->io_base
));
144 static void bdrv_block_timer(void *opaque
)
146 BlockDriverState
*bs
= opaque
;
148 qemu_co_queue_next(&bs
->throttled_reqs
);
151 void bdrv_io_limits_enable(BlockDriverState
*bs
)
153 qemu_co_queue_init(&bs
->throttled_reqs
);
154 bs
->block_timer
= qemu_new_timer_ns(vm_clock
, bdrv_block_timer
, bs
);
155 bs
->slice_time
= 5 * BLOCK_IO_SLICE_TIME
;
156 bs
->slice_start
= qemu_get_clock_ns(vm_clock
);
157 bs
->slice_end
= bs
->slice_start
+ bs
->slice_time
;
158 memset(&bs
->io_base
, 0, sizeof(bs
->io_base
));
159 bs
->io_limits_enabled
= true;
162 bool bdrv_io_limits_enabled(BlockDriverState
*bs
)
164 BlockIOLimit
*io_limits
= &bs
->io_limits
;
165 return io_limits
->bps
[BLOCK_IO_LIMIT_READ
]
166 || io_limits
->bps
[BLOCK_IO_LIMIT_WRITE
]
167 || io_limits
->bps
[BLOCK_IO_LIMIT_TOTAL
]
168 || io_limits
->iops
[BLOCK_IO_LIMIT_READ
]
169 || io_limits
->iops
[BLOCK_IO_LIMIT_WRITE
]
170 || io_limits
->iops
[BLOCK_IO_LIMIT_TOTAL
];
173 static void bdrv_io_limits_intercept(BlockDriverState
*bs
,
174 bool is_write
, int nb_sectors
)
176 int64_t wait_time
= -1;
178 if (!qemu_co_queue_empty(&bs
->throttled_reqs
)) {
179 qemu_co_queue_wait(&bs
->throttled_reqs
);
182 /* In fact, we hope to keep each request's timing, in FIFO mode. The next
183 * throttled requests will not be dequeued until the current request is
184 * allowed to be serviced. So if the current request still exceeds the
185 * limits, it will be inserted to the head. All requests followed it will
186 * be still in throttled_reqs queue.
189 while (bdrv_exceed_io_limits(bs
, nb_sectors
, is_write
, &wait_time
)) {
190 qemu_mod_timer(bs
->block_timer
,
191 wait_time
+ qemu_get_clock_ns(vm_clock
));
192 qemu_co_queue_wait_insert_head(&bs
->throttled_reqs
);
195 qemu_co_queue_next(&bs
->throttled_reqs
);
/* check if the path starts with "<protocol>:" */
/* Returns non-zero when 'path' carries a protocol prefix (e.g. "nbd:...").
 * Windows drive letters and device paths are explicitly not protocols. */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    /* A ':' before any path separator marks a protocol prefix. */
    return *p == ':';
}
/* Return 1 if 'path' is absolute for the host platform, else 0. */
int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}
/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URL are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0) {
        return;
    }
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        /* Skip a "protocol:" prefix in base_path, if present. */
        p = strchr(base_path, ':');
        if (p) {
            p++;
        } else {
            p = base_path;
        }
        /* Find the last path separator in base_path. */
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1) {
                p1 = p2;
            }
        }
#endif
        if (p1) {
            p1++;
        } else {
            p1 = base_path;
        }
        if (p1 > p) {
            p = p1;
        }
        len = p - base_path;
        if (len > dest_size - 1) {
            len = dest_size - 1;
        }
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
273 void bdrv_get_full_backing_filename(BlockDriverState
*bs
, char *dest
, size_t sz
)
275 if (bs
->backing_file
[0] == '\0' || path_has_protocol(bs
->backing_file
)) {
276 pstrcpy(dest
, sz
, bs
->backing_file
);
278 path_combine(dest
, sz
, bs
->filename
, bs
->backing_file
);
282 void bdrv_register(BlockDriver
*bdrv
)
284 /* Block drivers without coroutine functions need emulation */
285 if (!bdrv
->bdrv_co_readv
) {
286 bdrv
->bdrv_co_readv
= bdrv_co_readv_em
;
287 bdrv
->bdrv_co_writev
= bdrv_co_writev_em
;
289 /* bdrv_co_readv_em()/brdv_co_writev_em() work in terms of aio, so if
290 * the block driver lacks aio we need to emulate that too.
292 if (!bdrv
->bdrv_aio_readv
) {
293 /* add AIO emulation layer */
294 bdrv
->bdrv_aio_readv
= bdrv_aio_readv_em
;
295 bdrv
->bdrv_aio_writev
= bdrv_aio_writev_em
;
299 QLIST_INSERT_HEAD(&bdrv_drivers
, bdrv
, list
);
302 /* create a new block device (by default it is empty) */
303 BlockDriverState
*bdrv_new(const char *device_name
)
305 BlockDriverState
*bs
;
307 bs
= g_malloc0(sizeof(BlockDriverState
));
308 pstrcpy(bs
->device_name
, sizeof(bs
->device_name
), device_name
);
309 if (device_name
[0] != '\0') {
310 QTAILQ_INSERT_TAIL(&bdrv_states
, bs
, list
);
312 bdrv_iostatus_disable(bs
);
316 BlockDriver
*bdrv_find_format(const char *format_name
)
319 QLIST_FOREACH(drv1
, &bdrv_drivers
, list
) {
320 if (!strcmp(drv1
->format_name
, format_name
)) {
327 static int bdrv_is_whitelisted(BlockDriver
*drv
)
329 static const char *whitelist
[] = {
330 CONFIG_BDRV_WHITELIST
335 return 1; /* no whitelist, anything goes */
337 for (p
= whitelist
; *p
; p
++) {
338 if (!strcmp(drv
->format_name
, *p
)) {
345 BlockDriver
*bdrv_find_whitelisted_format(const char *format_name
)
347 BlockDriver
*drv
= bdrv_find_format(format_name
);
348 return drv
&& bdrv_is_whitelisted(drv
) ? drv
: NULL
;
351 typedef struct CreateCo
{
354 QEMUOptionParameter
*options
;
358 static void coroutine_fn
bdrv_create_co_entry(void *opaque
)
360 CreateCo
*cco
= opaque
;
363 cco
->ret
= cco
->drv
->bdrv_create(cco
->filename
, cco
->options
);
366 int bdrv_create(BlockDriver
*drv
, const char* filename
,
367 QEMUOptionParameter
*options
)
374 .filename
= g_strdup(filename
),
379 if (!drv
->bdrv_create
) {
383 if (qemu_in_coroutine()) {
384 /* Fast-path if already in coroutine context */
385 bdrv_create_co_entry(&cco
);
387 co
= qemu_coroutine_create(bdrv_create_co_entry
);
388 qemu_coroutine_enter(co
, &cco
);
389 while (cco
.ret
== NOT_DONE
) {
395 g_free(cco
.filename
);
400 int bdrv_create_file(const char* filename
, QEMUOptionParameter
*options
)
404 drv
= bdrv_find_protocol(filename
);
409 return bdrv_create(drv
, filename
, options
);
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;

    tmpdir = getenv("TMPDIR");
    if (!tmpdir) {
        tmpdir = "/tmp";
    }
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    /* FIX: if close() fails, remove the file we just created instead of
     * leaking it on disk. */
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
444 * Detect host devices. By convention, /dev/cdrom[N] is always
445 * recognized as a host CDROM.
447 static BlockDriver
*find_hdev_driver(const char *filename
)
449 int score_max
= 0, score
;
450 BlockDriver
*drv
= NULL
, *d
;
452 QLIST_FOREACH(d
, &bdrv_drivers
, list
) {
453 if (d
->bdrv_probe_device
) {
454 score
= d
->bdrv_probe_device(filename
);
455 if (score
> score_max
) {
465 BlockDriver
*bdrv_find_protocol(const char *filename
)
472 /* TODO Drivers without bdrv_file_open must be specified explicitly */
475 * XXX(hch): we really should not let host device detection
476 * override an explicit protocol specification, but moving this
477 * later breaks access to device names with colons in them.
478 * Thanks to the brain-dead persistent naming schemes on udev-
479 * based Linux systems those actually are quite common.
481 drv1
= find_hdev_driver(filename
);
486 if (!path_has_protocol(filename
)) {
487 return bdrv_find_format("file");
489 p
= strchr(filename
, ':');
492 if (len
> sizeof(protocol
) - 1)
493 len
= sizeof(protocol
) - 1;
494 memcpy(protocol
, filename
, len
);
495 protocol
[len
] = '\0';
496 QLIST_FOREACH(drv1
, &bdrv_drivers
, list
) {
497 if (drv1
->protocol_name
&&
498 !strcmp(drv1
->protocol_name
, protocol
)) {
505 static int find_image_format(const char *filename
, BlockDriver
**pdrv
)
507 int ret
, score
, score_max
;
508 BlockDriver
*drv1
, *drv
;
510 BlockDriverState
*bs
;
512 ret
= bdrv_file_open(&bs
, filename
, 0);
518 /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
519 if (bs
->sg
|| !bdrv_is_inserted(bs
)) {
521 drv
= bdrv_find_format("raw");
529 ret
= bdrv_pread(bs
, 0, buf
, sizeof(buf
));
538 QLIST_FOREACH(drv1
, &bdrv_drivers
, list
) {
539 if (drv1
->bdrv_probe
) {
540 score
= drv1
->bdrv_probe(buf
, ret
, filename
);
541 if (score
> score_max
) {
555 * Set the current 'total_sectors' value
557 static int refresh_total_sectors(BlockDriverState
*bs
, int64_t hint
)
559 BlockDriver
*drv
= bs
->drv
;
561 /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
565 /* query actual device if possible, otherwise just trust the hint */
566 if (drv
->bdrv_getlength
) {
567 int64_t length
= drv
->bdrv_getlength(bs
);
571 hint
= length
>> BDRV_SECTOR_BITS
;
574 bs
->total_sectors
= hint
;
579 * Set open flags for a given cache mode
581 * Return 0 on success, -1 if the cache mode was invalid.
583 int bdrv_parse_cache_flags(const char *mode
, int *flags
)
585 *flags
&= ~BDRV_O_CACHE_MASK
;
587 if (!strcmp(mode
, "off") || !strcmp(mode
, "none")) {
588 *flags
|= BDRV_O_NOCACHE
| BDRV_O_CACHE_WB
;
589 } else if (!strcmp(mode
, "directsync")) {
590 *flags
|= BDRV_O_NOCACHE
;
591 } else if (!strcmp(mode
, "writeback")) {
592 *flags
|= BDRV_O_CACHE_WB
;
593 } else if (!strcmp(mode
, "unsafe")) {
594 *flags
|= BDRV_O_CACHE_WB
;
595 *flags
|= BDRV_O_NO_FLUSH
;
596 } else if (!strcmp(mode
, "writethrough")) {
597 /* this is the default */
606 * The copy-on-read flag is actually a reference count so multiple users may
607 * use the feature without worrying about clobbering its previous state.
608 * Copy-on-read stays enabled until all users have called to disable it.
610 void bdrv_enable_copy_on_read(BlockDriverState
*bs
)
615 void bdrv_disable_copy_on_read(BlockDriverState
*bs
)
617 assert(bs
->copy_on_read
> 0);
622 * Common part for opening disk images and files
624 static int bdrv_open_common(BlockDriverState
*bs
, const char *filename
,
625 int flags
, BlockDriver
*drv
)
630 assert(bs
->file
== NULL
);
632 trace_bdrv_open_common(bs
, filename
, flags
, drv
->format_name
);
634 bs
->open_flags
= flags
;
635 bs
->buffer_alignment
= 512;
637 assert(bs
->copy_on_read
== 0); /* bdrv_new() and bdrv_close() make it so */
638 if ((flags
& BDRV_O_RDWR
) && (flags
& BDRV_O_COPY_ON_READ
)) {
639 bdrv_enable_copy_on_read(bs
);
642 pstrcpy(bs
->filename
, sizeof(bs
->filename
), filename
);
644 if (use_bdrv_whitelist
&& !bdrv_is_whitelisted(drv
)) {
649 bs
->opaque
= g_malloc0(drv
->instance_size
);
651 bs
->enable_write_cache
= !!(flags
& BDRV_O_CACHE_WB
);
654 * Clear flags that are internal to the block layer before opening the
657 open_flags
= flags
& ~(BDRV_O_SNAPSHOT
| BDRV_O_NO_BACKING
);
660 * Snapshots should be writable.
662 if (bs
->is_temporary
) {
663 open_flags
|= BDRV_O_RDWR
;
666 bs
->keep_read_only
= bs
->read_only
= !(open_flags
& BDRV_O_RDWR
);
668 /* Open the image, either directly or using a protocol */
669 if (drv
->bdrv_file_open
) {
670 ret
= drv
->bdrv_file_open(bs
, filename
, open_flags
);
672 ret
= bdrv_file_open(&bs
->file
, filename
, open_flags
);
674 ret
= drv
->bdrv_open(bs
, open_flags
);
682 ret
= refresh_total_sectors(bs
, bs
->total_sectors
);
688 if (bs
->is_temporary
) {
696 bdrv_delete(bs
->file
);
706 * Opens a file using a protocol (file, host_device, nbd, ...)
708 int bdrv_file_open(BlockDriverState
**pbs
, const char *filename
, int flags
)
710 BlockDriverState
*bs
;
714 drv
= bdrv_find_protocol(filename
);
720 ret
= bdrv_open_common(bs
, filename
, flags
, drv
);
731 * Opens a disk image (raw, qcow2, vmdk, ...)
733 int bdrv_open(BlockDriverState
*bs
, const char *filename
, int flags
,
737 char tmp_filename
[PATH_MAX
];
739 if (flags
& BDRV_O_SNAPSHOT
) {
740 BlockDriverState
*bs1
;
743 BlockDriver
*bdrv_qcow2
;
744 QEMUOptionParameter
*options
;
745 char backing_filename
[PATH_MAX
];
747 /* if snapshot, we create a temporary backing file and open it
748 instead of opening 'filename' directly */
750 /* if there is a backing file, use it */
752 ret
= bdrv_open(bs1
, filename
, 0, drv
);
757 total_size
= bdrv_getlength(bs1
) & BDRV_SECTOR_MASK
;
759 if (bs1
->drv
&& bs1
->drv
->protocol_name
)
764 ret
= get_tmp_filename(tmp_filename
, sizeof(tmp_filename
));
769 /* Real path is meaningless for protocols */
771 snprintf(backing_filename
, sizeof(backing_filename
),
773 else if (!realpath(filename
, backing_filename
))
776 bdrv_qcow2
= bdrv_find_format("qcow2");
777 options
= parse_option_parameters("", bdrv_qcow2
->create_options
, NULL
);
779 set_option_parameter_int(options
, BLOCK_OPT_SIZE
, total_size
);
780 set_option_parameter(options
, BLOCK_OPT_BACKING_FILE
, backing_filename
);
782 set_option_parameter(options
, BLOCK_OPT_BACKING_FMT
,
786 ret
= bdrv_create(bdrv_qcow2
, tmp_filename
, options
);
787 free_option_parameters(options
);
792 filename
= tmp_filename
;
794 bs
->is_temporary
= 1;
797 /* Find the right image format driver */
799 ret
= find_image_format(filename
, &drv
);
803 goto unlink_and_fail
;
807 ret
= bdrv_open_common(bs
, filename
, flags
, drv
);
809 goto unlink_and_fail
;
812 /* If there is a backing file, use it */
813 if ((flags
& BDRV_O_NO_BACKING
) == 0 && bs
->backing_file
[0] != '\0') {
814 char backing_filename
[PATH_MAX
];
816 BlockDriver
*back_drv
= NULL
;
818 bs
->backing_hd
= bdrv_new("");
819 bdrv_get_full_backing_filename(bs
, backing_filename
,
820 sizeof(backing_filename
));
822 if (bs
->backing_format
[0] != '\0') {
823 back_drv
= bdrv_find_format(bs
->backing_format
);
826 /* backing files always opened read-only */
828 flags
& ~(BDRV_O_RDWR
| BDRV_O_SNAPSHOT
| BDRV_O_NO_BACKING
);
830 ret
= bdrv_open(bs
->backing_hd
, backing_filename
, back_flags
, back_drv
);
835 if (bs
->is_temporary
) {
836 bs
->backing_hd
->keep_read_only
= !(flags
& BDRV_O_RDWR
);
838 /* base image inherits from "parent" */
839 bs
->backing_hd
->keep_read_only
= bs
->keep_read_only
;
843 if (!bdrv_key_required(bs
)) {
844 bdrv_dev_change_media_cb(bs
, true);
847 /* throttling disk I/O limits */
848 if (bs
->io_limits_enabled
) {
849 bdrv_io_limits_enable(bs
);
855 if (bs
->is_temporary
) {
861 void bdrv_close(BlockDriverState
*bs
)
866 block_job_cancel_sync(bs
->job
);
870 if (bs
== bs_snapshots
) {
873 if (bs
->backing_hd
) {
874 bdrv_delete(bs
->backing_hd
);
875 bs
->backing_hd
= NULL
;
877 bs
->drv
->bdrv_close(bs
);
880 if (bs
->is_temporary
) {
881 unlink(bs
->filename
);
886 bs
->copy_on_read
= 0;
887 bs
->backing_file
[0] = '\0';
888 bs
->backing_format
[0] = '\0';
889 bs
->total_sectors
= 0;
895 if (bs
->file
!= NULL
) {
896 bdrv_delete(bs
->file
);
900 bdrv_dev_change_media_cb(bs
, false);
903 /*throttling disk I/O limits*/
904 if (bs
->io_limits_enabled
) {
905 bdrv_io_limits_disable(bs
);
909 void bdrv_close_all(void)
911 BlockDriverState
*bs
;
913 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
919 * Wait for pending requests to complete across all BlockDriverStates
921 * This function does not flush data to disk, use bdrv_flush_all() for that
922 * after calling this function.
924 * Note that completion of an asynchronous I/O operation can trigger any
925 * number of other I/O operations on other devices---for example a coroutine
926 * can be arbitrarily complex and a constant flow of I/O can come until the
927 * coroutine is complete. Because of this, it is not possible to have a
928 * function to drain a single device's I/O queue.
930 void bdrv_drain_all(void)
932 BlockDriverState
*bs
;
936 busy
= qemu_aio_wait();
938 /* FIXME: We do not have timer support here, so this is effectively
941 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
942 if (!qemu_co_queue_empty(&bs
->throttled_reqs
)) {
943 qemu_co_queue_restart_all(&bs
->throttled_reqs
);
949 /* If requests are still pending there is a bug somewhere */
950 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
951 assert(QLIST_EMPTY(&bs
->tracked_requests
));
952 assert(qemu_co_queue_empty(&bs
->throttled_reqs
));
956 /* make a BlockDriverState anonymous by removing from bdrv_state list.
957 Also, NULL terminate the device_name to prevent double remove */
958 void bdrv_make_anon(BlockDriverState
*bs
)
960 if (bs
->device_name
[0] != '\0') {
961 QTAILQ_REMOVE(&bdrv_states
, bs
, list
);
963 bs
->device_name
[0] = '\0';
966 static void bdrv_rebind(BlockDriverState
*bs
)
968 if (bs
->drv
&& bs
->drv
->bdrv_rebind
) {
969 bs
->drv
->bdrv_rebind(bs
);
974 * Add new bs contents at the top of an image chain while the chain is
975 * live, while keeping required fields on the top layer.
977 * This will modify the BlockDriverState fields, and swap contents
978 * between bs_new and bs_top. Both bs_new and bs_top are modified.
980 * bs_new is required to be anonymous.
982 * This function does not create any image files.
984 void bdrv_append(BlockDriverState
*bs_new
, BlockDriverState
*bs_top
)
986 BlockDriverState tmp
;
988 /* bs_new must be anonymous */
989 assert(bs_new
->device_name
[0] == '\0');
993 /* there are some fields that need to stay on the top layer: */
994 tmp
.open_flags
= bs_top
->open_flags
;
997 tmp
.dev_ops
= bs_top
->dev_ops
;
998 tmp
.dev_opaque
= bs_top
->dev_opaque
;
999 tmp
.dev
= bs_top
->dev
;
1000 tmp
.buffer_alignment
= bs_top
->buffer_alignment
;
1001 tmp
.copy_on_read
= bs_top
->copy_on_read
;
1003 /* i/o timing parameters */
1004 tmp
.slice_time
= bs_top
->slice_time
;
1005 tmp
.slice_start
= bs_top
->slice_start
;
1006 tmp
.slice_end
= bs_top
->slice_end
;
1007 tmp
.io_limits
= bs_top
->io_limits
;
1008 tmp
.io_base
= bs_top
->io_base
;
1009 tmp
.throttled_reqs
= bs_top
->throttled_reqs
;
1010 tmp
.block_timer
= bs_top
->block_timer
;
1011 tmp
.io_limits_enabled
= bs_top
->io_limits_enabled
;
1014 tmp
.cyls
= bs_top
->cyls
;
1015 tmp
.heads
= bs_top
->heads
;
1016 tmp
.secs
= bs_top
->secs
;
1017 tmp
.translation
= bs_top
->translation
;
1020 tmp
.on_read_error
= bs_top
->on_read_error
;
1021 tmp
.on_write_error
= bs_top
->on_write_error
;
1024 tmp
.iostatus_enabled
= bs_top
->iostatus_enabled
;
1025 tmp
.iostatus
= bs_top
->iostatus
;
1027 /* keep the same entry in bdrv_states */
1028 pstrcpy(tmp
.device_name
, sizeof(tmp
.device_name
), bs_top
->device_name
);
1029 tmp
.list
= bs_top
->list
;
1031 /* The contents of 'tmp' will become bs_top, as we are
1032 * swapping bs_new and bs_top contents. */
1033 tmp
.backing_hd
= bs_new
;
1034 pstrcpy(tmp
.backing_file
, sizeof(tmp
.backing_file
), bs_top
->filename
);
1035 bdrv_get_format(bs_top
, tmp
.backing_format
, sizeof(tmp
.backing_format
));
1037 /* swap contents of the fixed new bs and the current top */
1041 /* device_name[] was carried over from the old bs_top. bs_new
1042 * shouldn't be in bdrv_states, so we need to make device_name[]
1043 * reflect the anonymity of bs_new
1045 bs_new
->device_name
[0] = '\0';
1047 /* clear the copied fields in the new backing file */
1048 bdrv_detach_dev(bs_new
, bs_new
->dev
);
1050 qemu_co_queue_init(&bs_new
->throttled_reqs
);
1051 memset(&bs_new
->io_base
, 0, sizeof(bs_new
->io_base
));
1052 memset(&bs_new
->io_limits
, 0, sizeof(bs_new
->io_limits
));
1053 bdrv_iostatus_disable(bs_new
);
1055 /* we don't use bdrv_io_limits_disable() for this, because we don't want
1056 * to affect or delete the block_timer, as it has been moved to bs_top */
1057 bs_new
->io_limits_enabled
= false;
1058 bs_new
->block_timer
= NULL
;
1059 bs_new
->slice_time
= 0;
1060 bs_new
->slice_start
= 0;
1061 bs_new
->slice_end
= 0;
1063 bdrv_rebind(bs_new
);
1064 bdrv_rebind(bs_top
);
1067 void bdrv_delete(BlockDriverState
*bs
)
1071 assert(!bs
->in_use
);
1073 /* remove from list, if necessary */
1078 assert(bs
!= bs_snapshots
);
1082 int bdrv_attach_dev(BlockDriverState
*bs
, void *dev
)
1083 /* TODO change to DeviceState *dev when all users are qdevified */
1089 bdrv_iostatus_reset(bs
);
1093 /* TODO qdevified devices don't use this, remove when devices are qdevified */
1094 void bdrv_attach_dev_nofail(BlockDriverState
*bs
, void *dev
)
1096 if (bdrv_attach_dev(bs
, dev
) < 0) {
1101 void bdrv_detach_dev(BlockDriverState
*bs
, void *dev
)
1102 /* TODO change to DeviceState *dev when all users are qdevified */
1104 assert(bs
->dev
== dev
);
1107 bs
->dev_opaque
= NULL
;
1108 bs
->buffer_alignment
= 512;
1111 /* TODO change to return DeviceState * when all users are qdevified */
1112 void *bdrv_get_attached_dev(BlockDriverState
*bs
)
1117 void bdrv_set_dev_ops(BlockDriverState
*bs
, const BlockDevOps
*ops
,
1121 bs
->dev_opaque
= opaque
;
1122 if (bdrv_dev_has_removable_media(bs
) && bs
== bs_snapshots
) {
1123 bs_snapshots
= NULL
;
1127 void bdrv_emit_qmp_error_event(const BlockDriverState
*bdrv
,
1128 BlockQMPEventAction action
, int is_read
)
1131 const char *action_str
;
1134 case BDRV_ACTION_REPORT
:
1135 action_str
= "report";
1137 case BDRV_ACTION_IGNORE
:
1138 action_str
= "ignore";
1140 case BDRV_ACTION_STOP
:
1141 action_str
= "stop";
1147 data
= qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
1150 is_read
? "read" : "write");
1151 monitor_protocol_event(QEVENT_BLOCK_IO_ERROR
, data
);
1153 qobject_decref(data
);
1156 static void bdrv_emit_qmp_eject_event(BlockDriverState
*bs
, bool ejected
)
1160 data
= qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
1161 bdrv_get_device_name(bs
), ejected
);
1162 monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED
, data
);
1164 qobject_decref(data
);
1167 static void bdrv_dev_change_media_cb(BlockDriverState
*bs
, bool load
)
1169 if (bs
->dev_ops
&& bs
->dev_ops
->change_media_cb
) {
1170 bool tray_was_closed
= !bdrv_dev_is_tray_open(bs
);
1171 bs
->dev_ops
->change_media_cb(bs
->dev_opaque
, load
);
1172 if (tray_was_closed
) {
1174 bdrv_emit_qmp_eject_event(bs
, true);
1178 bdrv_emit_qmp_eject_event(bs
, false);
1183 bool bdrv_dev_has_removable_media(BlockDriverState
*bs
)
1185 return !bs
->dev
|| (bs
->dev_ops
&& bs
->dev_ops
->change_media_cb
);
1188 void bdrv_dev_eject_request(BlockDriverState
*bs
, bool force
)
1190 if (bs
->dev_ops
&& bs
->dev_ops
->eject_request_cb
) {
1191 bs
->dev_ops
->eject_request_cb(bs
->dev_opaque
, force
);
1195 bool bdrv_dev_is_tray_open(BlockDriverState
*bs
)
1197 if (bs
->dev_ops
&& bs
->dev_ops
->is_tray_open
) {
1198 return bs
->dev_ops
->is_tray_open(bs
->dev_opaque
);
1203 static void bdrv_dev_resize_cb(BlockDriverState
*bs
)
1205 if (bs
->dev_ops
&& bs
->dev_ops
->resize_cb
) {
1206 bs
->dev_ops
->resize_cb(bs
->dev_opaque
);
1210 bool bdrv_dev_is_medium_locked(BlockDriverState
*bs
)
1212 if (bs
->dev_ops
&& bs
->dev_ops
->is_medium_locked
) {
1213 return bs
->dev_ops
->is_medium_locked(bs
->dev_opaque
);
1219 * Run consistency checks on an image
1221 * Returns 0 if the check could be completed (it doesn't mean that the image is
1222 * free of errors) or -errno when an internal error occurred. The results of the
1223 * check are stored in res.
1225 int bdrv_check(BlockDriverState
*bs
, BdrvCheckResult
*res
, BdrvCheckMode fix
)
1227 if (bs
->drv
->bdrv_check
== NULL
) {
1231 memset(res
, 0, sizeof(*res
));
1232 return bs
->drv
->bdrv_check(bs
, res
, fix
);
1235 #define COMMIT_BUF_SECTORS 2048
1237 /* commit COW file into the raw image */
1238 int bdrv_commit(BlockDriverState
*bs
)
1240 BlockDriver
*drv
= bs
->drv
;
1241 BlockDriver
*backing_drv
;
1242 int64_t sector
, total_sectors
;
1243 int n
, ro
, open_flags
;
1244 int ret
= 0, rw_ret
= 0;
1246 char filename
[1024];
1247 BlockDriverState
*bs_rw
, *bs_ro
;
1252 if (!bs
->backing_hd
) {
1256 if (bs
->backing_hd
->keep_read_only
) {
1260 if (bdrv_in_use(bs
) || bdrv_in_use(bs
->backing_hd
)) {
1264 backing_drv
= bs
->backing_hd
->drv
;
1265 ro
= bs
->backing_hd
->read_only
;
1266 strncpy(filename
, bs
->backing_hd
->filename
, sizeof(filename
));
1267 open_flags
= bs
->backing_hd
->open_flags
;
1271 bdrv_delete(bs
->backing_hd
);
1272 bs
->backing_hd
= NULL
;
1273 bs_rw
= bdrv_new("");
1274 rw_ret
= bdrv_open(bs_rw
, filename
, open_flags
| BDRV_O_RDWR
,
1278 /* try to re-open read-only */
1279 bs_ro
= bdrv_new("");
1280 ret
= bdrv_open(bs_ro
, filename
, open_flags
& ~BDRV_O_RDWR
,
1284 /* drive not functional anymore */
1288 bs
->backing_hd
= bs_ro
;
1291 bs
->backing_hd
= bs_rw
;
1294 total_sectors
= bdrv_getlength(bs
) >> BDRV_SECTOR_BITS
;
1295 buf
= g_malloc(COMMIT_BUF_SECTORS
* BDRV_SECTOR_SIZE
);
1297 for (sector
= 0; sector
< total_sectors
; sector
+= n
) {
1298 if (bdrv_is_allocated(bs
, sector
, COMMIT_BUF_SECTORS
, &n
)) {
1300 if (bdrv_read(bs
, sector
, buf
, n
) != 0) {
1305 if (bdrv_write(bs
->backing_hd
, sector
, buf
, n
) != 0) {
1312 if (drv
->bdrv_make_empty
) {
1313 ret
= drv
->bdrv_make_empty(bs
);
1318 * Make sure all data we wrote to the backing device is actually
1322 bdrv_flush(bs
->backing_hd
);
1329 bdrv_delete(bs
->backing_hd
);
1330 bs
->backing_hd
= NULL
;
1331 bs_ro
= bdrv_new("");
1332 ret
= bdrv_open(bs_ro
, filename
, open_flags
& ~BDRV_O_RDWR
,
1336 /* drive not functional anymore */
1340 bs
->backing_hd
= bs_ro
;
1341 bs
->backing_hd
->keep_read_only
= 0;
1347 int bdrv_commit_all(void)
1349 BlockDriverState
*bs
;
1351 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
1352 int ret
= bdrv_commit(bs
);
1360 struct BdrvTrackedRequest
{
1361 BlockDriverState
*bs
;
1365 QLIST_ENTRY(BdrvTrackedRequest
) list
;
1366 Coroutine
*co
; /* owner, used for deadlock detection */
1367 CoQueue wait_queue
; /* coroutines blocked on this request */
1371 * Remove an active request from the tracked requests list
1373 * This function should be called when a tracked request is completing.
1375 static void tracked_request_end(BdrvTrackedRequest
*req
)
1377 QLIST_REMOVE(req
, list
);
1378 qemu_co_queue_restart_all(&req
->wait_queue
);
1382 * Add an active request to the tracked requests list
1384 static void tracked_request_begin(BdrvTrackedRequest
*req
,
1385 BlockDriverState
*bs
,
1387 int nb_sectors
, bool is_write
)
1389 *req
= (BdrvTrackedRequest
){
1391 .sector_num
= sector_num
,
1392 .nb_sectors
= nb_sectors
,
1393 .is_write
= is_write
,
1394 .co
= qemu_coroutine_self(),
1397 qemu_co_queue_init(&req
->wait_queue
);
1399 QLIST_INSERT_HEAD(&bs
->tracked_requests
, req
, list
);
1403 * Round a region to cluster boundaries
1405 static void round_to_clusters(BlockDriverState
*bs
,
1406 int64_t sector_num
, int nb_sectors
,
1407 int64_t *cluster_sector_num
,
1408 int *cluster_nb_sectors
)
1410 BlockDriverInfo bdi
;
1412 if (bdrv_get_info(bs
, &bdi
) < 0 || bdi
.cluster_size
== 0) {
1413 *cluster_sector_num
= sector_num
;
1414 *cluster_nb_sectors
= nb_sectors
;
1416 int64_t c
= bdi
.cluster_size
/ BDRV_SECTOR_SIZE
;
1417 *cluster_sector_num
= QEMU_ALIGN_DOWN(sector_num
, c
);
1418 *cluster_nb_sectors
= QEMU_ALIGN_UP(sector_num
- *cluster_sector_num
+
1423 static bool tracked_request_overlaps(BdrvTrackedRequest
*req
,
1424 int64_t sector_num
, int nb_sectors
) {
1426 if (sector_num
>= req
->sector_num
+ req
->nb_sectors
) {
1430 if (req
->sector_num
>= sector_num
+ nb_sectors
) {
1436 static void coroutine_fn
wait_for_overlapping_requests(BlockDriverState
*bs
,
1437 int64_t sector_num
, int nb_sectors
)
1439 BdrvTrackedRequest
*req
;
1440 int64_t cluster_sector_num
;
1441 int cluster_nb_sectors
;
1444 /* If we touch the same cluster it counts as an overlap. This guarantees
1445 * that allocating writes will be serialized and not race with each other
1446 * for the same cluster. For example, in copy-on-read it ensures that the
1447 * CoR read and write operations are atomic and guest writes cannot
1448 * interleave between them.
1450 round_to_clusters(bs
, sector_num
, nb_sectors
,
1451 &cluster_sector_num
, &cluster_nb_sectors
);
1455 QLIST_FOREACH(req
, &bs
->tracked_requests
, list
) {
1456 if (tracked_request_overlaps(req
, cluster_sector_num
,
1457 cluster_nb_sectors
)) {
1458 /* Hitting this means there was a reentrant request, for
1459 * example, a block driver issuing nested requests. This must
1460 * never happen since it means deadlock.
1462 assert(qemu_coroutine_self() != req
->co
);
1464 qemu_co_queue_wait(&req
->wait_queue
);
1475 * -EINVAL - backing format specified, but no file
1476 * -ENOSPC - can't update the backing file because no space is left in the
1478 * -ENOTSUP - format driver doesn't support changing the backing file
1480 int bdrv_change_backing_file(BlockDriverState
*bs
,
1481 const char *backing_file
, const char *backing_fmt
)
1483 BlockDriver
*drv
= bs
->drv
;
1486 /* Backing file format doesn't make sense without a backing file */
1487 if (backing_fmt
&& !backing_file
) {
1491 if (drv
->bdrv_change_backing_file
!= NULL
) {
1492 ret
= drv
->bdrv_change_backing_file(bs
, backing_file
, backing_fmt
);
1498 pstrcpy(bs
->backing_file
, sizeof(bs
->backing_file
), backing_file
?: "");
1499 pstrcpy(bs
->backing_format
, sizeof(bs
->backing_format
), backing_fmt
?: "");
1504 static int bdrv_check_byte_request(BlockDriverState
*bs
, int64_t offset
,
1509 if (!bdrv_is_inserted(bs
))
1515 len
= bdrv_getlength(bs
);
1520 if ((offset
> len
) || (len
- offset
< size
))
1526 static int bdrv_check_request(BlockDriverState
*bs
, int64_t sector_num
,
1529 return bdrv_check_byte_request(bs
, sector_num
* BDRV_SECTOR_SIZE
,
1530 nb_sectors
* BDRV_SECTOR_SIZE
);
1533 typedef struct RwCo
{
1534 BlockDriverState
*bs
;
1542 static void coroutine_fn
bdrv_rw_co_entry(void *opaque
)
1544 RwCo
*rwco
= opaque
;
1546 if (!rwco
->is_write
) {
1547 rwco
->ret
= bdrv_co_do_readv(rwco
->bs
, rwco
->sector_num
,
1548 rwco
->nb_sectors
, rwco
->qiov
, 0);
1550 rwco
->ret
= bdrv_co_do_writev(rwco
->bs
, rwco
->sector_num
,
1551 rwco
->nb_sectors
, rwco
->qiov
, 0);
1556 * Process a synchronous request using coroutines
1558 static int bdrv_rw_co(BlockDriverState
*bs
, int64_t sector_num
, uint8_t *buf
,
1559 int nb_sectors
, bool is_write
)
1562 struct iovec iov
= {
1563 .iov_base
= (void *)buf
,
1564 .iov_len
= nb_sectors
* BDRV_SECTOR_SIZE
,
1569 .sector_num
= sector_num
,
1570 .nb_sectors
= nb_sectors
,
1572 .is_write
= is_write
,
1576 qemu_iovec_init_external(&qiov
, &iov
, 1);
1579 * In sync call context, when the vcpu is blocked, this throttling timer
1580 * will not fire; so the I/O throttling function has to be disabled here
1581 * if it has been enabled.
1583 if (bs
->io_limits_enabled
) {
1584 fprintf(stderr
, "Disabling I/O throttling on '%s' due "
1585 "to synchronous I/O.\n", bdrv_get_device_name(bs
));
1586 bdrv_io_limits_disable(bs
);
1589 if (qemu_in_coroutine()) {
1590 /* Fast-path if already in coroutine context */
1591 bdrv_rw_co_entry(&rwco
);
1593 co
= qemu_coroutine_create(bdrv_rw_co_entry
);
1594 qemu_coroutine_enter(co
, &rwco
);
1595 while (rwco
.ret
== NOT_DONE
) {
1602 /* return < 0 if error. See bdrv_write() for the return codes */
1603 int bdrv_read(BlockDriverState
*bs
, int64_t sector_num
,
1604 uint8_t *buf
, int nb_sectors
)
1606 return bdrv_rw_co(bs
, sector_num
, buf
, nb_sectors
, false);
1609 #define BITS_PER_LONG (sizeof(unsigned long) * 8)
1611 static void set_dirty_bitmap(BlockDriverState
*bs
, int64_t sector_num
,
1612 int nb_sectors
, int dirty
)
1615 unsigned long val
, idx
, bit
;
1617 start
= sector_num
/ BDRV_SECTORS_PER_DIRTY_CHUNK
;
1618 end
= (sector_num
+ nb_sectors
- 1) / BDRV_SECTORS_PER_DIRTY_CHUNK
;
1620 for (; start
<= end
; start
++) {
1621 idx
= start
/ BITS_PER_LONG
;
1622 bit
= start
% BITS_PER_LONG
;
1623 val
= bs
->dirty_bitmap
[idx
];
1625 if (!(val
& (1UL << bit
))) {
1630 if (val
& (1UL << bit
)) {
1632 val
&= ~(1UL << bit
);
1635 bs
->dirty_bitmap
[idx
] = val
;
1639 /* Return < 0 if error. Important errors are:
1640 -EIO generic I/O error (may happen for all errors)
1641 -ENOMEDIUM No media inserted.
1642 -EINVAL Invalid sector number or nb_sectors
1643 -EACCES Trying to write a read-only device
1645 int bdrv_write(BlockDriverState
*bs
, int64_t sector_num
,
1646 const uint8_t *buf
, int nb_sectors
)
1648 return bdrv_rw_co(bs
, sector_num
, (uint8_t *)buf
, nb_sectors
, true);
1651 int bdrv_pread(BlockDriverState
*bs
, int64_t offset
,
1652 void *buf
, int count1
)
1654 uint8_t tmp_buf
[BDRV_SECTOR_SIZE
];
1655 int len
, nb_sectors
, count
;
1660 /* first read to align to sector start */
1661 len
= (BDRV_SECTOR_SIZE
- offset
) & (BDRV_SECTOR_SIZE
- 1);
1664 sector_num
= offset
>> BDRV_SECTOR_BITS
;
1666 if ((ret
= bdrv_read(bs
, sector_num
, tmp_buf
, 1)) < 0)
1668 memcpy(buf
, tmp_buf
+ (offset
& (BDRV_SECTOR_SIZE
- 1)), len
);
1676 /* read the sectors "in place" */
1677 nb_sectors
= count
>> BDRV_SECTOR_BITS
;
1678 if (nb_sectors
> 0) {
1679 if ((ret
= bdrv_read(bs
, sector_num
, buf
, nb_sectors
)) < 0)
1681 sector_num
+= nb_sectors
;
1682 len
= nb_sectors
<< BDRV_SECTOR_BITS
;
1687 /* add data from the last sector */
1689 if ((ret
= bdrv_read(bs
, sector_num
, tmp_buf
, 1)) < 0)
1691 memcpy(buf
, tmp_buf
, count
);
1696 int bdrv_pwrite(BlockDriverState
*bs
, int64_t offset
,
1697 const void *buf
, int count1
)
1699 uint8_t tmp_buf
[BDRV_SECTOR_SIZE
];
1700 int len
, nb_sectors
, count
;
1705 /* first write to align to sector start */
1706 len
= (BDRV_SECTOR_SIZE
- offset
) & (BDRV_SECTOR_SIZE
- 1);
1709 sector_num
= offset
>> BDRV_SECTOR_BITS
;
1711 if ((ret
= bdrv_read(bs
, sector_num
, tmp_buf
, 1)) < 0)
1713 memcpy(tmp_buf
+ (offset
& (BDRV_SECTOR_SIZE
- 1)), buf
, len
);
1714 if ((ret
= bdrv_write(bs
, sector_num
, tmp_buf
, 1)) < 0)
1723 /* write the sectors "in place" */
1724 nb_sectors
= count
>> BDRV_SECTOR_BITS
;
1725 if (nb_sectors
> 0) {
1726 if ((ret
= bdrv_write(bs
, sector_num
, buf
, nb_sectors
)) < 0)
1728 sector_num
+= nb_sectors
;
1729 len
= nb_sectors
<< BDRV_SECTOR_BITS
;
1734 /* add data from the last sector */
1736 if ((ret
= bdrv_read(bs
, sector_num
, tmp_buf
, 1)) < 0)
1738 memcpy(tmp_buf
, buf
, count
);
1739 if ((ret
= bdrv_write(bs
, sector_num
, tmp_buf
, 1)) < 0)
1746 * Writes to the file and ensures that no writes are reordered across this
1747 * request (acts as a barrier)
1749 * Returns 0 on success, -errno in error cases.
1751 int bdrv_pwrite_sync(BlockDriverState
*bs
, int64_t offset
,
1752 const void *buf
, int count
)
1756 ret
= bdrv_pwrite(bs
, offset
, buf
, count
);
1761 /* No flush needed for cache modes that already do it */
1762 if (bs
->enable_write_cache
) {
1769 static int coroutine_fn
bdrv_co_do_copy_on_readv(BlockDriverState
*bs
,
1770 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
)
1772 /* Perform I/O through a temporary buffer so that users who scribble over
1773 * their read buffer while the operation is in progress do not end up
1774 * modifying the image file. This is critical for zero-copy guest I/O
1775 * where anything might happen inside guest memory.
1777 void *bounce_buffer
;
1779 BlockDriver
*drv
= bs
->drv
;
1781 QEMUIOVector bounce_qiov
;
1782 int64_t cluster_sector_num
;
1783 int cluster_nb_sectors
;
1787 /* Cover entire cluster so no additional backing file I/O is required when
1788 * allocating cluster in the image file.
1790 round_to_clusters(bs
, sector_num
, nb_sectors
,
1791 &cluster_sector_num
, &cluster_nb_sectors
);
1793 trace_bdrv_co_do_copy_on_readv(bs
, sector_num
, nb_sectors
,
1794 cluster_sector_num
, cluster_nb_sectors
);
1796 iov
.iov_len
= cluster_nb_sectors
* BDRV_SECTOR_SIZE
;
1797 iov
.iov_base
= bounce_buffer
= qemu_blockalign(bs
, iov
.iov_len
);
1798 qemu_iovec_init_external(&bounce_qiov
, &iov
, 1);
1800 ret
= drv
->bdrv_co_readv(bs
, cluster_sector_num
, cluster_nb_sectors
,
1806 if (drv
->bdrv_co_write_zeroes
&&
1807 buffer_is_zero(bounce_buffer
, iov
.iov_len
)) {
1808 ret
= bdrv_co_do_write_zeroes(bs
, cluster_sector_num
,
1809 cluster_nb_sectors
);
1811 /* This does not change the data on the disk, it is not necessary
1812 * to flush even in cache=writethrough mode.
1814 ret
= drv
->bdrv_co_writev(bs
, cluster_sector_num
, cluster_nb_sectors
,
1819 /* It might be okay to ignore write errors for guest requests. If this
1820 * is a deliberate copy-on-read then we don't want to ignore the error.
1821 * Simply report it in all cases.
1826 skip_bytes
= (sector_num
- cluster_sector_num
) * BDRV_SECTOR_SIZE
;
1827 qemu_iovec_from_buffer(qiov
, bounce_buffer
+ skip_bytes
,
1828 nb_sectors
* BDRV_SECTOR_SIZE
);
1831 qemu_vfree(bounce_buffer
);
1836 * Handle a read request in coroutine context
1838 static int coroutine_fn
bdrv_co_do_readv(BlockDriverState
*bs
,
1839 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
,
1840 BdrvRequestFlags flags
)
1842 BlockDriver
*drv
= bs
->drv
;
1843 BdrvTrackedRequest req
;
1849 if (bdrv_check_request(bs
, sector_num
, nb_sectors
)) {
1853 /* throttling disk read I/O */
1854 if (bs
->io_limits_enabled
) {
1855 bdrv_io_limits_intercept(bs
, false, nb_sectors
);
1858 if (bs
->copy_on_read
) {
1859 flags
|= BDRV_REQ_COPY_ON_READ
;
1861 if (flags
& BDRV_REQ_COPY_ON_READ
) {
1862 bs
->copy_on_read_in_flight
++;
1865 if (bs
->copy_on_read_in_flight
) {
1866 wait_for_overlapping_requests(bs
, sector_num
, nb_sectors
);
1869 tracked_request_begin(&req
, bs
, sector_num
, nb_sectors
, false);
1871 if (flags
& BDRV_REQ_COPY_ON_READ
) {
1874 ret
= bdrv_co_is_allocated(bs
, sector_num
, nb_sectors
, &pnum
);
1879 if (!ret
|| pnum
!= nb_sectors
) {
1880 ret
= bdrv_co_do_copy_on_readv(bs
, sector_num
, nb_sectors
, qiov
);
1885 ret
= drv
->bdrv_co_readv(bs
, sector_num
, nb_sectors
, qiov
);
1888 tracked_request_end(&req
);
1890 if (flags
& BDRV_REQ_COPY_ON_READ
) {
1891 bs
->copy_on_read_in_flight
--;
1897 int coroutine_fn
bdrv_co_readv(BlockDriverState
*bs
, int64_t sector_num
,
1898 int nb_sectors
, QEMUIOVector
*qiov
)
1900 trace_bdrv_co_readv(bs
, sector_num
, nb_sectors
);
1902 return bdrv_co_do_readv(bs
, sector_num
, nb_sectors
, qiov
, 0);
1905 int coroutine_fn
bdrv_co_copy_on_readv(BlockDriverState
*bs
,
1906 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
)
1908 trace_bdrv_co_copy_on_readv(bs
, sector_num
, nb_sectors
);
1910 return bdrv_co_do_readv(bs
, sector_num
, nb_sectors
, qiov
,
1911 BDRV_REQ_COPY_ON_READ
);
1914 static int coroutine_fn
bdrv_co_do_write_zeroes(BlockDriverState
*bs
,
1915 int64_t sector_num
, int nb_sectors
)
1917 BlockDriver
*drv
= bs
->drv
;
1922 /* TODO Emulate only part of misaligned requests instead of letting block
1923 * drivers return -ENOTSUP and emulate everything */
1925 /* First try the efficient write zeroes operation */
1926 if (drv
->bdrv_co_write_zeroes
) {
1927 ret
= drv
->bdrv_co_write_zeroes(bs
, sector_num
, nb_sectors
);
1928 if (ret
!= -ENOTSUP
) {
1933 /* Fall back to bounce buffer if write zeroes is unsupported */
1934 iov
.iov_len
= nb_sectors
* BDRV_SECTOR_SIZE
;
1935 iov
.iov_base
= qemu_blockalign(bs
, iov
.iov_len
);
1936 memset(iov
.iov_base
, 0, iov
.iov_len
);
1937 qemu_iovec_init_external(&qiov
, &iov
, 1);
1939 ret
= drv
->bdrv_co_writev(bs
, sector_num
, nb_sectors
, &qiov
);
1941 qemu_vfree(iov
.iov_base
);
1946 * Handle a write request in coroutine context
1948 static int coroutine_fn
bdrv_co_do_writev(BlockDriverState
*bs
,
1949 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
,
1950 BdrvRequestFlags flags
)
1952 BlockDriver
*drv
= bs
->drv
;
1953 BdrvTrackedRequest req
;
1959 if (bs
->read_only
) {
1962 if (bdrv_check_request(bs
, sector_num
, nb_sectors
)) {
1966 /* throttling disk write I/O */
1967 if (bs
->io_limits_enabled
) {
1968 bdrv_io_limits_intercept(bs
, true, nb_sectors
);
1971 if (bs
->copy_on_read_in_flight
) {
1972 wait_for_overlapping_requests(bs
, sector_num
, nb_sectors
);
1975 tracked_request_begin(&req
, bs
, sector_num
, nb_sectors
, true);
1977 if (flags
& BDRV_REQ_ZERO_WRITE
) {
1978 ret
= bdrv_co_do_write_zeroes(bs
, sector_num
, nb_sectors
);
1980 ret
= drv
->bdrv_co_writev(bs
, sector_num
, nb_sectors
, qiov
);
1983 if (ret
== 0 && !bs
->enable_write_cache
) {
1984 ret
= bdrv_co_flush(bs
);
1987 if (bs
->dirty_bitmap
) {
1988 set_dirty_bitmap(bs
, sector_num
, nb_sectors
, 1);
1991 if (bs
->wr_highest_sector
< sector_num
+ nb_sectors
- 1) {
1992 bs
->wr_highest_sector
= sector_num
+ nb_sectors
- 1;
1995 tracked_request_end(&req
);
2000 int coroutine_fn
bdrv_co_writev(BlockDriverState
*bs
, int64_t sector_num
,
2001 int nb_sectors
, QEMUIOVector
*qiov
)
2003 trace_bdrv_co_writev(bs
, sector_num
, nb_sectors
);
2005 return bdrv_co_do_writev(bs
, sector_num
, nb_sectors
, qiov
, 0);
2008 int coroutine_fn
bdrv_co_write_zeroes(BlockDriverState
*bs
,
2009 int64_t sector_num
, int nb_sectors
)
2011 trace_bdrv_co_write_zeroes(bs
, sector_num
, nb_sectors
);
2013 return bdrv_co_do_writev(bs
, sector_num
, nb_sectors
, NULL
,
2014 BDRV_REQ_ZERO_WRITE
);
2018 * Truncate file to 'offset' bytes (needed only for file protocols)
2020 int bdrv_truncate(BlockDriverState
*bs
, int64_t offset
)
2022 BlockDriver
*drv
= bs
->drv
;
2026 if (!drv
->bdrv_truncate
)
2030 if (bdrv_in_use(bs
))
2032 ret
= drv
->bdrv_truncate(bs
, offset
);
2034 ret
= refresh_total_sectors(bs
, offset
>> BDRV_SECTOR_BITS
);
2035 bdrv_dev_resize_cb(bs
);
2041 * Length of a allocated file in bytes. Sparse files are counted by actual
2042 * allocated space. Return < 0 if error or unknown.
2044 int64_t bdrv_get_allocated_file_size(BlockDriverState
*bs
)
2046 BlockDriver
*drv
= bs
->drv
;
2050 if (drv
->bdrv_get_allocated_file_size
) {
2051 return drv
->bdrv_get_allocated_file_size(bs
);
2054 return bdrv_get_allocated_file_size(bs
->file
);
2060 * Length of a file in bytes. Return < 0 if error or unknown.
2062 int64_t bdrv_getlength(BlockDriverState
*bs
)
2064 BlockDriver
*drv
= bs
->drv
;
2068 if (bs
->growable
|| bdrv_dev_has_removable_media(bs
)) {
2069 if (drv
->bdrv_getlength
) {
2070 return drv
->bdrv_getlength(bs
);
2073 return bs
->total_sectors
* BDRV_SECTOR_SIZE
;
2076 /* return 0 as number of sectors if no device present or error */
2077 void bdrv_get_geometry(BlockDriverState
*bs
, uint64_t *nb_sectors_ptr
)
2080 length
= bdrv_getlength(bs
);
2084 length
= length
>> BDRV_SECTOR_BITS
;
2085 *nb_sectors_ptr
= length
;
2089 uint8_t boot_ind
; /* 0x80 - active */
2090 uint8_t head
; /* starting head */
2091 uint8_t sector
; /* starting sector */
2092 uint8_t cyl
; /* starting cylinder */
2093 uint8_t sys_ind
; /* What partition type */
2094 uint8_t end_head
; /* end head */
2095 uint8_t end_sector
; /* end sector */
2096 uint8_t end_cyl
; /* end cylinder */
2097 uint32_t start_sect
; /* starting sector counting from 0 */
2098 uint32_t nr_sects
; /* nr of sectors in partition */
2101 /* try to guess the disk logical geometry from the MSDOS partition table. Return 0 if OK, -1 if could not guess */
2102 static int guess_disk_lchs(BlockDriverState
*bs
,
2103 int *pcylinders
, int *pheads
, int *psectors
)
2105 uint8_t buf
[BDRV_SECTOR_SIZE
];
2106 int ret
, i
, heads
, sectors
, cylinders
;
2107 struct partition
*p
;
2109 uint64_t nb_sectors
;
2112 bdrv_get_geometry(bs
, &nb_sectors
);
2115 * The function will be invoked during startup not only in sync I/O mode,
2116 * but also in async I/O mode. So the I/O throttling function has to
2117 * be disabled temporarily here, not permanently.
2119 enabled
= bs
->io_limits_enabled
;
2120 bs
->io_limits_enabled
= false;
2121 ret
= bdrv_read(bs
, 0, buf
, 1);
2122 bs
->io_limits_enabled
= enabled
;
2125 /* test msdos magic */
2126 if (buf
[510] != 0x55 || buf
[511] != 0xaa)
2128 for(i
= 0; i
< 4; i
++) {
2129 p
= ((struct partition
*)(buf
+ 0x1be)) + i
;
2130 nr_sects
= le32_to_cpu(p
->nr_sects
);
2131 if (nr_sects
&& p
->end_head
) {
2132 /* We make the assumption that the partition terminates on
2133 a cylinder boundary */
2134 heads
= p
->end_head
+ 1;
2135 sectors
= p
->end_sector
& 63;
2138 cylinders
= nb_sectors
/ (heads
* sectors
);
2139 if (cylinders
< 1 || cylinders
> 16383)
2142 *psectors
= sectors
;
2143 *pcylinders
= cylinders
;
2145 printf("guessed geometry: LCHS=%d %d %d\n",
2146 cylinders
, heads
, sectors
);
2154 void bdrv_guess_geometry(BlockDriverState
*bs
, int *pcyls
, int *pheads
, int *psecs
)
2156 int translation
, lba_detected
= 0;
2157 int cylinders
, heads
, secs
;
2158 uint64_t nb_sectors
;
2160 /* if a geometry hint is available, use it */
2161 bdrv_get_geometry(bs
, &nb_sectors
);
2162 bdrv_get_geometry_hint(bs
, &cylinders
, &heads
, &secs
);
2163 translation
= bdrv_get_translation_hint(bs
);
2164 if (cylinders
!= 0) {
2169 if (guess_disk_lchs(bs
, &cylinders
, &heads
, &secs
) == 0) {
2171 /* if heads > 16, it means that a BIOS LBA
2172 translation was active, so the default
2173 hardware geometry is OK */
2175 goto default_geometry
;
2180 /* disable any translation to be in sync with
2181 the logical geometry */
2182 if (translation
== BIOS_ATA_TRANSLATION_AUTO
) {
2183 bdrv_set_translation_hint(bs
,
2184 BIOS_ATA_TRANSLATION_NONE
);
2189 /* if no geometry, use a standard physical disk geometry */
2190 cylinders
= nb_sectors
/ (16 * 63);
2192 if (cylinders
> 16383)
2194 else if (cylinders
< 2)
2199 if ((lba_detected
== 1) && (translation
== BIOS_ATA_TRANSLATION_AUTO
)) {
2200 if ((*pcyls
* *pheads
) <= 131072) {
2201 bdrv_set_translation_hint(bs
,
2202 BIOS_ATA_TRANSLATION_LARGE
);
2204 bdrv_set_translation_hint(bs
,
2205 BIOS_ATA_TRANSLATION_LBA
);
2209 bdrv_set_geometry_hint(bs
, *pcyls
, *pheads
, *psecs
);
2213 void bdrv_set_geometry_hint(BlockDriverState
*bs
,
2214 int cyls
, int heads
, int secs
)
2221 void bdrv_set_translation_hint(BlockDriverState
*bs
, int translation
)
2223 bs
->translation
= translation
;
2226 void bdrv_get_geometry_hint(BlockDriverState
*bs
,
2227 int *pcyls
, int *pheads
, int *psecs
)
2230 *pheads
= bs
->heads
;
2234 /* throttling disk io limits */
2235 void bdrv_set_io_limits(BlockDriverState
*bs
,
2236 BlockIOLimit
*io_limits
)
2238 bs
->io_limits
= *io_limits
;
2239 bs
->io_limits_enabled
= bdrv_io_limits_enabled(bs
);
2242 /* Recognize floppy formats */
2243 typedef struct FDFormat
{
2251 static const FDFormat fd_formats
[] = {
2252 /* First entry is default format */
2253 /* 1.44 MB 3"1/2 floppy disks */
2254 { FDRIVE_DRV_144
, 18, 80, 1, FDRIVE_RATE_500K
, },
2255 { FDRIVE_DRV_144
, 20, 80, 1, FDRIVE_RATE_500K
, },
2256 { FDRIVE_DRV_144
, 21, 80, 1, FDRIVE_RATE_500K
, },
2257 { FDRIVE_DRV_144
, 21, 82, 1, FDRIVE_RATE_500K
, },
2258 { FDRIVE_DRV_144
, 21, 83, 1, FDRIVE_RATE_500K
, },
2259 { FDRIVE_DRV_144
, 22, 80, 1, FDRIVE_RATE_500K
, },
2260 { FDRIVE_DRV_144
, 23, 80, 1, FDRIVE_RATE_500K
, },
2261 { FDRIVE_DRV_144
, 24, 80, 1, FDRIVE_RATE_500K
, },
2262 /* 2.88 MB 3"1/2 floppy disks */
2263 { FDRIVE_DRV_288
, 36, 80, 1, FDRIVE_RATE_1M
, },
2264 { FDRIVE_DRV_288
, 39, 80, 1, FDRIVE_RATE_1M
, },
2265 { FDRIVE_DRV_288
, 40, 80, 1, FDRIVE_RATE_1M
, },
2266 { FDRIVE_DRV_288
, 44, 80, 1, FDRIVE_RATE_1M
, },
2267 { FDRIVE_DRV_288
, 48, 80, 1, FDRIVE_RATE_1M
, },
2268 /* 720 kB 3"1/2 floppy disks */
2269 { FDRIVE_DRV_144
, 9, 80, 1, FDRIVE_RATE_250K
, },
2270 { FDRIVE_DRV_144
, 10, 80, 1, FDRIVE_RATE_250K
, },
2271 { FDRIVE_DRV_144
, 10, 82, 1, FDRIVE_RATE_250K
, },
2272 { FDRIVE_DRV_144
, 10, 83, 1, FDRIVE_RATE_250K
, },
2273 { FDRIVE_DRV_144
, 13, 80, 1, FDRIVE_RATE_250K
, },
2274 { FDRIVE_DRV_144
, 14, 80, 1, FDRIVE_RATE_250K
, },
2275 /* 1.2 MB 5"1/4 floppy disks */
2276 { FDRIVE_DRV_120
, 15, 80, 1, FDRIVE_RATE_500K
, },
2277 { FDRIVE_DRV_120
, 18, 80, 1, FDRIVE_RATE_500K
, },
2278 { FDRIVE_DRV_120
, 18, 82, 1, FDRIVE_RATE_500K
, },
2279 { FDRIVE_DRV_120
, 18, 83, 1, FDRIVE_RATE_500K
, },
2280 { FDRIVE_DRV_120
, 20, 80, 1, FDRIVE_RATE_500K
, },
2281 /* 720 kB 5"1/4 floppy disks */
2282 { FDRIVE_DRV_120
, 9, 80, 1, FDRIVE_RATE_250K
, },
2283 { FDRIVE_DRV_120
, 11, 80, 1, FDRIVE_RATE_250K
, },
2284 /* 360 kB 5"1/4 floppy disks */
2285 { FDRIVE_DRV_120
, 9, 40, 1, FDRIVE_RATE_300K
, },
2286 { FDRIVE_DRV_120
, 9, 40, 0, FDRIVE_RATE_300K
, },
2287 { FDRIVE_DRV_120
, 10, 41, 1, FDRIVE_RATE_300K
, },
2288 { FDRIVE_DRV_120
, 10, 42, 1, FDRIVE_RATE_300K
, },
2289 /* 320 kB 5"1/4 floppy disks */
2290 { FDRIVE_DRV_120
, 8, 40, 1, FDRIVE_RATE_250K
, },
2291 { FDRIVE_DRV_120
, 8, 40, 0, FDRIVE_RATE_250K
, },
2292 /* 360 kB must match 5"1/4 better than 3"1/2... */
2293 { FDRIVE_DRV_144
, 9, 80, 0, FDRIVE_RATE_250K
, },
2295 { FDRIVE_DRV_NONE
, -1, -1, 0, 0, },
2298 void bdrv_get_floppy_geometry_hint(BlockDriverState
*bs
, int *nb_heads
,
2299 int *max_track
, int *last_sect
,
2300 FDriveType drive_in
, FDriveType
*drive
,
2303 const FDFormat
*parse
;
2304 uint64_t nb_sectors
, size
;
2305 int i
, first_match
, match
;
2307 bdrv_get_geometry_hint(bs
, nb_heads
, max_track
, last_sect
);
2308 if (*nb_heads
!= 0 && *max_track
!= 0 && *last_sect
!= 0) {
2309 /* User defined disk */
2310 *rate
= FDRIVE_RATE_500K
;
2312 bdrv_get_geometry(bs
, &nb_sectors
);
2315 for (i
= 0; ; i
++) {
2316 parse
= &fd_formats
[i
];
2317 if (parse
->drive
== FDRIVE_DRV_NONE
) {
2320 if (drive_in
== parse
->drive
||
2321 drive_in
== FDRIVE_DRV_NONE
) {
2322 size
= (parse
->max_head
+ 1) * parse
->max_track
*
2324 if (nb_sectors
== size
) {
2328 if (first_match
== -1) {
2334 if (first_match
== -1) {
2337 match
= first_match
;
2339 parse
= &fd_formats
[match
];
2341 *nb_heads
= parse
->max_head
+ 1;
2342 *max_track
= parse
->max_track
;
2343 *last_sect
= parse
->last_sect
;
2344 *drive
= parse
->drive
;
2345 *rate
= parse
->rate
;
2349 int bdrv_get_translation_hint(BlockDriverState
*bs
)
2351 return bs
->translation
;
2354 void bdrv_set_on_error(BlockDriverState
*bs
, BlockErrorAction on_read_error
,
2355 BlockErrorAction on_write_error
)
2357 bs
->on_read_error
= on_read_error
;
2358 bs
->on_write_error
= on_write_error
;
2361 BlockErrorAction
bdrv_get_on_error(BlockDriverState
*bs
, int is_read
)
2363 return is_read
? bs
->on_read_error
: bs
->on_write_error
;
2366 int bdrv_is_read_only(BlockDriverState
*bs
)
2368 return bs
->read_only
;
2371 int bdrv_is_sg(BlockDriverState
*bs
)
2376 int bdrv_enable_write_cache(BlockDriverState
*bs
)
2378 return bs
->enable_write_cache
;
2381 int bdrv_is_encrypted(BlockDriverState
*bs
)
2383 if (bs
->backing_hd
&& bs
->backing_hd
->encrypted
)
2385 return bs
->encrypted
;
2388 int bdrv_key_required(BlockDriverState
*bs
)
2390 BlockDriverState
*backing_hd
= bs
->backing_hd
;
2392 if (backing_hd
&& backing_hd
->encrypted
&& !backing_hd
->valid_key
)
2394 return (bs
->encrypted
&& !bs
->valid_key
);
2397 int bdrv_set_key(BlockDriverState
*bs
, const char *key
)
2400 if (bs
->backing_hd
&& bs
->backing_hd
->encrypted
) {
2401 ret
= bdrv_set_key(bs
->backing_hd
, key
);
2407 if (!bs
->encrypted
) {
2409 } else if (!bs
->drv
|| !bs
->drv
->bdrv_set_key
) {
2412 ret
= bs
->drv
->bdrv_set_key(bs
, key
);
2415 } else if (!bs
->valid_key
) {
2417 /* call the change callback now, we skipped it on open */
2418 bdrv_dev_change_media_cb(bs
, true);
2423 void bdrv_get_format(BlockDriverState
*bs
, char *buf
, int buf_size
)
2428 pstrcpy(buf
, buf_size
, bs
->drv
->format_name
);
2432 void bdrv_iterate_format(void (*it
)(void *opaque
, const char *name
),
2437 QLIST_FOREACH(drv
, &bdrv_drivers
, list
) {
2438 it(opaque
, drv
->format_name
);
2442 BlockDriverState
*bdrv_find(const char *name
)
2444 BlockDriverState
*bs
;
2446 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
2447 if (!strcmp(name
, bs
->device_name
)) {
2454 BlockDriverState
*bdrv_next(BlockDriverState
*bs
)
2457 return QTAILQ_FIRST(&bdrv_states
);
2459 return QTAILQ_NEXT(bs
, list
);
2462 void bdrv_iterate(void (*it
)(void *opaque
, BlockDriverState
*bs
), void *opaque
)
2464 BlockDriverState
*bs
;
2466 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
2471 const char *bdrv_get_device_name(BlockDriverState
*bs
)
2473 return bs
->device_name
;
2476 int bdrv_get_flags(BlockDriverState
*bs
)
2478 return bs
->open_flags
;
2481 void bdrv_flush_all(void)
2483 BlockDriverState
*bs
;
2485 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
2490 int bdrv_has_zero_init(BlockDriverState
*bs
)
2494 if (bs
->drv
->bdrv_has_zero_init
) {
2495 return bs
->drv
->bdrv_has_zero_init(bs
);
2501 typedef struct BdrvCoIsAllocatedData
{
2502 BlockDriverState
*bs
;
2508 } BdrvCoIsAllocatedData
;
2511 * Returns true iff the specified sector is present in the disk image. Drivers
2512 * not implementing the functionality are assumed to not support backing files,
2513 * hence all their sectors are reported as allocated.
2515 * If 'sector_num' is beyond the end of the disk image the return value is 0
2516 * and 'pnum' is set to 0.
2518 * 'pnum' is set to the number of sectors (including and immediately following
2519 * the specified sector) that are known to be in the same
2520 * allocated/unallocated state.
2522 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
2523 * beyond the end of the disk image it will be clamped.
2525 int coroutine_fn
bdrv_co_is_allocated(BlockDriverState
*bs
, int64_t sector_num
,
2526 int nb_sectors
, int *pnum
)
2530 if (sector_num
>= bs
->total_sectors
) {
2535 n
= bs
->total_sectors
- sector_num
;
2536 if (n
< nb_sectors
) {
2540 if (!bs
->drv
->bdrv_co_is_allocated
) {
2545 return bs
->drv
->bdrv_co_is_allocated(bs
, sector_num
, nb_sectors
, pnum
);
2548 /* Coroutine wrapper for bdrv_is_allocated() */
2549 static void coroutine_fn
bdrv_is_allocated_co_entry(void *opaque
)
2551 BdrvCoIsAllocatedData
*data
= opaque
;
2552 BlockDriverState
*bs
= data
->bs
;
2554 data
->ret
= bdrv_co_is_allocated(bs
, data
->sector_num
, data
->nb_sectors
,
2560 * Synchronous wrapper around bdrv_co_is_allocated().
2562 * See bdrv_co_is_allocated() for details.
2564 int bdrv_is_allocated(BlockDriverState
*bs
, int64_t sector_num
, int nb_sectors
,
2568 BdrvCoIsAllocatedData data
= {
2570 .sector_num
= sector_num
,
2571 .nb_sectors
= nb_sectors
,
2576 co
= qemu_coroutine_create(bdrv_is_allocated_co_entry
);
2577 qemu_coroutine_enter(co
, &data
);
2578 while (!data
.done
) {
2585 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2587 * Return true if the given sector is allocated in any image between
2588 * BASE and TOP (inclusive). BASE can be NULL to check if the given
2589 * sector is allocated in any image of the chain. Return false otherwise.
2591 * 'pnum' is set to the number of sectors (including and immediately following
2592 * the specified sector) that are known to be in the same
2593 * allocated/unallocated state.
2596 int coroutine_fn
bdrv_co_is_allocated_above(BlockDriverState
*top
,
2597 BlockDriverState
*base
,
2599 int nb_sectors
, int *pnum
)
2601 BlockDriverState
*intermediate
;
2602 int ret
, n
= nb_sectors
;
2605 while (intermediate
&& intermediate
!= base
) {
2607 ret
= bdrv_co_is_allocated(intermediate
, sector_num
, nb_sectors
,
2617 * [sector_num, nb_sectors] is unallocated on top but intermediate
2620 * [sector_num+x, nr_sectors] allocated.
2622 if (n
> pnum_inter
) {
2626 intermediate
= intermediate
->backing_hd
;
2633 BlockInfoList
*qmp_query_block(Error
**errp
)
2635 BlockInfoList
*head
= NULL
, *cur_item
= NULL
;
2636 BlockDriverState
*bs
;
2638 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
2639 BlockInfoList
*info
= g_malloc0(sizeof(*info
));
2641 info
->value
= g_malloc0(sizeof(*info
->value
));
2642 info
->value
->device
= g_strdup(bs
->device_name
);
2643 info
->value
->type
= g_strdup("unknown");
2644 info
->value
->locked
= bdrv_dev_is_medium_locked(bs
);
2645 info
->value
->removable
= bdrv_dev_has_removable_media(bs
);
2647 if (bdrv_dev_has_removable_media(bs
)) {
2648 info
->value
->has_tray_open
= true;
2649 info
->value
->tray_open
= bdrv_dev_is_tray_open(bs
);
2652 if (bdrv_iostatus_is_enabled(bs
)) {
2653 info
->value
->has_io_status
= true;
2654 info
->value
->io_status
= bs
->iostatus
;
2658 info
->value
->has_inserted
= true;
2659 info
->value
->inserted
= g_malloc0(sizeof(*info
->value
->inserted
));
2660 info
->value
->inserted
->file
= g_strdup(bs
->filename
);
2661 info
->value
->inserted
->ro
= bs
->read_only
;
2662 info
->value
->inserted
->drv
= g_strdup(bs
->drv
->format_name
);
2663 info
->value
->inserted
->encrypted
= bs
->encrypted
;
2664 if (bs
->backing_file
[0]) {
2665 info
->value
->inserted
->has_backing_file
= true;
2666 info
->value
->inserted
->backing_file
= g_strdup(bs
->backing_file
);
2669 if (bs
->io_limits_enabled
) {
2670 info
->value
->inserted
->bps
=
2671 bs
->io_limits
.bps
[BLOCK_IO_LIMIT_TOTAL
];
2672 info
->value
->inserted
->bps_rd
=
2673 bs
->io_limits
.bps
[BLOCK_IO_LIMIT_READ
];
2674 info
->value
->inserted
->bps_wr
=
2675 bs
->io_limits
.bps
[BLOCK_IO_LIMIT_WRITE
];
2676 info
->value
->inserted
->iops
=
2677 bs
->io_limits
.iops
[BLOCK_IO_LIMIT_TOTAL
];
2678 info
->value
->inserted
->iops_rd
=
2679 bs
->io_limits
.iops
[BLOCK_IO_LIMIT_READ
];
2680 info
->value
->inserted
->iops_wr
=
2681 bs
->io_limits
.iops
[BLOCK_IO_LIMIT_WRITE
];
2685 /* XXX: waiting for the qapi to support GSList */
2687 head
= cur_item
= info
;
2689 cur_item
->next
= info
;
2697 /* Consider exposing this as a full fledged QMP command */
2698 static BlockStats
*qmp_query_blockstat(const BlockDriverState
*bs
, Error
**errp
)
2702 s
= g_malloc0(sizeof(*s
));
2704 if (bs
->device_name
[0]) {
2705 s
->has_device
= true;
2706 s
->device
= g_strdup(bs
->device_name
);
2709 s
->stats
= g_malloc0(sizeof(*s
->stats
));
2710 s
->stats
->rd_bytes
= bs
->nr_bytes
[BDRV_ACCT_READ
];
2711 s
->stats
->wr_bytes
= bs
->nr_bytes
[BDRV_ACCT_WRITE
];
2712 s
->stats
->rd_operations
= bs
->nr_ops
[BDRV_ACCT_READ
];
2713 s
->stats
->wr_operations
= bs
->nr_ops
[BDRV_ACCT_WRITE
];
2714 s
->stats
->wr_highest_offset
= bs
->wr_highest_sector
* BDRV_SECTOR_SIZE
;
2715 s
->stats
->flush_operations
= bs
->nr_ops
[BDRV_ACCT_FLUSH
];
2716 s
->stats
->wr_total_time_ns
= bs
->total_time_ns
[BDRV_ACCT_WRITE
];
2717 s
->stats
->rd_total_time_ns
= bs
->total_time_ns
[BDRV_ACCT_READ
];
2718 s
->stats
->flush_total_time_ns
= bs
->total_time_ns
[BDRV_ACCT_FLUSH
];
2721 s
->has_parent
= true;
2722 s
->parent
= qmp_query_blockstat(bs
->file
, NULL
);
2728 BlockStatsList
*qmp_query_blockstats(Error
**errp
)
2730 BlockStatsList
*head
= NULL
, *cur_item
= NULL
;
2731 BlockDriverState
*bs
;
2733 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
2734 BlockStatsList
*info
= g_malloc0(sizeof(*info
));
2735 info
->value
= qmp_query_blockstat(bs
, NULL
);
2737 /* XXX: waiting for the qapi to support GSList */
2739 head
= cur_item
= info
;
2741 cur_item
->next
= info
;
2749 const char *bdrv_get_encrypted_filename(BlockDriverState
*bs
)
2751 if (bs
->backing_hd
&& bs
->backing_hd
->encrypted
)
2752 return bs
->backing_file
;
2753 else if (bs
->encrypted
)
2754 return bs
->filename
;
2759 void bdrv_get_backing_filename(BlockDriverState
*bs
,
2760 char *filename
, int filename_size
)
2762 pstrcpy(filename
, filename_size
, bs
->backing_file
);
2765 int bdrv_write_compressed(BlockDriverState
*bs
, int64_t sector_num
,
2766 const uint8_t *buf
, int nb_sectors
)
2768 BlockDriver
*drv
= bs
->drv
;
2771 if (!drv
->bdrv_write_compressed
)
2773 if (bdrv_check_request(bs
, sector_num
, nb_sectors
))
2776 if (bs
->dirty_bitmap
) {
2777 set_dirty_bitmap(bs
, sector_num
, nb_sectors
, 1);
2780 return drv
->bdrv_write_compressed(bs
, sector_num
, buf
, nb_sectors
);
2783 int bdrv_get_info(BlockDriverState
*bs
, BlockDriverInfo
*bdi
)
2785 BlockDriver
*drv
= bs
->drv
;
2788 if (!drv
->bdrv_get_info
)
2790 memset(bdi
, 0, sizeof(*bdi
));
2791 return drv
->bdrv_get_info(bs
, bdi
);
2794 int bdrv_save_vmstate(BlockDriverState
*bs
, const uint8_t *buf
,
2795 int64_t pos
, int size
)
2797 BlockDriver
*drv
= bs
->drv
;
2800 if (drv
->bdrv_save_vmstate
)
2801 return drv
->bdrv_save_vmstate(bs
, buf
, pos
, size
);
2803 return bdrv_save_vmstate(bs
->file
, buf
, pos
, size
);
2807 int bdrv_load_vmstate(BlockDriverState
*bs
, uint8_t *buf
,
2808 int64_t pos
, int size
)
2810 BlockDriver
*drv
= bs
->drv
;
2813 if (drv
->bdrv_load_vmstate
)
2814 return drv
->bdrv_load_vmstate(bs
, buf
, pos
, size
);
2816 return bdrv_load_vmstate(bs
->file
, buf
, pos
, size
);
2820 void bdrv_debug_event(BlockDriverState
*bs
, BlkDebugEvent event
)
2822 BlockDriver
*drv
= bs
->drv
;
2824 if (!drv
|| !drv
->bdrv_debug_event
) {
2828 return drv
->bdrv_debug_event(bs
, event
);
2832 /**************************************************************/
2833 /* handling of snapshots */
2835 int bdrv_can_snapshot(BlockDriverState
*bs
)
2837 BlockDriver
*drv
= bs
->drv
;
2838 if (!drv
|| !bdrv_is_inserted(bs
) || bdrv_is_read_only(bs
)) {
2842 if (!drv
->bdrv_snapshot_create
) {
2843 if (bs
->file
!= NULL
) {
2844 return bdrv_can_snapshot(bs
->file
);
2852 int bdrv_is_snapshot(BlockDriverState
*bs
)
2854 return !!(bs
->open_flags
& BDRV_O_SNAPSHOT
);
2857 BlockDriverState
*bdrv_snapshots(void)
2859 BlockDriverState
*bs
;
2862 return bs_snapshots
;
2866 while ((bs
= bdrv_next(bs
))) {
2867 if (bdrv_can_snapshot(bs
)) {
2875 int bdrv_snapshot_create(BlockDriverState
*bs
,
2876 QEMUSnapshotInfo
*sn_info
)
2878 BlockDriver
*drv
= bs
->drv
;
2881 if (drv
->bdrv_snapshot_create
)
2882 return drv
->bdrv_snapshot_create(bs
, sn_info
);
2884 return bdrv_snapshot_create(bs
->file
, sn_info
);
2888 int bdrv_snapshot_goto(BlockDriverState
*bs
,
2889 const char *snapshot_id
)
2891 BlockDriver
*drv
= bs
->drv
;
2896 if (drv
->bdrv_snapshot_goto
)
2897 return drv
->bdrv_snapshot_goto(bs
, snapshot_id
);
2900 drv
->bdrv_close(bs
);
2901 ret
= bdrv_snapshot_goto(bs
->file
, snapshot_id
);
2902 open_ret
= drv
->bdrv_open(bs
, bs
->open_flags
);
2904 bdrv_delete(bs
->file
);
2914 int bdrv_snapshot_delete(BlockDriverState
*bs
, const char *snapshot_id
)
2916 BlockDriver
*drv
= bs
->drv
;
2919 if (drv
->bdrv_snapshot_delete
)
2920 return drv
->bdrv_snapshot_delete(bs
, snapshot_id
);
2922 return bdrv_snapshot_delete(bs
->file
, snapshot_id
);
2926 int bdrv_snapshot_list(BlockDriverState
*bs
,
2927 QEMUSnapshotInfo
**psn_info
)
2929 BlockDriver
*drv
= bs
->drv
;
2932 if (drv
->bdrv_snapshot_list
)
2933 return drv
->bdrv_snapshot_list(bs
, psn_info
);
2935 return bdrv_snapshot_list(bs
->file
, psn_info
);
2939 int bdrv_snapshot_load_tmp(BlockDriverState
*bs
,
2940 const char *snapshot_name
)
2942 BlockDriver
*drv
= bs
->drv
;
2946 if (!bs
->read_only
) {
2949 if (drv
->bdrv_snapshot_load_tmp
) {
2950 return drv
->bdrv_snapshot_load_tmp(bs
, snapshot_name
);
2955 BlockDriverState
*bdrv_find_backing_image(BlockDriverState
*bs
,
2956 const char *backing_file
)
2962 if (bs
->backing_hd
) {
2963 if (strcmp(bs
->backing_file
, backing_file
) == 0) {
2964 return bs
->backing_hd
;
2966 return bdrv_find_backing_image(bs
->backing_hd
, backing_file
);
#define NB_SUFFIXES 4

/*
 * Format @size into @buf as a human-readable quantity: plain digits up
 * to 999, then a rounded value with a K/M/G/T suffix (1024-based).
 * Returns @buf.
 *
 * NOTE(review): the loop skeleton (base init/update, break statements)
 * was dropped by the extraction; reconstructed from upstream — verify.
 */
char *get_human_readable_size(char *buf, int buf_size, int64_t size)
{
    static const char suffixes[NB_SUFFIXES] = "KMGT";
    int64_t base;
    int i;

    if (size <= 999) {
        snprintf(buf, buf_size, "%" PRId64, size);
    } else {
        base = 1024;
        for (i = 0; i < NB_SUFFIXES; i++) {
            if (size < (10 * base)) {
                /* small multiple: one decimal place, e.g. "2.0K" */
                snprintf(buf, buf_size, "%0.1f%c",
                         (double)size / base,
                         suffixes[i]);
                break;
            } else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) {
                /* round to nearest whole unit, e.g. "513M" */
                snprintf(buf, buf_size, "%" PRId64 "%c",
                         ((size + (base >> 1)) / base),
                         suffixes[i]);
                break;
            }
            base = base * 1024;
        }
    }
    return buf;
}
3003 char *bdrv_snapshot_dump(char *buf
, int buf_size
, QEMUSnapshotInfo
*sn
)
3005 char buf1
[128], date_buf
[128], clock_buf
[128];
3015 snprintf(buf
, buf_size
,
3016 "%-10s%-20s%7s%20s%15s",
3017 "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
3021 ptm
= localtime(&ti
);
3022 strftime(date_buf
, sizeof(date_buf
),
3023 "%Y-%m-%d %H:%M:%S", ptm
);
3025 localtime_r(&ti
, &tm
);
3026 strftime(date_buf
, sizeof(date_buf
),
3027 "%Y-%m-%d %H:%M:%S", &tm
);
3029 secs
= sn
->vm_clock_nsec
/ 1000000000;
3030 snprintf(clock_buf
, sizeof(clock_buf
),
3031 "%02d:%02d:%02d.%03d",
3033 (int)((secs
/ 60) % 60),
3035 (int)((sn
->vm_clock_nsec
/ 1000000) % 1000));
3036 snprintf(buf
, buf_size
,
3037 "%-10s%-20s%7s%20s%15s",
3038 sn
->id_str
, sn
->name
,
3039 get_human_readable_size(buf1
, sizeof(buf1
), sn
->vm_state_size
),
3046 /**************************************************************/
3049 BlockDriverAIOCB
*bdrv_aio_readv(BlockDriverState
*bs
, int64_t sector_num
,
3050 QEMUIOVector
*qiov
, int nb_sectors
,
3051 BlockDriverCompletionFunc
*cb
, void *opaque
)
3053 trace_bdrv_aio_readv(bs
, sector_num
, nb_sectors
, opaque
);
3055 return bdrv_co_aio_rw_vector(bs
, sector_num
, qiov
, nb_sectors
,
3059 BlockDriverAIOCB
*bdrv_aio_writev(BlockDriverState
*bs
, int64_t sector_num
,
3060 QEMUIOVector
*qiov
, int nb_sectors
,
3061 BlockDriverCompletionFunc
*cb
, void *opaque
)
3063 trace_bdrv_aio_writev(bs
, sector_num
, nb_sectors
, opaque
);
3065 return bdrv_co_aio_rw_vector(bs
, sector_num
, qiov
, nb_sectors
,
3070 typedef struct MultiwriteCB
{
3075 BlockDriverCompletionFunc
*cb
;
3077 QEMUIOVector
*free_qiov
;
3081 static void multiwrite_user_cb(MultiwriteCB
*mcb
)
3085 for (i
= 0; i
< mcb
->num_callbacks
; i
++) {
3086 mcb
->callbacks
[i
].cb(mcb
->callbacks
[i
].opaque
, mcb
->error
);
3087 if (mcb
->callbacks
[i
].free_qiov
) {
3088 qemu_iovec_destroy(mcb
->callbacks
[i
].free_qiov
);
3090 g_free(mcb
->callbacks
[i
].free_qiov
);
3094 static void multiwrite_cb(void *opaque
, int ret
)
3096 MultiwriteCB
*mcb
= opaque
;
3098 trace_multiwrite_cb(mcb
, ret
);
3100 if (ret
< 0 && !mcb
->error
) {
3104 mcb
->num_requests
--;
3105 if (mcb
->num_requests
== 0) {
3106 multiwrite_user_cb(mcb
);
3111 static int multiwrite_req_compare(const void *a
, const void *b
)
3113 const BlockRequest
*req1
= a
, *req2
= b
;
3116 * Note that we can't simply subtract req2->sector from req1->sector
3117 * here as that could overflow the return value.
3119 if (req1
->sector
> req2
->sector
) {
3121 } else if (req1
->sector
< req2
->sector
) {
3129 * Takes a bunch of requests and tries to merge them. Returns the number of
3130 * requests that remain after merging.
3132 static int multiwrite_merge(BlockDriverState
*bs
, BlockRequest
*reqs
,
3133 int num_reqs
, MultiwriteCB
*mcb
)
3137 // Sort requests by start sector
3138 qsort(reqs
, num_reqs
, sizeof(*reqs
), &multiwrite_req_compare
);
3140 // Check if adjacent requests touch the same clusters. If so, combine them,
3141 // filling up gaps with zero sectors.
3143 for (i
= 1; i
< num_reqs
; i
++) {
3145 int64_t oldreq_last
= reqs
[outidx
].sector
+ reqs
[outidx
].nb_sectors
;
3147 // Handle exactly sequential writes and overlapping writes.
3148 if (reqs
[i
].sector
<= oldreq_last
) {
3152 if (reqs
[outidx
].qiov
->niov
+ reqs
[i
].qiov
->niov
+ 1 > IOV_MAX
) {
3158 QEMUIOVector
*qiov
= g_malloc0(sizeof(*qiov
));
3159 qemu_iovec_init(qiov
,
3160 reqs
[outidx
].qiov
->niov
+ reqs
[i
].qiov
->niov
+ 1);
3162 // Add the first request to the merged one. If the requests are
3163 // overlapping, drop the last sectors of the first request.
3164 size
= (reqs
[i
].sector
- reqs
[outidx
].sector
) << 9;
3165 qemu_iovec_concat(qiov
, reqs
[outidx
].qiov
, size
);
3167 // We should need to add any zeros between the two requests
3168 assert (reqs
[i
].sector
<= oldreq_last
);
3170 // Add the second request
3171 qemu_iovec_concat(qiov
, reqs
[i
].qiov
, reqs
[i
].qiov
->size
);
3173 reqs
[outidx
].nb_sectors
= qiov
->size
>> 9;
3174 reqs
[outidx
].qiov
= qiov
;
3176 mcb
->callbacks
[i
].free_qiov
= reqs
[outidx
].qiov
;
3179 reqs
[outidx
].sector
= reqs
[i
].sector
;
3180 reqs
[outidx
].nb_sectors
= reqs
[i
].nb_sectors
;
3181 reqs
[outidx
].qiov
= reqs
[i
].qiov
;
3189 * Submit multiple AIO write requests at once.
3191 * On success, the function returns 0 and all requests in the reqs array have
3192 * been submitted. In error case this function returns -1, and any of the
3193 * requests may or may not be submitted yet. In particular, this means that the
3194 * callback will be called for some of the requests, for others it won't. The
3195 * caller must check the error field of the BlockRequest to wait for the right
3196 * callbacks (if error != 0, no callback will be called).
3198 * The implementation may modify the contents of the reqs array, e.g. to merge
3199 * requests. However, the fields opaque and error are left unmodified as they
3200 * are used to signal failure for a single request to the caller.
3202 int bdrv_aio_multiwrite(BlockDriverState
*bs
, BlockRequest
*reqs
, int num_reqs
)
3207 /* don't submit writes if we don't have a medium */
3208 if (bs
->drv
== NULL
) {
3209 for (i
= 0; i
< num_reqs
; i
++) {
3210 reqs
[i
].error
= -ENOMEDIUM
;
3215 if (num_reqs
== 0) {
3219 // Create MultiwriteCB structure
3220 mcb
= g_malloc0(sizeof(*mcb
) + num_reqs
* sizeof(*mcb
->callbacks
));
3221 mcb
->num_requests
= 0;
3222 mcb
->num_callbacks
= num_reqs
;
3224 for (i
= 0; i
< num_reqs
; i
++) {
3225 mcb
->callbacks
[i
].cb
= reqs
[i
].cb
;
3226 mcb
->callbacks
[i
].opaque
= reqs
[i
].opaque
;
3229 // Check for mergable requests
3230 num_reqs
= multiwrite_merge(bs
, reqs
, num_reqs
, mcb
);
3232 trace_bdrv_aio_multiwrite(mcb
, mcb
->num_callbacks
, num_reqs
);
3234 /* Run the aio requests. */
3235 mcb
->num_requests
= num_reqs
;
3236 for (i
= 0; i
< num_reqs
; i
++) {
3237 bdrv_aio_writev(bs
, reqs
[i
].sector
, reqs
[i
].qiov
,
3238 reqs
[i
].nb_sectors
, multiwrite_cb
, mcb
);
3244 void bdrv_aio_cancel(BlockDriverAIOCB
*acb
)
3246 acb
->pool
->cancel(acb
);
3249 /* block I/O throttling */
3250 static bool bdrv_exceed_bps_limits(BlockDriverState
*bs
, int nb_sectors
,
3251 bool is_write
, double elapsed_time
, uint64_t *wait
)
3253 uint64_t bps_limit
= 0;
3254 double bytes_limit
, bytes_base
, bytes_res
;
3255 double slice_time
, wait_time
;
3257 if (bs
->io_limits
.bps
[BLOCK_IO_LIMIT_TOTAL
]) {
3258 bps_limit
= bs
->io_limits
.bps
[BLOCK_IO_LIMIT_TOTAL
];
3259 } else if (bs
->io_limits
.bps
[is_write
]) {
3260 bps_limit
= bs
->io_limits
.bps
[is_write
];
3269 slice_time
= bs
->slice_end
- bs
->slice_start
;
3270 slice_time
/= (NANOSECONDS_PER_SECOND
);
3271 bytes_limit
= bps_limit
* slice_time
;
3272 bytes_base
= bs
->nr_bytes
[is_write
] - bs
->io_base
.bytes
[is_write
];
3273 if (bs
->io_limits
.bps
[BLOCK_IO_LIMIT_TOTAL
]) {
3274 bytes_base
+= bs
->nr_bytes
[!is_write
] - bs
->io_base
.bytes
[!is_write
];
3277 /* bytes_base: the bytes of data which have been read/written; and
3278 * it is obtained from the history statistic info.
3279 * bytes_res: the remaining bytes of data which need to be read/written.
3280 * (bytes_base + bytes_res) / bps_limit: used to calcuate
3281 * the total time for completing reading/writting all data.
3283 bytes_res
= (unsigned) nb_sectors
* BDRV_SECTOR_SIZE
;
3285 if (bytes_base
+ bytes_res
<= bytes_limit
) {
3293 /* Calc approx time to dispatch */
3294 wait_time
= (bytes_base
+ bytes_res
) / bps_limit
- elapsed_time
;
3296 /* When the I/O rate at runtime exceeds the limits,
3297 * bs->slice_end need to be extended in order that the current statistic
3298 * info can be kept until the timer fire, so it is increased and tuned
3299 * based on the result of experiment.
3301 bs
->slice_time
= wait_time
* BLOCK_IO_SLICE_TIME
* 10;
3302 bs
->slice_end
+= bs
->slice_time
- 3 * BLOCK_IO_SLICE_TIME
;
3304 *wait
= wait_time
* BLOCK_IO_SLICE_TIME
* 10;
3310 static bool bdrv_exceed_iops_limits(BlockDriverState
*bs
, bool is_write
,
3311 double elapsed_time
, uint64_t *wait
)
3313 uint64_t iops_limit
= 0;
3314 double ios_limit
, ios_base
;
3315 double slice_time
, wait_time
;
3317 if (bs
->io_limits
.iops
[BLOCK_IO_LIMIT_TOTAL
]) {
3318 iops_limit
= bs
->io_limits
.iops
[BLOCK_IO_LIMIT_TOTAL
];
3319 } else if (bs
->io_limits
.iops
[is_write
]) {
3320 iops_limit
= bs
->io_limits
.iops
[is_write
];
3329 slice_time
= bs
->slice_end
- bs
->slice_start
;
3330 slice_time
/= (NANOSECONDS_PER_SECOND
);
3331 ios_limit
= iops_limit
* slice_time
;
3332 ios_base
= bs
->nr_ops
[is_write
] - bs
->io_base
.ios
[is_write
];
3333 if (bs
->io_limits
.iops
[BLOCK_IO_LIMIT_TOTAL
]) {
3334 ios_base
+= bs
->nr_ops
[!is_write
] - bs
->io_base
.ios
[!is_write
];
3337 if (ios_base
+ 1 <= ios_limit
) {
3345 /* Calc approx time to dispatch */
3346 wait_time
= (ios_base
+ 1) / iops_limit
;
3347 if (wait_time
> elapsed_time
) {
3348 wait_time
= wait_time
- elapsed_time
;
3353 bs
->slice_time
= wait_time
* BLOCK_IO_SLICE_TIME
* 10;
3354 bs
->slice_end
+= bs
->slice_time
- 3 * BLOCK_IO_SLICE_TIME
;
3356 *wait
= wait_time
* BLOCK_IO_SLICE_TIME
* 10;
3362 static bool bdrv_exceed_io_limits(BlockDriverState
*bs
, int nb_sectors
,
3363 bool is_write
, int64_t *wait
)
3365 int64_t now
, max_wait
;
3366 uint64_t bps_wait
= 0, iops_wait
= 0;
3367 double elapsed_time
;
3368 int bps_ret
, iops_ret
;
3370 now
= qemu_get_clock_ns(vm_clock
);
3371 if ((bs
->slice_start
< now
)
3372 && (bs
->slice_end
> now
)) {
3373 bs
->slice_end
= now
+ bs
->slice_time
;
3375 bs
->slice_time
= 5 * BLOCK_IO_SLICE_TIME
;
3376 bs
->slice_start
= now
;
3377 bs
->slice_end
= now
+ bs
->slice_time
;
3379 bs
->io_base
.bytes
[is_write
] = bs
->nr_bytes
[is_write
];
3380 bs
->io_base
.bytes
[!is_write
] = bs
->nr_bytes
[!is_write
];
3382 bs
->io_base
.ios
[is_write
] = bs
->nr_ops
[is_write
];
3383 bs
->io_base
.ios
[!is_write
] = bs
->nr_ops
[!is_write
];
3386 elapsed_time
= now
- bs
->slice_start
;
3387 elapsed_time
/= (NANOSECONDS_PER_SECOND
);
3389 bps_ret
= bdrv_exceed_bps_limits(bs
, nb_sectors
,
3390 is_write
, elapsed_time
, &bps_wait
);
3391 iops_ret
= bdrv_exceed_iops_limits(bs
, is_write
,
3392 elapsed_time
, &iops_wait
);
3393 if (bps_ret
|| iops_ret
) {
3394 max_wait
= bps_wait
> iops_wait
? bps_wait
: iops_wait
;
3399 now
= qemu_get_clock_ns(vm_clock
);
3400 if (bs
->slice_end
< now
+ max_wait
) {
3401 bs
->slice_end
= now
+ max_wait
;
3414 /**************************************************************/
3415 /* async block device emulation */
3417 typedef struct BlockDriverAIOCBSync
{
3418 BlockDriverAIOCB common
;
3421 /* vector translation state */
3425 } BlockDriverAIOCBSync
;
3427 static void bdrv_aio_cancel_em(BlockDriverAIOCB
*blockacb
)
3429 BlockDriverAIOCBSync
*acb
=
3430 container_of(blockacb
, BlockDriverAIOCBSync
, common
);
3431 qemu_bh_delete(acb
->bh
);
3433 qemu_aio_release(acb
);
3436 static AIOPool bdrv_em_aio_pool
= {
3437 .aiocb_size
= sizeof(BlockDriverAIOCBSync
),
3438 .cancel
= bdrv_aio_cancel_em
,
3441 static void bdrv_aio_bh_cb(void *opaque
)
3443 BlockDriverAIOCBSync
*acb
= opaque
;
3446 qemu_iovec_from_buffer(acb
->qiov
, acb
->bounce
, acb
->qiov
->size
);
3447 qemu_vfree(acb
->bounce
);
3448 acb
->common
.cb(acb
->common
.opaque
, acb
->ret
);
3449 qemu_bh_delete(acb
->bh
);
3451 qemu_aio_release(acb
);
3454 static BlockDriverAIOCB
*bdrv_aio_rw_vector(BlockDriverState
*bs
,
3458 BlockDriverCompletionFunc
*cb
,
3463 BlockDriverAIOCBSync
*acb
;
3465 acb
= qemu_aio_get(&bdrv_em_aio_pool
, bs
, cb
, opaque
);
3466 acb
->is_write
= is_write
;
3468 acb
->bounce
= qemu_blockalign(bs
, qiov
->size
);
3469 acb
->bh
= qemu_bh_new(bdrv_aio_bh_cb
, acb
);
3472 qemu_iovec_to_buffer(acb
->qiov
, acb
->bounce
);
3473 acb
->ret
= bs
->drv
->bdrv_write(bs
, sector_num
, acb
->bounce
, nb_sectors
);
3475 acb
->ret
= bs
->drv
->bdrv_read(bs
, sector_num
, acb
->bounce
, nb_sectors
);
3478 qemu_bh_schedule(acb
->bh
);
3480 return &acb
->common
;
3483 static BlockDriverAIOCB
*bdrv_aio_readv_em(BlockDriverState
*bs
,
3484 int64_t sector_num
, QEMUIOVector
*qiov
, int nb_sectors
,
3485 BlockDriverCompletionFunc
*cb
, void *opaque
)
3487 return bdrv_aio_rw_vector(bs
, sector_num
, qiov
, nb_sectors
, cb
, opaque
, 0);
3490 static BlockDriverAIOCB
*bdrv_aio_writev_em(BlockDriverState
*bs
,
3491 int64_t sector_num
, QEMUIOVector
*qiov
, int nb_sectors
,
3492 BlockDriverCompletionFunc
*cb
, void *opaque
)
3494 return bdrv_aio_rw_vector(bs
, sector_num
, qiov
, nb_sectors
, cb
, opaque
, 1);
3498 typedef struct BlockDriverAIOCBCoroutine
{
3499 BlockDriverAIOCB common
;
3503 } BlockDriverAIOCBCoroutine
;
3505 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB
*blockacb
)
3510 static AIOPool bdrv_em_co_aio_pool
= {
3511 .aiocb_size
= sizeof(BlockDriverAIOCBCoroutine
),
3512 .cancel
= bdrv_aio_co_cancel_em
,
3515 static void bdrv_co_em_bh(void *opaque
)
3517 BlockDriverAIOCBCoroutine
*acb
= opaque
;
3519 acb
->common
.cb(acb
->common
.opaque
, acb
->req
.error
);
3520 qemu_bh_delete(acb
->bh
);
3521 qemu_aio_release(acb
);
3524 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
3525 static void coroutine_fn
bdrv_co_do_rw(void *opaque
)
3527 BlockDriverAIOCBCoroutine
*acb
= opaque
;
3528 BlockDriverState
*bs
= acb
->common
.bs
;
3530 if (!acb
->is_write
) {
3531 acb
->req
.error
= bdrv_co_do_readv(bs
, acb
->req
.sector
,
3532 acb
->req
.nb_sectors
, acb
->req
.qiov
, 0);
3534 acb
->req
.error
= bdrv_co_do_writev(bs
, acb
->req
.sector
,
3535 acb
->req
.nb_sectors
, acb
->req
.qiov
, 0);
3538 acb
->bh
= qemu_bh_new(bdrv_co_em_bh
, acb
);
3539 qemu_bh_schedule(acb
->bh
);
3542 static BlockDriverAIOCB
*bdrv_co_aio_rw_vector(BlockDriverState
*bs
,
3546 BlockDriverCompletionFunc
*cb
,
3551 BlockDriverAIOCBCoroutine
*acb
;
3553 acb
= qemu_aio_get(&bdrv_em_co_aio_pool
, bs
, cb
, opaque
);
3554 acb
->req
.sector
= sector_num
;
3555 acb
->req
.nb_sectors
= nb_sectors
;
3556 acb
->req
.qiov
= qiov
;
3557 acb
->is_write
= is_write
;
3559 co
= qemu_coroutine_create(bdrv_co_do_rw
);
3560 qemu_coroutine_enter(co
, acb
);
3562 return &acb
->common
;
3565 static void coroutine_fn
bdrv_aio_flush_co_entry(void *opaque
)
3567 BlockDriverAIOCBCoroutine
*acb
= opaque
;
3568 BlockDriverState
*bs
= acb
->common
.bs
;
3570 acb
->req
.error
= bdrv_co_flush(bs
);
3571 acb
->bh
= qemu_bh_new(bdrv_co_em_bh
, acb
);
3572 qemu_bh_schedule(acb
->bh
);
3575 BlockDriverAIOCB
*bdrv_aio_flush(BlockDriverState
*bs
,
3576 BlockDriverCompletionFunc
*cb
, void *opaque
)
3578 trace_bdrv_aio_flush(bs
, opaque
);
3581 BlockDriverAIOCBCoroutine
*acb
;
3583 acb
= qemu_aio_get(&bdrv_em_co_aio_pool
, bs
, cb
, opaque
);
3584 co
= qemu_coroutine_create(bdrv_aio_flush_co_entry
);
3585 qemu_coroutine_enter(co
, acb
);
3587 return &acb
->common
;
3590 static void coroutine_fn
bdrv_aio_discard_co_entry(void *opaque
)
3592 BlockDriverAIOCBCoroutine
*acb
= opaque
;
3593 BlockDriverState
*bs
= acb
->common
.bs
;
3595 acb
->req
.error
= bdrv_co_discard(bs
, acb
->req
.sector
, acb
->req
.nb_sectors
);
3596 acb
->bh
= qemu_bh_new(bdrv_co_em_bh
, acb
);
3597 qemu_bh_schedule(acb
->bh
);
3600 BlockDriverAIOCB
*bdrv_aio_discard(BlockDriverState
*bs
,
3601 int64_t sector_num
, int nb_sectors
,
3602 BlockDriverCompletionFunc
*cb
, void *opaque
)
3605 BlockDriverAIOCBCoroutine
*acb
;
3607 trace_bdrv_aio_discard(bs
, sector_num
, nb_sectors
, opaque
);
3609 acb
= qemu_aio_get(&bdrv_em_co_aio_pool
, bs
, cb
, opaque
);
3610 acb
->req
.sector
= sector_num
;
3611 acb
->req
.nb_sectors
= nb_sectors
;
3612 co
= qemu_coroutine_create(bdrv_aio_discard_co_entry
);
3613 qemu_coroutine_enter(co
, acb
);
3615 return &acb
->common
;
3618 void bdrv_init(void)
3620 module_call_init(MODULE_INIT_BLOCK
);
3623 void bdrv_init_with_whitelist(void)
3625 use_bdrv_whitelist
= 1;
3629 void *qemu_aio_get(AIOPool
*pool
, BlockDriverState
*bs
,
3630 BlockDriverCompletionFunc
*cb
, void *opaque
)
3632 BlockDriverAIOCB
*acb
;
3634 if (pool
->free_aiocb
) {
3635 acb
= pool
->free_aiocb
;
3636 pool
->free_aiocb
= acb
->next
;
3638 acb
= g_malloc0(pool
->aiocb_size
);
3643 acb
->opaque
= opaque
;
3647 void qemu_aio_release(void *p
)
3649 BlockDriverAIOCB
*acb
= (BlockDriverAIOCB
*)p
;
3650 AIOPool
*pool
= acb
->pool
;
3651 acb
->next
= pool
->free_aiocb
;
3652 pool
->free_aiocb
= acb
;
3655 /**************************************************************/
3656 /* Coroutine block device emulation */
3658 typedef struct CoroutineIOCompletion
{
3659 Coroutine
*coroutine
;
3661 } CoroutineIOCompletion
;
3663 static void bdrv_co_io_em_complete(void *opaque
, int ret
)
3665 CoroutineIOCompletion
*co
= opaque
;
3668 qemu_coroutine_enter(co
->coroutine
, NULL
);
3671 static int coroutine_fn
bdrv_co_io_em(BlockDriverState
*bs
, int64_t sector_num
,
3672 int nb_sectors
, QEMUIOVector
*iov
,
3675 CoroutineIOCompletion co
= {
3676 .coroutine
= qemu_coroutine_self(),
3678 BlockDriverAIOCB
*acb
;
3681 acb
= bs
->drv
->bdrv_aio_writev(bs
, sector_num
, iov
, nb_sectors
,
3682 bdrv_co_io_em_complete
, &co
);
3684 acb
= bs
->drv
->bdrv_aio_readv(bs
, sector_num
, iov
, nb_sectors
,
3685 bdrv_co_io_em_complete
, &co
);
3688 trace_bdrv_co_io_em(bs
, sector_num
, nb_sectors
, is_write
, acb
);
3692 qemu_coroutine_yield();
3697 static int coroutine_fn
bdrv_co_readv_em(BlockDriverState
*bs
,
3698 int64_t sector_num
, int nb_sectors
,
3701 return bdrv_co_io_em(bs
, sector_num
, nb_sectors
, iov
, false);
3704 static int coroutine_fn
bdrv_co_writev_em(BlockDriverState
*bs
,
3705 int64_t sector_num
, int nb_sectors
,
3708 return bdrv_co_io_em(bs
, sector_num
, nb_sectors
, iov
, true);
3711 static void coroutine_fn
bdrv_flush_co_entry(void *opaque
)
3713 RwCo
*rwco
= opaque
;
3715 rwco
->ret
= bdrv_co_flush(rwco
->bs
);
3718 int coroutine_fn
bdrv_co_flush(BlockDriverState
*bs
)
3722 if (!bs
|| !bdrv_is_inserted(bs
) || bdrv_is_read_only(bs
)) {
3726 /* Write back cached data to the OS even with cache=unsafe */
3727 if (bs
->drv
->bdrv_co_flush_to_os
) {
3728 ret
= bs
->drv
->bdrv_co_flush_to_os(bs
);
3734 /* But don't actually force it to the disk with cache=unsafe */
3735 if (bs
->open_flags
& BDRV_O_NO_FLUSH
) {
3739 if (bs
->drv
->bdrv_co_flush_to_disk
) {
3740 ret
= bs
->drv
->bdrv_co_flush_to_disk(bs
);
3741 } else if (bs
->drv
->bdrv_aio_flush
) {
3742 BlockDriverAIOCB
*acb
;
3743 CoroutineIOCompletion co
= {
3744 .coroutine
= qemu_coroutine_self(),
3747 acb
= bs
->drv
->bdrv_aio_flush(bs
, bdrv_co_io_em_complete
, &co
);
3751 qemu_coroutine_yield();
3756 * Some block drivers always operate in either writethrough or unsafe
3757 * mode and don't support bdrv_flush therefore. Usually qemu doesn't
3758 * know how the server works (because the behaviour is hardcoded or
3759 * depends on server-side configuration), so we can't ensure that
3760 * everything is safe on disk. Returning an error doesn't work because
3761 * that would break guests even if the server operates in writethrough
3764 * Let's hope the user knows what he's doing.
3772 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
3773 * in the case of cache=unsafe, so there are no useless flushes.
3775 return bdrv_co_flush(bs
->file
);
3778 void bdrv_invalidate_cache(BlockDriverState
*bs
)
3780 if (bs
->drv
&& bs
->drv
->bdrv_invalidate_cache
) {
3781 bs
->drv
->bdrv_invalidate_cache(bs
);
3785 void bdrv_invalidate_cache_all(void)
3787 BlockDriverState
*bs
;
3789 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
3790 bdrv_invalidate_cache(bs
);
3794 void bdrv_clear_incoming_migration_all(void)
3796 BlockDriverState
*bs
;
3798 QTAILQ_FOREACH(bs
, &bdrv_states
, list
) {
3799 bs
->open_flags
= bs
->open_flags
& ~(BDRV_O_INCOMING
);
3803 int bdrv_flush(BlockDriverState
*bs
)
3811 if (qemu_in_coroutine()) {
3812 /* Fast-path if already in coroutine context */
3813 bdrv_flush_co_entry(&rwco
);
3815 co
= qemu_coroutine_create(bdrv_flush_co_entry
);
3816 qemu_coroutine_enter(co
, &rwco
);
3817 while (rwco
.ret
== NOT_DONE
) {
3825 static void coroutine_fn
bdrv_discard_co_entry(void *opaque
)
3827 RwCo
*rwco
= opaque
;
3829 rwco
->ret
= bdrv_co_discard(rwco
->bs
, rwco
->sector_num
, rwco
->nb_sectors
);
3832 int coroutine_fn
bdrv_co_discard(BlockDriverState
*bs
, int64_t sector_num
,
3837 } else if (bdrv_check_request(bs
, sector_num
, nb_sectors
)) {
3839 } else if (bs
->read_only
) {
3841 } else if (bs
->drv
->bdrv_co_discard
) {
3842 return bs
->drv
->bdrv_co_discard(bs
, sector_num
, nb_sectors
);
3843 } else if (bs
->drv
->bdrv_aio_discard
) {
3844 BlockDriverAIOCB
*acb
;
3845 CoroutineIOCompletion co
= {
3846 .coroutine
= qemu_coroutine_self(),
3849 acb
= bs
->drv
->bdrv_aio_discard(bs
, sector_num
, nb_sectors
,
3850 bdrv_co_io_em_complete
, &co
);
3854 qemu_coroutine_yield();
3862 int bdrv_discard(BlockDriverState
*bs
, int64_t sector_num
, int nb_sectors
)
3867 .sector_num
= sector_num
,
3868 .nb_sectors
= nb_sectors
,
3872 if (qemu_in_coroutine()) {
3873 /* Fast-path if already in coroutine context */
3874 bdrv_discard_co_entry(&rwco
);
3876 co
= qemu_coroutine_create(bdrv_discard_co_entry
);
3877 qemu_coroutine_enter(co
, &rwco
);
3878 while (rwco
.ret
== NOT_DONE
) {
3886 /**************************************************************/
3887 /* removable device support */
3890 * Return TRUE if the media is present
3892 int bdrv_is_inserted(BlockDriverState
*bs
)
3894 BlockDriver
*drv
= bs
->drv
;
3898 if (!drv
->bdrv_is_inserted
)
3900 return drv
->bdrv_is_inserted(bs
);
3904 * Return whether the media changed since the last call to this
3905 * function, or -ENOTSUP if we don't know. Most drivers don't know.
3907 int bdrv_media_changed(BlockDriverState
*bs
)
3909 BlockDriver
*drv
= bs
->drv
;
3911 if (drv
&& drv
->bdrv_media_changed
) {
3912 return drv
->bdrv_media_changed(bs
);
3918 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
3920 void bdrv_eject(BlockDriverState
*bs
, bool eject_flag
)
3922 BlockDriver
*drv
= bs
->drv
;
3924 if (drv
&& drv
->bdrv_eject
) {
3925 drv
->bdrv_eject(bs
, eject_flag
);
3928 if (bs
->device_name
[0] != '\0') {
3929 bdrv_emit_qmp_eject_event(bs
, eject_flag
);
3934 * Lock or unlock the media (if it is locked, the user won't be able
3935 * to eject it manually).
3937 void bdrv_lock_medium(BlockDriverState
*bs
, bool locked
)
3939 BlockDriver
*drv
= bs
->drv
;
3941 trace_bdrv_lock_medium(bs
, locked
);
3943 if (drv
&& drv
->bdrv_lock_medium
) {
3944 drv
->bdrv_lock_medium(bs
, locked
);
3948 /* needed for generic scsi interface */
3950 int bdrv_ioctl(BlockDriverState
*bs
, unsigned long int req
, void *buf
)
3952 BlockDriver
*drv
= bs
->drv
;
3954 if (drv
&& drv
->bdrv_ioctl
)
3955 return drv
->bdrv_ioctl(bs
, req
, buf
);
3959 BlockDriverAIOCB
*bdrv_aio_ioctl(BlockDriverState
*bs
,
3960 unsigned long int req
, void *buf
,
3961 BlockDriverCompletionFunc
*cb
, void *opaque
)
3963 BlockDriver
*drv
= bs
->drv
;
3965 if (drv
&& drv
->bdrv_aio_ioctl
)
3966 return drv
->bdrv_aio_ioctl(bs
, req
, buf
, cb
, opaque
);
3970 void bdrv_set_buffer_alignment(BlockDriverState
*bs
, int align
)
3972 bs
->buffer_alignment
= align
;
3975 void *qemu_blockalign(BlockDriverState
*bs
, size_t size
)
3977 return qemu_memalign((bs
&& bs
->buffer_alignment
) ? bs
->buffer_alignment
: 512, size
);
3980 void bdrv_set_dirty_tracking(BlockDriverState
*bs
, int enable
)
3982 int64_t bitmap_size
;
3984 bs
->dirty_count
= 0;
3986 if (!bs
->dirty_bitmap
) {
3987 bitmap_size
= (bdrv_getlength(bs
) >> BDRV_SECTOR_BITS
) +
3988 BDRV_SECTORS_PER_DIRTY_CHUNK
* BITS_PER_LONG
- 1;
3989 bitmap_size
/= BDRV_SECTORS_PER_DIRTY_CHUNK
* BITS_PER_LONG
;
3991 bs
->dirty_bitmap
= g_new0(unsigned long, bitmap_size
);
3994 if (bs
->dirty_bitmap
) {
3995 g_free(bs
->dirty_bitmap
);
3996 bs
->dirty_bitmap
= NULL
;
4001 int bdrv_get_dirty(BlockDriverState
*bs
, int64_t sector
)
4003 int64_t chunk
= sector
/ (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK
;
4005 if (bs
->dirty_bitmap
&&
4006 (sector
<< BDRV_SECTOR_BITS
) < bdrv_getlength(bs
)) {
4007 return !!(bs
->dirty_bitmap
[chunk
/ (sizeof(unsigned long) * 8)] &
4008 (1UL << (chunk
% (sizeof(unsigned long) * 8))));
4014 void bdrv_reset_dirty(BlockDriverState
*bs
, int64_t cur_sector
,
4017 set_dirty_bitmap(bs
, cur_sector
, nr_sectors
, 0);
4020 int64_t bdrv_get_dirty_count(BlockDriverState
*bs
)
4022 return bs
->dirty_count
;
4025 void bdrv_set_in_use(BlockDriverState
*bs
, int in_use
)
4027 assert(bs
->in_use
!= in_use
);
4028 bs
->in_use
= in_use
;
4031 int bdrv_in_use(BlockDriverState
*bs
)
4036 void bdrv_iostatus_enable(BlockDriverState
*bs
)
4038 bs
->iostatus_enabled
= true;
4039 bs
->iostatus
= BLOCK_DEVICE_IO_STATUS_OK
;
4042 /* The I/O status is only enabled if the drive explicitly
4043 * enables it _and_ the VM is configured to stop on errors */
4044 bool bdrv_iostatus_is_enabled(const BlockDriverState
*bs
)
4046 return (bs
->iostatus_enabled
&&
4047 (bs
->on_write_error
== BLOCK_ERR_STOP_ENOSPC
||
4048 bs
->on_write_error
== BLOCK_ERR_STOP_ANY
||
4049 bs
->on_read_error
== BLOCK_ERR_STOP_ANY
));
4052 void bdrv_iostatus_disable(BlockDriverState
*bs
)
4054 bs
->iostatus_enabled
= false;
4057 void bdrv_iostatus_reset(BlockDriverState
*bs
)
4059 if (bdrv_iostatus_is_enabled(bs
)) {
4060 bs
->iostatus
= BLOCK_DEVICE_IO_STATUS_OK
;
4064 /* XXX: Today this is set by device models because it makes the implementation
4065 quite simple. However, the block layer knows about the error, so it's
4066 possible to implement this without device models being involved */
4067 void bdrv_iostatus_set_err(BlockDriverState
*bs
, int error
)
4069 if (bdrv_iostatus_is_enabled(bs
) &&
4070 bs
->iostatus
== BLOCK_DEVICE_IO_STATUS_OK
) {
4072 bs
->iostatus
= error
== ENOSPC
? BLOCK_DEVICE_IO_STATUS_NOSPACE
:
4073 BLOCK_DEVICE_IO_STATUS_FAILED
;
4078 bdrv_acct_start(BlockDriverState
*bs
, BlockAcctCookie
*cookie
, int64_t bytes
,
4079 enum BlockAcctType type
)
4081 assert(type
< BDRV_MAX_IOTYPE
);
4083 cookie
->bytes
= bytes
;
4084 cookie
->start_time_ns
= get_clock();
4085 cookie
->type
= type
;
4089 bdrv_acct_done(BlockDriverState
*bs
, BlockAcctCookie
*cookie
)
4091 assert(cookie
->type
< BDRV_MAX_IOTYPE
);
4093 bs
->nr_bytes
[cookie
->type
] += cookie
->bytes
;
4094 bs
->nr_ops
[cookie
->type
]++;
4095 bs
->total_time_ns
[cookie
->type
] += get_clock() - cookie
->start_time_ns
;
4098 int bdrv_img_create(const char *filename
, const char *fmt
,
4099 const char *base_filename
, const char *base_fmt
,
4100 char *options
, uint64_t img_size
, int flags
)
4102 QEMUOptionParameter
*param
= NULL
, *create_options
= NULL
;
4103 QEMUOptionParameter
*backing_fmt
, *backing_file
, *size
;
4104 BlockDriverState
*bs
= NULL
;
4105 BlockDriver
*drv
, *proto_drv
;
4106 BlockDriver
*backing_drv
= NULL
;
4109 /* Find driver and parse its options */
4110 drv
= bdrv_find_format(fmt
);
4112 error_report("Unknown file format '%s'", fmt
);
4117 proto_drv
= bdrv_find_protocol(filename
);
4119 error_report("Unknown protocol '%s'", filename
);
4124 create_options
= append_option_parameters(create_options
,
4125 drv
->create_options
);
4126 create_options
= append_option_parameters(create_options
,
4127 proto_drv
->create_options
);
4129 /* Create parameter list with default values */
4130 param
= parse_option_parameters("", create_options
, param
);
4132 set_option_parameter_int(param
, BLOCK_OPT_SIZE
, img_size
);
4134 /* Parse -o options */
4136 param
= parse_option_parameters(options
, create_options
, param
);
4137 if (param
== NULL
) {
4138 error_report("Invalid options for file format '%s'.", fmt
);
4144 if (base_filename
) {
4145 if (set_option_parameter(param
, BLOCK_OPT_BACKING_FILE
,
4147 error_report("Backing file not supported for file format '%s'",
4155 if (set_option_parameter(param
, BLOCK_OPT_BACKING_FMT
, base_fmt
)) {
4156 error_report("Backing file format not supported for file "
4157 "format '%s'", fmt
);
4163 backing_file
= get_option_parameter(param
, BLOCK_OPT_BACKING_FILE
);
4164 if (backing_file
&& backing_file
->value
.s
) {
4165 if (!strcmp(filename
, backing_file
->value
.s
)) {
4166 error_report("Error: Trying to create an image with the "
4167 "same filename as the backing file");
4173 backing_fmt
= get_option_parameter(param
, BLOCK_OPT_BACKING_FMT
);
4174 if (backing_fmt
&& backing_fmt
->value
.s
) {
4175 backing_drv
= bdrv_find_format(backing_fmt
->value
.s
);
4177 error_report("Unknown backing file format '%s'",
4178 backing_fmt
->value
.s
);
4184 // The size for the image must always be specified, with one exception:
4185 // If we are using a backing file, we can obtain the size from there
4186 size
= get_option_parameter(param
, BLOCK_OPT_SIZE
);
4187 if (size
&& size
->value
.n
== -1) {
4188 if (backing_file
&& backing_file
->value
.s
) {
4193 /* backing files always opened read-only */
4195 flags
& ~(BDRV_O_RDWR
| BDRV_O_SNAPSHOT
| BDRV_O_NO_BACKING
);
4199 ret
= bdrv_open(bs
, backing_file
->value
.s
, back_flags
, backing_drv
);
4201 error_report("Could not open '%s'", backing_file
->value
.s
);
4204 bdrv_get_geometry(bs
, &size
);
4207 snprintf(buf
, sizeof(buf
), "%" PRId64
, size
);
4208 set_option_parameter(param
, BLOCK_OPT_SIZE
, buf
);
4210 error_report("Image creation needs a size parameter");
4216 printf("Formatting '%s', fmt=%s ", filename
, fmt
);
4217 print_option_parameters(param
);
4220 ret
= bdrv_create(drv
, filename
, param
);
4223 if (ret
== -ENOTSUP
) {
4224 error_report("Formatting or formatting option not supported for "
4225 "file format '%s'", fmt
);
4226 } else if (ret
== -EFBIG
) {
4227 error_report("The image size is too large for file format '%s'",
4230 error_report("%s: error while creating %s: %s", filename
, fmt
,
4236 free_option_parameters(create_options
);
4237 free_option_parameters(param
);
4246 void *block_job_create(const BlockJobType
*job_type
, BlockDriverState
*bs
,
4247 int64_t speed
, BlockDriverCompletionFunc
*cb
,
4248 void *opaque
, Error
**errp
)
4252 if (bs
->job
|| bdrv_in_use(bs
)) {
4253 error_set(errp
, QERR_DEVICE_IN_USE
, bdrv_get_device_name(bs
));
4256 bdrv_set_in_use(bs
, 1);
4258 job
= g_malloc0(job_type
->instance_size
);
4259 job
->job_type
= job_type
;
4262 job
->opaque
= opaque
;
4266 /* Only set speed when necessary to avoid NotSupported error */
4268 Error
*local_err
= NULL
;
4270 block_job_set_speed(job
, speed
, &local_err
);
4271 if (error_is_set(&local_err
)) {
4274 bdrv_set_in_use(bs
, 0);
4275 error_propagate(errp
, local_err
);
4282 void block_job_complete(BlockJob
*job
, int ret
)
4284 BlockDriverState
*bs
= job
->bs
;
4286 assert(bs
->job
== job
);
4287 job
->cb(job
->opaque
, ret
);
4290 bdrv_set_in_use(bs
, 0);
4293 void block_job_set_speed(BlockJob
*job
, int64_t speed
, Error
**errp
)
4295 Error
*local_err
= NULL
;
4297 if (!job
->job_type
->set_speed
) {
4298 error_set(errp
, QERR_NOT_SUPPORTED
);
4301 job
->job_type
->set_speed(job
, speed
, &local_err
);
4302 if (error_is_set(&local_err
)) {
4303 error_propagate(errp
, local_err
);
4310 void block_job_cancel(BlockJob
*job
)
4312 job
->cancelled
= true;
4313 if (job
->co
&& !job
->busy
) {
4314 qemu_coroutine_enter(job
->co
, NULL
);
4318 bool block_job_is_cancelled(BlockJob
*job
)
4320 return job
->cancelled
;
4323 struct BlockCancelData
{
4325 BlockDriverCompletionFunc
*cb
;
4331 static void block_job_cancel_cb(void *opaque
, int ret
)
4333 struct BlockCancelData
*data
= opaque
;
4335 data
->cancelled
= block_job_is_cancelled(data
->job
);
4337 data
->cb(data
->opaque
, ret
);
4340 int block_job_cancel_sync(BlockJob
*job
)
4342 struct BlockCancelData data
;
4343 BlockDriverState
*bs
= job
->bs
;
4345 assert(bs
->job
== job
);
4347 /* Set up our own callback to store the result and chain to
4348 * the original callback.
4352 data
.opaque
= job
->opaque
;
4353 data
.ret
= -EINPROGRESS
;
4354 job
->cb
= block_job_cancel_cb
;
4355 job
->opaque
= &data
;
4356 block_job_cancel(job
);
4357 while (data
.ret
== -EINPROGRESS
) {
4360 return (data
.cancelled
&& data
.ret
== 0) ? -ECANCELED
: data
.ret
;
4363 void block_job_sleep_ns(BlockJob
*job
, QEMUClock
*clock
, int64_t ns
)
4365 /* Check cancellation *before* setting busy = false, too! */
4366 if (!block_job_is_cancelled(job
)) {
4368 co_sleep_ns(clock
, ns
);