/*
 * QEMU Block backends
 *
 * Copyright (C) 2014-2016 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later. See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/coroutines.h"
#include "block/throttle-groups.h"
#include "hw/qdev-core.h"
#include "sysemu/blockdev.h"
#include "sysemu/runstate.h"
#include "sysemu/replay.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "migration/misc.h"
/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);
typedef struct BlockBackendAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);
    void *opaque;
    QLIST_ENTRY(BlockBackendAioNotifier) list;
} BlockBackendAioNotifier;
struct BlockBackend {
    char *name;
    int refcnt;
    BdrvChild *root;
    AioContext *ctx;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
    BlockBackendPublic public;

    DeviceState *dev;           /* attached device model, if any */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    bool enable_write_cache;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    uint64_t perm;
    uint64_t shared_perm;
    bool disable_perm;

    bool allow_aio_context_change;
    bool allow_write_beyond_eof;

    /* Protected by BQL */
    NotifierList remove_bs_notifiers, insert_bs_notifiers;
    QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;

    int quiesce_counter;
    CoQueue queued_requests;
    bool disable_request_queuing;

    VMChangeStateEntry *vmsh;
    bool force_allow_inactivate;

    /* Number of in-flight aio requests. BlockDriverState also counts
     * in-flight requests but aio requests can exist even when blk->root is
     * NULL, so we cannot rely on its counter for that case.
     * Accessed with atomic ops.
     */
    unsigned int in_flight;
};
typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context    = blk_aiocb_get_aio_context,
    .aiocb_size         = sizeof(BlockBackendAIOCB),
};
static void drive_info_del(DriveInfo *dinfo);
static BlockBackend *bdrv_first_blk(BlockDriverState *bs);

/* All BlockBackends. Protected by BQL. */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    QTAILQ_HEAD_INITIALIZER(block_backends);

/*
 * All BlockBackends referenced by the monitor and which are iterated through by
 * blk_next(). Protected by BQL.
 */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);
static void blk_root_inherit_options(BdrvChildRole role, bool parent_is_format,
                                     int *child_flags, QDict *child_options,
                                     int parent_flags, QDict *parent_options)
{
    /* We're not supposed to call this function for root nodes */
    abort();
}

static void blk_root_drained_begin(BdrvChild *child);
static bool blk_root_drained_poll(BdrvChild *child);
static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter);

static void blk_root_change_media(BdrvChild *child, bool load);
static void blk_root_resize(BdrvChild *child);

static bool blk_root_change_aio_ctx(BdrvChild *child, AioContext *ctx,
                                    GHashTable *visited, Transaction *tran,
                                    Error **errp);
static char *blk_root_get_parent_desc(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    g_autofree char *dev_id = NULL;

    if (blk->name) {
        return g_strdup_printf("block device '%s'", blk->name);
    }

    dev_id = blk_get_attached_dev_id(blk);
    if (*dev_id) {
        return g_strdup_printf("block device '%s'", dev_id);
    }

    /* TODO Callback into the BB owner for something more detailed */
    return g_strdup("an unnamed block device");
}

static const char *blk_root_get_name(BdrvChild *child)
{
    return blk_name(child->opaque);
}
static void blk_vm_state_changed(void *opaque, bool running, RunState state)
{
    Error *local_err = NULL;
    BlockBackend *blk = opaque;

    if (state == RUN_STATE_INMIGRATE) {
        return;
    }

    qemu_del_vm_change_state_handler(blk->vmsh);
    blk->vmsh = NULL;
    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
}
/*
 * Notifies the user of the BlockBackend that migration has completed. qdev
 * devices can tighten their permissions in response (specifically revoke
 * shared write permissions that we needed for storage migration).
 *
 * If an error is returned, the VM cannot be allowed to be resumed.
 */
static void blk_root_activate(BdrvChild *child, Error **errp)
{
    BlockBackend *blk = child->opaque;
    Error *local_err = NULL;
    uint64_t saved_shared_perm;

    if (!blk->disable_perm) {
        return;
    }

    blk->disable_perm = false;

    /*
     * blk->shared_perm contains the permissions we want to share once
     * migration is really completely done. For now, we need to share
     * all; but we also need to retain blk->shared_perm, which is
     * overwritten by a successful blk_set_perm() call. Save it and
     * restore it below.
     */
    saved_shared_perm = blk->shared_perm;

    blk_set_perm(blk, blk->perm, BLK_PERM_ALL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
        return;
    }
    blk->shared_perm = saved_shared_perm;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* Activation can happen when migration process is still active, for
         * example when nbd_server_add is called during non-shared storage
         * migration. Defer the shared_perm update to migration completion. */
        if (!blk->vmsh) {
            blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed,
                                                         blk);
        }
        return;
    }

    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
        return;
    }
}
void blk_set_force_allow_inactivate(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    blk->force_allow_inactivate = true;
}

static bool blk_can_inactivate(BlockBackend *blk)
{
    /* If it is a guest device, inactivate is ok. */
    if (blk->dev || blk_name(blk)[0]) {
        return true;
    }

    /* Inactivating means no more writes to the image can be done,
     * even if those writes would be changes invisible to the
     * guest. For block job BBs that satisfy this, we can just allow
     * it. This is the case for mirror job source, which is required
     * by libvirt non-shared block migration. */
    if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) {
        return true;
    }

    return blk->force_allow_inactivate;
}
static int blk_root_inactivate(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->disable_perm) {
        return 0;
    }

    if (!blk_can_inactivate(blk)) {
        return -EPERM;
    }

    blk->disable_perm = true;
    if (blk->root) {
        bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort);
    }

    return 0;
}
static void blk_root_attach(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    BlockBackendAioNotifier *notifier;

    trace_blk_root_attach(child, blk, child->bs);

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        bdrv_add_aio_context_notifier(child->bs,
                notifier->attached_aio_context,
                notifier->detach_aio_context,
                notifier->opaque);
    }
}

static void blk_root_detach(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    BlockBackendAioNotifier *notifier;

    trace_blk_root_detach(child, blk, child->bs);

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        bdrv_remove_aio_context_notifier(child->bs,
                notifier->attached_aio_context,
                notifier->detach_aio_context,
                notifier->opaque);
    }
}

static AioContext *blk_root_get_parent_aio_context(BdrvChild *c)
{
    BlockBackend *blk = c->opaque;

    return blk_get_aio_context(blk);
}
static const BdrvChildClass child_root = {
    .inherit_options    = blk_root_inherit_options,

    .change_media       = blk_root_change_media,
    .resize             = blk_root_resize,
    .get_name           = blk_root_get_name,
    .get_parent_desc    = blk_root_get_parent_desc,

    .drained_begin      = blk_root_drained_begin,
    .drained_poll       = blk_root_drained_poll,
    .drained_end        = blk_root_drained_end,

    .activate           = blk_root_activate,
    .inactivate         = blk_root_inactivate,

    .attach             = blk_root_attach,
    .detach             = blk_root_detach,

    .change_aio_ctx     = blk_root_change_aio_ctx,

    .get_parent_aio_context = blk_root_get_parent_aio_context,
};
/*
 * Create a new BlockBackend with a reference count of one.
 *
 * @perm is a bitmask of BLK_PERM_* constants which describes the permissions
 * to request for a block driver node that is attached to this BlockBackend.
 * @shared_perm is a bitmask which describes which permissions may be granted
 * to other users of the attached node.
 * Both sets of permissions can be changed later using blk_set_perm().
 *
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
{
    BlockBackend *blk;

    GLOBAL_STATE_CODE();

    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    blk->ctx = ctx;
    blk->perm = perm;
    blk->shared_perm = shared_perm;
    blk_set_enable_write_cache(blk, true);

    blk->on_read_error = BLOCKDEV_ON_ERROR_REPORT;
    blk->on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;

    block_acct_init(&blk->stats);

    qemu_co_queue_init(&blk->queued_requests);
    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);
    QLIST_INIT(&blk->aio_notifiers);

    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
    return blk;
}
/*
 * Create a new BlockBackend connected to an existing BlockDriverState.
 *
 * @perm is a bitmask of BLK_PERM_* constants which describes the
 * permissions to request for @bs that is attached to this
 * BlockBackend. @shared_perm is a bitmask which describes which
 * permissions may be granted to other users of the attached node.
 * Both sets of permissions can be changed later using blk_set_perm().
 *
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm,
                              uint64_t shared_perm, Error **errp)
{
    BlockBackend *blk = blk_new(bdrv_get_aio_context(bs), perm, shared_perm);

    GLOBAL_STATE_CODE();

    if (blk_insert_bs(blk, bs, errp) < 0) {
        blk_unref(blk);
        return NULL;
    }
    return blk;
}
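
/*
 * Example (illustrative sketch, not part of the original file): a block
 * job that only needs to read consistent data from @bs while letting all
 * other users continue unrestricted could create its BlockBackend as:
 *
 *     blk = blk_new_with_bs(bs, BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL,
 *                           errp);
 *     if (!blk) {
 *         return NULL;
 *     }
 */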
/*
 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
 * The new BlockBackend is in the main AioContext.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    uint64_t perm = 0;
    uint64_t shared = BLK_PERM_ALL;

    GLOBAL_STATE_CODE();

    /*
     * blk_new_open() is mainly used in .bdrv_create implementations and the
     * tools where sharing isn't a major concern because the BDS stays private
     * and the file is generally not supposed to be used by a second process,
     * so we just request permission according to the flags.
     *
     * The exceptions are xen_disk and blockdev_init(); in these cases, the
     * caller of blk_new_open() doesn't make use of the permissions, but they
     * shouldn't hurt either. We can still share everything here because the
     * guest devices will add their own blockers if they can't share.
     */
    if ((flags & BDRV_O_NO_IO) == 0) {
        perm |= BLK_PERM_CONSISTENT_READ;
        if (flags & BDRV_O_RDWR) {
            perm |= BLK_PERM_WRITE;
        }
    }
    if (flags & BDRV_O_RESIZE) {
        perm |= BLK_PERM_RESIZE;
    }
    if (flags & BDRV_O_NO_SHARE) {
        shared = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED;
    }

    blk = blk_new(qemu_get_aio_context(), perm, shared);
    bs = bdrv_open(filename, reference, options, flags, errp);
    if (!bs) {
        blk_unref(blk);
        return NULL;
    }

    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                       BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
                                       perm, shared, blk, errp);
    if (!blk->root) {
        blk_unref(blk);
        return NULL;
    }

    return blk;
}
static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
    if (blk->public.throttle_group_member.throttle_state) {
        blk_io_limits_disable(blk);
    }
    if (blk->root) {
        blk_remove_bs(blk);
    }
    if (blk->vmsh) {
        qemu_del_vm_change_state_handler(blk->vmsh);
        blk->vmsh = NULL;
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->aio_notifiers));
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo);
}

int blk_get_refcnt(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return blk ? blk->refcnt : 0;
}
/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    assert(blk->refcnt > 0);
    GLOBAL_STATE_CODE();
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    if (blk) {
        assert(blk->refcnt > 0);
        if (blk->refcnt > 1) {
            blk->refcnt--;
        } else {
            blk_drain(blk);
            /* blk_drain() cannot resurrect blk, nobody held a reference */
            assert(blk->refcnt == 1);
            blk->refcnt = 0;
            blk_delete(blk);
        }
    }
}
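
/*
 * Example (illustrative sketch, not part of the original file): reference
 * counting follows the usual pattern; whoever stores a pointer takes a
 * reference and drops it when done:
 *
 *     blk_ref(blk);           keep blk alive while we hold the pointer
 *     s->blk = blk;
 *     ...
 *     blk_unref(s->blk);      may trigger blk_delete() on the last reference
 *     s->blk = NULL;
 */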
/*
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 * ones which are hidden (i.e. are not referenced by the monitor).
 */
BlockBackend *blk_all_next(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
}

void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    GLOBAL_STATE_CODE();

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->root) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}

/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}
/* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
 * the monitor or attached to a BlockBackend */
BlockDriverState *bdrv_next(BdrvNextIterator *it)
{
    BlockDriverState *bs, *old_bs;

    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    /* First, return all root nodes of BlockBackends. In order to avoid
     * returning a BDS twice when multiple BBs refer to it, we only return it
     * if the BB is the first one in the parent list of the BDS. */
    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        BlockBackend *old_blk = it->blk;

        old_bs = old_blk ? blk_bs(old_blk) : NULL;

        do {
            it->blk = blk_all_next(it->blk);
            bs = it->blk ? blk_bs(it->blk) : NULL;
        } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk));

        if (it->blk) {
            blk_ref(it->blk);
        }
        blk_unref(old_blk);

        if (bs) {
            bdrv_ref(bs);
            bdrv_unref(old_bs);
            return bs;
        }
        it->phase = BDRV_NEXT_MONITOR_OWNED;
    } else {
        old_bs = it->bs;
    }

    /* Then return the monitor-owned BDSes without a BB attached. Ignore all
     * BDSes that are attached to a BlockBackend here; they have been handled
     * by the above block already */
    do {
        it->bs = bdrv_next_monitor_owned(it->bs);
        bs = it->bs;
    } while (bs && bdrv_has_blk(bs));

    if (bs) {
        bdrv_ref(bs);
    }
    bdrv_unref(old_bs);

    return bs;
}
static void bdrv_next_reset(BdrvNextIterator *it)
{
    *it = (BdrvNextIterator) {
        .phase = BDRV_NEXT_BACKEND_ROOTS,
    };
}

BlockDriverState *bdrv_first(BdrvNextIterator *it)
{
    GLOBAL_STATE_CODE();
    bdrv_next_reset(it);
    return bdrv_next(it);
}

/* Must be called when aborting a bdrv_next() iteration before
 * bdrv_next() returns NULL */
void bdrv_next_cleanup(BdrvNextIterator *it)
{
    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        if (it->blk) {
            bdrv_unref(blk_bs(it->blk));
            blk_unref(it->blk);
        }
    } else {
        bdrv_unref(it->bs);
    }

    bdrv_next_reset(it);
}
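
/*
 * Example (illustrative sketch, not part of the original file; wants_bs()
 * stands in for any caller-specific check): iterating with the
 * BdrvNextIterator; bdrv_next_cleanup() is only needed when the loop is
 * left before bdrv_next() has returned NULL:
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         if (wants_bs(bs)) {
 *             bdrv_next_cleanup(&it);
 *             break;
 *         }
 *     }
 */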
/*
 * Add a BlockBackend into the list of backends referenced by the monitor, with
 * the given @name acting as the handle for the monitor.
 * Strictly for use by blockdev.c.
 *
 * @name must not be null or empty.
 *
 * Returns true on success and false on failure. In the latter case, an Error
 * object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(!blk->name);
    assert(name && name[0]);
    GLOBAL_STATE_CODE();

    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }

    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}

/*
 * Remove a BlockBackend from the list of backends referenced by the monitor.
 * Strictly for use by blockdev.c.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();

    if (!blk->name) {
        return;
    }

    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    g_free(blk->name);
    blk->name = NULL;
}
/*
 * Return @blk's name, a non-null string.
 * Returns an empty string iff @blk is not referenced by the monitor.
 */
const char *blk_name(const BlockBackend *blk)
{
    return blk->name ?: "";
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    GLOBAL_STATE_CODE();
    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->root ? blk->root->bs : NULL;
}
static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();

    QLIST_FOREACH(child, &bs->parents, next_parent) {
        if (child->klass == &child_root) {
            return child->opaque;
        }
    }

    return NULL;
}

/*
 * Returns true if @bs has an associated BlockBackend.
 */
bool bdrv_has_blk(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();
    return bdrv_first_blk(bs) != NULL;
}

/*
 * Returns true if @bs has only BlockBackends as parents.
 */
bool bdrv_is_root_node(BlockDriverState *bs)
{
    BdrvChild *c;

    GLOBAL_STATE_CODE();
    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->klass != &child_root) {
            return false;
        }
    }

    return true;
}
/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    GLOBAL_STATE_CODE();
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;
    GLOBAL_STATE_CODE();

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Returns a pointer to the publicly accessible fields of @blk.
 */
BlockBackendPublic *blk_get_public(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return &blk->public;
}

/*
 * Returns a BlockBackend given the associated @public fields.
 */
BlockBackend *blk_by_public(BlockBackendPublic *public)
{
    GLOBAL_STATE_CODE();
    return container_of(public, BlockBackend, public);
}
/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    BdrvChild *root;

    GLOBAL_STATE_CODE();

    notifier_list_notify(&blk->remove_bs_notifiers, blk);
    if (tgm->throttle_state) {
        BlockDriverState *bs = blk_bs(blk);

        /*
         * Take a ref in case blk_bs() changes across bdrv_drained_begin(), for
         * example, if a temporary filter node is removed by a blockjob.
         */
        bdrv_ref(bs);
        bdrv_drained_begin(bs);
        throttle_group_detach_aio_context(tgm);
        throttle_group_attach_aio_context(tgm, qemu_get_aio_context());
        bdrv_drained_end(bs);
        bdrv_unref(bs);
    }

    blk_update_root_state(blk);

    /* bdrv_root_unref_child() will cause blk->root to become stale and may
     * switch to a completion coroutine later on. Let's drain all I/O here
     * to avoid that and a potential QEMU crash.
     */
    blk_drain(blk);
    root = blk->root;
    blk->root = NULL;
    bdrv_root_unref_child(root);
}
/*
 * Associates a new BlockDriverState with @blk.
 */
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
{
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    GLOBAL_STATE_CODE();
    bdrv_ref(bs);
    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                       BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
                                       blk->perm, blk->shared_perm,
                                       blk, errp);
    if (blk->root == NULL) {
        return -EPERM;
    }

    notifier_list_notify(&blk->insert_bs_notifiers, blk);
    if (tgm->throttle_state) {
        throttle_group_detach_aio_context(tgm);
        throttle_group_attach_aio_context(tgm, bdrv_get_aio_context(bs));
    }

    return 0;
}

/*
 * Change BlockDriverState associated with @blk.
 */
int blk_replace_bs(BlockBackend *blk, BlockDriverState *new_bs, Error **errp)
{
    GLOBAL_STATE_CODE();
    return bdrv_replace_child_bs(blk->root, new_bs, errp);
}
/*
 * Sets the permission bitmasks that the user of the BlockBackend needs.
 */
int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
                 Error **errp)
{
    int ret;
    GLOBAL_STATE_CODE();

    if (blk->root && !blk->disable_perm) {
        ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp);
        if (ret < 0) {
            return ret;
        }
    }

    blk->perm = perm;
    blk->shared_perm = shared_perm;

    return 0;
}

void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
{
    GLOBAL_STATE_CODE();
    *perm = blk->perm;
    *shared_perm = blk->shared_perm;
}
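
/*
 * Example (illustrative sketch, not part of the original file): a user that
 * wants to stop sharing writes while keeping its own permissions unchanged
 * can combine the two helpers above:
 *
 *     uint64_t perm, shared;
 *
 *     blk_get_perm(blk, &perm, &shared);
 *     if (blk_set_perm(blk, perm, shared & ~BLK_PERM_WRITE, errp) < 0) {
 *         ...   on failure the previous permissions remain in effect
 *     }
 */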
/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
{
    GLOBAL_STATE_CODE();
    if (blk->dev) {
        return -EBUSY;
    }

    /* While migration is still incoming, we don't need to apply the
     * permissions of guest device BlockBackends. We might still have a block
     * job or NBD server writing to the image for storage migration. */
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        blk->disable_perm = true;
    }

    blk_ref(blk);
    blk->dev = dev;
    blk_iostatus_reset(blk);

    return 0;
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, DeviceState *dev)
{
    assert(blk->dev == dev);
    GLOBAL_STATE_CODE();
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
DeviceState *blk_get_attached_dev(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return blk->dev;
}
/* Return the qdev ID, or if no ID is assigned the QOM path, of the block
 * device attached to the BlockBackend. */
char *blk_get_attached_dev_id(BlockBackend *blk)
{
    DeviceState *dev = blk->dev;

    if (!dev) {
        return g_strdup("");
    } else if (dev->id) {
        return g_strdup(dev->id);
    }

    return object_get_canonical_path(OBJECT(dev)) ?: g_strdup("");
}

/*
 * Return the BlockBackend which has the device model @dev attached if it
 * exists, else null.
 *
 * @dev must not be null.
 */
BlockBackend *blk_by_dev(void *dev)
{
    BlockBackend *blk = NULL;

    GLOBAL_STATE_CODE();

    assert(dev != NULL);
    while ((blk = blk_all_next(blk)) != NULL) {
        if (blk->dev == dev) {
            return blk;
        }
    }
    return NULL;
}
/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    GLOBAL_STATE_CODE();
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;

    /* Are we currently quiesced? Should we enforce this right now? */
    if (blk->quiesce_counter && ops && ops->drained_begin) {
        ops->drained_begin(opaque);
    }
}
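
/*
 * Example (illustrative sketch, not part of the original file; my_dev_ops
 * and my_change_media_cb are hypothetical names): a device model typically
 * registers its callback table once at realize time:
 *
 *     static const BlockDevOps my_dev_ops = {
 *         .change_media_cb = my_change_media_cb,
 *     };
 *
 *     blk_set_dev_ops(blk, &my_dev_ops, dev);
 */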
/*
 * Notify @blk's attached device model of media change.
 *
 * If @load is true, notify of media load. This action can fail, meaning that
 * the medium cannot be loaded. @errp is set then.
 *
 * If @load is false, notify of media eject. This can never fail.
 *
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp)
{
    GLOBAL_STATE_CODE();
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;
        Error *local_err = NULL;

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err);
        if (local_err) {
            assert(load == true);
            error_propagate(errp, local_err);
            return;
        }
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            char *id = blk_get_attached_dev_id(blk);
            qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open);
            g_free(id);
        }
    }
}

static void blk_root_change_media(BdrvChild *child, bool load)
{
    blk_dev_change_media_cb(child->opaque, load, NULL);
}
/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    GLOBAL_STATE_CODE();
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}
/*
 * Notify @blk's attached device model of a backend size change.
 */
static void blk_root_resize(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}
void blk_iostatus_enable(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    if (blk_iostatus_is_enabled(blk)) {
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
    }
}
void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}

void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow)
{
    blk->allow_aio_context_change = allow;
}

void blk_set_disable_request_queuing(BlockBackend *blk, bool disable)
{
    blk->disable_request_queuing = disable;
}
static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  int64_t bytes)
{
    int64_t len;

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }

        if (offset > len || len - offset < bytes) {
            return -EIO;
        }
    }

    return 0;
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
{
    assert(blk->in_flight > 0);

    if (blk->quiesce_counter && !blk->disable_request_queuing) {
        blk_dec_in_flight(blk);
        qemu_co_queue_wait(&blk->queued_requests, NULL);
        blk_inc_in_flight(blk);
    }
}
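
/*
 * Example (illustrative sketch, not part of the original file): the helper
 * above must run between exactly one blk_inc/dec_in_flight() pair, which is
 * the pattern all request entry points below follow:
 *
 *     blk_inc_in_flight(blk);
 *     ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, 0, flags);
 *     blk_dec_in_flight(blk);
 */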
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn
blk_co_do_preadv_part(BlockBackend *blk, int64_t offset, int64_t bytes,
                      QEMUIOVector *qiov, size_t qiov_offset,
                      BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs;

    blk_wait_while_drained(blk);

    /* Call blk_bs() only after waiting, the graph may have changed */
    bs = blk_bs(blk);
    trace_blk_co_preadv(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                bytes, false);
    }

    ret = bdrv_co_preadv_part(blk->root, offset, bytes, qiov, qiov_offset,
                              flags);
    bdrv_dec_in_flight(bs);
    return ret;
}
int coroutine_fn blk_co_pread(BlockBackend *blk, int64_t offset, int64_t bytes,
                              void *buf, BdrvRequestFlags flags)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    assert(bytes <= SIZE_MAX);

    return blk_co_preadv(blk, offset, bytes, &qiov, flags);
}

int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
                               BdrvRequestFlags flags)
{
    int ret;

    blk_inc_in_flight(blk);
    ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, 0, flags);
    blk_dec_in_flight(blk);

    return ret;
}

int coroutine_fn blk_co_preadv_part(BlockBackend *blk, int64_t offset,
                                    int64_t bytes, QEMUIOVector *qiov,
                                    size_t qiov_offset, BdrvRequestFlags flags)
{
    int ret;

    blk_inc_in_flight(blk);
    ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, qiov_offset, flags);
    blk_dec_in_flight(blk);

    return ret;
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn
blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
                       QEMUIOVector *qiov, size_t qiov_offset,
                       BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs;

    blk_wait_while_drained(blk);

    /* Call blk_bs() only after waiting, the graph may have changed */
    bs = blk_bs(blk);
    trace_blk_co_pwritev(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                bytes, true);
    }

    if (!blk->enable_write_cache) {
        flags |= BDRV_REQ_FUA;
    }

    ret = bdrv_co_pwritev_part(blk->root, offset, bytes, qiov, qiov_offset,
                               flags);
    bdrv_dec_in_flight(bs);
    return ret;
}
int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
                                     int64_t bytes,
                                     QEMUIOVector *qiov, size_t qiov_offset,
                                     BdrvRequestFlags flags)
{
    int ret;

    blk_inc_in_flight(blk);
    ret = blk_co_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags);
    blk_dec_in_flight(blk);

    return ret;
}

int coroutine_fn blk_co_pwrite(BlockBackend *blk, int64_t offset, int64_t bytes,
                               const void *buf, BdrvRequestFlags flags)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    assert(bytes <= SIZE_MAX);

    return blk_co_pwritev(blk, offset, bytes, &qiov, flags);
}

int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                int64_t bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
{
    return blk_co_pwritev_part(blk, offset, bytes, qiov, 0, flags);
}
typedef struct BlkRwCo {
    BlockBackend *blk;
    int64_t offset;
    void *iobuf;
    int ret;
    BdrvRequestFlags flags;
} BlkRwCo;

int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
{
    GLOBAL_STATE_CODE();
    return bdrv_make_zero(blk->root, flags);
}

void blk_inc_in_flight(BlockBackend *blk)
{
    qatomic_inc(&blk->in_flight);
}

void blk_dec_in_flight(BlockBackend *blk)
{
    qatomic_dec(&blk->in_flight);
    aio_wait_kick();
}
static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;

    blk_dec_in_flight(acb->blk);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
                                     error_callback_bh, acb);
    return &acb->common;
}
typedef struct BlkAioEmAIOCB {
    BlockAIOCB common;
    BlkRwCo rwco;
    int64_t bytes;
    bool has_returned;
} BlkAioEmAIOCB;

static AioContext *blk_aio_em_aiocb_get_aio_context(BlockAIOCB *acb_)
{
    BlkAioEmAIOCB *acb = container_of(acb_, BlkAioEmAIOCB, common);

    return blk_get_aio_context(acb->rwco.blk);
}

static const AIOCBInfo blk_aio_em_aiocb_info = {
    .aiocb_size         = sizeof(BlkAioEmAIOCB),
    .get_aio_context    = blk_aio_em_aiocb_get_aio_context,
};

static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
    if (acb->has_returned) {
        acb->common.cb(acb->common.opaque, acb->rwco.ret);
        blk_dec_in_flight(acb->rwco.blk);
        qemu_aio_unref(acb);
    }
}

static void blk_aio_complete_bh(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    assert(acb->has_returned);
    blk_aio_complete(acb);
}
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset,
                                int64_t bytes,
                                void *iobuf, CoroutineEntry co_entry,
                                BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .iobuf  = iobuf,
        .flags  = flags,
        .ret    = NOT_DONE,
    };
    acb->bytes = bytes;
    acb->has_returned = false;

    co = qemu_coroutine_create(co_entry, acb);
    bdrv_coroutine_enter(blk_bs(blk), co);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
                                         blk_aio_complete_bh, acb);
    }

    return &acb->common;
}
static void coroutine_fn blk_aio_read_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    assert(qiov->size == acb->bytes);
    rwco->ret = blk_co_do_preadv_part(rwco->blk, rwco->offset, acb->bytes, qiov,
                                      0, rwco->flags);
    blk_aio_complete(acb);
}

static void coroutine_fn blk_aio_write_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    assert(!qiov || qiov->size == acb->bytes);
    rwco->ret = blk_co_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes,
                                       qiov, 0, rwco->flags);
    blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                  int64_t bytes, BdrvRequestFlags flags,
                                  BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_write_entry,
                        flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
}

int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk_bs(blk));
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk_bs(blk)) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk_bs(blk));
}
BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
                           QEMUIOVector *qiov, BdrvRequestFlags flags,
                           BlockCompletionFunc *cb, void *opaque)
{
    assert((uint64_t)qiov->size <= INT64_MAX);
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_read_entry, flags, cb, opaque);
}

BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                            QEMUIOVector *qiov, BdrvRequestFlags flags,
                            BlockCompletionFunc *cb, void *opaque)
{
    assert((uint64_t)qiov->size <= INT64_MAX);
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_write_entry, flags, cb, opaque);
}
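
/*
 * Example (illustrative sketch, not part of the original file;
 * my_write_done is a hypothetical callback): AIO callers pass a completion
 * callback that receives the request's return value:
 *
 *     static void my_write_done(void *opaque, int ret)
 *     {
 *         if (ret < 0) {
 *             ...   handle or report the error
 *         }
 *     }
 *
 *     blk_aio_pwritev(blk, offset, &qiov, 0, my_write_done, opaque);
 */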
void blk_aio_cancel(BlockAIOCB *acb)
{
    GLOBAL_STATE_CODE();
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn
blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    blk_wait_while_drained(blk);

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_ioctl(blk_bs(blk), req, buf);
}

int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req,
                              void *buf)
{
    int ret;

    blk_inc_in_flight(blk);
    ret = blk_co_do_ioctl(blk, req, buf);
    blk_dec_in_flight(blk);

    return ret;
}
static void coroutine_fn blk_aio_ioctl_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_do_ioctl(rwco->blk, rwco->offset, rwco->iobuf);

    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque);
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn
blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes)
{
    int ret;

    blk_wait_while_drained(blk);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pdiscard(blk->root, offset, bytes);
}

static void coroutine_fn blk_aio_pdiscard_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_do_pdiscard(rwco->blk, rwco->offset, acb->bytes);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
                             int64_t offset, int64_t bytes,
                             BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
                        cb, opaque);
}

int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset,
                                 int64_t bytes)
{
    int ret;

    blk_inc_in_flight(blk);
    ret = blk_co_do_pdiscard(blk, offset, bytes);
    blk_dec_in_flight(blk);

    return ret;
}
/* To be called between exactly one pair of blk_inc/dec_in_flight() */
static int coroutine_fn blk_co_do_flush(BlockBackend *blk)
{
    blk_wait_while_drained(blk);

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk_bs(blk));
}

static void coroutine_fn blk_aio_flush_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_do_flush(rwco->blk);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
}

int coroutine_fn blk_co_flush(BlockBackend *blk)
{
    int ret;

    blk_inc_in_flight(blk);
    ret = blk_co_do_flush(blk);
    blk_dec_in_flight(blk);

    return ret;
}
void blk_drain(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (bs) {
        bdrv_ref(bs);
        bdrv_drained_begin(bs);
    }

    /* We may have -ENOMEDIUM completions in flight */
    AIO_WAIT_WHILE(blk_get_aio_context(blk),
                   qatomic_mb_read(&blk->in_flight) > 0);

    if (bs) {
        bdrv_drained_end(bs);
        bdrv_unref(bs);
    }
}

void blk_drain_all(void)
{
    BlockBackend *blk = NULL;

    GLOBAL_STATE_CODE();

    bdrv_drain_all_begin();

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);

        /* We may have -ENOMEDIUM completions in flight */
        AIO_WAIT_WHILE(ctx, qatomic_mb_read(&blk->in_flight) > 0);

        aio_context_release(ctx);
    }

    bdrv_drain_all_end();
}
void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    GLOBAL_STATE_CODE();
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    case BLOCKDEV_ON_ERROR_AUTO:
    default:
        abort();
    }
}
static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;
    BlockDriverState *bs = blk_bs(blk);

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), !!bs,
                                   bs ? bdrv_get_node_name(bs) : NULL, optype,
                                   action, blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error));
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects. First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event. Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop. In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}
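
/*
 * Example (illustrative sketch, not part of the original file): a device
 * model usually maps a failed request's (positive) errno to an action and
 * then reports it, retrying later if the VM was stopped:
 *
 *     BlockErrorAction action = blk_get_error_action(blk, is_read, error);
 *
 *     blk_error_action(blk, action, is_read, error);
 *     if (action == BLOCK_ERROR_ACTION_STOP) {
 *         ...   queue the request for retry after the VM resumes
 *     }
 */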
/*
 * Returns true if the BlockBackend can support taking write permissions
 * (because its root node is not read-only).
 */
bool blk_supports_write_perm(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (bs) {
        return !bdrv_is_read_only(bs);
    } else {
        return blk->root_state.open_flags & BDRV_O_RDWR;
    }
}

/*
 * Returns true if the BlockBackend can be written to in its current
 * configuration (i.e. if write permission has been requested)
 */
bool blk_is_writable(BlockBackend *blk)
{
    return blk->perm & BLK_PERM_WRITE;
}

bool blk_is_sg(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (bs == NULL) {
        return false;
    }

    return bdrv_is_sg(bs);
}

bool blk_enable_write_cache(BlockBackend *blk)
{
    return blk->enable_write_cache;
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    blk->enable_write_cache = wce;
}
void blk_activate(BlockBackend *blk, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (!bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_activate(bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs && bdrv_is_inserted(bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_lock_medium(bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    BlockDriverState *bs = blk_bs(blk);
    char *id;

    if (bs) {
        bdrv_eject(bs, eject_flag);
    }

    /* Whether or not we ejected on the backend,
     * the frontend experienced a tray event. */
    id = blk_get_attached_dev_id(blk);
    qapi_event_send_device_tray_moved(blk_name(blk), id,
                                      eject_flag);
    g_free(id);
}
int blk_get_flags(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (bs) {
        return bdrv_get_flags(bs);
    } else {
        return blk->root_state.open_flags;
    }
}

/* Returns the minimum request alignment, in bytes; guaranteed nonzero */
uint32_t blk_get_request_alignment(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs ? bs->bl.request_alignment : BDRV_SECTOR_SIZE;
}

/* Returns the maximum hardware transfer length, in bytes; guaranteed nonzero */
uint64_t blk_get_max_hw_transfer(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    uint64_t max = INT_MAX;

    if (bs) {
        max = MIN_NON_ZERO(max, bs->bl.max_hw_transfer);
        max = MIN_NON_ZERO(max, bs->bl.max_transfer);
    }
    return ROUND_DOWN(max, blk_get_request_alignment(blk));
}
/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
uint32_t blk_get_max_transfer(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    uint32_t max = INT_MAX;

    if (bs) {
        max = MIN_NON_ZERO(max, bs->bl.max_transfer);
    }
    return ROUND_DOWN(max, blk_get_request_alignment(blk));
}

int blk_get_max_hw_iov(BlockBackend *blk)
{
    return MIN_NON_ZERO(blk->root->bs->bl.max_hw_iov,
                        blk->root->bs->bl.max_iov);
}

int blk_get_max_iov(BlockBackend *blk)
{
    return blk->root->bs->bl.max_iov;
}

void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}
bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (!bs) {
        return false;
    }

    return bdrv_op_is_blocked(bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (bs) {
        bdrv_op_unblock(bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (bs) {
        bdrv_op_block_all(bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    if (bs) {
        bdrv_op_unblock_all(bs, reason);
    }
}
AioContext *blk_get_aio_context(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(blk_bs(blk));
        assert(ctx == blk->ctx);
    }

    return blk->ctx;
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}
static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context,
                                  bool update_root_node, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    int ret;

    if (bs) {
        bdrv_ref(bs);

        if (update_root_node) {
            /*
             * update_root_node MUST be false for blk_root_set_aio_ctx_commit(),
             * as we are already in the commit function of a transaction.
             */
            ret = bdrv_try_change_aio_context(bs, new_context, blk->root, errp);
            if (ret < 0) {
                bdrv_unref(bs);
                return ret;
            }
        }
        if (tgm->throttle_state) {
            bdrv_drained_begin(bs);
            throttle_group_detach_aio_context(tgm);
            throttle_group_attach_aio_context(tgm, new_context);
            bdrv_drained_end(bs);
        }

        bdrv_unref(bs);
    }

    blk->ctx = new_context;
    return 0;
}

int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
                        Error **errp)
{
    GLOBAL_STATE_CODE();
    return blk_do_set_aio_context(blk, new_context, true, errp);
}
typedef struct BdrvStateBlkRootContext {
    AioContext *new_ctx;
    BlockBackend *blk;
} BdrvStateBlkRootContext;

static void blk_root_set_aio_ctx_commit(void *opaque)
{
    BdrvStateBlkRootContext *s = opaque;
    BlockBackend *blk = s->blk;

    blk_do_set_aio_context(blk, s->new_ctx, false, &error_abort);
}

static TransactionActionDrv set_blk_root_context = {
    .commit = blk_root_set_aio_ctx_commit,
    .clean = g_free,
};

static bool blk_root_change_aio_ctx(BdrvChild *child, AioContext *ctx,
                                    GHashTable *visited, Transaction *tran,
                                    Error **errp)
{
    BlockBackend *blk = child->opaque;
    BdrvStateBlkRootContext *s;

    if (!blk->allow_aio_context_change) {
        /*
         * Manually created BlockBackends (those with a name) that are not
         * attached to anything can change their AioContext without updating
         * their user; return an error for others.
         */
        if (!blk->name || blk->dev) {
            /* TODO Add BB name/QOM path */
            error_setg(errp, "Cannot change iothread of active block backend");
            return false;
        }
    }

    s = g_new(BdrvStateBlkRootContext, 1);
    *s = (BdrvStateBlkRootContext) {
        .new_ctx = ctx,
        .blk = blk,
    };

    tran_add(tran, &set_blk_root_context, s);
    return true;
}
void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    BlockBackendAioNotifier *notifier;
    BlockDriverState *bs = blk_bs(blk);
    GLOBAL_STATE_CODE();

    notifier = g_new(BlockBackendAioNotifier, 1);
    notifier->attached_aio_context = attached_aio_context;
    notifier->detach_aio_context = detach_aio_context;
    notifier->opaque = opaque;
    QLIST_INSERT_HEAD(&blk->aio_notifiers, notifier, list);

    if (bs) {
        bdrv_add_aio_context_notifier(bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    BlockBackendAioNotifier *notifier;
    BlockDriverState *bs = blk_bs(blk);

    GLOBAL_STATE_CODE();

    if (bs) {
        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        if (notifier->attached_aio_context == attached_aio_context &&
            notifier->detach_aio_context == detach_aio_context &&
            notifier->opaque == opaque) {
            QLIST_REMOVE(notifier, list);
            g_free(notifier);
            return;
        }
    }

    abort();
}
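
/*
 * Example (illustrative sketch, not part of the original file; my_attach
 * and my_detach are hypothetical callbacks): users that cache the
 * AioContext register a notifier pair and remove it symmetrically with the
 * same three arguments:
 *
 *     blk_add_aio_context_notifier(blk, my_attach, my_detach, opaque);
 *     ...
 *     blk_remove_aio_context_notifier(blk, my_attach, my_detach, opaque);
 */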
void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    GLOBAL_STATE_CODE();
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}

void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    GLOBAL_STATE_CODE();
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}

void blk_io_plug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_plug(bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_unplug(bs);
    }
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}
void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                      int64_t bytes, BdrvRequestFlags flags)
{
    return blk_co_pwritev(blk, offset, bytes, NULL,
                          flags | BDRV_REQ_ZERO_WRITE);
}

int coroutine_fn blk_co_pwrite_compressed(BlockBackend *blk, int64_t offset,
                                          int64_t bytes, const void *buf)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    return blk_co_pwritev_part(blk, offset, bytes, &qiov, 0,
                               BDRV_REQ_WRITE_COMPRESSED);
}
int coroutine_fn blk_co_truncate(BlockBackend *blk, int64_t offset, bool exact,
                                 PreallocMode prealloc, BdrvRequestFlags flags,
                                 Error **errp)
{
    if (!blk_is_available(blk)) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }

    return bdrv_co_truncate(blk->root, offset, exact, prealloc, flags, errp);
}
int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    int ret;
    GLOBAL_STATE_CODE();

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
    if (ret < 0) {
        return ret;
    }

    if (ret == size && !blk->enable_write_cache) {
        ret = bdrv_flush(blk_bs(blk));
    }

    return ret < 0 ? ret : size;
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    GLOBAL_STATE_CODE();
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    GLOBAL_STATE_CODE();
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    GLOBAL_STATE_CODE();
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk_bs(blk), geo);
}
/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    assert(blk->root);

    blk->root_state.open_flags    = blk->root->bs->open_flags;
    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
}

/*
 * Returns the detect-zeroes setting to be used for bdrv_open() of a
 * BlockDriverState which is supposed to inherit the root state.
 */
bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return blk->root_state.detect_zeroes;
}

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return blk->root_state.open_flags;
}

BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return &blk->root_state;
}
int blk_commit_all(void)
{
    BlockBackend *blk = NULL;
    GLOBAL_STATE_CODE();

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);
        BlockDriverState *unfiltered_bs = bdrv_skip_filters(blk_bs(blk));

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk) && bdrv_cow_child(unfiltered_bs)) {
            int ret;

            ret = bdrv_commit(unfiltered_bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}
/* throttling disk I/O limits */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
{
    GLOBAL_STATE_CODE();
    throttle_group_config(&blk->public.throttle_group_member, cfg);
}

void blk_io_limits_disable(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    assert(tgm->throttle_state);
    GLOBAL_STATE_CODE();
    if (bs) {
        bdrv_ref(bs);
        bdrv_drained_begin(bs);
    }
    throttle_group_unregister_tgm(tgm);
    if (bs) {
        bdrv_drained_end(bs);
        bdrv_unref(bs);
    }
}

/* should be called before blk_set_io_limits if a limit is set */
void blk_io_limits_enable(BlockBackend *blk, const char *group)
{
    assert(!blk->public.throttle_group_member.throttle_state);
    GLOBAL_STATE_CODE();
    throttle_group_register_tgm(&blk->public.throttle_group_member,
                                group, blk_get_aio_context(blk));
}

void blk_io_limits_update_group(BlockBackend *blk, const char *group)
{
    GLOBAL_STATE_CODE();
    /* this BB is not part of any group */
    if (!blk->public.throttle_group_member.throttle_state) {
        return;
    }

    /* this BB is already part of the group we want */
    if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member),
                   group)) {
        return;
    }

    /* need to change the group this BB belongs to */
    blk_io_limits_disable(blk);
    blk_io_limits_enable(blk, group);
}
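
/*
 * Example (illustrative sketch, not part of the original file): enabling
 * throttling means joining a group first and applying the configuration
 * afterwards, in the order required by the comment above
 * blk_io_limits_enable():
 *
 *     blk_io_limits_enable(blk, "group0");
 *     blk_set_io_limits(blk, &cfg);
 */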
static void blk_root_drained_begin(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;

    if (++blk->quiesce_counter == 1) {
        if (blk->dev_ops && blk->dev_ops->drained_begin) {
            blk->dev_ops->drained_begin(blk->dev_opaque);
        }
    }

    /* Note that blk->root may not be accessible here yet if we are just
     * attaching to a BlockDriverState that is drained. Use child instead. */

    if (qatomic_fetch_inc(&tgm->io_limits_disabled) == 0) {
        throttle_group_restart_tgm(tgm);
    }
}

static bool blk_root_drained_poll(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    bool busy = false;
    assert(blk->quiesce_counter);

    if (blk->dev_ops && blk->dev_ops->drained_poll) {
        busy = blk->dev_ops->drained_poll(blk->dev_opaque);
    }
    return busy || !!blk->in_flight;
}

static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter)
{
    BlockBackend *blk = child->opaque;
    assert(blk->quiesce_counter);

    assert(blk->public.throttle_group_member.io_limits_disabled);
    qatomic_dec(&blk->public.throttle_group_member.io_limits_disabled);

    if (--blk->quiesce_counter == 0) {
        if (blk->dev_ops && blk->dev_ops->drained_end) {
            blk->dev_ops->drained_end(blk->dev_opaque);
        }
        while (qemu_co_enter_next(&blk->queued_requests, NULL)) {
            /* Resume all queued requests */
        }
    }
}
bool blk_register_buf(BlockBackend *blk, void *host, size_t size, Error **errp)
{
    GLOBAL_STATE_CODE();
    return bdrv_register_buf(blk_bs(blk), host, size, errp);
}

void blk_unregister_buf(BlockBackend *blk, void *host, size_t size)
{
    GLOBAL_STATE_CODE();
    bdrv_unregister_buf(blk_bs(blk), host, size);
}
int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
                                   BlockBackend *blk_out, int64_t off_out,
                                   int64_t bytes, BdrvRequestFlags read_flags,
                                   BdrvRequestFlags write_flags)
{
    int r;

    r = blk_check_byte_request(blk_in, off_in, bytes);
    if (r) {
        return r;
    }
    r = blk_check_byte_request(blk_out, off_out, bytes);
    if (r) {
        return r;
    }

    return bdrv_co_copy_range(blk_in->root, off_in,
                              blk_out->root, off_out,
                              bytes, read_flags, write_flags);
}

const BdrvChild *blk_root(BlockBackend *blk)
{
    GLOBAL_STATE_CODE();
    return blk->root;
}

int blk_make_empty(BlockBackend *blk, Error **errp)
{
    GLOBAL_STATE_CODE();
    if (!blk_is_available(blk)) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }

    return bdrv_make_empty(blk->root, errp);
}