/*
 * QEMU Block backends
 *
 * Copyright (C) 2014-2016 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later.  See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi-event.h"
#include "qemu/id.h"
#include "trace.h"
/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);
struct BlockBackend {
    char *name;
    int refcnt;
    BdrvChild *root;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
    BlockBackendPublic public;

    void *dev;                  /* attached device model, if any */
    bool legacy_dev;            /* true if dev is not a DeviceState */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    bool enable_write_cache;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    uint64_t perm;
    uint64_t shared_perm;
    bool disable_perm;

    bool allow_write_beyond_eof;

    NotifierList remove_bs_notifiers, insert_bs_notifiers;

    int quiesce_counter;
};
typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;
static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};
static void drive_info_del(DriveInfo *dinfo);
static BlockBackend *bdrv_first_blk(BlockDriverState *bs);
static char *blk_get_attached_dev_id(BlockBackend *blk);
/* All BlockBackends */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    QTAILQ_HEAD_INITIALIZER(block_backends);
/* All BlockBackends referenced by the monitor and which are iterated through by
 * blk_next() */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);
static void blk_root_inherit_options(int *child_flags, QDict *child_options,
                                     int parent_flags, QDict *parent_options)
{
    /* We're not supposed to call this function for root nodes */
    abort();
}
static void blk_root_drained_begin(BdrvChild *child);
static void blk_root_drained_end(BdrvChild *child);
static void blk_root_change_media(BdrvChild *child, bool load);
static void blk_root_resize(BdrvChild *child);
static char *blk_root_get_parent_desc(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    char *dev_id;

    if (blk->name) {
        return g_strdup(blk->name);
    }

    dev_id = blk_get_attached_dev_id(blk);
    if (*dev_id) {
        return dev_id;
    } else {
        /* TODO Callback into the BB owner for something more detailed */
        g_free(dev_id);
        return g_strdup("a block device");
    }
}
static const char *blk_root_get_name(BdrvChild *child)
{
    return blk_name(child->opaque);
}
static const BdrvChildRole child_root = {
    .inherit_options    = blk_root_inherit_options,

    .change_media       = blk_root_change_media,
    .resize             = blk_root_resize,
    .get_name           = blk_root_get_name,
    .get_parent_desc    = blk_root_get_parent_desc,

    .drained_begin      = blk_root_drained_begin,
    .drained_end        = blk_root_drained_end,
};
/*
 * Create a new BlockBackend with a reference count of one.
 *
 * @perm is a bitmask of BLK_PERM_* constants which describes the permissions
 * to request for a block driver node that is attached to this BlockBackend.
 * @shared_perm is a bitmask which describes which permissions may be granted
 * to other users of the attached node.
 * Both sets of permissions can be changed later using blk_set_perm().
 *
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(uint64_t perm, uint64_t shared_perm)
{
    BlockBackend *blk;

    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    blk->perm = perm;
    blk->shared_perm = shared_perm;
    blk_set_enable_write_cache(blk, true);

    qemu_co_queue_init(&blk->public.throttled_reqs[0]);
    qemu_co_queue_init(&blk->public.throttled_reqs[1]);

    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);

    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
    return blk;
}
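/* Illustrative usage sketch (added commentary, not part of the original
 * file): a caller that wants a writable backend but can tolerate any other
 * user of the node might do something like this; the exact permission sets
 * depend on the caller:
 *
 *     BlockBackend *blk = blk_new(BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
 *                                 BLK_PERM_ALL);
 *     ...
 *     blk_unref(blk);
 */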
/*
 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    uint64_t perm;

    /* blk_new_open() is mainly used in .bdrv_create implementations and the
     * tools where sharing isn't a concern because the BDS stays private, so we
     * just request permission according to the flags.
     *
     * The exceptions are xen_disk and blockdev_init(); in these cases, the
     * caller of blk_new_open() doesn't make use of the permissions, but they
     * shouldn't hurt either. We can still share everything here because the
     * guest devices will add their own blockers if they can't share. */
    perm = BLK_PERM_CONSISTENT_READ;
    if (flags & BDRV_O_RDWR) {
        perm |= BLK_PERM_WRITE;
    }
    if (flags & BDRV_O_RESIZE) {
        perm |= BLK_PERM_RESIZE;
    }

    blk = blk_new(perm, BLK_PERM_ALL);
    bs = bdrv_open(filename, reference, options, flags, errp);
    if (!bs) {
        blk_unref(blk);
        return NULL;
    }

    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                       perm, BLK_PERM_ALL, blk, errp);
    if (!blk->root) {
        bdrv_unref(bs);
        blk_unref(blk);
        return NULL;
    }

    return blk;
}
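/* Illustrative sketch (added commentary, not part of the original file):
 * opening an image read/write through a fresh BlockBackend; "test.qcow2" is
 * a hypothetical filename:
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new_open("test.qcow2", NULL, NULL,
 *                                      BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *     }
 */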
static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
    if (blk->root) {
        blk_remove_bs(blk);
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}
static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}
int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}
/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}
/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}
/*
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 * ones which are hidden (i.e. are not referenced by the monitor).
 */
static BlockBackend *blk_all_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
}
void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->root) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}
/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}
/* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
 * the monitor or attached to a BlockBackend */
BlockDriverState *bdrv_next(BdrvNextIterator *it)
{
    BlockDriverState *bs;

    /* First, return all root nodes of BlockBackends. In order to avoid
     * returning a BDS twice when multiple BBs refer to it, we only return it
     * if the BB is the first one in the parent list of the BDS. */
    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        do {
            it->blk = blk_all_next(it->blk);
            bs = it->blk ? blk_bs(it->blk) : NULL;
        } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk));

        if (bs) {
            return bs;
        }
        it->phase = BDRV_NEXT_MONITOR_OWNED;
    }

    /* Then return the monitor-owned BDSes without a BB attached. Ignore all
     * BDSes that are attached to a BlockBackend here; they have been handled
     * by the above block already */
    do {
        it->bs = bdrv_next_monitor_owned(it->bs);
        bs = it->bs;
    } while (bs && bdrv_has_blk(bs));

    return bs;
}
BlockDriverState *bdrv_first(BdrvNextIterator *it)
{
    *it = (BdrvNextIterator) {
        .phase = BDRV_NEXT_BACKEND_ROOTS,
    };

    return bdrv_next(it);
}
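/* Illustrative sketch (added commentary, not part of the original file):
 * the iterator pair above visits every top-level BDS exactly once:
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         ... inspect bs ...
 *     }
 */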
/*
 * Add a BlockBackend into the list of backends referenced by the monitor, with
 * the given @name acting as the handle for the monitor.
 * Strictly for use by blockdev.c.
 *
 * @name must not be null or empty.
 *
 * Returns true on success and false on failure. In the latter case, an Error
 * object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(!blk->name);
    assert(name && name[0]);

    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }

    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}
/*
 * Remove a BlockBackend from the list of backends referenced by the monitor.
 * Strictly for use by blockdev.c.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    if (!blk->name) {
        return;
    }

    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    g_free(blk->name);
    blk->name = NULL;
}
/*
 * Return @blk's name, a non-null string.
 * Returns an empty string iff @blk is not referenced by the monitor.
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name ?: "";
}
/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}
/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->root ? blk->root->bs : NULL;
}
static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &bs->parents, next_parent) {
        if (child->role == &child_root) {
            return child->opaque;
        }
    }

    return NULL;
}
/*
 * Returns true if @bs has an associated BlockBackend.
 */
bool bdrv_has_blk(BlockDriverState *bs)
{
    return bdrv_first_blk(bs) != NULL;
}
/*
 * Returns true if @bs has only BlockBackends as parents.
 */
bool bdrv_is_root_node(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role != &child_root) {
            return false;
        }
    }

    return true;
}
/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}
/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}
/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}
/*
 * Returns a pointer to the publicly accessible fields of @blk.
 */
BlockBackendPublic *blk_get_public(BlockBackend *blk)
{
    return &blk->public;
}
/*
 * Returns a BlockBackend given the associated @public fields.
 */
BlockBackend *blk_by_public(BlockBackendPublic *public)
{
    return container_of(public, BlockBackend, public);
}
/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    notifier_list_notify(&blk->remove_bs_notifiers, blk);
    if (blk->public.throttle_state) {
        throttle_timers_detach_aio_context(&blk->public.throttle_timers);
    }

    blk_update_root_state(blk);

    bdrv_root_unref_child(blk->root);
    blk->root = NULL;
}
/*
 * Associates a new BlockDriverState with @blk.
 */
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
{
    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                       blk->perm, blk->shared_perm, blk, errp);
    if (blk->root == NULL) {
        return -EPERM;
    }
    bdrv_ref(bs);

    notifier_list_notify(&blk->insert_bs_notifiers, blk);
    if (blk->public.throttle_state) {
        throttle_timers_attach_aio_context(
            &blk->public.throttle_timers, bdrv_get_aio_context(bs));
    }

    return 0;
}
/*
 * Sets the permission bitmasks that the user of the BlockBackend needs.
 */
int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
                 Error **errp)
{
    int ret;

    if (blk->root && !blk->disable_perm) {
        ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp);
        if (ret < 0) {
            return ret;
        }
    }

    blk->perm = perm;
    blk->shared_perm = shared_perm;

    return 0;
}
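/* Illustrative sketch (added commentary, not part of the original file):
 * a user that no longer needs to tolerate external writers could tighten
 * its shared permissions like this (error handling elided):
 *
 *     uint64_t perm, shared;
 *     blk_get_perm(blk, &perm, &shared);
 *     blk_set_perm(blk, perm, shared & ~BLK_PERM_WRITE, &error_abort);
 */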
void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
{
    *perm = blk->perm;
    *shared_perm = blk->shared_perm;
}
/*
 * Notifies the user of all BlockBackends that migration has completed. qdev
 * devices can tighten their permissions in response (specifically revoke
 * shared write permissions that we needed for storage migration).
 *
 * If an error is returned, the VM cannot be allowed to be resumed.
 */
void blk_resume_after_migration(Error **errp)
{
    BlockBackend *blk;
    Error *local_err = NULL;

    for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
        if (!blk->disable_perm) {
            continue;
        }

        blk->disable_perm = false;

        blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            blk->disable_perm = true;
            return;
        }
    }
}
static int blk_do_attach_dev(BlockBackend *blk, void *dev)
{
    if (blk->dev) {
        return -EBUSY;
    }

    /* While migration is still incoming, we don't need to apply the
     * permissions of guest device BlockBackends. We might still have a block
     * job or NBD server writing to the image for storage migration. */
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        blk->disable_perm = true;
    }

    blk_ref(blk);
    blk->dev = dev;
    blk->legacy_dev = false;
    blk_iostatus_reset(blk);

    return 0;
}
/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
{
    return blk_do_attach_dev(blk, dev);
}
/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_legacy(BlockBackend *blk, void *dev)
{
    if (blk_do_attach_dev(blk, dev) < 0) {
        abort();
    }
    blk->legacy_dev = true;
}
/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(blk);
}
/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}
/* Return the qdev ID, or if no ID is assigned the QOM path, of the block
 * device attached to the BlockBackend. */
static char *blk_get_attached_dev_id(BlockBackend *blk)
{
    DeviceState *dev;

    assert(!blk->legacy_dev);
    dev = blk->dev;

    if (!dev) {
        return g_strdup("");
    } else if (dev->id) {
        return g_strdup(dev->id);
    }
    return object_get_canonical_path(OBJECT(dev));
}
/*
 * Return the BlockBackend which has the device model @dev attached if it
 * exists, else null.
 *
 * @dev must not be null.
 */
BlockBackend *blk_by_dev(void *dev)
{
    BlockBackend *blk = NULL;

    assert(dev != NULL);
    while ((blk = blk_all_next(blk)) != NULL) {
        if (blk->dev == dev) {
            return blk;
        }
    }
    return NULL;
}
/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    /* All drivers that use blk_set_dev_ops() are qdevified and we want to keep
     * it that way, so we can assume blk->dev, if present, is a DeviceState if
     * blk->dev_ops is set. Non-device users may use dev_ops without device. */
    assert(!blk->legacy_dev);

    blk->dev_ops = ops;
    blk->dev_opaque = opaque;

    /* Are we currently quiesced? Should we enforce this right now? */
    if (blk->quiesce_counter && ops->drained_begin) {
        ops->drained_begin(opaque);
    }
}
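/* Illustrative sketch (added commentary, not part of the original file):
 * a device model typically registers its callbacks once at realize time.
 * The callback names here are hypothetical:
 *
 *     static const BlockDevOps my_dev_block_ops = {
 *         .change_media_cb = my_dev_change_media_cb,
 *         .drained_begin   = my_dev_drained_begin,
 *         .drained_end     = my_dev_drained_end,
 *     };
 *
 *     blk_set_dev_ops(blk, &my_dev_block_ops, dev);
 */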
/*
 * Notify @blk's attached device model of media change.
 *
 * If @load is true, notify of media load. This action can fail, meaning that
 * the medium cannot be loaded. @errp is set then.
 *
 * If @load is false, notify of media eject. This can never fail.
 *
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;
        Error *local_err = NULL;

        assert(!blk->legacy_dev);

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err);
        if (local_err) {
            assert(load == true);
            error_propagate(errp, local_err);
            return;
        }
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            char *id = blk_get_attached_dev_id(blk);
            qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open,
                                              &error_abort);
            g_free(id);
        }
    }
}

static void blk_root_change_media(BdrvChild *child, bool load)
{
    blk_dev_change_media_cb(child->opaque, load, NULL);
}
/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}
/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}
/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}
/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}
/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}
/*
 * Notify @blk's attached device model of a backend size change.
 */
static void blk_root_resize(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}
void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}
BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}
void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}
void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        BlockDriverState *bs = blk_bs(blk);
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs && bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}
void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}
static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }

        if (offset > len || len - offset < size) {
            return -EIO;
        }
    }

    return 0;
}
int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                               unsigned int bytes, QEMUIOVector *qiov,
                               BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs = blk_bs(blk);

    trace_blk_co_preadv(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* throttling disk I/O */
    if (blk->public.throttle_state) {
        throttle_group_co_io_limits_intercept(blk, bytes, false);
    }

    ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
    bdrv_dec_in_flight(bs);
    return ret;
}
int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                unsigned int bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs = blk_bs(blk);

    trace_blk_co_pwritev(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* throttling disk I/O */
    if (blk->public.throttle_state) {
        throttle_group_co_io_limits_intercept(blk, bytes, true);
    }

    if (!blk->enable_write_cache) {
        flags |= BDRV_REQ_FUA;
    }

    ret = bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags);
    bdrv_dec_in_flight(bs);
    return ret;
}
typedef struct BlkRwCo {
    BlockBackend *blk;
    int64_t offset;
    QEMUIOVector *qiov;
    int ret;
    BdrvRequestFlags flags;
} BlkRwCo;
static void blk_read_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;

    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, rwco->qiov->size,
                              rwco->qiov, rwco->flags);
}
static void blk_write_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;

    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, rwco->qiov->size,
                               rwco->qiov, rwco->flags);
}
static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
                   int64_t bytes, CoroutineEntry co_entry,
                   BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov;
    BlkRwCo rwco;

    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = bytes,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .qiov   = &qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        co_entry(&rwco);
    } else {
        Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
        qemu_coroutine_enter(co);
        BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
    }

    return rwco.ret;
}
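/* Note on the pattern above (added commentary, not part of the original
 * file): blk_prw() bridges the synchronous API (blk_pread/blk_pwrite/
 * blk_flush/...) onto the coroutine-based I/O path. If the caller already
 * runs in a coroutine, the entry point is invoked directly; otherwise a
 * coroutine is spawned and BDRV_POLL_WHILE drives the event loop until the
 * entry point overwrites the NOT_DONE sentinel in rwco.ret. */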
int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
                          int count)
{
    int ret;

    ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    blk_root_drained_begin(blk->root);
    ret = blk_pread(blk, offset, buf, count);
    blk_root_drained_end(blk->root);
    return ret;
}
int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                      int count, BdrvRequestFlags flags)
{
    return blk_prw(blk, offset, NULL, count, blk_write_entry,
                   flags | BDRV_REQ_ZERO_WRITE);
}
int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
{
    return bdrv_make_zero(blk->root, flags);
}
static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;

    bdrv_dec_in_flight(acb->common.bs);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}
BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;

    bdrv_inc_in_flight(blk_bs(blk));
    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    aio_bh_schedule_oneshot(blk_get_aio_context(blk), error_callback_bh, acb);
    return &acb->common;
}
typedef struct BlkAioEmAIOCB {
    BlockAIOCB common;
    BlkRwCo rwco;
    int bytes;
    bool has_returned;
} BlkAioEmAIOCB;

static const AIOCBInfo blk_aio_em_aiocb_info = {
    .aiocb_size         = sizeof(BlkAioEmAIOCB),
};
static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
    if (acb->has_returned) {
        bdrv_dec_in_flight(acb->common.bs);
        acb->common.cb(acb->common.opaque, acb->rwco.ret);
        qemu_aio_unref(acb);
    }
}
static void blk_aio_complete_bh(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    assert(acb->has_returned);
    blk_aio_complete(acb);
}
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
                                QEMUIOVector *qiov, CoroutineEntry co_entry,
                                BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    bdrv_inc_in_flight(blk_bs(blk));
    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .qiov   = qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };
    acb->bytes = bytes;
    acb->has_returned = false;

    co = qemu_coroutine_create(co_entry, acb);
    qemu_coroutine_enter(co);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        aio_bh_schedule_oneshot(blk_get_aio_context(blk),
                                blk_aio_complete_bh, acb);
    }

    return &acb->common;
}
static void blk_aio_read_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    assert(rwco->qiov->size == acb->bytes);
    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
                              rwco->qiov, rwco->flags);
    blk_aio_complete(acb);
}
static void blk_aio_write_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    assert(!rwco->qiov || rwco->qiov->size == acb->bytes);
    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
                               rwco->qiov, rwco->flags);
    blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                  int count, BdrvRequestFlags flags,
                                  BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry,
                        flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
}
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
    if (ret < 0) {
        return ret;
    }
    return count;
}
int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
               BdrvRequestFlags flags)
{
    int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                      flags);
    if (ret < 0) {
        return ret;
    }
    return count;
}
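/* Illustrative sketch (added commentary, not part of the original file):
 * the synchronous wrappers return the byte count on success and a negative
 * errno on error:
 *
 *     uint8_t buf[512];
 *     int ret = blk_pread(blk, 0, buf, sizeof(buf));
 *     if (ret < 0) {
 *         ... handle error ...
 *     }
 */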
int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk_bs(blk));
}
void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk_bs(blk)) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
    }
}
int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk_bs(blk));
}
BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
                           QEMUIOVector *qiov, BdrvRequestFlags flags,
                           BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_read_entry, flags, cb, opaque);
}
BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                            QEMUIOVector *qiov, BdrvRequestFlags flags,
                            BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_write_entry, flags, cb, opaque);
}
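/* Illustrative sketch (added commentary, not part of the original file):
 * an asynchronous read with a completion callback; my_read_done and
 * my_state are hypothetical:
 *
 *     static void my_read_done(void *opaque, int ret)
 *     {
 *         ... ret is 0 on success, a negative errno on failure ...
 *     }
 *
 *     blk_aio_preadv(blk, offset, &qiov, 0, my_read_done, my_state);
 */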
static void blk_aio_flush_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_flush(rwco->blk);
    blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
}
static void blk_aio_pdiscard_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes);
    blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
                             int64_t offset, int count,
                             BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, count, NULL, blk_aio_pdiscard_entry, 0,
                        cb, opaque);
}
void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}
void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}
int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_ioctl(blk_bs(blk), req, buf);
}
static void blk_ioctl_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
                             rwco->qiov->iov[0].iov_base);
}
int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
}
static void blk_aio_ioctl_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
                             rwco->qiov->iov[0].iov_base);
    blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    QEMUIOVector qiov;
    struct iovec iov;

    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = 0,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    return blk_aio_prwv(blk, req, 0, &qiov, blk_aio_ioctl_entry, 0, cb, opaque);
}
int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pdiscard(blk_bs(blk), offset, count);
}
int blk_co_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk_bs(blk));
}
static void blk_flush_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    rwco->ret = blk_co_flush(rwco->blk);
}
int blk_flush(BlockBackend *blk)
{
    return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0);
}
void blk_drain(BlockBackend *blk)
{
    if (blk_bs(blk)) {
        bdrv_drain(blk_bs(blk));
    }
}

void blk_drain_all(void)
{
    bdrv_drain_all();
}
void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}
BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}
BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    case BLOCKDEV_ON_ERROR_AUTO:
    default:
        abort();
    }
}
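/* Illustrative sketch (added commentary, not part of the original file):
 * a device model that hit a request error typically translates its
 * rerror/werror policy into an action and then reports it; ret is assumed
 * to be a negative errno from the failed request:
 *
 *     BlockErrorAction action = blk_get_error_action(blk, is_read, -ret);
 *     blk_error_action(blk, action, is_read, -ret);
 *     if (action == BLOCK_ERROR_ACTION_STOP) {
 *         ... queue the request for retry once the VM is resumed ...
 *     }
 */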
static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk),
                                   bdrv_get_node_name(blk_bs(blk)), optype,
                                   action, blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error),
                                   &error_abort);
}
/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects. First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event. Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop. In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}
int blk_is_read_only(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_is_read_only(bs);
    } else {
        return blk->root_state.read_only;
    }
}
int blk_is_sg(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return 0;
    }

    return bdrv_is_sg(bs);
}
int blk_enable_write_cache(BlockBackend *blk)
{
    return blk->enable_write_cache;
}
void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    blk->enable_write_cache = wce;
}
void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(bs, errp);
}
bool blk_is_inserted(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs && bdrv_is_inserted(bs);
}
bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}
void blk_lock_medium(BlockBackend *blk, bool locked)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_lock_medium(bs, locked);
    }
}
void blk_eject(BlockBackend *blk, bool eject_flag)
{
    BlockDriverState *bs = blk_bs(blk);
    char *id;

    /* blk_eject is only called by qdevified devices */
    assert(!blk->legacy_dev);

    if (bs) {
        bdrv_eject(bs, eject_flag);
    }

    /* Whether or not we ejected on the backend,
     * the frontend experienced a tray event. */
    id = blk_get_attached_dev_id(blk);
    qapi_event_send_device_tray_moved(blk_name(blk), id,
                                      eject_flag, &error_abort);
    g_free(id);
}
int blk_get_flags(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_flags(bs);
    } else {
        return blk->root_state.open_flags;
    }
}
/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
uint32_t blk_get_max_transfer(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    uint32_t max = 0;

    if (bs) {
        max = bs->bl.max_transfer;
    }
    return MIN_NON_ZERO(max, INT_MAX);
}
int blk_get_max_iov(BlockBackend *blk)
{
    return blk->root->bs->bl.max_iov;
}
void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}
void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}
void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}
bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_op_is_blocked(bs, op, errp);
}
void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock(bs, op, reason);
    }
}
void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_block_all(bs, reason);
    }
}
void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock_all(bs, reason);
    }
}
AioContext *blk_get_aio_context(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_aio_context(bs);
    } else {
        return qemu_get_aio_context();
    }
}
static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}
void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        if (blk->public.throttle_state) {
            throttle_timers_detach_aio_context(&blk->public.throttle_timers);
        }
        bdrv_set_aio_context(bs, new_context);
        if (blk->public.throttle_state) {
            throttle_timers_attach_aio_context(&blk->public.throttle_timers,
                                               new_context);
        }
    }
}
void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_add_aio_context_notifier(bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}
void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }
}
void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}
void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}
void blk_io_plug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_plug(bs);
    }
}
void blk_io_unplug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_unplug(bs);
    }
}
BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}
void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}
int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                      int count, BdrvRequestFlags flags)
{
    return blk_co_pwritev(blk, offset, count, NULL,
                          flags | BDRV_REQ_ZERO_WRITE);
}
int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
                          int count)
{
    return blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                   BDRV_REQ_WRITE_COMPRESSED);
}
int blk_truncate(BlockBackend *blk, int64_t offset)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk->root, offset);
}
static void blk_pdiscard_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, rwco->qiov->size);
}
int blk_pdiscard(BlockBackend *blk, int64_t offset, int count)
{
    return blk_prw(blk, offset, NULL, count, blk_pdiscard_entry, 0);
}
int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    int ret;

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
    if (ret < 0) {
        return ret;
    }

    if (ret == size && !blk->enable_write_cache) {
        ret = bdrv_flush(blk_bs(blk));
    }

    return ret < 0 ? ret : size;
}
int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
}
int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
}
int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk_bs(blk), geo);
}
/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->root);

    blk->root_state.open_flags    = blk->root->bs->open_flags;
    blk->root_state.read_only     = blk->root->bs->read_only;
    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
}
/*
 * Returns the detect-zeroes setting to be used for bdrv_open() of a
 * BlockDriverState which is supposed to inherit the root state.
 */
bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
{
    return blk->root_state.detect_zeroes;
}
/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}
BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}
int blk_commit_all(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk) && blk->root->bs->backing) {
            int ret = bdrv_commit(blk->root->bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}
/* throttling disk I/O limits */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
{
    throttle_group_config(blk, cfg);
}
void blk_io_limits_disable(BlockBackend *blk)
{
    assert(blk->public.throttle_state);
    bdrv_drained_begin(blk_bs(blk));
    throttle_group_unregister_blk(blk);
    bdrv_drained_end(blk_bs(blk));
}
/* should be called before blk_set_io_limits if a limit is set */
void blk_io_limits_enable(BlockBackend *blk, const char *group)
{
    assert(!blk->public.throttle_state);
    throttle_group_register_blk(blk, group);
}
void blk_io_limits_update_group(BlockBackend *blk, const char *group)
{
    /* this BB is not part of any group */
    if (!blk->public.throttle_state) {
        return;
    }

    /* this BB is already part of the group we want */
    if (!g_strcmp0(throttle_group_get_name(blk), group)) {
        return;
    }

    /* need to change the group this BB belongs to */
    blk_io_limits_disable(blk);
    blk_io_limits_enable(blk, group);
}
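/* Illustrative sketch (added commentary, not part of the original file):
 * enabling throttling for a backend and applying a config; "mygroup" and
 * the limit value are hypothetical:
 *
 *     ThrottleConfig cfg;
 *     throttle_config_init(&cfg);
 *     cfg.buckets[THROTTLE_OPS_TOTAL].avg = 100;
 *
 *     blk_io_limits_enable(blk, "mygroup");
 *     blk_set_io_limits(blk, &cfg);
 */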
static void blk_root_drained_begin(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (++blk->quiesce_counter == 1) {
        if (blk->dev_ops && blk->dev_ops->drained_begin) {
            blk->dev_ops->drained_begin(blk->dev_opaque);
        }
    }

    /* Note that blk->root may not be accessible here yet if we are just
     * attaching to a BlockDriverState that is drained. Use child instead. */

    if (blk->public.io_limits_disabled++ == 0) {
        throttle_group_restart_blk(blk);
    }
}
static void blk_root_drained_end(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    assert(blk->quiesce_counter);

    assert(blk->public.io_limits_disabled);
    --blk->public.io_limits_disabled;

    if (--blk->quiesce_counter == 0) {
        if (blk->dev_ops && blk->dev_ops->drained_end) {
            blk->dev_ops->drained_end(blk->dev_opaque);
        }
    }
}