/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
27 #include "block/accounting.h"
28 #include "block/block.h"
29 #include "qemu/option.h"
30 #include "qemu/queue.h"
31 #include "qemu/coroutine.h"
32 #include "qemu/timer.h"
33 #include "qapi-types.h"
34 #include "qemu/hbitmap.h"
35 #include "block/snapshot.h"
36 #include "qemu/main-loop.h"
37 #include "qemu/throttle.h"
/* Legacy image-format feature flags (bit values used by image creation) */
#define BLOCK_FLAG_ENCRYPT          1
#define BLOCK_FLAG_LAZY_REFCOUNTS   8

/* Canonical option names understood by driver create/amend option lists */
#define BLOCK_OPT_SIZE              "size"
#define BLOCK_OPT_ENCRYPT           "encryption"
#define BLOCK_OPT_COMPAT6           "compat6"
#define BLOCK_OPT_HWVERSION         "hwversion"
#define BLOCK_OPT_BACKING_FILE      "backing_file"
#define BLOCK_OPT_BACKING_FMT       "backing_fmt"
#define BLOCK_OPT_CLUSTER_SIZE      "cluster_size"
#define BLOCK_OPT_TABLE_SIZE        "table_size"
#define BLOCK_OPT_PREALLOC          "preallocation"
#define BLOCK_OPT_SUBFMT            "subformat"
#define BLOCK_OPT_COMPAT_LEVEL      "compat"
#define BLOCK_OPT_LAZY_REFCOUNTS    "lazy_refcounts"
#define BLOCK_OPT_ADAPTER_TYPE      "adapter_type"
#define BLOCK_OPT_REDUNDANCY        "redundancy"
#define BLOCK_OPT_NOCOW             "nocow"
#define BLOCK_OPT_OBJECT_SIZE       "object_size"
#define BLOCK_OPT_REFCOUNT_BITS     "refcount_bits"

/* Number of leading bytes handed to .bdrv_probe() for format detection */
#define BLOCK_PROBE_BUF_SIZE        512
62 enum BdrvTrackedRequestType
{
70 typedef struct BdrvTrackedRequest
{
74 enum BdrvTrackedRequestType type
;
77 int64_t overlap_offset
;
78 unsigned int overlap_bytes
;
80 QLIST_ENTRY(BdrvTrackedRequest
) list
;
81 Coroutine
*co
; /* owner, used for deadlock detection */
82 CoQueue wait_queue
; /* coroutines blocked on this request */
84 struct BdrvTrackedRequest
*waiting_for
;
88 const char *format_name
;
91 /* set to true if the BlockDriver is a block filter */
93 /* for snapshots block filter like Quorum can implement the
94 * following recursive callback.
95 * It's purpose is to recurse on the filter children while calling
96 * bdrv_recurse_is_first_non_filter on them.
97 * For a sample implementation look in the future Quorum block filter.
99 bool (*bdrv_recurse_is_first_non_filter
)(BlockDriverState
*bs
,
100 BlockDriverState
*candidate
);
102 int (*bdrv_probe
)(const uint8_t *buf
, int buf_size
, const char *filename
);
103 int (*bdrv_probe_device
)(const char *filename
);
105 /* Any driver implementing this callback is expected to be able to handle
106 * NULL file names in its .bdrv_open() implementation */
107 void (*bdrv_parse_filename
)(const char *filename
, QDict
*options
, Error
**errp
);
108 /* Drivers not implementing bdrv_parse_filename nor bdrv_open should have
109 * this field set to true, except ones that are defined only by their
111 * An example of the last type will be the quorum block driver.
113 bool bdrv_needs_filename
;
115 /* Set if a driver can support backing files */
116 bool supports_backing
;
118 /* For handling image reopen for split or non-split files */
119 int (*bdrv_reopen_prepare
)(BDRVReopenState
*reopen_state
,
120 BlockReopenQueue
*queue
, Error
**errp
);
121 void (*bdrv_reopen_commit
)(BDRVReopenState
*reopen_state
);
122 void (*bdrv_reopen_abort
)(BDRVReopenState
*reopen_state
);
123 void (*bdrv_join_options
)(QDict
*options
, QDict
*old_options
);
125 int (*bdrv_open
)(BlockDriverState
*bs
, QDict
*options
, int flags
,
127 int (*bdrv_file_open
)(BlockDriverState
*bs
, QDict
*options
, int flags
,
129 void (*bdrv_close
)(BlockDriverState
*bs
);
130 int (*bdrv_create
)(const char *filename
, QemuOpts
*opts
, Error
**errp
);
131 int (*bdrv_set_key
)(BlockDriverState
*bs
, const char *key
);
132 int (*bdrv_make_empty
)(BlockDriverState
*bs
);
134 void (*bdrv_refresh_filename
)(BlockDriverState
*bs
, QDict
*options
);
137 BlockAIOCB
*(*bdrv_aio_readv
)(BlockDriverState
*bs
,
138 int64_t sector_num
, QEMUIOVector
*qiov
, int nb_sectors
,
139 BlockCompletionFunc
*cb
, void *opaque
);
140 BlockAIOCB
*(*bdrv_aio_writev
)(BlockDriverState
*bs
,
141 int64_t sector_num
, QEMUIOVector
*qiov
, int nb_sectors
,
142 BlockCompletionFunc
*cb
, void *opaque
);
143 BlockAIOCB
*(*bdrv_aio_flush
)(BlockDriverState
*bs
,
144 BlockCompletionFunc
*cb
, void *opaque
);
145 BlockAIOCB
*(*bdrv_aio_discard
)(BlockDriverState
*bs
,
146 int64_t sector_num
, int nb_sectors
,
147 BlockCompletionFunc
*cb
, void *opaque
);
149 int coroutine_fn (*bdrv_co_readv
)(BlockDriverState
*bs
,
150 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
);
151 int coroutine_fn (*bdrv_co_preadv
)(BlockDriverState
*bs
,
152 uint64_t offset
, uint64_t bytes
, QEMUIOVector
*qiov
, int flags
);
153 int coroutine_fn (*bdrv_co_writev
)(BlockDriverState
*bs
,
154 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
);
155 int coroutine_fn (*bdrv_co_writev_flags
)(BlockDriverState
*bs
,
156 int64_t sector_num
, int nb_sectors
, QEMUIOVector
*qiov
, int flags
);
157 int coroutine_fn (*bdrv_co_pwritev
)(BlockDriverState
*bs
,
158 uint64_t offset
, uint64_t bytes
, QEMUIOVector
*qiov
, int flags
);
161 * Efficiently zero a region of the disk image. Typically an image format
162 * would use a compact metadata representation to implement this. This
163 * function pointer may be NULL or return -ENOSUP and .bdrv_co_writev()
164 * will be called instead.
166 int coroutine_fn (*bdrv_co_pwrite_zeroes
)(BlockDriverState
*bs
,
167 int64_t offset
, int count
, BdrvRequestFlags flags
);
168 int coroutine_fn (*bdrv_co_discard
)(BlockDriverState
*bs
,
169 int64_t sector_num
, int nb_sectors
);
170 int64_t coroutine_fn (*bdrv_co_get_block_status
)(BlockDriverState
*bs
,
171 int64_t sector_num
, int nb_sectors
, int *pnum
,
172 BlockDriverState
**file
);
175 * Invalidate any cached meta-data.
177 void (*bdrv_invalidate_cache
)(BlockDriverState
*bs
, Error
**errp
);
178 int (*bdrv_inactivate
)(BlockDriverState
*bs
);
181 * Flushes all data for all layers by calling bdrv_co_flush for underlying
182 * layers, if needed. This function is needed for deterministic
183 * synchronization of the flush finishing callback.
185 int coroutine_fn (*bdrv_co_flush
)(BlockDriverState
*bs
);
188 * Flushes all data that was already written to the OS all the way down to
189 * the disk (for example raw-posix calls fsync()).
191 int coroutine_fn (*bdrv_co_flush_to_disk
)(BlockDriverState
*bs
);
194 * Flushes all internal caches to the OS. The data may still sit in a
195 * writeback cache of the host OS, but it will survive a crash of the qemu
198 int coroutine_fn (*bdrv_co_flush_to_os
)(BlockDriverState
*bs
);
200 const char *protocol_name
;
201 int (*bdrv_truncate
)(BlockDriverState
*bs
, int64_t offset
);
203 int64_t (*bdrv_getlength
)(BlockDriverState
*bs
);
204 bool has_variable_length
;
205 int64_t (*bdrv_get_allocated_file_size
)(BlockDriverState
*bs
);
207 int (*bdrv_write_compressed
)(BlockDriverState
*bs
, int64_t sector_num
,
208 const uint8_t *buf
, int nb_sectors
);
210 int (*bdrv_snapshot_create
)(BlockDriverState
*bs
,
211 QEMUSnapshotInfo
*sn_info
);
212 int (*bdrv_snapshot_goto
)(BlockDriverState
*bs
,
213 const char *snapshot_id
);
214 int (*bdrv_snapshot_delete
)(BlockDriverState
*bs
,
215 const char *snapshot_id
,
218 int (*bdrv_snapshot_list
)(BlockDriverState
*bs
,
219 QEMUSnapshotInfo
**psn_info
);
220 int (*bdrv_snapshot_load_tmp
)(BlockDriverState
*bs
,
221 const char *snapshot_id
,
224 int (*bdrv_get_info
)(BlockDriverState
*bs
, BlockDriverInfo
*bdi
);
225 ImageInfoSpecific
*(*bdrv_get_specific_info
)(BlockDriverState
*bs
);
227 int coroutine_fn (*bdrv_save_vmstate
)(BlockDriverState
*bs
,
230 int coroutine_fn (*bdrv_load_vmstate
)(BlockDriverState
*bs
,
234 int (*bdrv_change_backing_file
)(BlockDriverState
*bs
,
235 const char *backing_file
, const char *backing_fmt
);
237 /* removable device specific */
238 bool (*bdrv_is_inserted
)(BlockDriverState
*bs
);
239 int (*bdrv_media_changed
)(BlockDriverState
*bs
);
240 void (*bdrv_eject
)(BlockDriverState
*bs
, bool eject_flag
);
241 void (*bdrv_lock_medium
)(BlockDriverState
*bs
, bool locked
);
243 /* to control generic scsi devices */
244 BlockAIOCB
*(*bdrv_aio_ioctl
)(BlockDriverState
*bs
,
245 unsigned long int req
, void *buf
,
246 BlockCompletionFunc
*cb
, void *opaque
);
248 /* List of options for creating images, terminated by name == NULL */
249 QemuOptsList
*create_opts
;
252 * Returns 0 for completed check, -errno for internal errors.
253 * The check results are stored in result.
255 int (*bdrv_check
)(BlockDriverState
* bs
, BdrvCheckResult
*result
,
258 int (*bdrv_amend_options
)(BlockDriverState
*bs
, QemuOpts
*opts
,
259 BlockDriverAmendStatusCB
*status_cb
,
262 void (*bdrv_debug_event
)(BlockDriverState
*bs
, BlkdebugEvent event
);
264 /* TODO Better pass a option string/QDict/QemuOpts to add any rule? */
265 int (*bdrv_debug_breakpoint
)(BlockDriverState
*bs
, const char *event
,
267 int (*bdrv_debug_remove_breakpoint
)(BlockDriverState
*bs
,
269 int (*bdrv_debug_resume
)(BlockDriverState
*bs
, const char *tag
);
270 bool (*bdrv_debug_is_suspended
)(BlockDriverState
*bs
, const char *tag
);
272 void (*bdrv_refresh_limits
)(BlockDriverState
*bs
, Error
**errp
);
275 * Returns 1 if newly created images are guaranteed to contain only
276 * zeros, 0 otherwise.
278 int (*bdrv_has_zero_init
)(BlockDriverState
*bs
);
280 /* Remove fd handlers, timers, and other event loop callbacks so the event
281 * loop is no longer in use. Called with no in-flight requests and in
282 * depth-first traversal order with parents before child nodes.
284 void (*bdrv_detach_aio_context
)(BlockDriverState
*bs
);
286 /* Add fd handlers, timers, and other event loop callbacks so I/O requests
287 * can be processed again. Called with no in-flight requests and in
288 * depth-first traversal order with child nodes before parent nodes.
290 void (*bdrv_attach_aio_context
)(BlockDriverState
*bs
,
291 AioContext
*new_context
);
293 /* io queue for linux-aio */
294 void (*bdrv_io_plug
)(BlockDriverState
*bs
);
295 void (*bdrv_io_unplug
)(BlockDriverState
*bs
);
298 * Try to get @bs's logical and physical block size.
299 * On success, store them in @bsz and return zero.
300 * On failure, return negative errno.
302 int (*bdrv_probe_blocksizes
)(BlockDriverState
*bs
, BlockSizes
*bsz
);
304 * Try to get @bs's geometry (cyls, heads, sectors)
305 * On success, store them in @geo and return 0.
306 * On failure return -errno.
307 * Only drivers that want to override guest geometry implement this
308 * callback; see hd_geometry_guess().
310 int (*bdrv_probe_geometry
)(BlockDriverState
*bs
, HDGeometry
*geo
);
313 * Drain and stop any internal sources of requests in the driver, and
314 * remain so until next I/O callback (e.g. bdrv_co_writev) is called.
316 void (*bdrv_drain
)(BlockDriverState
*bs
);
318 void (*bdrv_add_child
)(BlockDriverState
*parent
, BlockDriverState
*child
,
320 void (*bdrv_del_child
)(BlockDriverState
*parent
, BdrvChild
*child
,
323 QLIST_ENTRY(BlockDriver
) list
;
326 typedef struct BlockLimits
{
327 /* maximum number of sectors that can be discarded at once */
330 /* optimal alignment for discard requests in sectors */
331 int64_t discard_alignment
;
333 /* maximum number of bytes that can zeroized at once (since it is
334 * signed, it must be < 2G, if set) */
335 int32_t max_pwrite_zeroes
;
337 /* optimal alignment for write zeroes requests in bytes, must be
338 * power of 2, and less than max_pwrite_zeroes if that is set */
339 uint32_t pwrite_zeroes_alignment
;
341 /* optimal transfer length in sectors */
342 int opt_transfer_length
;
344 /* maximal transfer length in sectors */
345 int max_transfer_length
;
347 /* memory alignment so that no bounce buffer is needed */
348 size_t min_mem_alignment
;
350 /* memory alignment for bounce buffer */
351 size_t opt_mem_alignment
;
353 /* maximum number of iovec elements */
/* Opaque handle for an operation blocker; the struct is defined elsewhere
 * in the block layer, so users only ever hold pointers to it. */
typedef struct BdrvOpBlocker BdrvOpBlocker;
359 typedef struct BdrvAioNotifier
{
360 void (*attached_aio_context
)(AioContext
*new_context
, void *opaque
);
361 void (*detach_aio_context
)(void *opaque
);
366 QLIST_ENTRY(BdrvAioNotifier
) list
;
369 struct BdrvChildRole
{
370 void (*inherit_options
)(int *child_flags
, QDict
*child_options
,
371 int parent_flags
, QDict
*parent_options
);
373 void (*change_media
)(BdrvChild
*child
, bool load
);
374 void (*resize
)(BdrvChild
*child
);
376 /* Returns a name that is supposedly more useful for human users than the
377 * node name for identifying the node in question (in particular, a BB
378 * name), or NULL if the parent can't provide a better name. */
379 const char* (*get_name
)(BdrvChild
*child
);
382 * If this pair of functions is implemented, the parent doesn't issue new
383 * requests after returning from .drained_begin() until .drained_end() is
386 * Note that this can be nested. If drained_begin() was called twice, new
387 * I/O is allowed only after drained_end() was called twice, too.
389 void (*drained_begin
)(BdrvChild
*child
);
390 void (*drained_end
)(BdrvChild
*child
);
393 extern const BdrvChildRole child_file
;
394 extern const BdrvChildRole child_format
;
397 BlockDriverState
*bs
;
399 const BdrvChildRole
*role
;
401 QLIST_ENTRY(BdrvChild
) next
;
402 QLIST_ENTRY(BdrvChild
) next_parent
;
406 * Note: the function bdrv_append() copies and swaps contents of
407 * BlockDriverStates, so if you add new fields to this struct, please
408 * inspect bdrv_append() to determine if the new fields need to be
411 struct BlockDriverState
{
412 int64_t total_sectors
; /* if we are reading a disk image, give its
414 int read_only
; /* if true, the media is read only */
415 int open_flags
; /* flags used to open the file, re-used for re-open */
416 int encrypted
; /* if true, the media is encrypted */
417 int valid_key
; /* if true, a valid encryption key has been set */
418 int sg
; /* if true, the device is a /dev/sg* */
419 int copy_on_read
; /* if true, copy read backing sectors into image
420 note this is a reference count */
423 BlockDriver
*drv
; /* NULL means no media */
426 AioContext
*aio_context
; /* event loop used for fd handlers, timers, etc */
427 /* long-running tasks intended to always use the same AioContext as this
428 * BDS may register themselves in this list to be notified of changes
429 * regarding this BDS's context */
430 QLIST_HEAD(, BdrvAioNotifier
) aio_notifiers
;
431 bool walking_aio_notifiers
; /* to make removal during iteration safe */
433 char filename
[PATH_MAX
];
434 char backing_file
[PATH_MAX
]; /* if non zero, the image is a diff of
436 char backing_format
[16]; /* if non-zero and backing_file exists */
438 QDict
*full_open_options
;
439 char exact_filename
[PATH_MAX
];
444 /* Callback before write request is processed */
445 NotifierWithReturnList before_write_notifiers
;
447 /* number of in-flight serialising requests */
448 unsigned int serialising_in_flight
;
450 /* Offset after the highest byte written to */
451 uint64_t wr_highest_offset
;
456 /* Alignment requirement for offset/length of I/O requests */
457 unsigned int request_alignment
;
458 /* Flags honored during pwrite (so far: BDRV_REQ_FUA) */
459 unsigned int supported_write_flags
;
460 /* Flags honored during pwrite_zeroes (so far: BDRV_REQ_FUA,
461 * BDRV_REQ_MAY_UNMAP) */
462 unsigned int supported_zero_flags
;
464 /* the following member gives a name to every node on the bs graph. */
466 /* element of the list of named nodes building the graph */
467 QTAILQ_ENTRY(BlockDriverState
) node_list
;
468 /* element of the list of all BlockDriverStates (all_bdrv_states) */
469 QTAILQ_ENTRY(BlockDriverState
) bs_list
;
470 /* element of the list of monitor-owned BDS */
471 QTAILQ_ENTRY(BlockDriverState
) monitor_list
;
472 QLIST_HEAD(, BdrvDirtyBitmap
) dirty_bitmaps
;
475 QLIST_HEAD(, BdrvTrackedRequest
) tracked_requests
;
477 /* operation blockers */
478 QLIST_HEAD(, BdrvOpBlocker
) op_blockers
[BLOCK_OP_TYPE_MAX
];
480 /* long-running background operation */
483 /* The node that this node inherited default options from (and a reopen on
484 * which can affect this node by changing these defaults). This is always a
485 * parent node of this node. */
486 BlockDriverState
*inherits_from
;
487 QLIST_HEAD(, BdrvChild
) children
;
488 QLIST_HEAD(, BdrvChild
) parents
;
491 QDict
*explicit_options
;
492 BlockdevDetectZeroesOptions detect_zeroes
;
494 /* The error object in use for blocking operations on backing_hd */
495 Error
*backing_blocker
;
497 /* threshold limit for writes, in bytes. "High water mark". */
498 uint64_t write_threshold_offset
;
499 NotifierWithReturn write_threshold_notifier
;
501 /* counters for nested bdrv_io_plug and bdrv_io_unplugged_begin */
503 unsigned io_plug_disabled
;
508 struct BlockBackendRootState
{
511 BlockdevDetectZeroesOptions detect_zeroes
;
/* Selects how a mirror target's backing chain is established once the
 * mirror job completes. */
typedef enum BlockMirrorBackingMode {
    /* Reuse the existing backing chain from the source for the target.
     * - sync=full: Set backing BDS to NULL.
     * - sync=top:  Use source's backing BDS.
     * - sync=none: Use source as the backing BDS. */
    MIRROR_SOURCE_BACKING_CHAIN,

    /* Open the target's backing chain completely anew */
    MIRROR_OPEN_BACKING_CHAIN,

    /* Do not change the target's backing BDS after job completion */
    MIRROR_LEAVE_BACKING_CHAIN,
} BlockMirrorBackingMode;
528 static inline BlockDriverState
*backing_bs(BlockDriverState
*bs
)
530 return bs
->backing
? bs
->backing
->bs
: NULL
;
534 /* Essential block drivers which must always be statically linked into qemu, and
535 * which therefore can be accessed without using bdrv_find_format() */
536 extern BlockDriver bdrv_file
;
537 extern BlockDriver bdrv_raw
;
538 extern BlockDriver bdrv_qcow2
;
541 * bdrv_setup_io_funcs:
543 * Prepare a #BlockDriver for I/O request processing by populating
544 * unimplemented coroutine and AIO interfaces with generic wrapper functions
545 * that fall back to implemented interfaces.
547 void bdrv_setup_io_funcs(BlockDriver
*bdrv
);
549 int coroutine_fn
bdrv_co_preadv(BlockDriverState
*bs
,
550 int64_t offset
, unsigned int bytes
, QEMUIOVector
*qiov
,
551 BdrvRequestFlags flags
);
552 int coroutine_fn
bdrv_co_pwritev(BlockDriverState
*bs
,
553 int64_t offset
, unsigned int bytes
, QEMUIOVector
*qiov
,
554 BdrvRequestFlags flags
);
556 int get_tmp_filename(char *filename
, int size
);
557 BlockDriver
*bdrv_probe_all(const uint8_t *buf
, int buf_size
,
558 const char *filename
);
562 * bdrv_add_before_write_notifier:
564 * Register a callback that is invoked before write requests are processed but
565 * after any throttling or waiting for overlapping requests.
567 void bdrv_add_before_write_notifier(BlockDriverState
*bs
,
568 NotifierWithReturn
*notifier
);
571 * bdrv_detach_aio_context:
573 * May be called from .bdrv_detach_aio_context() to detach children from the
574 * current #AioContext. This is only needed by block drivers that manage their
575 * own children. Both ->file and ->backing are automatically handled and
576 * block drivers should not call this function on them explicitly.
578 void bdrv_detach_aio_context(BlockDriverState
*bs
);
581 * bdrv_attach_aio_context:
583 * May be called from .bdrv_attach_aio_context() to attach children to the new
584 * #AioContext. This is only needed by block drivers that manage their own
585 * children. Both ->file and ->backing are automatically handled and block
586 * drivers should not call this function on them explicitly.
588 void bdrv_attach_aio_context(BlockDriverState
*bs
,
589 AioContext
*new_context
);
592 * bdrv_add_aio_context_notifier:
594 * If a long-running job intends to be always run in the same AioContext as a
595 * certain BDS, it may use this function to be notified of changes regarding the
596 * association of the BDS to an AioContext.
598 * attached_aio_context() is called after the target BDS has been attached to a
599 * new AioContext; detach_aio_context() is called before the target BDS is being
600 * detached from its old AioContext.
602 void bdrv_add_aio_context_notifier(BlockDriverState
*bs
,
603 void (*attached_aio_context
)(AioContext
*new_context
, void *opaque
),
604 void (*detach_aio_context
)(void *opaque
), void *opaque
);
607 * bdrv_remove_aio_context_notifier:
609 * Unsubscribe of change notifications regarding the BDS's AioContext. The
610 * parameters given here have to be the same as those given to
611 * bdrv_add_aio_context_notifier().
613 void bdrv_remove_aio_context_notifier(BlockDriverState
*bs
,
614 void (*aio_context_attached
)(AioContext
*,
616 void (*aio_context_detached
)(void *),
620 int is_windows_drive(const char *filename
);
625 * @bs: Block device to operate on.
626 * @base: Block device that will become the new base, or %NULL to
627 * flatten the whole backing file chain onto @bs.
628 * @base_id: The file name that will be written to @bs as the new
629 * backing file if the job completes. Ignored if @base is %NULL.
630 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
631 * @on_error: The action to take upon error.
632 * @cb: Completion function for the job.
633 * @opaque: Opaque pointer value passed to @cb.
634 * @errp: Error object.
636 * Start a streaming operation on @bs. Clusters that are unallocated
637 * in @bs, but allocated in any image between @base and @bs (both
638 * exclusive) will be written to @bs. At the end of a successful
639 * streaming job, the backing file of @bs will be changed to
640 * @base_id in the written image and to @base in the live BlockDriverState.
642 void stream_start(BlockDriverState
*bs
, BlockDriverState
*base
,
643 const char *base_id
, int64_t speed
, BlockdevOnError on_error
,
644 BlockCompletionFunc
*cb
,
645 void *opaque
, Error
**errp
);
649 * @bs: Active block device.
650 * @top: Top block device to be committed.
651 * @base: Block device that will be written into, and become the new top.
652 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
653 * @on_error: The action to take upon error.
654 * @cb: Completion function for the job.
655 * @opaque: Opaque pointer value passed to @cb.
656 * @backing_file_str: String to use as the backing file in @top's overlay
657 * @errp: Error object.
660 void commit_start(BlockDriverState
*bs
, BlockDriverState
*base
,
661 BlockDriverState
*top
, int64_t speed
,
662 BlockdevOnError on_error
, BlockCompletionFunc
*cb
,
663 void *opaque
, const char *backing_file_str
, Error
**errp
);
665 * commit_active_start:
666 * @bs: Active block device to be committed.
667 * @base: Block device that will be written into, and become the new top.
668 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
669 * @on_error: The action to take upon error.
670 * @cb: Completion function for the job.
671 * @opaque: Opaque pointer value passed to @cb.
672 * @errp: Error object.
675 void commit_active_start(BlockDriverState
*bs
, BlockDriverState
*base
,
677 BlockdevOnError on_error
,
678 BlockCompletionFunc
*cb
,
679 void *opaque
, Error
**errp
);
682 * @bs: Block device to operate on.
683 * @target: Block device to write to.
684 * @replaces: Block graph node name to replace once the mirror is done. Can
685 * only be used when full mirroring is selected.
686 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
687 * @granularity: The chosen granularity for the dirty bitmap.
688 * @buf_size: The amount of data that can be in flight at one time.
689 * @mode: Whether to collapse all images in the chain to the target.
690 * @backing_mode: How to establish the target's backing chain after completion.
691 * @on_source_error: The action to take upon error reading from the source.
692 * @on_target_error: The action to take upon error writing to the target.
693 * @unmap: Whether to unmap target where source sectors only contain zeroes.
694 * @cb: Completion function for the job.
695 * @opaque: Opaque pointer value passed to @cb.
696 * @errp: Error object.
698 * Start a mirroring operation on @bs. Clusters that are allocated
699 * in @bs will be written to @bs until the job is cancelled or
700 * manually completed. At the end of a successful mirroring job,
701 * @bs will be switched to read from @target.
703 void mirror_start(BlockDriverState
*bs
, BlockDriverState
*target
,
704 const char *replaces
,
705 int64_t speed
, uint32_t granularity
, int64_t buf_size
,
706 MirrorSyncMode mode
, BlockMirrorBackingMode backing_mode
,
707 BlockdevOnError on_source_error
,
708 BlockdevOnError on_target_error
,
710 BlockCompletionFunc
*cb
,
711 void *opaque
, Error
**errp
);
715 * @bs: Block device to operate on.
716 * @target: Block device to write to.
717 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
718 * @sync_mode: What parts of the disk image should be copied to the destination.
719 * @sync_bitmap: The dirty bitmap if sync_mode is MIRROR_SYNC_MODE_INCREMENTAL.
720 * @on_source_error: The action to take upon error reading from the source.
721 * @on_target_error: The action to take upon error writing to the target.
722 * @cb: Completion function for the job.
723 * @opaque: Opaque pointer value passed to @cb.
724 * @txn: Transaction that this job is part of (may be NULL).
726 * Start a backup operation on @bs. Clusters in @bs are written to @target
727 * until the job is cancelled or manually completed.
729 void backup_start(BlockDriverState
*bs
, BlockDriverState
*target
,
730 int64_t speed
, MirrorSyncMode sync_mode
,
731 BdrvDirtyBitmap
*sync_bitmap
,
732 BlockdevOnError on_source_error
,
733 BlockdevOnError on_target_error
,
734 BlockCompletionFunc
*cb
, void *opaque
,
735 BlockJobTxn
*txn
, Error
**errp
);
737 void hmp_drive_add_node(Monitor
*mon
, const char *optstr
);
739 BdrvChild
*bdrv_root_attach_child(BlockDriverState
*child_bs
,
740 const char *child_name
,
741 const BdrvChildRole
*child_role
,
743 void bdrv_root_unref_child(BdrvChild
*child
);
745 const char *bdrv_get_parent_name(const BlockDriverState
*bs
);
746 void blk_dev_change_media_cb(BlockBackend
*blk
, bool load
);
747 bool blk_dev_has_removable_media(BlockBackend
*blk
);
748 bool blk_dev_has_tray(BlockBackend
*blk
);
749 void blk_dev_eject_request(BlockBackend
*blk
, bool force
);
750 bool blk_dev_is_tray_open(BlockBackend
*blk
);
751 bool blk_dev_is_medium_locked(BlockBackend
*blk
);
753 void bdrv_set_dirty(BlockDriverState
*bs
, int64_t cur_sector
, int nr_sectors
);
754 bool bdrv_requests_pending(BlockDriverState
*bs
);
756 void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap
*bitmap
, HBitmap
**out
);
757 void bdrv_undo_clear_dirty_bitmap(BdrvDirtyBitmap
*bitmap
, HBitmap
*in
);
759 void blockdev_close_all_bdrv_states(void);
761 #endif /* BLOCK_INT_H */