QEMU block layer public API header — include/block/block.h
(git blame capture from the mirror_qemu.git repository; commit context:
"block: Avoid bs->blk in bdrv_next()")
faf07963
PB
1#ifndef BLOCK_H
2#define BLOCK_H
3
737e150e 4#include "block/aio.h"
daf015ef 5#include "qemu/iov.h"
1de7afc9 6#include "qemu/option.h"
10817bf0 7#include "qemu/coroutine.h"
5366d0c8 8#include "block/accounting.h"
ebab2259 9#include "block/dirty-bitmap.h"
7b1b5d19 10#include "qapi/qmp/qobject.h"
2f0c9fe6 11#include "qapi-types.h"
78f9dc85 12#include "qemu/hbitmap.h"
a76bab49 13
/* block.c */

/* Opaque forward declarations for the block layer's core objects; the
 * struct definitions live in block_int.h and the respective .c files. */
typedef struct BlockDriver BlockDriver;
typedef struct BlockJob BlockJob;
typedef struct BdrvChild BdrvChild;
typedef struct BdrvChildRole BdrvChildRole;
typedef struct BlockJobTxn BlockJobTxn;
typedef struct BdrvNextIterator BdrvNextIterator;
faf07963 21
faf07963
PB
/* Per-image metadata reported by a format driver (see bdrv_get_info()). */
typedef struct BlockDriverInfo {
    /* in bytes, 0 if irrelevant */
    int cluster_size;
    /* offset at which the VM state can be saved (0 if not possible) */
    int64_t vm_state_offset;
    bool is_dirty;
    /*
     * True if unallocated blocks read back as zeroes. This is equivalent
     * to the LBPRZ flag in the SCSI logical block provisioning page.
     */
    bool unallocated_blocks_are_zero;
    /*
     * True if the driver can optimize writing zeroes by unmapping
     * sectors. This is equivalent to the BLKDISCARDZEROES ioctl in Linux
     * with the difference that in qemu a discard is allowed to silently
     * fail. Therefore we have to use bdrv_write_zeroes with the
     * BDRV_REQ_MAY_UNMAP flag for an optimized zero write with unmapping.
     * After this call the driver has to guarantee that the contents read
     * back as zero. It is additionally required that the block device is
     * opened with BDRV_O_UNMAP flag for this to work.
     */
    bool can_write_zeroes_with_unmap;
    /*
     * True if this block driver only supports compressed writes
     */
    bool needs_compressed_writes;
} BlockDriverInfo;
49
/* Fragmentation statistics gathered during an image check (qemu-img check). */
typedef struct BlockFragInfo {
    uint64_t allocated_clusters;
    uint64_t total_clusters;
    uint64_t fragmented_clusters;
    uint64_t compressed_clusters;
} BlockFragInfo;
56
/* Per-request flags accepted by the bdrv_* I/O entry points. */
typedef enum {
    BDRV_REQ_COPY_ON_READ   = 0x1,
    BDRV_REQ_ZERO_WRITE     = 0x2,
    /* The BDRV_REQ_MAY_UNMAP flag is used to indicate that the block driver
     * is allowed to optimize a write zeroes request by unmapping (discarding)
     * blocks if it is guaranteed that the result will read back as
     * zeroes. The flag is only passed to the driver if the block device is
     * opened with BDRV_O_UNMAP.
     */
    BDRV_REQ_MAY_UNMAP      = 0x4,
    BDRV_REQ_NO_SERIALISING = 0x8,
    BDRV_REQ_FUA            = 0x10,
} BdrvRequestFlags;
70
/* Physical/logical block sizes as probed by bdrv_probe_blocksizes(). */
typedef struct BlockSizes {
    uint32_t phys;
    uint32_t log;
} BlockSizes;

/* CHS disk geometry as probed by bdrv_probe_geometry(). */
typedef struct HDGeometry {
    uint32_t heads;
    uint32_t sectors;
    uint32_t cylinders;
} HDGeometry;
81
/* Open flags for bdrv_open() and friends. */
#define BDRV_O_RDWR         0x0002
#define BDRV_O_SNAPSHOT     0x0008 /* open the file read only and save writes in a snapshot */
#define BDRV_O_TEMPORARY    0x0010 /* delete the file after use */
#define BDRV_O_NOCACHE      0x0020 /* do not use the host page cache */
#define BDRV_O_NATIVE_AIO   0x0080 /* use native AIO instead of the thread pool */
#define BDRV_O_NO_BACKING   0x0100 /* don't open the backing file */
#define BDRV_O_NO_FLUSH     0x0200 /* disable flushing on this disk */
#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */
#define BDRV_O_INACTIVE     0x0800 /* consistency hint for migration handoff */
#define BDRV_O_CHECK        0x1000 /* open solely for consistency check */
#define BDRV_O_ALLOW_RDWR   0x2000 /* allow reopen to change from r/o to r/w */
#define BDRV_O_UNMAP        0x4000 /* execute guest UNMAP/TRIM operations */
#define BDRV_O_PROTOCOL     0x8000 /* if no block driver is explicitly given:
                                      select an appropriate protocol driver,
                                      ignoring the format layer */
#define BDRV_O_NO_IO        0x10000 /* don't initialize for I/O */

/* The subset of open flags that describe caching behaviour. */
#define BDRV_O_CACHE_MASK   (BDRV_O_NOCACHE | BDRV_O_NO_FLUSH)
faf07963 100

/* Option names of options parsed by the block layer */

#define BDRV_OPT_CACHE_WB       "cache.writeback"
#define BDRV_OPT_CACHE_DIRECT   "cache.direct"
#define BDRV_OPT_CACHE_NO_FLUSH "cache.no-flush"

108
#define BDRV_SECTOR_BITS   9
#define BDRV_SECTOR_SIZE   (1ULL << BDRV_SECTOR_BITS)
/* Fully parenthesized so the expansion is safe in any expression context
 * (CERT PRE01-C); the value masks off the in-sector offset bits. */
#define BDRV_SECTOR_MASK   (~(BDRV_SECTOR_SIZE - 1))

/* Largest sector count a single request may carry without overflowing
 * either a size_t byte count or an int byte count. */
#define BDRV_REQUEST_MAX_SECTORS MIN(SIZE_MAX >> BDRV_SECTOR_BITS, \
                                     INT_MAX >> BDRV_SECTOR_BITS)
115
/*
 * Allocation status flags
 * BDRV_BLOCK_DATA: data is read from a file returned by bdrv_get_block_status.
 * BDRV_BLOCK_ZERO: sectors read as zero
 * BDRV_BLOCK_OFFSET_VALID: sector stored as raw data in a file returned by
 *                          bdrv_get_block_status.
 * BDRV_BLOCK_ALLOCATED: the content of the block is determined by this
 *                       layer (as opposed to the backing file)
 * BDRV_BLOCK_RAW: used internally to indicate that the request
 *                 was answered by the raw driver and that one
 *                 should look in bs->file directly.
 *
 * If BDRV_BLOCK_OFFSET_VALID is set, bits 9-62 represent the offset in
 * bs->file where sector data can be read from as raw data.
 *
 * DATA == 0 && ZERO == 0 means that data is read from backing_hd if present.
 *
 * DATA ZERO OFFSET_VALID
 *  t    t        t       sectors read as zero, bs->file is zero at offset
 *  t    f        t       sectors read as valid from bs->file at offset
 *  f    t        t       sectors preallocated, read as zero, bs->file not
 *                        necessarily zero at offset
 *  f    f        t       sectors preallocated but read from backing_hd,
 *                        bs->file contains garbage at offset
 *  t    t        f       sectors preallocated, read as zero, unknown offset
 *  t    f        f       sectors read from unknown file or offset
 *  f    t        f       not allocated or unknown offset, read as zero
 *  f    f        f       not allocated or unknown offset, read from backing_hd
 */
#define BDRV_BLOCK_DATA         0x01
#define BDRV_BLOCK_ZERO         0x02
#define BDRV_BLOCK_OFFSET_VALID 0x04
#define BDRV_BLOCK_RAW          0x08
#define BDRV_BLOCK_ALLOCATED    0x10
#define BDRV_BLOCK_OFFSET_MASK  BDRV_SECTOR_MASK
151
e971aa12
JC
152typedef QSIMPLEQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue;
153
154typedef struct BDRVReopenState {
155 BlockDriverState *bs;
156 int flags;
4d2cb092 157 QDict *options;
145f598e 158 QDict *explicit_options;
e971aa12
JC
159 void *opaque;
160} BDRVReopenState;
161
/*
 * Block operation types
 *
 * Used with the bdrv_op_block()/bdrv_op_unblock() blocker machinery to
 * forbid specific operations on a BlockDriverState.
 */
typedef enum BlockOpType {
    BLOCK_OP_TYPE_BACKUP_SOURCE,
    BLOCK_OP_TYPE_BACKUP_TARGET,
    BLOCK_OP_TYPE_CHANGE,
    BLOCK_OP_TYPE_COMMIT_SOURCE,
    BLOCK_OP_TYPE_COMMIT_TARGET,
    BLOCK_OP_TYPE_DATAPLANE,
    BLOCK_OP_TYPE_DRIVE_DEL,
    BLOCK_OP_TYPE_EJECT,
    BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
    BLOCK_OP_TYPE_INTERNAL_SNAPSHOT,
    BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE,
    BLOCK_OP_TYPE_MIRROR_SOURCE,
    BLOCK_OP_TYPE_MIRROR_TARGET,
    BLOCK_OP_TYPE_RESIZE,
    BLOCK_OP_TYPE_STREAM,
    BLOCK_OP_TYPE_REPLACE,
    BLOCK_OP_TYPE_MAX,
} BlockOpType;
e971aa12 184
d15e5465
LC
185void bdrv_info_print(Monitor *mon, const QObject *data);
186void bdrv_info(Monitor *mon, QObject **ret_data);
218a536a
LC
187void bdrv_stats_print(Monitor *mon, const QObject *data);
188void bdrv_info_stats(Monitor *mon, QObject **ret_data);
faf07963 189
0563e191 190/* disk I/O throttling */
faf07963 191void bdrv_init(void);
eb852011 192void bdrv_init_with_whitelist(void);
e6ff69bf 193bool bdrv_uses_whitelist(void);
98289620 194BlockDriver *bdrv_find_protocol(const char *filename,
b65a5e12
HR
195 bool allow_protocol_prefix,
196 Error **errp);
faf07963 197BlockDriver *bdrv_find_format(const char *format_name);
0e7e1989 198int bdrv_create(BlockDriver *drv, const char* filename,
c282e1fd
CL
199 QemuOpts *opts, Error **errp);
200int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp);
7f06d47e 201BlockDriverState *bdrv_new_root(void);
e4e9986b 202BlockDriverState *bdrv_new(void);
8802d1fd 203void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top);
3f09bfbc
KW
204void bdrv_replace_in_backing_chain(BlockDriverState *old,
205 BlockDriverState *new);
206
baf5602e 207int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough);
9e8f1835 208int bdrv_parse_discard_flags(const char *mode, int *flags);
b4b059f6
KW
209BdrvChild *bdrv_open_child(const char *filename,
210 QDict *options, const char *bdref_key,
211 BlockDriverState* parent,
212 const BdrvChildRole *child_role,
213 bool allow_none, Error **errp);
8d24cce1 214void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd);
d9b7b057
KW
215int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
216 const char *bdref_key, Error **errp);
ddf5636d 217int bdrv_open(BlockDriverState **pbs, const char *filename,
6ebf9aa2 218 const char *reference, QDict *options, int flags, Error **errp);
e971aa12 219BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
4d2cb092
KW
220 BlockDriverState *bs,
221 QDict *options, int flags);
e971aa12
JC
222int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp);
223int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp);
224int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
225 BlockReopenQueue *queue, Error **errp);
226void bdrv_reopen_commit(BDRVReopenState *reopen_state);
227void bdrv_reopen_abort(BDRVReopenState *reopen_state);
faf07963
PB
228int bdrv_read(BlockDriverState *bs, int64_t sector_num,
229 uint8_t *buf, int nb_sectors);
230int bdrv_write(BlockDriverState *bs, int64_t sector_num,
231 const uint8_t *buf, int nb_sectors);
4105eaaa 232int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
aa7bfbff 233 int nb_sectors, BdrvRequestFlags flags);
7c84b1b8
MA
234BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs, int64_t sector_num,
235 int nb_sectors, BdrvRequestFlags flags,
097310b5 236 BlockCompletionFunc *cb, void *opaque);
d75cbb5e 237int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags);
faf07963
PB
238int bdrv_pread(BlockDriverState *bs, int64_t offset,
239 void *buf, int count);
240int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
241 const void *buf, int count);
8d3b1a2d 242int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov);
f08145fe
KW
243int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
244 const void *buf, int count);
da1fa91d
KW
245int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
246 int nb_sectors, QEMUIOVector *qiov);
470c0504
SH
247int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
248 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
61408b25 249int coroutine_fn bdrv_co_readv_no_serialising(BlockDriverState *bs,
9568b511 250 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
da1fa91d
KW
251int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
252 int nb_sectors, QEMUIOVector *qiov);
f08f2dda
SH
253/*
254 * Efficiently zero a region of the disk image. Note that this is a regular
255 * I/O request like read or write and should have a reasonable size. This
256 * function is not suitable for zeroing the entire image in a single request
257 * because it may allocate memory for the entire region.
258 */
259int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs, int64_t sector_num,
aa7bfbff 260 int nb_sectors, BdrvRequestFlags flags);
e8a6bb9c
MT
261BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
262 const char *backing_file);
f198fd1c 263int bdrv_get_backing_file_depth(BlockDriverState *bs);
91af7014 264void bdrv_refresh_filename(BlockDriverState *bs);
faf07963 265int bdrv_truncate(BlockDriverState *bs, int64_t offset);
65a9bb25 266int64_t bdrv_nb_sectors(BlockDriverState *bs);
faf07963 267int64_t bdrv_getlength(BlockDriverState *bs);
4a1d5e1f 268int64_t bdrv_get_allocated_file_size(BlockDriverState *bs);
96b8f136 269void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
3baca891 270void bdrv_refresh_limits(BlockDriverState *bs, Error **errp);
faf07963 271int bdrv_commit(BlockDriverState *bs);
756e6736
KW
272int bdrv_change_backing_file(BlockDriverState *bs,
273 const char *backing_file, const char *backing_fmt);
5efa9d5a 274void bdrv_register(BlockDriver *bdrv);
6ebdcee2 275int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
54e26900
JC
276 BlockDriverState *base,
277 const char *backing_file_str);
6ebdcee2
JC
278BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
279 BlockDriverState *bs);
79fac568 280BlockDriverState *bdrv_find_base(BlockDriverState *bs);
5efa9d5a 281
e076f338
KW
282
283typedef struct BdrvCheckResult {
284 int corruptions;
285 int leaks;
286 int check_errors;
ccf34716
KW
287 int corruptions_fixed;
288 int leaks_fixed;
c6bb9ad1 289 int64_t image_end_offset;
f8111c24 290 BlockFragInfo bfi;
e076f338
KW
291} BdrvCheckResult;
292
4534ff54
KW
293typedef enum {
294 BDRV_FIX_LEAKS = 1,
295 BDRV_FIX_ERRORS = 2,
296} BdrvCheckMode;
297
298int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);
e076f338 299
77485434
HR
300/* The units of offset and total_work_size may be chosen arbitrarily by the
301 * block driver; total_work_size may change during the course of the amendment
302 * operation */
303typedef void BlockDriverAmendStatusCB(BlockDriverState *bs, int64_t offset,
8b13976d 304 int64_t total_work_size, void *opaque);
77485434 305int bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts,
8b13976d 306 BlockDriverAmendStatusCB *status_cb, void *cb_opaque);
6f176b48 307
f6186f49 308/* external snapshots */
212a5a8f
BC
309bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
310 BlockDriverState *candidate);
311bool bdrv_is_first_non_filter(BlockDriverState *candidate);
f6186f49 312
09158f00 313/* check if a named node can be replaced when doing drive-mirror */
e12f3784
WC
314BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs,
315 const char *node_name, Error **errp);
09158f00 316
faf07963 317/* async block I/O */
7c84b1b8
MA
318BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
319 QEMUIOVector *iov, int nb_sectors,
097310b5 320 BlockCompletionFunc *cb, void *opaque);
7c84b1b8
MA
321BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
322 QEMUIOVector *iov, int nb_sectors,
097310b5 323 BlockCompletionFunc *cb, void *opaque);
7c84b1b8 324BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
097310b5 325 BlockCompletionFunc *cb, void *opaque);
7c84b1b8
MA
326BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
327 int64_t sector_num, int nb_sectors,
097310b5 328 BlockCompletionFunc *cb, void *opaque);
7c84b1b8
MA
329void bdrv_aio_cancel(BlockAIOCB *acb);
330void bdrv_aio_cancel_async(BlockAIOCB *acb);
faf07963 331
40b4f539 332typedef struct BlockRequest {
91c6e4b7 333 /* Fields to be filled by caller */
8b45f687
FZ
334 union {
335 struct {
336 int64_t sector;
337 int nb_sectors;
338 int flags;
339 QEMUIOVector *qiov;
340 };
341 struct {
342 int req;
343 void *buf;
344 };
345 };
097310b5 346 BlockCompletionFunc *cb;
40b4f539
KW
347 void *opaque;
348
91c6e4b7 349 /* Filled by block layer */
40b4f539
KW
350 int error;
351} BlockRequest;
352
7d780669 353/* sg packet commands */
221f715d 354int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf);
7c84b1b8 355BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
221f715d 356 unsigned long int req, void *buf,
097310b5 357 BlockCompletionFunc *cb, void *opaque);
7d780669 358
0f15423c 359/* Invalidate any cached metadata used by image formats */
5a8a30db
KW
360void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp);
361void bdrv_invalidate_cache_all(Error **errp);
76b1c7fe 362int bdrv_inactivate_all(void);
0f15423c 363
faf07963 364/* Ensure contents are flushed to disk. */
205ef796 365int bdrv_flush(BlockDriverState *bs);
07f07615 366int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
2bc93fed 367void bdrv_close_all(void);
5b98db0a 368void bdrv_drain(BlockDriverState *bs);
a77fd4bb 369void coroutine_fn bdrv_co_drain(BlockDriverState *bs);
922453bc 370void bdrv_drain_all(void);
c6ca28d6 371
bb8bf76f 372int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors);
4265d620 373int bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors);
3ac21627 374int bdrv_has_zero_init_1(BlockDriverState *bs);
f2feebbd 375int bdrv_has_zero_init(BlockDriverState *bs);
4ce78691
PL
376bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs);
377bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);
b6b8a333 378int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
67a0fd2a
FZ
379 int nb_sectors, int *pnum,
380 BlockDriverState **file);
ba3f0e25
FZ
381int64_t bdrv_get_block_status_above(BlockDriverState *bs,
382 BlockDriverState *base,
383 int64_t sector_num,
67a0fd2a
FZ
384 int nb_sectors, int *pnum,
385 BlockDriverState **file);
f58c7b35 386int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
39aa9a12 387 int *pnum);
b35b2bba
MR
388int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
389 int64_t sector_num, int nb_sectors, int *pnum);
faf07963 390
faf07963 391int bdrv_is_read_only(BlockDriverState *bs);
985a03b0 392int bdrv_is_sg(BlockDriverState *bs);
e031f750 393bool bdrv_is_inserted(BlockDriverState *bs);
faf07963 394int bdrv_media_changed(BlockDriverState *bs);
025e849a 395void bdrv_lock_medium(BlockDriverState *bs, bool locked);
f36f3949 396void bdrv_eject(BlockDriverState *bs, bool eject_flag);
f8d6bba1 397const char *bdrv_get_format_name(BlockDriverState *bs);
dc364f4c 398BlockDriverState *bdrv_find_node(const char *node_name);
d5a8ee60 399BlockDeviceInfoList *bdrv_named_nodes_list(Error **errp);
12d3ba82
BC
400BlockDriverState *bdrv_lookup_bs(const char *device,
401 const char *node_name,
402 Error **errp);
5a6684d2 403bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base);
04df765a 404BlockDriverState *bdrv_next_node(BlockDriverState *bs);
7c8eece4 405BdrvNextIterator *bdrv_next(BdrvNextIterator *it, BlockDriverState **bs);
262b4e8f 406BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs);
faf07963 407int bdrv_is_encrypted(BlockDriverState *bs);
c0f4ce77 408int bdrv_key_required(BlockDriverState *bs);
faf07963 409int bdrv_set_key(BlockDriverState *bs, const char *key);
4d2855a3 410void bdrv_add_key(BlockDriverState *bs, const char *key, Error **errp);
c0f4ce77 411int bdrv_query_missing_keys(void);
faf07963
PB
412void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
413 void *opaque);
20a9e77d 414const char *bdrv_get_node_name(const BlockDriverState *bs);
bfb197e0 415const char *bdrv_get_device_name(const BlockDriverState *bs);
9b2aa84f 416const char *bdrv_get_device_or_node_name(const BlockDriverState *bs);
c8433287 417int bdrv_get_flags(BlockDriverState *bs);
faf07963
PB
418int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
419 const uint8_t *buf, int nb_sectors);
420int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
eae041fe 421ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs);
343bded4
PB
422void bdrv_round_to_clusters(BlockDriverState *bs,
423 int64_t sector_num, int nb_sectors,
424 int64_t *cluster_sector_num,
425 int *cluster_nb_sectors);
faf07963 426
045df330 427const char *bdrv_get_encrypted_filename(BlockDriverState *bs);
faf07963
PB
428void bdrv_get_backing_filename(BlockDriverState *bs,
429 char *filename, int filename_size);
dc5a1371 430void bdrv_get_full_backing_filename(BlockDriverState *bs,
9f07429e 431 char *dest, size_t sz, Error **errp);
0a82855a
HR
432void bdrv_get_full_backing_filename_from_filename(const char *backed,
433 const char *backing,
9f07429e
HR
434 char *dest, size_t sz,
435 Error **errp);
199630b6 436int bdrv_is_snapshot(BlockDriverState *bs);
faf07963 437
5c98415b 438int path_has_protocol(const char *path);
faf07963
PB
439int path_is_absolute(const char *path);
440void path_combine(char *dest, int dest_size,
441 const char *base_path,
442 const char *filename);
443
cf8074b3 444int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
45566e9c
CH
445int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
446 int64_t pos, int size);
178e08a5 447
45566e9c
CH
448int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
449 int64_t pos, int size);
178e08a5 450
d92ada22
LC
451void bdrv_img_create(const char *filename, const char *fmt,
452 const char *base_filename, const char *base_fmt,
f382d43a
MR
453 char *options, uint64_t img_size, int flags,
454 Error **errp, bool quiet);
f88e1a42 455
339064d5
KW
456/* Returns the alignment in bytes that is required so that no bounce buffer
457 * is required throughout the stack */
4196d2f0
DL
458size_t bdrv_min_mem_align(BlockDriverState *bs);
459/* Returns optimal alignment in bytes for bounce buffer */
339064d5 460size_t bdrv_opt_mem_align(BlockDriverState *bs);
ba5b7ad4 461void *qemu_blockalign(BlockDriverState *bs, size_t size);
9ebd8448 462void *qemu_blockalign0(BlockDriverState *bs, size_t size);
7d2a35cc 463void *qemu_try_blockalign(BlockDriverState *bs, size_t size);
9ebd8448 464void *qemu_try_blockalign0(BlockDriverState *bs, size_t size);
c53b1c51 465bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov);
ba5b7ad4 466
53fec9d3
SH
467void bdrv_enable_copy_on_read(BlockDriverState *bs);
468void bdrv_disable_copy_on_read(BlockDriverState *bs);
469
9fcb0251
FZ
470void bdrv_ref(BlockDriverState *bs);
471void bdrv_unref(BlockDriverState *bs);
33a60407 472void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child);
98292c61
WC
473BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
474 BlockDriverState *child_bs,
475 const char *child_name,
476 const BdrvChildRole *child_role);
8b9b0cc2 477
fbe40ff7
FZ
478bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp);
479void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason);
480void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason);
481void bdrv_op_block_all(BlockDriverState *bs, Error *reason);
482void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason);
483bool bdrv_op_blocker_is_empty(BlockDriverState *bs);
484
/* Fire a blkdebug event on @child's BlockDriverState; no-op when @child is
 * NULL. The argument is parenthesized before '->' so any pointer-valued
 * expression expands safely (CERT PRE01-C). */
#define BLKDBG_EVENT(child, evt) \
    do { \
        if (child) { \
            bdrv_debug_event((child)->bs, evt); \
        } \
    } while (0)
491
a31939e6 492void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event);
8b9b0cc2 493
41c695c7
KW
494int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
495 const char *tag);
4cc70e93 496int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag);
41c695c7
KW
497int bdrv_debug_resume(BlockDriverState *bs, const char *tag);
498bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag);
499
db519cba
FZ
500/**
501 * bdrv_get_aio_context:
502 *
503 * Returns: the currently bound #AioContext
504 */
505AioContext *bdrv_get_aio_context(BlockDriverState *bs);
506
dcd04228
SH
507/**
508 * bdrv_set_aio_context:
509 *
510 * Changes the #AioContext used for fd handlers, timers, and BHs by this
511 * BlockDriverState and all its children.
512 *
2e5b887c 513 * This function must be called with iothread lock held.
dcd04228
SH
514 */
515void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context);
892b7de8
ET
516int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz);
517int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo);
dcd04228 518
448ad91d
ML
519void bdrv_io_plug(BlockDriverState *bs);
520void bdrv_io_unplug(BlockDriverState *bs);
6b98bd64
PB
521void bdrv_io_unplugged_begin(BlockDriverState *bs);
522void bdrv_io_unplugged_end(BlockDriverState *bs);
448ad91d 523
51288d79
FZ
524/**
525 * bdrv_drained_begin:
526 *
527 * Begin a quiesced section for exclusive access to the BDS, by disabling
528 * external request sources including NBD server and device model. Note that
529 * this doesn't block timers or coroutines from submitting more requests, which
530 * means block_job_pause is still necessary.
531 *
532 * This function can be recursive.
533 */
534void bdrv_drained_begin(BlockDriverState *bs);
535
536/**
537 * bdrv_drained_end:
538 *
539 * End a quiescent section started by bdrv_drained_begin().
540 */
541void bdrv_drained_end(BlockDriverState *bs);
542
e06018ad
WC
543void bdrv_add_child(BlockDriverState *parent, BlockDriverState *child,
544 Error **errp);
545void bdrv_del_child(BlockDriverState *parent, BdrvChild *child, Error **errp);
546
8a4bc5aa 547#endif