block: Add bdrv_try_set_aio_context()
[mirror_qemu.git] / include / block / block.h
#ifndef BLOCK_H
#define BLOCK_H

#include "block/aio.h"
#include "qapi/qapi-types-block-core.h"
#include "block/aio-wait.h"
#include "qemu/iov.h"
#include "qemu/coroutine.h"
#include "block/accounting.h"
#include "block/dirty-bitmap.h"
#include "block/blockjob.h"
#include "qemu/hbitmap.h"

/* block.c */
typedef struct BlockDriver BlockDriver;
typedef struct BdrvChild BdrvChild;
typedef struct BdrvChildRole BdrvChildRole;

typedef struct BlockDriverInfo {
    /* in bytes, 0 if irrelevant */
    int cluster_size;
    /* offset at which the VM state can be saved (0 if not possible) */
    int64_t vm_state_offset;
    bool is_dirty;
    /*
     * True if unallocated blocks read back as zeroes. This is equivalent
     * to the LBPRZ flag in the SCSI logical block provisioning page.
     */
    bool unallocated_blocks_are_zero;
    /*
     * True if this block driver only supports compressed writes
     */
    bool needs_compressed_writes;
} BlockDriverInfo;

typedef struct BlockFragInfo {
    uint64_t allocated_clusters;
    uint64_t total_clusters;
    uint64_t fragmented_clusters;
    uint64_t compressed_clusters;
} BlockFragInfo;

typedef enum {
    BDRV_REQ_COPY_ON_READ       = 0x1,
    BDRV_REQ_ZERO_WRITE         = 0x2,

    /*
     * The BDRV_REQ_MAY_UNMAP flag is used in write_zeroes requests to indicate
     * that the block driver should unmap (discard) blocks if it is guaranteed
     * that the result will read back as zeroes. The flag is only passed to the
     * driver if the block device is opened with BDRV_O_UNMAP.
     */
    BDRV_REQ_MAY_UNMAP          = 0x4,

    /*
     * The BDRV_REQ_NO_SERIALISING flag is only valid for reads and means that
     * we do not want to wait for other serialising requests (via
     * wait_serialising_requests()) during the read operation.
     *
     * This flag is used for backup copy-on-write operations, when we need to
     * read old data before write (write notifier triggered). It is okay since
     * we already waited for other serialising requests in the initiating write
     * (see bdrv_aligned_pwritev), and it is necessary if the initiating write
     * is already serialising (without the flag, the read would deadlock
     * waiting for the serialising write to complete).
     */
    BDRV_REQ_NO_SERIALISING     = 0x8,
    BDRV_REQ_FUA                = 0x10,
    BDRV_REQ_WRITE_COMPRESSED   = 0x20,

    /* Signifies that this write request will not change the visible disk
     * content. */
    BDRV_REQ_WRITE_UNCHANGED    = 0x40,

    /*
     * BDRV_REQ_SERIALISING forces request serialisation for writes.
     * It is used to ensure that writes to the backing file of a backup process
     * target cannot race with a read of the backup target that defers to the
     * backing file.
     *
     * Note that BDRV_REQ_SERIALISING is _not_ the opposite in meaning of
     * BDRV_REQ_NO_SERIALISING. A more descriptive name for the latter might be
     * _DO_NOT_WAIT_FOR_SERIALISING, except that is too long.
     */
    BDRV_REQ_SERIALISING        = 0x80,

    /* Execute the request only if the operation can be offloaded or otherwise
     * be executed efficiently, but return an error instead of using a slow
     * fallback. */
    BDRV_REQ_NO_FALLBACK        = 0x100,

    /* Mask of valid flags */
    BDRV_REQ_MASK               = 0x1ff,
} BdrvRequestFlags;
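
/*
 * Illustrative sketch (not part of the API): how a caller might combine the
 * request flags above with bdrv_pwrite_zeroes(), which is declared later in
 * this header.  "child" and "len" are hypothetical placeholders.
 *
 *     // Zero a region and allow the driver to discard it, provided the
 *     // result still reads back as zeroes.  The MAY_UNMAP hint is only
 *     // forwarded to the driver if the node was opened with BDRV_O_UNMAP.
 *     int ret = bdrv_pwrite_zeroes(child, 0, len, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         // e.g. -ENOTSUP if BDRV_REQ_NO_FALLBACK was also requested and the
 *         // driver cannot zero the range efficiently
 *     }
 */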

typedef struct BlockSizes {
    uint32_t phys;
    uint32_t log;
} BlockSizes;

typedef struct HDGeometry {
    uint32_t heads;
    uint32_t sectors;
    uint32_t cylinders;
} HDGeometry;

#define BDRV_O_RDWR         0x0002
#define BDRV_O_RESIZE       0x0004 /* request permission for resizing the node */
#define BDRV_O_SNAPSHOT     0x0008 /* open the file read only and save writes in a snapshot */
#define BDRV_O_TEMPORARY    0x0010 /* delete the file after use */
#define BDRV_O_NOCACHE      0x0020 /* do not use the host page cache */
#define BDRV_O_NATIVE_AIO   0x0080 /* use native AIO instead of the thread pool */
#define BDRV_O_NO_BACKING   0x0100 /* don't open the backing file */
#define BDRV_O_NO_FLUSH     0x0200 /* disable flushing on this disk */
#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */
#define BDRV_O_INACTIVE     0x0800 /* consistency hint for migration handoff */
#define BDRV_O_CHECK        0x1000 /* open solely for consistency check */
#define BDRV_O_ALLOW_RDWR   0x2000 /* allow reopen to change from r/o to r/w */
#define BDRV_O_UNMAP        0x4000 /* execute guest UNMAP/TRIM operations */
#define BDRV_O_PROTOCOL     0x8000 /* if no block driver is explicitly given:
                                      select an appropriate protocol driver,
                                      ignoring the format layer */
#define BDRV_O_NO_IO        0x10000 /* don't initialize for I/O */
#define BDRV_O_AUTO_RDONLY  0x20000 /* degrade to read-only if opening read-write fails */

#define BDRV_O_CACHE_MASK   (BDRV_O_NOCACHE | BDRV_O_NO_FLUSH)
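
/*
 * Illustrative sketch (not part of the API): a typical read-write open that
 * bypasses the host page cache and allows guest discards.  "filename" is a
 * hypothetical placeholder; bdrv_open() is declared further down in this
 * header, and error_report_err() is assumed from qemu/error-report.h.
 *
 *     int flags = BDRV_O_RDWR | BDRV_O_NOCACHE | BDRV_O_UNMAP;
 *     Error *local_err = NULL;
 *     BlockDriverState *bs = bdrv_open(filename, NULL, NULL, flags, &local_err);
 *     if (!bs) {
 *         error_report_err(local_err);
 *     }
 */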


/* Option names of options parsed by the block layer */

#define BDRV_OPT_CACHE_WB       "cache.writeback"
#define BDRV_OPT_CACHE_DIRECT   "cache.direct"
#define BDRV_OPT_CACHE_NO_FLUSH "cache.no-flush"
#define BDRV_OPT_READ_ONLY      "read-only"
#define BDRV_OPT_AUTO_READ_ONLY "auto-read-only"
#define BDRV_OPT_DISCARD        "discard"
#define BDRV_OPT_FORCE_SHARE    "force-share"


#define BDRV_SECTOR_BITS   9
#define BDRV_SECTOR_SIZE   (1ULL << BDRV_SECTOR_BITS)
#define BDRV_SECTOR_MASK   ~(BDRV_SECTOR_SIZE - 1)

#define BDRV_REQUEST_MAX_SECTORS MIN(SIZE_MAX >> BDRV_SECTOR_BITS, \
                                     INT_MAX >> BDRV_SECTOR_BITS)
#define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS)
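
/*
 * Illustrative arithmetic (not part of the API): with BDRV_SECTOR_BITS == 9
 * the sector size is 512 bytes, and a single request may carry at most
 * BDRV_REQUEST_MAX_BYTES bytes.  "bytes" below is a hypothetical length.
 *
 *     int64_t nb_sectors = bytes >> BDRV_SECTOR_BITS;   // whole sectors in the range
 *     int64_t aligned    = bytes & BDRV_SECTOR_MASK;    // round down to a sector boundary
 *     assert(nb_sectors <= BDRV_REQUEST_MAX_SECTORS);   // per-request limit
 */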

/*
 * Allocation status flags for bdrv_block_status() and friends.
 *
 * Public flags:
 * BDRV_BLOCK_DATA: allocation for data at offset is tied to this layer
 * BDRV_BLOCK_ZERO: offset reads as zero
 * BDRV_BLOCK_OFFSET_VALID: an associated offset exists for accessing raw data
 * BDRV_BLOCK_ALLOCATED: the content of the block is determined by this
 *                       layer rather than any backing, set by block layer
 * BDRV_BLOCK_EOF: the returned pnum covers through end of file for this
 *                 layer, set by block layer
 *
 * Internal flag:
 * BDRV_BLOCK_RAW: for use by passthrough drivers, such as raw, to request
 *                 that the block layer recompute the answer from the returned
 *                 BDS; must be accompanied by just BDRV_BLOCK_OFFSET_VALID.
 *
 * If BDRV_BLOCK_OFFSET_VALID is set, the map parameter represents the
 * host offset within the returned BDS that is allocated for the
 * corresponding raw guest data.  However, whether that offset
 * actually contains data also depends on BDRV_BLOCK_DATA, as follows:
 *
 * DATA ZERO OFFSET_VALID
 *  t    t        t       sectors read as zero, returned file is zero at offset
 *  t    f        t       sectors read as valid from file at offset
 *  f    t        t       sectors preallocated, read as zero, returned file not
 *                        necessarily zero at offset
 *  f    f        t       sectors preallocated but read from backing_hd,
 *                        returned file contains garbage at offset
 *  t    t        f       sectors preallocated, read as zero, unknown offset
 *  t    f        f       sectors read from unknown file or offset
 *  f    t        f       not allocated or unknown offset, read as zero
 *  f    f        f       not allocated or unknown offset, read from backing_hd
 */
#define BDRV_BLOCK_DATA         0x01
#define BDRV_BLOCK_ZERO         0x02
#define BDRV_BLOCK_OFFSET_VALID 0x04
#define BDRV_BLOCK_RAW          0x08
#define BDRV_BLOCK_ALLOCATED    0x10
#define BDRV_BLOCK_EOF          0x20
#define BDRV_BLOCK_OFFSET_MASK  BDRV_SECTOR_MASK
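
/*
 * Illustrative sketch (not part of the API): querying the allocation status
 * of a range with bdrv_block_status(), declared later in this header.
 * "bs", "offset" and "bytes" are hypothetical placeholders.
 *
 *     int64_t pnum, map;
 *     BlockDriverState *file;
 *     int ret = bdrv_block_status(bs, offset, bytes, &pnum, &map, &file);
 *     if (ret < 0) {
 *         // query failed
 *     } else if (ret & BDRV_BLOCK_ZERO) {
 *         // the first pnum bytes at offset read as zeroes
 *     } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID)) ==
 *                (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID)) {
 *         // raw data for this range lives in "file" at host offset "map"
 *     }
 */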

typedef QSIMPLEQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue;

typedef struct BDRVReopenState {
    BlockDriverState *bs;
    int flags;
    BlockdevDetectZeroesOptions detect_zeroes;
    bool backing_missing;
    bool replace_backing_bs;  /* new_backing_bs is ignored if this is false */
    BlockDriverState *new_backing_bs; /* If NULL then detach the current bs */
    uint64_t perm, shared_perm;
    QDict *options;
    QDict *explicit_options;
    void *opaque;
} BDRVReopenState;

/*
 * Block operation types
 */
typedef enum BlockOpType {
    BLOCK_OP_TYPE_BACKUP_SOURCE,
    BLOCK_OP_TYPE_BACKUP_TARGET,
    BLOCK_OP_TYPE_CHANGE,
    BLOCK_OP_TYPE_COMMIT_SOURCE,
    BLOCK_OP_TYPE_COMMIT_TARGET,
    BLOCK_OP_TYPE_DATAPLANE,
    BLOCK_OP_TYPE_DRIVE_DEL,
    BLOCK_OP_TYPE_EJECT,
    BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
    BLOCK_OP_TYPE_INTERNAL_SNAPSHOT,
    BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE,
    BLOCK_OP_TYPE_MIRROR_SOURCE,
    BLOCK_OP_TYPE_MIRROR_TARGET,
    BLOCK_OP_TYPE_RESIZE,
    BLOCK_OP_TYPE_STREAM,
    BLOCK_OP_TYPE_REPLACE,
    BLOCK_OP_TYPE_MAX,
} BlockOpType;

/* Block node permission constants */
enum {
    /**
     * A user that has the "permission" of consistent reads is guaranteed that
     * their view of the contents of the block device is complete and
     * self-consistent, representing the contents of a disk at a specific
     * point.
     *
     * For most block devices (including their backing files) this is true, but
     * the property cannot be maintained in a few situations, such as for
     * intermediate nodes of a commit block job.
     */
    BLK_PERM_CONSISTENT_READ    = 0x01,

    /** This permission is required to change the visible disk contents. */
    BLK_PERM_WRITE              = 0x02,

    /**
     * This permission (which is weaker than BLK_PERM_WRITE) is both enough and
     * required for writes to the block node when the caller promises that
     * the visible disk content doesn't change.
     *
     * As the BLK_PERM_WRITE permission is strictly stronger, either is
     * sufficient to perform an unchanging write.
     */
    BLK_PERM_WRITE_UNCHANGED    = 0x04,

    /** This permission is required to change the size of a block node. */
    BLK_PERM_RESIZE             = 0x08,

    /**
     * This permission is required to change the node that this BdrvChild
     * points to.
     */
    BLK_PERM_GRAPH_MOD          = 0x10,

    BLK_PERM_ALL                = 0x1f,

    DEFAULT_PERM_PASSTHROUGH    = BLK_PERM_CONSISTENT_READ
                                  | BLK_PERM_WRITE
                                  | BLK_PERM_WRITE_UNCHANGED
                                  | BLK_PERM_RESIZE,

    DEFAULT_PERM_UNCHANGED      = BLK_PERM_ALL & ~DEFAULT_PERM_PASSTHROUGH,
};

char *bdrv_perm_names(uint64_t perm);
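
/*
 * Illustrative sketch (not part of the API): composing a permission mask and
 * turning it into a human-readable string with bdrv_perm_names().  The
 * returned string is heap-allocated; freeing it with g_free() is an
 * assumption of this sketch.
 *
 *     uint64_t perm = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
 *     char *names = bdrv_perm_names(perm);
 *     // typically used to build error messages about conflicting users
 *     g_free(names);
 */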

/* disk I/O throttling */
void bdrv_init(void);
void bdrv_init_with_whitelist(void);
bool bdrv_uses_whitelist(void);
int bdrv_is_whitelisted(BlockDriver *drv, bool read_only);
BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix,
                                Error **errp);
BlockDriver *bdrv_find_format(const char *format_name);
int bdrv_create(BlockDriver *drv, const char* filename,
                QemuOpts *opts, Error **errp);
int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp);
BlockDriverState *bdrv_new(void);
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
                 Error **errp);
void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to,
                       Error **errp);

int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough);
int bdrv_parse_discard_flags(const char *mode, int *flags);
BdrvChild *bdrv_open_child(const char *filename,
                           QDict *options, const char *bdref_key,
                           BlockDriverState* parent,
                           const BdrvChildRole *child_role,
                           bool allow_none, Error **errp);
BlockDriverState *bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp);
void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
                         Error **errp);
int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
                           const char *bdref_key, Error **errp);
BlockDriverState *bdrv_open(const char *filename, const char *reference,
                            QDict *options, int flags, Error **errp);
BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name,
                                       int flags, Error **errp);
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, QDict *options,
                                    bool keep_old_opts);
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp);
int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
                              Error **errp);
int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
                        BlockReopenQueue *queue, Error **errp);
void bdrv_reopen_commit(BDRVReopenState *reopen_state);
void bdrv_reopen_abort(BDRVReopenState *reopen_state);
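
/*
 * Illustrative sketch (not part of the API): the common reopen pattern of
 * temporarily making a node writable and restoring it afterwards, using
 * bdrv_reopen_set_read_only() declared above.  "bs" is a hypothetical
 * placeholder; error handling is abbreviated.
 *
 *     Error *local_err = NULL;
 *     if (bdrv_reopen_set_read_only(bs, false, &local_err) < 0) {
 *         // node stays read-only; report local_err
 *     } else {
 *         // ... perform writes ...
 *         bdrv_reopen_set_read_only(bs, true, NULL);
 *     }
 */
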
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags);
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes);
int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes);
int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count);
/*
 * Efficiently zero a region of the disk image.  Note that this is a regular
 * I/O request like read or write and should have a reasonable size.  This
 * function is not suitable for zeroing the entire image in a single request
 * because it may allocate memory for the entire region.
 */
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int bytes, BdrvRequestFlags flags);
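
/*
 * Illustrative sketch (not part of the API): zeroing a large region in
 * bounded chunks from a coroutine, as the comment above suggests.  The helper
 * name and the chunk size choice are assumptions of this sketch.
 *
 *     static int coroutine_fn zero_range(BdrvChild *child, int64_t offset,
 *                                        int64_t bytes)
 *     {
 *         while (bytes > 0) {
 *             int num = MIN(bytes, BDRV_REQUEST_MAX_BYTES);
 *             int ret = bdrv_co_pwrite_zeroes(child, offset, num, 0);
 *             if (ret < 0) {
 *                 return ret;
 *             }
 *             offset += num;
 *             bytes -= num;
 *         }
 *         return 0;
 *     }
 */
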
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
                                          const char *backing_file);
void bdrv_refresh_filename(BlockDriverState *bs);

int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset,
                                  PreallocMode prealloc, Error **errp);
int bdrv_truncate(BdrvChild *child, int64_t offset, PreallocMode prealloc,
                  Error **errp);

int64_t bdrv_nb_sectors(BlockDriverState *bs);
int64_t bdrv_getlength(BlockDriverState *bs);
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs);
BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts,
                               BlockDriverState *in_bs, Error **errp);
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp);
int bdrv_commit(BlockDriverState *bs);
int bdrv_change_backing_file(BlockDriverState *bs,
                             const char *backing_file, const char *backing_fmt);
void bdrv_register(BlockDriver *bdrv);
int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
                           const char *backing_file_str);
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs);
BlockDriverState *bdrv_find_base(BlockDriverState *bs);
bool bdrv_is_backing_chain_frozen(BlockDriverState *bs, BlockDriverState *base,
                                  Error **errp);
int bdrv_freeze_backing_chain(BlockDriverState *bs, BlockDriverState *base,
                              Error **errp);
void bdrv_unfreeze_backing_chain(BlockDriverState *bs, BlockDriverState *base);


typedef struct BdrvCheckResult {
    int corruptions;
    int leaks;
    int check_errors;
    int corruptions_fixed;
    int leaks_fixed;
    int64_t image_end_offset;
    BlockFragInfo bfi;
} BdrvCheckResult;

typedef enum {
    BDRV_FIX_LEAKS    = 1,
    BDRV_FIX_ERRORS   = 2,
} BdrvCheckMode;

int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);
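
/*
 * Illustrative sketch (not part of the API): running an image check that also
 * repairs what it safely can, then inspecting the result.  "bs" is a
 * hypothetical placeholder.
 *
 *     BdrvCheckResult result = {0};
 *     int ret = bdrv_check(bs, &result, BDRV_FIX_LEAKS | BDRV_FIX_ERRORS);
 *     if (ret < 0) {
 *         // the check itself could not be performed
 *     } else if (result.corruptions || result.check_errors) {
 *         // image is (still) damaged; result.corruptions_fixed and
 *         // result.leaks_fixed report what was repaired
 *     }
 */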

/* The units of offset and total_work_size may be chosen arbitrarily by the
 * block driver; total_work_size may change during the course of the amendment
 * operation */
typedef void BlockDriverAmendStatusCB(BlockDriverState *bs, int64_t offset,
                                      int64_t total_work_size, void *opaque);
int bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts,
                       BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
                       Error **errp);
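
/*
 * Illustrative sketch (not part of the API): a minimal status callback for
 * bdrv_amend_options().  The progress-printing details are assumptions of
 * this sketch.
 *
 *     static void amend_status_cb(BlockDriverState *bs, int64_t offset,
 *                                 int64_t total_work_size, void *opaque)
 *     {
 *         // offset and total_work_size are in driver-defined units, and the
 *         // total may change while the amendment is running
 *         printf("amend: %" PRId64 "/%" PRId64 "\n", offset, total_work_size);
 *     }
 *
 *     // bdrv_amend_options(bs, opts, amend_status_cb, NULL, &local_err);
 */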

/* external snapshots */
bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
                                      BlockDriverState *candidate);
bool bdrv_is_first_non_filter(BlockDriverState *candidate);

/* check if a named node can be replaced when doing drive-mirror */
BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs,
                                        const char *node_name, Error **errp);

/* async block I/O */
void bdrv_aio_cancel(BlockAIOCB *acb);
void bdrv_aio_cancel_async(BlockAIOCB *acb);

/* sg packet commands */
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);

/* Invalidate any cached metadata used by image formats */
void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp);
void bdrv_invalidate_cache_all(Error **errp);
int bdrv_inactivate_all(void);

/* Ensure contents are flushed to disk. */
int bdrv_flush(BlockDriverState *bs);
int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
int bdrv_flush_all(void);
void bdrv_close_all(void);
void bdrv_drain(BlockDriverState *bs);
void coroutine_fn bdrv_co_drain(BlockDriverState *bs);
void bdrv_drain_all_begin(void);
void bdrv_drain_all_end(void);
void bdrv_drain_all(void);

#define BDRV_POLL_WHILE(bs, cond) ({                       \
    BlockDriverState *bs_ = (bs);                          \
    AIO_WAIT_WHILE(bdrv_get_aio_context(bs_),              \
                   cond); })
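
/*
 * Illustrative sketch (not part of the API): BDRV_POLL_WHILE() runs the event
 * loop of the node's AioContext until the condition becomes false.  A typical
 * use is waiting for a coroutine to finish, with "done" owned by the caller:
 *
 *     bool done = false;
 *     // ... enter a coroutine that sets done = true when it completes ...
 *     BDRV_POLL_WHILE(bs, !done);
 */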

int bdrv_pdiscard(BdrvChild *child, int64_t offset, int bytes);
int bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int bytes);
int bdrv_has_zero_init_1(BlockDriverState *bs);
int bdrv_has_zero_init(BlockDriverState *bs);
bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs);
bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);
int bdrv_block_status(BlockDriverState *bs, int64_t offset,
                      int64_t bytes, int64_t *pnum, int64_t *map,
                      BlockDriverState **file);
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file);
int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum);
int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum);

bool bdrv_is_read_only(BlockDriverState *bs);
int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
                           bool ignore_allow_rdw, Error **errp);
int bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg,
                              Error **errp);
bool bdrv_is_writable(BlockDriverState *bs);
bool bdrv_is_sg(BlockDriverState *bs);
bool bdrv_is_inserted(BlockDriverState *bs);
void bdrv_lock_medium(BlockDriverState *bs, bool locked);
void bdrv_eject(BlockDriverState *bs, bool eject_flag);
const char *bdrv_get_format_name(BlockDriverState *bs);
BlockDriverState *bdrv_find_node(const char *node_name);
BlockDeviceInfoList *bdrv_named_nodes_list(Error **errp);
XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp);
BlockDriverState *bdrv_lookup_bs(const char *device,
                                 const char *node_name,
                                 Error **errp);
bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base);
BlockDriverState *bdrv_next_node(BlockDriverState *bs);
BlockDriverState *bdrv_next_all_states(BlockDriverState *bs);

typedef struct BdrvNextIterator {
    enum {
        BDRV_NEXT_BACKEND_ROOTS,
        BDRV_NEXT_MONITOR_OWNED,
    } phase;
    BlockBackend *blk;
    BlockDriverState *bs;
} BdrvNextIterator;

BlockDriverState *bdrv_first(BdrvNextIterator *it);
BlockDriverState *bdrv_next(BdrvNextIterator *it);
void bdrv_next_cleanup(BdrvNextIterator *it);
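
/*
 * Illustrative sketch (not part of the API): iterating over all top-level
 * BlockDriverStates with the iterator above.  If the loop is left early,
 * bdrv_next_cleanup() drops the references the iterator still holds.
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         if (some_condition(bs)) {      // hypothetical predicate
 *             bdrv_next_cleanup(&it);
 *             break;
 *         }
 *     }
 */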

BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs);
bool bdrv_is_encrypted(BlockDriverState *bs);
void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque, bool read_only);
const char *bdrv_get_node_name(const BlockDriverState *bs);
const char *bdrv_get_device_name(const BlockDriverState *bs);
const char *bdrv_get_device_or_node_name(const BlockDriverState *bs);
int bdrv_get_flags(BlockDriverState *bs);
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs,
                                          Error **errp);
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes);

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size);
char *bdrv_get_full_backing_filename(BlockDriverState *bs, Error **errp);
char *bdrv_get_full_backing_filename_from_filename(const char *backed,
                                                   const char *backing,
                                                   Error **errp);
char *bdrv_dirname(BlockDriverState *bs, Error **errp);

int path_has_protocol(const char *path);
int path_is_absolute(const char *path);
char *path_combine(const char *base_path, const char *filename);

int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size);

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size);

void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     bool quiet, Error **errp);

/* Returns the alignment in bytes that is needed so that no bounce buffer
 * is required anywhere in the stack */
size_t bdrv_min_mem_align(BlockDriverState *bs);
/* Returns optimal alignment in bytes for bounce buffer */
size_t bdrv_opt_mem_align(BlockDriverState *bs);
void *qemu_blockalign(BlockDriverState *bs, size_t size);
void *qemu_blockalign0(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign0(BlockDriverState *bs, size_t size);
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov);
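
/*
 * Illustrative sketch (not part of the API): allocating a bounce buffer that
 * satisfies the node's memory alignment requirements.  qemu_vfree() as the
 * matching free function is an assumption of this sketch; it is not declared
 * in this header.
 *
 *     void *buf = qemu_try_blockalign0(bs, len);   // zeroed, aligned, may fail
 *     if (!buf) {
 *         // -ENOMEM path
 *     }
 *     // ... bdrv_pread(child, offset, buf, len); ...
 *     qemu_vfree(buf);
 */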

void bdrv_enable_copy_on_read(BlockDriverState *bs);
void bdrv_disable_copy_on_read(BlockDriverState *bs);

void bdrv_ref(BlockDriverState *bs);
void bdrv_unref(BlockDriverState *bs);
void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child);
BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
                             BlockDriverState *child_bs,
                             const char *child_name,
                             const BdrvChildRole *child_role,
                             Error **errp);

bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp);
void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason);
void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason);
void bdrv_op_block_all(BlockDriverState *bs, Error *reason);
void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason);
bool bdrv_op_blocker_is_empty(BlockDriverState *bs);

#define BLKDBG_EVENT(child, evt) \
    do { \
        if (child) { \
            bdrv_debug_event(child->bs, evt); \
        } \
    } while (0)

void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event);

int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag);
int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag);
int bdrv_debug_resume(BlockDriverState *bs, const char *tag);
bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag);

/**
 * bdrv_get_aio_context:
 *
 * Returns: the currently bound #AioContext
 */
AioContext *bdrv_get_aio_context(BlockDriverState *bs);

/**
 * Transfer control to @co in the aio context of @bs
 */
void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co);

/**
 * bdrv_set_aio_context:
 *
 * Changes the #AioContext used for fd handlers, timers, and BHs by this
 * BlockDriverState and all its children.
 *
 * This function must be called with iothread lock held.
 */
void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context);
int bdrv_try_set_aio_context(BlockDriverState *bs, AioContext *ctx,
                             Error **errp);
int bdrv_child_try_set_aio_context(BlockDriverState *bs, AioContext *ctx,
                                   BdrvChild *ignore_child, Error **errp);
bool bdrv_child_can_set_aio_context(BdrvChild *c, AioContext *ctx,
                                    GSList **ignore, Error **errp);
bool bdrv_can_set_aio_context(BlockDriverState *bs, AioContext *ctx,
                              GSList **ignore, Error **errp);
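
/*
 * Illustrative sketch (not part of the API): moving a node to another
 * AioContext with the checked variant added here.  "ctx" is a hypothetical
 * iothread context and error_report_err() is assumed from
 * qemu/error-report.h; the caller holds the iothread lock.
 *
 *     Error *local_err = NULL;
 *     if (bdrv_try_set_aio_context(bs, ctx, &local_err) < 0) {
 *         // some parent or child refused the switch; bs stays where it was
 *         error_report_err(local_err);
 *     }
 */
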
int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz);
int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo);

void bdrv_io_plug(BlockDriverState *bs);
void bdrv_io_unplug(BlockDriverState *bs);

/**
 * bdrv_parent_drained_begin:
 *
 * Begin a quiesced section of all users of @bs. This is part of
 * bdrv_drained_begin.
 */
void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                               bool ignore_bds_parents);

/**
 * bdrv_parent_drained_begin_single:
 *
 * Begin a quiesced section for the parent of @c. If @poll is true, wait for
 * any pending activity to cease.
 */
void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll);

/**
 * bdrv_parent_drained_end:
 *
 * End a quiesced section of all users of @bs. This is part of
 * bdrv_drained_end.
 */
void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                             bool ignore_bds_parents);

/**
 * bdrv_drain_poll:
 *
 * Poll for pending requests in @bs, its parents (except for @ignore_parent),
 * and if @recursive is true its children as well (used for subtree drain).
 *
 * If @ignore_bds_parents is true, parents that are BlockDriverStates must
 * ignore the drain request because they will be drained separately (used for
 * drain_all).
 *
 * This is part of bdrv_drained_begin.
 */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents);

/**
 * bdrv_drained_begin:
 *
 * Begin a quiesced section for exclusive access to the BDS, by disabling
 * external request sources including NBD server and device model. Note that
 * this doesn't block timers or coroutines from submitting more requests, which
 * means block_job_pause is still necessary.
 *
 * This function can be recursive.
 */
void bdrv_drained_begin(BlockDriverState *bs);

/**
 * bdrv_do_drained_begin_quiesce:
 *
 * Quiesces a BDS like bdrv_drained_begin(), but does not wait for already
 * running requests to complete.
 */
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents);

/**
 * Like bdrv_drained_begin, but recursively begins a quiesced section for
 * exclusive access to all child nodes as well.
 */
void bdrv_subtree_drained_begin(BlockDriverState *bs);

/**
 * bdrv_drained_end:
 *
 * End a quiescent section started by bdrv_drained_begin().
 */
void bdrv_drained_end(BlockDriverState *bs);
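
/*
 * Illustrative sketch (not part of the API): the usual drained-section
 * pattern.  Every bdrv_drained_begin() must be paired with a
 * bdrv_drained_end(); sections may nest.
 *
 *     bdrv_drained_begin(bs);
 *     // no new external requests reach bs here, and previously submitted
 *     // requests have completed
 *     // ... modify the graph, take a snapshot, etc. ...
 *     bdrv_drained_end(bs);
 */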

/**
 * End a quiescent section started by bdrv_subtree_drained_begin().
 */
void bdrv_subtree_drained_end(BlockDriverState *bs);

void bdrv_add_child(BlockDriverState *parent, BlockDriverState *child,
                    Error **errp);
void bdrv_del_child(BlockDriverState *parent, BdrvChild *child, Error **errp);

bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
                                     uint32_t granularity, Error **errp);
/**
 *
 * bdrv_register_buf/bdrv_unregister_buf:
 *
 * Register/unregister a buffer for I/O. For example, VFIO drivers want to
 * know the memory areas that will later be used for I/O, so that they can
 * prepare IOMMU mappings etc. in advance for better performance.
 */
void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size);
void bdrv_unregister_buf(BlockDriverState *bs, void *host);

/**
 *
 * bdrv_co_copy_range:
 *
 * Do an offloaded copy between two children. If the operation is not
 * implemented by the driver, or if the backend storage doesn't support it, a
 * negative error code will be returned.
 *
 * Note: the block layer doesn't emulate or fall back to a bounce buffer
 * approach, because the caller usually shouldn't attempt further offloaded
 * copies (e.g. by calling copy_file_range(2)) after the first error; instead
 * it should fall back to a read+write path at the caller level.
 *
 * @src: Source child to copy data from
 * @src_offset: offset in @src image to read data
 * @dst: Destination child to copy data to
 * @dst_offset: offset in @dst image to write data
 * @bytes: number of bytes to copy
 * @flags: request flags. Supported flags:
 *         BDRV_REQ_ZERO_WRITE - treat the @src range as zero data and do zero
 *                               write on @dst as if bdrv_co_pwrite_zeroes is
 *                               called. Used to simplify caller code, or
 *                               during BlockDriver.bdrv_co_copy_range_from()
 *                               recursion.
 *         BDRV_REQ_NO_SERIALISING - do not serialize with other overlapping
 *                                   requests currently in flight.
 *
 * Returns: 0 if succeeded; negative error code if failed.
 **/
int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
                                    BdrvChild *dst, uint64_t dst_offset,
                                    uint64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags);
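
/*
 * Illustrative sketch (not part of the API): attempting an offloaded copy and
 * falling back to an explicit read+write in the caller, as recommended above.
 * The bounce-buffer path, the helper name and the use of src->bs are
 * assumptions of this sketch; bdrv_co_copy_range() itself never falls back.
 *
 *     static int coroutine_fn copy_chunk(BdrvChild *src, BdrvChild *dst,
 *                                        int64_t offset, int bytes)
 *     {
 *         int ret = bdrv_co_copy_range(src, offset, dst, offset, bytes, 0, 0);
 *         if (ret < 0) {
 *             // offload failed or unsupported: bounce through a buffer instead
 *             void *buf = qemu_blockalign(src->bs, bytes);
 *             ret = bdrv_pread(src, offset, buf, bytes);
 *             if (ret >= 0) {
 *                 ret = bdrv_pwrite(dst, offset, buf, bytes);
 *             }
 *             qemu_vfree(buf);   // assumed allocator pair for qemu_blockalign()
 *         }
 *         return ret < 0 ? ret : 0;
 *     }
 */
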
#endif