block: Switch discard length bounds to byte-based
[mirror_qemu.git] include/block/block_int.h
1 /*
2 * QEMU System Emulator block driver
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24 #ifndef BLOCK_INT_H
25 #define BLOCK_INT_H
26
27 #include "block/accounting.h"
28 #include "block/block.h"
29 #include "qemu/option.h"
30 #include "qemu/queue.h"
31 #include "qemu/coroutine.h"
32 #include "qemu/timer.h"
33 #include "qapi-types.h"
34 #include "qemu/hbitmap.h"
35 #include "block/snapshot.h"
36 #include "qemu/main-loop.h"
37 #include "qemu/throttle.h"
38
39 #define BLOCK_FLAG_ENCRYPT 1
40 #define BLOCK_FLAG_LAZY_REFCOUNTS 8
41
42 #define BLOCK_OPT_SIZE "size"
43 #define BLOCK_OPT_ENCRYPT "encryption"
44 #define BLOCK_OPT_COMPAT6 "compat6"
45 #define BLOCK_OPT_HWVERSION "hwversion"
46 #define BLOCK_OPT_BACKING_FILE "backing_file"
47 #define BLOCK_OPT_BACKING_FMT "backing_fmt"
48 #define BLOCK_OPT_CLUSTER_SIZE "cluster_size"
49 #define BLOCK_OPT_TABLE_SIZE "table_size"
50 #define BLOCK_OPT_PREALLOC "preallocation"
51 #define BLOCK_OPT_SUBFMT "subformat"
52 #define BLOCK_OPT_COMPAT_LEVEL "compat"
53 #define BLOCK_OPT_LAZY_REFCOUNTS "lazy_refcounts"
54 #define BLOCK_OPT_ADAPTER_TYPE "adapter_type"
55 #define BLOCK_OPT_REDUNDANCY "redundancy"
56 #define BLOCK_OPT_NOCOW "nocow"
57 #define BLOCK_OPT_OBJECT_SIZE "object_size"
58 #define BLOCK_OPT_REFCOUNT_BITS "refcount_bits"
59
60 #define BLOCK_PROBE_BUF_SIZE 512
61
62 enum BdrvTrackedRequestType {
63 BDRV_TRACKED_READ,
64 BDRV_TRACKED_WRITE,
65 BDRV_TRACKED_FLUSH,
66 BDRV_TRACKED_IOCTL,
67 BDRV_TRACKED_DISCARD,
68 };
69
70 typedef struct BdrvTrackedRequest {
71 BlockDriverState *bs;
72 int64_t offset;
73 unsigned int bytes;
74 enum BdrvTrackedRequestType type;
75
76 bool serialising;
77 int64_t overlap_offset;
78 unsigned int overlap_bytes;
79
80 QLIST_ENTRY(BdrvTrackedRequest) list;
81 Coroutine *co; /* owner, used for deadlock detection */
82 CoQueue wait_queue; /* coroutines blocked on this request */
83
84 struct BdrvTrackedRequest *waiting_for;
85 } BdrvTrackedRequest;
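/*
 * Illustrative sketch (not part of this header): two serialising requests
 * conflict when their padded byte ranges intersect. The helper below is an
 * assumption about the shape of that test, written only against the
 * overlap_offset/overlap_bytes fields above; the real logic lives in
 * block/io.c.
 *
 *   static bool tracked_request_ranges_intersect(const BdrvTrackedRequest *a,
 *                                                const BdrvTrackedRequest *b)
 *   {
 *       return a->overlap_offset < b->overlap_offset + b->overlap_bytes &&
 *              b->overlap_offset < a->overlap_offset + a->overlap_bytes;
 *   }
 */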
86
87 struct BlockDriver {
88 const char *format_name;
89 int instance_size;
90
91 /* set to true if the BlockDriver is a block filter */
92 bool is_filter;
93 /* For snapshots, block filters like Quorum can implement the
94 * following recursive callback.
95 * Its purpose is to recurse on the filter children while calling
96 * bdrv_recurse_is_first_non_filter on them.
97 * For a sample implementation, see the Quorum block filter.
98 */
99 bool (*bdrv_recurse_is_first_non_filter)(BlockDriverState *bs,
100 BlockDriverState *candidate);
101
102 int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
103 int (*bdrv_probe_device)(const char *filename);
104
105 /* Any driver implementing this callback is expected to be able to handle
106 * NULL file names in its .bdrv_open() implementation */
107 void (*bdrv_parse_filename)(const char *filename, QDict *options, Error **errp);
108 /* Drivers that implement neither bdrv_parse_filename nor bdrv_open should
109 * have this field set to true, except for those that are defined only by
110 * their child's bs.
111 * An example of the latter type is the quorum block driver.
112 */
113 bool bdrv_needs_filename;
114
115 /* Set if a driver can support backing files */
116 bool supports_backing;
117
118 /* For handling image reopen for split or non-split files */
119 int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state,
120 BlockReopenQueue *queue, Error **errp);
121 void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state);
122 void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state);
123 void (*bdrv_join_options)(QDict *options, QDict *old_options);
124
125 int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags,
126 Error **errp);
127 int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags,
128 Error **errp);
129 void (*bdrv_close)(BlockDriverState *bs);
130 int (*bdrv_create)(const char *filename, QemuOpts *opts, Error **errp);
131 int (*bdrv_set_key)(BlockDriverState *bs, const char *key);
132 int (*bdrv_make_empty)(BlockDriverState *bs);
133
134 void (*bdrv_refresh_filename)(BlockDriverState *bs, QDict *options);
135
136 /* aio */
137 BlockAIOCB *(*bdrv_aio_readv)(BlockDriverState *bs,
138 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
139 BlockCompletionFunc *cb, void *opaque);
140 BlockAIOCB *(*bdrv_aio_writev)(BlockDriverState *bs,
141 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
142 BlockCompletionFunc *cb, void *opaque);
143 BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
144 BlockCompletionFunc *cb, void *opaque);
145 BlockAIOCB *(*bdrv_aio_discard)(BlockDriverState *bs,
146 int64_t sector_num, int nb_sectors,
147 BlockCompletionFunc *cb, void *opaque);
148
149 int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
150 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
151 int coroutine_fn (*bdrv_co_preadv)(BlockDriverState *bs,
152 uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
153 int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
154 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
155 int coroutine_fn (*bdrv_co_writev_flags)(BlockDriverState *bs,
156 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int flags);
157 int coroutine_fn (*bdrv_co_pwritev)(BlockDriverState *bs,
158 uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
159
160 /*
161 * Efficiently zero a region of the disk image. Typically an image format
162 * would use a compact metadata representation to implement this. This
163 * function pointer may be NULL or return -ENOTSUP, in which case
164 * .bdrv_co_writev() will be called instead.
165 */
166 int coroutine_fn (*bdrv_co_pwrite_zeroes)(BlockDriverState *bs,
167 int64_t offset, int count, BdrvRequestFlags flags);
168 int coroutine_fn (*bdrv_co_discard)(BlockDriverState *bs,
169 int64_t sector_num, int nb_sectors);
170 int64_t coroutine_fn (*bdrv_co_get_block_status)(BlockDriverState *bs,
171 int64_t sector_num, int nb_sectors, int *pnum,
172 BlockDriverState **file);
173
174 /*
175 * Invalidate any cached meta-data.
176 */
177 void (*bdrv_invalidate_cache)(BlockDriverState *bs, Error **errp);
178 int (*bdrv_inactivate)(BlockDriverState *bs);
179
180 /*
181 * Flushes all data for all layers by calling bdrv_co_flush for underlying
182 * layers, if needed. This function is needed for deterministic
183 * synchronization of the flush finishing callback.
184 */
185 int coroutine_fn (*bdrv_co_flush)(BlockDriverState *bs);
186
187 /*
188 * Flushes all data that was already written to the OS all the way down to
189 * the disk (for example raw-posix calls fsync()).
190 */
191 int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs);
192
193 /*
194 * Flushes all internal caches to the OS. The data may still sit in a
195 * writeback cache of the host OS, but it will survive a crash of the qemu
196 * process.
197 */
198 int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs);
199
200 const char *protocol_name;
201 int (*bdrv_truncate)(BlockDriverState *bs, int64_t offset);
202
203 int64_t (*bdrv_getlength)(BlockDriverState *bs);
204 bool has_variable_length;
205 int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs);
206
207 int (*bdrv_write_compressed)(BlockDriverState *bs, int64_t sector_num,
208 const uint8_t *buf, int nb_sectors);
209
210 int (*bdrv_snapshot_create)(BlockDriverState *bs,
211 QEMUSnapshotInfo *sn_info);
212 int (*bdrv_snapshot_goto)(BlockDriverState *bs,
213 const char *snapshot_id);
214 int (*bdrv_snapshot_delete)(BlockDriverState *bs,
215 const char *snapshot_id,
216 const char *name,
217 Error **errp);
218 int (*bdrv_snapshot_list)(BlockDriverState *bs,
219 QEMUSnapshotInfo **psn_info);
220 int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs,
221 const char *snapshot_id,
222 const char *name,
223 Error **errp);
224 int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
225 ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs);
226
227 int coroutine_fn (*bdrv_save_vmstate)(BlockDriverState *bs,
228 QEMUIOVector *qiov,
229 int64_t pos);
230 int coroutine_fn (*bdrv_load_vmstate)(BlockDriverState *bs,
231 QEMUIOVector *qiov,
232 int64_t pos);
233
234 int (*bdrv_change_backing_file)(BlockDriverState *bs,
235 const char *backing_file, const char *backing_fmt);
236
237 /* removable device specific */
238 bool (*bdrv_is_inserted)(BlockDriverState *bs);
239 int (*bdrv_media_changed)(BlockDriverState *bs);
240 void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag);
241 void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked);
242
243 /* to control generic scsi devices */
244 BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs,
245 unsigned long int req, void *buf,
246 BlockCompletionFunc *cb, void *opaque);
247
248 /* List of options for creating images, terminated by name == NULL */
249 QemuOptsList *create_opts;
250
251 /*
252 * Returns 0 for completed check, -errno for internal errors.
253 * The check results are stored in result.
254 */
255 int (*bdrv_check)(BlockDriverState* bs, BdrvCheckResult *result,
256 BdrvCheckMode fix);
257
258 int (*bdrv_amend_options)(BlockDriverState *bs, QemuOpts *opts,
259 BlockDriverAmendStatusCB *status_cb,
260 void *cb_opaque);
261
262 void (*bdrv_debug_event)(BlockDriverState *bs, BlkdebugEvent event);
263
264 /* TODO Better pass an option string/QDict/QemuOpts to add any rule? */
265 int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
266 const char *tag);
267 int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs,
268 const char *tag);
269 int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
270 bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);
271
272 void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp);
273
274 /*
275 * Returns 1 if newly created images are guaranteed to contain only
276 * zeros, 0 otherwise.
277 */
278 int (*bdrv_has_zero_init)(BlockDriverState *bs);
279
280 /* Remove fd handlers, timers, and other event loop callbacks so the event
281 * loop is no longer in use. Called with no in-flight requests and in
282 * depth-first traversal order with parents before child nodes.
283 */
284 void (*bdrv_detach_aio_context)(BlockDriverState *bs);
285
286 /* Add fd handlers, timers, and other event loop callbacks so I/O requests
287 * can be processed again. Called with no in-flight requests and in
288 * depth-first traversal order with child nodes before parent nodes.
289 */
290 void (*bdrv_attach_aio_context)(BlockDriverState *bs,
291 AioContext *new_context);
292
293 /* io queue for linux-aio */
294 void (*bdrv_io_plug)(BlockDriverState *bs);
295 void (*bdrv_io_unplug)(BlockDriverState *bs);
296
297 /**
298 * Try to get @bs's logical and physical block size.
299 * On success, store them in @bsz and return zero.
300 * On failure, return negative errno.
301 */
302 int (*bdrv_probe_blocksizes)(BlockDriverState *bs, BlockSizes *bsz);
303 /**
304 * Try to get @bs's geometry (cyls, heads, sectors)
305 * On success, store them in @geo and return 0.
306 * On failure return -errno.
307 * Only drivers that want to override guest geometry implement this
308 * callback; see hd_geometry_guess().
309 */
310 int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo);
311
312 /**
313 * Drain and stop any internal sources of requests in the driver, and
314 * remain so until the next I/O callback (e.g. bdrv_co_writev) is called.
315 */
316 void (*bdrv_drain)(BlockDriverState *bs);
317
318 void (*bdrv_add_child)(BlockDriverState *parent, BlockDriverState *child,
319 Error **errp);
320 void (*bdrv_del_child)(BlockDriverState *parent, BdrvChild *child,
321 Error **errp);
322
323 QLIST_ENTRY(BlockDriver) list;
324 };
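/*
 * Illustrative sketch (not part of this header): how a hypothetical driver
 * might populate the callbacks declared above. The "zerosketch" names and the
 * fixed 1 GiB size are invented for the example; the BlockDriver fields and
 * qemu_iovec_memset() are the real interfaces. Every read simply returns
 * zeroes.
 *
 *   static int coroutine_fn zerosketch_co_preadv(BlockDriverState *bs,
 *       uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
 *   {
 *       qemu_iovec_memset(qiov, 0, 0, bytes);
 *       return 0;
 *   }
 *
 *   static int64_t zerosketch_getlength(BlockDriverState *bs)
 *   {
 *       return (int64_t)1 << 30;
 *   }
 *
 *   static BlockDriver bdrv_zerosketch = {
 *       .format_name    = "zerosketch",
 *       .instance_size  = 0,
 *       .bdrv_co_preadv = zerosketch_co_preadv,
 *       .bdrv_getlength = zerosketch_getlength,
 *   };
 */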
325
326 typedef struct BlockLimits {
327 /* maximum number of bytes that can be discarded at once (since it
328 * is signed, it must be < 2G, if set); should be a multiple of
329 * pdiscard_alignment, but need not be a power of 2. May be 0 if there
330 * is no inherent 32-bit limit */
331 int32_t max_pdiscard;
332
333 /* optimal alignment for discard requests in bytes; must be a power
334 * of 2, less than max_pdiscard if that is set, and a multiple of
335 * bs->request_alignment. May be 0 if bs->request_alignment is
336 * good enough */
337 uint32_t pdiscard_alignment;
338
339 /* maximum number of bytes that can be zeroized at once (since it is
340 * signed, it must be < 2G, if set); should be a multiple of
341 * pwrite_zeroes_alignment. May be 0 if there is no inherent 32-bit limit */
342 int32_t max_pwrite_zeroes;
343
344 /* optimal alignment for write zeroes requests in bytes; must be a
345 * power of 2, less than max_pwrite_zeroes if that is set, and a
346 * multiple of bs->request_alignment. May be 0 if
347 * bs->request_alignment is good enough */
348 uint32_t pwrite_zeroes_alignment;
349
350 /* optimal transfer length in bytes (must be a power of 2 and a
351 * multiple of bs->request_alignment), or 0 if no preferred size */
352 uint32_t opt_transfer;
353
354 /* maximal transfer length in bytes (need not be a power of 2, but
355 * should be a multiple of opt_transfer), or 0 for no 32-bit limit.
356 * For now, anything larger than INT_MAX is clamped down. */
357 uint32_t max_transfer;
358
359 /* memory alignment so that no bounce buffer is needed */
360 size_t min_mem_alignment;
361
362 /* memory alignment for bounce buffer */
363 size_t opt_mem_alignment;
364
365 /* maximum number of iovec elements */
366 int max_iov;
367 } BlockLimits;
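/*
 * Illustrative sketch (not part of this header): a hypothetical driver's
 * .bdrv_refresh_limits() callback filling in the byte-based bounds above
 * through bs->bl (declared later in this header). The driver name and the
 * concrete numbers are invented; the constraints (max_pdiscard a multiple of
 * pdiscard_alignment, alignments a power of 2, maxima below 2G) follow the
 * field comments.
 *
 *   static void sketchdrv_refresh_limits(BlockDriverState *bs, Error **errp)
 *   {
 *       bs->bl.pdiscard_alignment      = 4096;
 *       bs->bl.max_pdiscard            = 1 << 30;
 *       bs->bl.pwrite_zeroes_alignment = 4096;
 *       bs->bl.max_pwrite_zeroes       = 1 << 30;
 *       bs->bl.opt_transfer            = 64 * 1024;
 *       bs->bl.max_transfer            = 16 * 1024 * 1024;
 *       bs->bl.max_iov                 = IOV_MAX;
 *   }
 */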
368
369 typedef struct BdrvOpBlocker BdrvOpBlocker;
370
371 typedef struct BdrvAioNotifier {
372 void (*attached_aio_context)(AioContext *new_context, void *opaque);
373 void (*detach_aio_context)(void *opaque);
374
375 void *opaque;
376 bool deleted;
377
378 QLIST_ENTRY(BdrvAioNotifier) list;
379 } BdrvAioNotifier;
380
381 struct BdrvChildRole {
382 void (*inherit_options)(int *child_flags, QDict *child_options,
383 int parent_flags, QDict *parent_options);
384
385 void (*change_media)(BdrvChild *child, bool load);
386 void (*resize)(BdrvChild *child);
387
388 /* Returns a name that is supposedly more useful for human users than the
389 * node name for identifying the node in question (in particular, a BB
390 * name), or NULL if the parent can't provide a better name. */
391 const char* (*get_name)(BdrvChild *child);
392
393 /*
394 * If this pair of functions is implemented, the parent doesn't issue new
395 * requests after returning from .drained_begin() until .drained_end() is
396 * called.
397 *
398 * Note that this can be nested. If drained_begin() was called twice, new
399 * I/O is allowed only after drained_end() was called twice, too.
400 */
401 void (*drained_begin)(BdrvChild *child);
402 void (*drained_end)(BdrvChild *child);
403 };
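/*
 * Illustrative sketch (not part of this header): a parent that honours the
 * nesting rule above by counting how often it has been drained; it only
 * issues new requests while drain_count is zero. The "ExampleParent" and
 * "child_example" names are invented; child->opaque is assumed to have been
 * registered by the parent (e.g. via bdrv_root_attach_child(), declared
 * further down in this header).
 *
 *   typedef struct ExampleParent {
 *       int drain_count;
 *   } ExampleParent;
 *
 *   static void example_drained_begin(BdrvChild *child)
 *   {
 *       ExampleParent *p = child->opaque;
 *       p->drain_count++;
 *   }
 *
 *   static void example_drained_end(BdrvChild *child)
 *   {
 *       ExampleParent *p = child->opaque;
 *       assert(p->drain_count > 0);
 *       p->drain_count--;
 *   }
 *
 *   static const BdrvChildRole child_example = {
 *       .drained_begin = example_drained_begin,
 *       .drained_end   = example_drained_end,
 *   };
 */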
404
405 extern const BdrvChildRole child_file;
406 extern const BdrvChildRole child_format;
407
408 struct BdrvChild {
409 BlockDriverState *bs;
410 char *name;
411 const BdrvChildRole *role;
412 void *opaque;
413 QLIST_ENTRY(BdrvChild) next;
414 QLIST_ENTRY(BdrvChild) next_parent;
415 };
416
417 /*
418 * Note: the function bdrv_append() copies and swaps contents of
419 * BlockDriverStates, so if you add new fields to this struct, please
420 * inspect bdrv_append() to determine if the new fields need to be
421 * copied as well.
422 */
423 struct BlockDriverState {
424 int64_t total_sectors; /* if we are reading a disk image, give its
425 size in sectors */
426 int read_only; /* if true, the media is read only */
427 int open_flags; /* flags used to open the file, re-used for re-open */
428 int encrypted; /* if true, the media is encrypted */
429 int valid_key; /* if true, a valid encryption key has been set */
430 int sg; /* if true, the device is a /dev/sg* */
431 int copy_on_read; /* if true, copy read backing sectors into image.
432 Note that this is a reference count */
433 bool probed;
434
435 BlockDriver *drv; /* NULL means no media */
436 void *opaque;
437
438 AioContext *aio_context; /* event loop used for fd handlers, timers, etc */
439 /* long-running tasks intended to always use the same AioContext as this
440 * BDS may register themselves in this list to be notified of changes
441 * regarding this BDS's context */
442 QLIST_HEAD(, BdrvAioNotifier) aio_notifiers;
443 bool walking_aio_notifiers; /* to make removal during iteration safe */
444
445 char filename[PATH_MAX];
446 char backing_file[PATH_MAX]; /* if non-empty, the image is a diff
447 against this file */
448 char backing_format[16]; /* if non-zero and backing_file exists */
449
450 QDict *full_open_options;
451 char exact_filename[PATH_MAX];
452
453 BdrvChild *backing;
454 BdrvChild *file;
455
456 /* Callback before write request is processed */
457 NotifierWithReturnList before_write_notifiers;
458
459 /* number of in-flight serialising requests */
460 unsigned int serialising_in_flight;
461
462 /* Offset after the highest byte written to */
463 uint64_t wr_highest_offset;
464
465 /* I/O Limits */
466 BlockLimits bl;
467
468 /* Alignment requirement for offset/length of I/O requests */
469 unsigned int request_alignment;
470 /* Flags honored during pwrite (so far: BDRV_REQ_FUA) */
471 unsigned int supported_write_flags;
472 /* Flags honored during pwrite_zeroes (so far: BDRV_REQ_FUA,
473 * BDRV_REQ_MAY_UNMAP) */
474 unsigned int supported_zero_flags;
475
476 /* the following member gives a name to every node on the bs graph. */
477 char node_name[32];
478 /* element of the list of named nodes building the graph */
479 QTAILQ_ENTRY(BlockDriverState) node_list;
480 /* element of the list of all BlockDriverStates (all_bdrv_states) */
481 QTAILQ_ENTRY(BlockDriverState) bs_list;
482 /* element of the list of monitor-owned BDS */
483 QTAILQ_ENTRY(BlockDriverState) monitor_list;
484 QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;
485 int refcnt;
486
487 QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
488
489 /* operation blockers */
490 QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];
491
492 /* long-running background operation */
493 BlockJob *job;
494
495 /* The node that this node inherited default options from (and a reopen on
496 * which can affect this node by changing these defaults). This is always a
497 * parent node of this node. */
498 BlockDriverState *inherits_from;
499 QLIST_HEAD(, BdrvChild) children;
500 QLIST_HEAD(, BdrvChild) parents;
501
502 QDict *options;
503 QDict *explicit_options;
504 BlockdevDetectZeroesOptions detect_zeroes;
505
506 /* The error object in use for blocking operations on backing_hd */
507 Error *backing_blocker;
508
509 /* threshold limit for writes, in bytes. "High water mark". */
510 uint64_t write_threshold_offset;
511 NotifierWithReturn write_threshold_notifier;
512
513 /* counters for nested bdrv_io_plug and bdrv_io_unplugged_begin */
514 unsigned io_plugged;
515 unsigned io_plug_disabled;
516
517 int quiesce_counter;
518 };
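/*
 * Illustrative sketch (not part of this header): walking the graph links
 * declared above. QLIST_FOREACH comes from qemu/queue.h; the visitor callback
 * and function name are invented.
 *
 *   static void example_visit_children(BlockDriverState *bs,
 *                                      void (*visit)(BdrvChild *child))
 *   {
 *       BdrvChild *child;
 *
 *       QLIST_FOREACH(child, &bs->children, next) {
 *           visit(child);
 *       }
 *   }
 */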
519
520 struct BlockBackendRootState {
521 int open_flags;
522 bool read_only;
523 BlockdevDetectZeroesOptions detect_zeroes;
524 };
525
526 typedef enum BlockMirrorBackingMode {
527 /* Reuse the existing backing chain from the source for the target.
528 * - sync=full: Set backing BDS to NULL.
529 * - sync=top: Use source's backing BDS.
530 * - sync=none: Use source as the backing BDS. */
531 MIRROR_SOURCE_BACKING_CHAIN,
532
533 /* Open the target's backing chain completely anew */
534 MIRROR_OPEN_BACKING_CHAIN,
535
536 /* Do not change the target's backing BDS after job completion */
537 MIRROR_LEAVE_BACKING_CHAIN,
538 } BlockMirrorBackingMode;
539
540 static inline BlockDriverState *backing_bs(BlockDriverState *bs)
541 {
542 return bs->backing ? bs->backing->bs : NULL;
543 }
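/*
 * Illustrative sketch (not part of this header): backing_bs() makes walking
 * the backing chain a one-line loop, e.g. to count its depth.
 *
 *   static int example_backing_chain_depth(BlockDriverState *bs)
 *   {
 *       int depth = 0;
 *       BlockDriverState *p;
 *
 *       for (p = bs; p != NULL; p = backing_bs(p)) {
 *           depth++;
 *       }
 *       return depth;
 *   }
 */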
544
545
546 /* Essential block drivers which must always be statically linked into qemu, and
547 * which therefore can be accessed without using bdrv_find_format() */
548 extern BlockDriver bdrv_file;
549 extern BlockDriver bdrv_raw;
550 extern BlockDriver bdrv_qcow2;
551
552 /**
553 * bdrv_setup_io_funcs:
554 *
555 * Prepare a #BlockDriver for I/O request processing by populating
556 * unimplemented coroutine and AIO interfaces with generic wrapper functions
557 * that fall back to implemented interfaces.
558 */
559 void bdrv_setup_io_funcs(BlockDriver *bdrv);
560
561 int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
562 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
563 BdrvRequestFlags flags);
564 int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs,
565 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
566 BdrvRequestFlags flags);
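/*
 * Illustrative sketch (not part of this header): a filter driver's own
 * .bdrv_co_preadv callback can forward the byte-granularity request to its
 * child node with the bdrv_co_preadv() declared above. The "filtersketch"
 * name is invented.
 *
 *   static int coroutine_fn filtersketch_co_preadv(BlockDriverState *bs,
 *       uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
 *   {
 *       return bdrv_co_preadv(bs->file->bs, offset, bytes, qiov, flags);
 *   }
 */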
567
568 int get_tmp_filename(char *filename, int size);
569 BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
570 const char *filename);
571
572
573 /**
574 * bdrv_add_before_write_notifier:
575 *
576 * Register a callback that is invoked before write requests are processed but
577 * after any throttling or waiting for overlapping requests.
578 */
579 void bdrv_add_before_write_notifier(BlockDriverState *bs,
580 NotifierWithReturn *notifier);
581
582 /**
583 * bdrv_detach_aio_context:
584 *
585 * May be called from .bdrv_detach_aio_context() to detach children from the
586 * current #AioContext. This is only needed by block drivers that manage their
587 * own children. Both ->file and ->backing are automatically handled and
588 * block drivers should not call this function on them explicitly.
589 */
590 void bdrv_detach_aio_context(BlockDriverState *bs);
591
592 /**
593 * bdrv_attach_aio_context:
594 *
595 * May be called from .bdrv_attach_aio_context() to attach children to the new
596 * #AioContext. This is only needed by block drivers that manage their own
597 * children. Both ->file and ->backing are automatically handled and block
598 * drivers should not call this function on them explicitly.
599 */
600 void bdrv_attach_aio_context(BlockDriverState *bs,
601 AioContext *new_context);
602
603 /**
604 * bdrv_add_aio_context_notifier:
605 *
606 * If a long-running job intends to always run in the same AioContext as a
607 * certain BDS, it may use this function to be notified of changes regarding the
608 * association of the BDS to an AioContext.
609 *
610 * attached_aio_context() is called after the target BDS has been attached to a
611 * new AioContext; detach_aio_context() is called before the target BDS is
612 * detached from its old AioContext.
613 */
614 void bdrv_add_aio_context_notifier(BlockDriverState *bs,
615 void (*attached_aio_context)(AioContext *new_context, void *opaque),
616 void (*detach_aio_context)(void *opaque), void *opaque);
617
618 /**
619 * bdrv_remove_aio_context_notifier:
620 *
621 * Unsubscribe from change notifications regarding the BDS's AioContext. The
622 * parameters given here have to be the same as those given to
623 * bdrv_add_aio_context_notifier().
624 */
625 void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
626 void (*aio_context_attached)(AioContext *,
627 void *),
628 void (*aio_context_detached)(void *),
629 void *opaque);
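/*
 * Illustrative sketch (not part of this header): a long-running user pairing
 * the two calls above. The "ExampleJob" type and callback names are invented;
 * the unregister call must pass the same arguments as the register call.
 *
 *   typedef struct ExampleJob {
 *       AioContext *ctx;
 *   } ExampleJob;
 *
 *   static void example_attached_aio_context(AioContext *new_context,
 *                                            void *opaque)
 *   {
 *       ExampleJob *job = opaque;
 *       job->ctx = new_context;
 *   }
 *
 *   static void example_detach_aio_context(void *opaque)
 *   {
 *       ExampleJob *job = opaque;
 *       job->ctx = NULL;
 *   }
 *
 *   Register while setting up, unregister with the same arguments when done:
 *
 *       bdrv_add_aio_context_notifier(bs, example_attached_aio_context,
 *                                     example_detach_aio_context, job);
 *       bdrv_remove_aio_context_notifier(bs, example_attached_aio_context,
 *                                        example_detach_aio_context, job);
 */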
630
631 #ifdef _WIN32
632 int is_windows_drive(const char *filename);
633 #endif
634
635 /**
636 * stream_start:
637 * @bs: Block device to operate on.
638 * @base: Block device that will become the new base, or %NULL to
639 * flatten the whole backing file chain onto @bs.
640 * @base_id: The file name that will be written to @bs as the new
641 * backing file if the job completes. Ignored if @base is %NULL.
642 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
643 * @on_error: The action to take upon error.
644 * @cb: Completion function for the job.
645 * @opaque: Opaque pointer value passed to @cb.
646 * @errp: Error object.
647 *
648 * Start a streaming operation on @bs. Clusters that are unallocated
649 * in @bs, but allocated in any image between @base and @bs (both
650 * exclusive) will be written to @bs. At the end of a successful
651 * streaming job, the backing file of @bs will be changed to
652 * @base_id in the written image and to @base in the live BlockDriverState.
653 */
654 void stream_start(BlockDriverState *bs, BlockDriverState *base,
655 const char *base_id, int64_t speed, BlockdevOnError on_error,
656 BlockCompletionFunc *cb,
657 void *opaque, Error **errp);
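/*
 * Illustrative sketch (not part of this header): starting a stream job with
 * the declaration above. The completion callback, the "base.qcow2" file name
 * and the unlimited speed (0) are invented for the example;
 * BLOCKDEV_ON_ERROR_REPORT is one of the QAPI BlockdevOnError values.
 *
 *   static void example_stream_cb(void *opaque, int ret)
 *   {
 *       if (ret < 0) {
 *           error_report("streaming failed");
 *       }
 *   }
 *
 *   static void example_start_stream(BlockDriverState *bs,
 *                                    BlockDriverState *base, Error **errp)
 *   {
 *       stream_start(bs, base, "base.qcow2", 0, BLOCKDEV_ON_ERROR_REPORT,
 *                    example_stream_cb, NULL, errp);
 *   }
 */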
658
659 /**
660 * commit_start:
661 * @bs: Active block device.
662 * @top: Top block device to be committed.
663 * @base: Block device that will be written into, and become the new top.
664 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
665 * @on_error: The action to take upon error.
666 * @cb: Completion function for the job.
667 * @opaque: Opaque pointer value passed to @cb.
668 * @backing_file_str: String to use as the backing file in @top's overlay
669 * @errp: Error object.
670 *
671 */
672 void commit_start(BlockDriverState *bs, BlockDriverState *base,
673 BlockDriverState *top, int64_t speed,
674 BlockdevOnError on_error, BlockCompletionFunc *cb,
675 void *opaque, const char *backing_file_str, Error **errp);
676 /**
677 * commit_active_start:
678 * @bs: Active block device to be committed.
679 * @base: Block device that will be written into, and become the new top.
680 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
681 * @on_error: The action to take upon error.
682 * @cb: Completion function for the job.
683 * @opaque: Opaque pointer value passed to @cb.
684 * @errp: Error object.
685 *
686 */
687 void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
688 int64_t speed,
689 BlockdevOnError on_error,
690 BlockCompletionFunc *cb,
691 void *opaque, Error **errp);
692 /*
693 * mirror_start:
694 * @bs: Block device to operate on.
695 * @target: Block device to write to.
696 * @replaces: Block graph node name to replace once the mirror is done. Can
697 * only be used when full mirroring is selected.
698 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
699 * @granularity: The chosen granularity for the dirty bitmap.
700 * @buf_size: The amount of data that can be in flight at one time.
701 * @mode: Whether to collapse all images in the chain to the target.
702 * @backing_mode: How to establish the target's backing chain after completion.
703 * @on_source_error: The action to take upon error reading from the source.
704 * @on_target_error: The action to take upon error writing to the target.
705 * @unmap: Whether to unmap target where source sectors only contain zeroes.
706 * @cb: Completion function for the job.
707 * @opaque: Opaque pointer value passed to @cb.
708 * @errp: Error object.
709 *
710 * Start a mirroring operation on @bs. Clusters that are allocated
711 * in @bs will be written to @target until the job is cancelled or
712 * manually completed. At the end of a successful mirroring job,
713 * @bs will be switched to read from @target.
714 */
715 void mirror_start(BlockDriverState *bs, BlockDriverState *target,
716 const char *replaces,
717 int64_t speed, uint32_t granularity, int64_t buf_size,
718 MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
719 BlockdevOnError on_source_error,
720 BlockdevOnError on_target_error,
721 bool unmap,
722 BlockCompletionFunc *cb,
723 void *opaque, Error **errp);
724
725 /*
726 * backup_start:
727 * @bs: Block device to operate on.
728 * @target: Block device to write to.
729 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
730 * @sync_mode: What parts of the disk image should be copied to the destination.
731 * @sync_bitmap: The dirty bitmap if sync_mode is MIRROR_SYNC_MODE_INCREMENTAL.
732 * @on_source_error: The action to take upon error reading from the source.
733 * @on_target_error: The action to take upon error writing to the target.
734 * @cb: Completion function for the job.
735 * @opaque: Opaque pointer value passed to @cb.
736 * @txn: Transaction that this job is part of (may be NULL).
737 *
738 * Start a backup operation on @bs. Clusters in @bs are written to @target
739 * until the job is cancelled or manually completed.
740 */
741 void backup_start(BlockDriverState *bs, BlockDriverState *target,
742 int64_t speed, MirrorSyncMode sync_mode,
743 BdrvDirtyBitmap *sync_bitmap,
744 BlockdevOnError on_source_error,
745 BlockdevOnError on_target_error,
746 BlockCompletionFunc *cb, void *opaque,
747 BlockJobTxn *txn, Error **errp);
748
749 void hmp_drive_add_node(Monitor *mon, const char *optstr);
750
751 BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
752 const char *child_name,
753 const BdrvChildRole *child_role,
754 void *opaque);
755 void bdrv_root_unref_child(BdrvChild *child);
756
757 const char *bdrv_get_parent_name(const BlockDriverState *bs);
758 void blk_dev_change_media_cb(BlockBackend *blk, bool load);
759 bool blk_dev_has_removable_media(BlockBackend *blk);
760 bool blk_dev_has_tray(BlockBackend *blk);
761 void blk_dev_eject_request(BlockBackend *blk, bool force);
762 bool blk_dev_is_tray_open(BlockBackend *blk);
763 bool blk_dev_is_medium_locked(BlockBackend *blk);
764
765 void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors);
766 bool bdrv_requests_pending(BlockDriverState *bs);
767
768 void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out);
769 void bdrv_undo_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *in);
770
771 void blockdev_close_all_bdrv_states(void);
772
773 #endif /* BLOCK_INT_H */