#ifndef BLOCK_H
#define BLOCK_H

#include "qemu-aio.h"
#include "qemu-common.h"
#include "qemu-option.h"
#include "qemu-coroutine.h"
#include "qobject.h"

/* block.c */
typedef struct BlockDriver BlockDriver;

typedef struct BlockDriverInfo {
    /* in bytes, 0 if irrelevant */
    int cluster_size;
    /* offset at which the VM state can be saved (0 if not possible) */
    int64_t vm_state_offset;
    bool is_dirty;
} BlockDriverInfo;

typedef struct BlockFragInfo {
    uint64_t allocated_clusters;
    uint64_t total_clusters;
    uint64_t fragmented_clusters;
} BlockFragInfo;

typedef struct QEMUSnapshotInfo {
    char id_str[128]; /* unique snapshot id */
    /* the following fields are informative. They are not needed for
       the consistency of the snapshot */
    char name[256]; /* user chosen name */
    uint64_t vm_state_size; /* VM state info size */
    uint32_t date_sec; /* UTC date of the snapshot */
    uint32_t date_nsec;
    uint64_t vm_clock_nsec; /* VM clock relative to boot */
} QEMUSnapshotInfo;

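/*
 * Illustrative sketch (not part of this header): creating an internal
 * snapshot with the bdrv_snapshot_* functions declared further below.
 * The caller fills in the informative fields; pstrcpy() is assumed to be
 * available from qemu-common.h, and the snapshot name is arbitrary.
 *
 *     QEMUSnapshotInfo sn;
 *     int ret;
 *
 *     memset(&sn, 0, sizeof(sn));
 *     pstrcpy(sn.name, sizeof(sn.name), "before-upgrade");
 *     sn.date_sec = 0;        // e.g. current wall-clock seconds
 *     sn.date_nsec = 0;
 *     sn.vm_clock_nsec = 0;
 *     ret = bdrv_snapshot_create(bs, &sn);
 *     if (ret < 0) {
 *         // snapshot creation failed (format may not support snapshots)
 *     }
 */
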
/* Callbacks for block device models */
typedef struct BlockDevOps {
    /*
     * Runs when virtual media has changed (monitor commands eject, change).
     * Argument load is true on load and false on eject.
     * Beware: doesn't run when a host device's physical media
     * changes.  Sure would be useful if it did.
     * Device models with removable media must implement this callback.
     */
    void (*change_media_cb)(void *opaque, bool load);
    /*
     * Runs when an eject request is issued from the monitor, the tray
     * is closed, and the medium is locked.
     * Device models that do not implement is_medium_locked will not need
     * this callback.  Device models that can lock the medium or tray might
     * want to implement the callback and unlock the tray when "force" is
     * true, even if they do not support eject requests.
     */
    void (*eject_request_cb)(void *opaque, bool force);
    /*
     * Is the virtual tray open?
     * Device models implement this only when the device has a tray.
     */
    bool (*is_tray_open)(void *opaque);
    /*
     * Is the virtual medium locked into the device?
     * Device models implement this only when the device has such a lock.
     */
    bool (*is_medium_locked)(void *opaque);
    /*
     * Runs when the size has changed (e.g. monitor command block_resize)
     */
    void (*resize_cb)(void *opaque);
} BlockDevOps;

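/*
 * Illustrative sketch (not part of this header): how a device model with
 * removable media might hook into the block layer.  The MyCDState type and
 * the callback names are hypothetical.
 *
 *     typedef struct MyCDState { bool tray_open; bool locked; } MyCDState;
 *
 *     static void my_cd_change_media_cb(void *opaque, bool load)
 *     {
 *         MyCDState *s = opaque;
 *         // signal a media-change event towards the guest
 *         s->tray_open = !load;
 *     }
 *
 *     static bool my_cd_is_tray_open(void *opaque)
 *     {
 *         return ((MyCDState *)opaque)->tray_open;
 *     }
 *
 *     static bool my_cd_is_medium_locked(void *opaque)
 *     {
 *         return ((MyCDState *)opaque)->locked;
 *     }
 *
 *     static const BlockDevOps my_cd_block_ops = {
 *         .change_media_cb  = my_cd_change_media_cb,
 *         .is_tray_open     = my_cd_is_tray_open,
 *         .is_medium_locked = my_cd_is_medium_locked,
 *     };
 *
 *     // during device init, with bs the attached BlockDriverState:
 *     // bdrv_set_dev_ops(bs, &my_cd_block_ops, s);
 */
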
#define BDRV_O_RDWR         0x0002
#define BDRV_O_SNAPSHOT     0x0008 /* open the file read only and save writes in a snapshot */
#define BDRV_O_NOCACHE      0x0020 /* do not use the host page cache */
#define BDRV_O_CACHE_WB     0x0040 /* use write-back caching */
#define BDRV_O_NATIVE_AIO   0x0080 /* use native AIO instead of the thread pool */
#define BDRV_O_NO_BACKING   0x0100 /* don't open the backing file */
#define BDRV_O_NO_FLUSH     0x0200 /* disable flushing on this disk */
#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */
#define BDRV_O_INCOMING     0x0800 /* consistency hint for incoming migration */

#define BDRV_O_CACHE_MASK   (BDRV_O_NOCACHE | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH)

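/*
 * Illustrative sketch (not part of this header): opening an image read/write
 * with write-back caching, letting the block layer probe the format (NULL
 * driver).  The device name and path are arbitrary; error handling is
 * abbreviated.
 *
 *     BlockDriverState *bs = bdrv_new("hd0");
 *     int flags = BDRV_O_RDWR | BDRV_O_CACHE_WB;
 *     int ret = bdrv_open(bs, "/tmp/test.qcow2", flags, NULL);
 *     if (ret < 0) {
 *         bdrv_delete(bs);
 *     }
 *
 * The cache-related bits can also be derived from a user-supplied mode
 * string ("writeback", "none", ...) with bdrv_parse_cache_flags(), declared
 * below.
 */
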
#define BDRV_SECTOR_BITS   9
#define BDRV_SECTOR_SIZE   (1ULL << BDRV_SECTOR_BITS)
#define BDRV_SECTOR_MASK   ~(BDRV_SECTOR_SIZE - 1)

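/*
 * Illustrative note (not part of this header): converting between bytes and
 * 512-byte sectors with the macros above.
 *
 *     int64_t bytes   = bdrv_getlength(bs);         // image size in bytes
 *     int64_t sectors = bytes >> BDRV_SECTOR_BITS;  // whole sectors
 *     int64_t aligned = bytes & BDRV_SECTOR_MASK;   // round down to a sector
 */
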
typedef enum {
    BLOCK_ERR_REPORT, BLOCK_ERR_IGNORE, BLOCK_ERR_STOP_ENOSPC,
    BLOCK_ERR_STOP_ANY
} BlockErrorAction;

typedef enum {
    BDRV_ACTION_REPORT, BDRV_ACTION_IGNORE, BDRV_ACTION_STOP
} BlockQMPEventAction;

void bdrv_iostatus_enable(BlockDriverState *bs);
void bdrv_iostatus_reset(BlockDriverState *bs);
void bdrv_iostatus_disable(BlockDriverState *bs);
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs);
void bdrv_iostatus_set_err(BlockDriverState *bs, int error);
void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
                               BlockQMPEventAction action, int is_read);
void bdrv_info_print(Monitor *mon, const QObject *data);
void bdrv_info(Monitor *mon, QObject **ret_data);
void bdrv_stats_print(Monitor *mon, const QObject *data);
void bdrv_info_stats(Monitor *mon, QObject **ret_data);

/* disk I/O throttling */
void bdrv_io_limits_enable(BlockDriverState *bs);
void bdrv_io_limits_disable(BlockDriverState *bs);
bool bdrv_io_limits_enabled(BlockDriverState *bs);

void bdrv_init(void);
void bdrv_init_with_whitelist(void);
BlockDriver *bdrv_find_protocol(const char *filename);
BlockDriver *bdrv_find_format(const char *format_name);
BlockDriver *bdrv_find_whitelisted_format(const char *format_name);
int bdrv_create(BlockDriver *drv, const char* filename,
                QEMUOptionParameter *options);
int bdrv_create_file(const char* filename, QEMUOptionParameter *options);
BlockDriverState *bdrv_new(const char *device_name);
void bdrv_make_anon(BlockDriverState *bs);
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old);
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top);
void bdrv_delete(BlockDriverState *bs);
int bdrv_parse_cache_flags(const char *mode, int *flags);
int bdrv_file_open(BlockDriverState **pbs, const char *filename, int flags);
int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
              BlockDriver *drv);
void bdrv_close(BlockDriverState *bs);
int bdrv_attach_dev(BlockDriverState *bs, void *dev);
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev);
void bdrv_detach_dev(BlockDriverState *bs, void *dev);
void *bdrv_get_attached_dev(BlockDriverState *bs);
void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque);
void bdrv_dev_eject_request(BlockDriverState *bs, bool force);
bool bdrv_dev_has_removable_media(BlockDriverState *bs);
bool bdrv_dev_is_tray_open(BlockDriverState *bs);
bool bdrv_dev_is_medium_locked(BlockDriverState *bs);
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors);
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors);
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors);
int bdrv_pread(BlockDriverState *bs, int64_t offset,
               void *buf, int count);
int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int count);
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
                     const void *buf, int count);
int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
                               int nb_sectors, QEMUIOVector *qiov);
int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
                                int nb_sectors, QEMUIOVector *qiov);
/*
 * Efficiently zero a region of the disk image.  Note that this is a regular
 * I/O request like read or write and should have a reasonable size.  This
 * function is not suitable for zeroing the entire image in a single request
 * because it may allocate memory for the entire region.
 */
int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors);
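/*
 * Illustrative sketch (not part of this header): zeroing a large range in
 * bounded chunks from coroutine context, as the comment above recommends.
 * The 2048-sector (1 MB) chunk size is an arbitrary choice for the example.
 *
 *     static int coroutine_fn zero_range(BlockDriverState *bs,
 *                                        int64_t sector, int64_t count)
 *     {
 *         while (count > 0) {
 *             int n = count > 2048 ? 2048 : count;
 *             int ret = bdrv_co_write_zeroes(bs, sector, n);
 *             if (ret < 0) {
 *                 return ret;
 *             }
 *             sector += n;
 *             count -= n;
 *         }
 *         return 0;
 *     }
 */
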
int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, int *pnum);
int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
                                            BlockDriverState *base,
                                            int64_t sector_num,
                                            int nb_sectors, int *pnum);
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
    const char *backing_file);
int bdrv_truncate(BlockDriverState *bs, int64_t offset);
int64_t bdrv_getlength(BlockDriverState *bs);
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs);
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
int bdrv_commit(BlockDriverState *bs);
int bdrv_commit_all(void);
int bdrv_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt);
void bdrv_register(BlockDriver *bdrv);

typedef struct BdrvCheckResult {
    int corruptions;
    int leaks;
    int check_errors;
    int corruptions_fixed;
    int leaks_fixed;
    BlockFragInfo bfi;
} BdrvCheckResult;

typedef enum {
    BDRV_FIX_LEAKS    = 1,
    BDRV_FIX_ERRORS   = 2,
} BdrvCheckMode;

int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);

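/*
 * Illustrative sketch (not part of this header): checking an image,
 * repairing what the format driver can repair, and reporting the rest.
 *
 *     BdrvCheckResult result = {0};
 *     int ret = bdrv_check(bs, &result, BDRV_FIX_LEAKS | BDRV_FIX_ERRORS);
 *     if (ret < 0) {
 *         // the check itself failed (e.g. not supported by the format)
 *     } else if (result.corruptions || result.leaks) {
 *         // corruptions/leaks that remain after the requested fixes
 *     }
 */
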
/* async block I/O */
typedef void BlockDriverDirtyHandler(BlockDriverState *bs, int64_t sector,
                                     int sector_num);
BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *iov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                  QEMUIOVector *iov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
                                 BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
                                   int64_t sector_num, int nb_sectors,
                                   BlockDriverCompletionFunc *cb, void *opaque);
void bdrv_aio_cancel(BlockDriverAIOCB *acb);

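/*
 * Illustrative sketch (not part of this header): issuing an asynchronous
 * read.  qemu_iovec_init()/qemu_iovec_add() are assumed to come from
 * qemu-common.h; the completion callback runs later with ret == 0 on
 * success or a negative errno.  The ReadState type is hypothetical.
 *
 *     typedef struct ReadState { QEMUIOVector qiov; uint8_t *buf; } ReadState;
 *
 *     static void read_done(void *opaque, int ret)
 *     {
 *         ReadState *rs = opaque;
 *         // consume rs->buf if ret == 0, then free the request state
 *     }
 *
 *     ReadState *rs = g_malloc0(sizeof(*rs));
 *     rs->buf = qemu_blockalign(bs, 16 * BDRV_SECTOR_SIZE);
 *     qemu_iovec_init(&rs->qiov, 1);
 *     qemu_iovec_add(&rs->qiov, rs->buf, 16 * BDRV_SECTOR_SIZE);
 *     bdrv_aio_readv(bs, 0, &rs->qiov, 16, read_done, rs);
 */
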
typedef struct BlockRequest {
    /* Fields to be filled by multiwrite caller */
    int64_t sector;
    int nb_sectors;
    QEMUIOVector *qiov;
    BlockDriverCompletionFunc *cb;
    void *opaque;

    /* Filled by multiwrite implementation */
    int error;
} BlockRequest;

int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs,
                        int num_reqs);

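/*
 * Illustrative sketch (not part of this header): batching two writes into a
 * single bdrv_aio_multiwrite() call.  Each request carries its own
 * completion callback; qiov0/qiov1 are assumed to be QEMUIOVectors set up
 * as in the bdrv_aio_readv() example above.
 *
 *     static void write_done(void *opaque, int ret)
 *     {
 *         // per-request completion
 *     }
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0,  .nb_sectors = 8, .qiov = &qiov0,
 *           .cb = write_done, .opaque = NULL },
 *         { .sector = 64, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = write_done, .opaque = NULL },
 *     };
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         // at least one request was not submitted; check reqs[i].error
 *     }
 */
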
/* sg packet commands */
int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf);
BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
                                 unsigned long int req, void *buf,
                                 BlockDriverCompletionFunc *cb, void *opaque);

/* Invalidate any cached metadata used by image formats */
void bdrv_invalidate_cache(BlockDriverState *bs);
void bdrv_invalidate_cache_all(void);

void bdrv_clear_incoming_migration_all(void);

/* Ensure contents are flushed to disk. */
int bdrv_flush(BlockDriverState *bs);
int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
void bdrv_flush_all(void);
void bdrv_close_all(void);
void bdrv_drain_all(void);

int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors);
int bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors);
int bdrv_has_zero_init(BlockDriverState *bs);
int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
                      int *pnum);

void bdrv_set_on_error(BlockDriverState *bs, BlockErrorAction on_read_error,
                       BlockErrorAction on_write_error);
BlockErrorAction bdrv_get_on_error(BlockDriverState *bs, int is_read);
int bdrv_is_read_only(BlockDriverState *bs);
int bdrv_is_sg(BlockDriverState *bs);
int bdrv_enable_write_cache(BlockDriverState *bs);
void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce);
int bdrv_is_inserted(BlockDriverState *bs);
int bdrv_media_changed(BlockDriverState *bs);
void bdrv_lock_medium(BlockDriverState *bs, bool locked);
void bdrv_eject(BlockDriverState *bs, bool eject_flag);
const char *bdrv_get_format_name(BlockDriverState *bs);
BlockDriverState *bdrv_find(const char *name);
BlockDriverState *bdrv_next(BlockDriverState *bs);
void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs),
                  void *opaque);
int bdrv_is_encrypted(BlockDriverState *bs);
int bdrv_key_required(BlockDriverState *bs);
int bdrv_set_key(BlockDriverState *bs, const char *key);
int bdrv_query_missing_keys(void);
void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque);
const char *bdrv_get_device_name(BlockDriverState *bs);
int bdrv_get_flags(BlockDriverState *bs);
int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors);
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);

const char *bdrv_get_encrypted_filename(BlockDriverState *bs);
void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size);
void bdrv_get_full_backing_filename(BlockDriverState *bs,
                                    char *dest, size_t sz);
int bdrv_can_snapshot(BlockDriverState *bs);
int bdrv_is_snapshot(BlockDriverState *bs);
BlockDriverState *bdrv_snapshots(void);
int bdrv_snapshot_create(BlockDriverState *bs,
                         QEMUSnapshotInfo *sn_info);
int bdrv_snapshot_goto(BlockDriverState *bs,
                       const char *snapshot_id);
int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id);
int bdrv_snapshot_list(BlockDriverState *bs,
                       QEMUSnapshotInfo **psn_info);
int bdrv_snapshot_load_tmp(BlockDriverState *bs,
                           const char *snapshot_name);
char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn);

char *get_human_readable_size(char *buf, int buf_size, int64_t size);
int path_is_absolute(const char *path);
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename);

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size);

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size);

int bdrv_img_create(const char *filename, const char *fmt,
                    const char *base_filename, const char *base_fmt,
                    char *options, uint64_t img_size, int flags);

void bdrv_set_buffer_alignment(BlockDriverState *bs, int align);
void *qemu_blockalign(BlockDriverState *bs, size_t size);

#define BDRV_SECTORS_PER_DIRTY_CHUNK 2048

void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable);
int bdrv_get_dirty(BlockDriverState *bs, int64_t sector);
void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
                      int nr_sectors);
int64_t bdrv_get_dirty_count(BlockDriverState *bs);

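/*
 * Illustrative sketch (not part of this header): how a user of dirty
 * tracking might walk dirty sectors and clear them once handled, chunked by
 * BDRV_SECTORS_PER_DIRTY_CHUNK to match the granularity suggested above.
 *
 *     bdrv_set_dirty_tracking(bs, 1);
 *     ...
 *     int64_t total = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
 *     for (int64_t sector = 0; sector < total;
 *          sector += BDRV_SECTORS_PER_DIRTY_CHUNK) {
 *         if (bdrv_get_dirty(bs, sector)) {
 *             // ... copy this chunk out ...
 *             bdrv_reset_dirty(bs, sector, BDRV_SECTORS_PER_DIRTY_CHUNK);
 *         }
 *     }
 *     // bdrv_get_dirty_count(bs) can be polled to see how much remains
 */
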
void bdrv_enable_copy_on_read(BlockDriverState *bs);
void bdrv_disable_copy_on_read(BlockDriverState *bs);

void bdrv_set_in_use(BlockDriverState *bs, int in_use);
int bdrv_in_use(BlockDriverState *bs);

enum BlockAcctType {
    BDRV_ACCT_READ,
    BDRV_ACCT_WRITE,
    BDRV_ACCT_FLUSH,
    BDRV_MAX_IOTYPE,
};

typedef struct BlockAcctCookie {
    int64_t bytes;
    int64_t start_time_ns;
    enum BlockAcctType type;
} BlockAcctCookie;

void bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                     int64_t bytes, enum BlockAcctType type);
void bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie);

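/*
 * Illustrative sketch (not part of this header): accounting an I/O request
 * in a device model.  The cookie is typically embedded in the per-request
 * state so that bdrv_acct_done() can be called from the completion path.
 *
 *     BlockAcctCookie acct;
 *
 *     bdrv_acct_start(bs, &acct, 16 * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
 *     // ... issue and complete the 16-sector read ...
 *     bdrv_acct_done(bs, &acct);
 */
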
typedef enum {
    BLKDBG_L1_UPDATE,

    BLKDBG_L1_GROW_ALLOC_TABLE,
    BLKDBG_L1_GROW_WRITE_TABLE,
    BLKDBG_L1_GROW_ACTIVATE_TABLE,

    BLKDBG_L2_LOAD,
    BLKDBG_L2_UPDATE,
    BLKDBG_L2_UPDATE_COMPRESSED,
    BLKDBG_L2_ALLOC_COW_READ,
    BLKDBG_L2_ALLOC_WRITE,

    BLKDBG_READ_AIO,
    BLKDBG_READ_BACKING_AIO,
    BLKDBG_READ_COMPRESSED,

    BLKDBG_WRITE_AIO,
    BLKDBG_WRITE_COMPRESSED,

    BLKDBG_VMSTATE_LOAD,
    BLKDBG_VMSTATE_SAVE,

    BLKDBG_COW_READ,
    BLKDBG_COW_WRITE,

    BLKDBG_REFTABLE_LOAD,
    BLKDBG_REFTABLE_GROW,

    BLKDBG_REFBLOCK_LOAD,
    BLKDBG_REFBLOCK_UPDATE,
    BLKDBG_REFBLOCK_UPDATE_PART,
    BLKDBG_REFBLOCK_ALLOC,
    BLKDBG_REFBLOCK_ALLOC_HOOKUP,
    BLKDBG_REFBLOCK_ALLOC_WRITE,
    BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS,
    BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE,
    BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE,

    BLKDBG_CLUSTER_ALLOC,
    BLKDBG_CLUSTER_ALLOC_BYTES,
    BLKDBG_CLUSTER_FREE,

    BLKDBG_EVENT_MAX,
} BlkDebugEvent;

#define BLKDBG_EVENT(bs, evt) bdrv_debug_event(bs, evt)
void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event);

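/*
 * Illustrative note (not part of this header): an image format driver fires
 * a debug event at the point of interest, e.g. just before updating an L2
 * table, so that blkdebug rules can key off it.  Format drivers typically
 * pass the BlockDriverState of their underlying file here.
 *
 *     BLKDBG_EVENT(bs, BLKDBG_L2_UPDATE);
 */
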
#endif