/*
 * QEMU Block backends
 *
 * Copyright (C) 2014 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later. See the COPYING.LIB file in the top-level directory.
 */

#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "sysemu/blockdev.h"
#include "qapi-event.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

struct BlockBackend {
    char *name;
    int refcnt;
    BlockDriverState *bs;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link; /* for blk_backends */

    void *dev;                  /* attached device model, if any */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    QEMUBH *bh;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);

/* All the BlockBackends (except for hidden ones) */
static QTAILQ_HEAD(, BlockBackend) blk_backends =
    QTAILQ_HEAD_INITIALIZER(blk_backends);

/*
 * Create a new BlockBackend with @name, with a reference count of one.
 * @name must not be null or empty.
 * Fail if a BlockBackend with this name already exists.
 * Store an error through @errp on failure, unless it's null.
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(const char *name, Error **errp)
{
    BlockBackend *blk;

    assert(name && name[0]);
    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return NULL;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return NULL;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return NULL;
    }

    blk = g_new0(BlockBackend, 1);
    blk->name = g_strdup(name);
    blk->refcnt = 1;
    QTAILQ_INSERT_TAIL(&blk_backends, blk, link);
    return blk;
}
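
/*
 * Usage sketch (illustrative, not part of the original file): create a
 * named backend, check for failure, and drop the reference when done.
 * The name "drive0" and the error handling shown are hypothetical.
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new("drive0", &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *     ... use blk ...
 *     blk_unref(blk);
 */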

/*
 * Create a new BlockBackend with a new BlockDriverState attached.
 * Otherwise just like blk_new(), which see.
 */
BlockBackend *blk_new_with_bs(const char *name, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(name, errp);
    if (!blk) {
        return NULL;
    }

    bs = bdrv_new_root();
    blk->bs = bs;
    bs->blk = blk;
    return blk;
}

/*
 * Calls blk_new_with_bs() and then calls bdrv_open() on the BlockDriverState.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *name, const char *filename,
                           const char *reference, QDict *options, int flags,
                           Error **errp)
{
    BlockBackend *blk;
    int ret;

    blk = blk_new_with_bs(name, errp);
    if (!blk) {
        QDECREF(options);
        return NULL;
    }

    ret = bdrv_open(&blk->bs, filename, reference, options, flags, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    return blk;
}
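
/*
 * Usage sketch (illustrative only): open an image through the legacy
 * @filename/@flags interface. The path and flags below are hypothetical.
 * Note that @options is consumed even on failure, so the caller must not
 * QDECREF() it afterwards.
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new_open("drive0", "/tmp/test.qcow2", NULL,
 *                                      NULL, BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *     }
 */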

static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->dev);
    if (blk->bs) {
        assert(blk->bs->blk == blk);
        blk->bs->blk = NULL;
        bdrv_unref(blk->bs);
        blk->bs = NULL;
    }
    /* Avoid double-remove after blk_hide_on_behalf_of_hmp_drive_del() */
    if (blk->name[0]) {
        QTAILQ_REMOVE(&blk_backends, blk, link);
    }
    g_free(blk->name);
    drive_info_del(blk->legacy_dinfo);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}
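
/*
 * Refcounting sketch (illustrative only): every blk_ref() must be balanced
 * by exactly one blk_unref(). blk_unref(NULL) is a no-op, which keeps
 * error paths simple.
 *
 *     blk_ref(blk);      // keep @blk alive across a long-running operation
 *     ...
 *     blk_unref(blk);    // may destroy @blk if this was the last reference
 */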

/*
 * Return the BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link) : QTAILQ_FIRST(&blk_backends);
}

/*
 * Return @blk's name, a non-null string.
 * Wart: the name is empty iff @blk has been hidden with
 * blk_hide_on_behalf_of_hmp_drive_del().
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name;
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk;

    assert(name);
    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->bs;
}

/*
 * Changes the BlockDriverState attached to @blk
 */
void blk_set_bs(BlockBackend *blk, BlockDriverState *bs)
{
    bdrv_ref(bs);

    if (blk->bs) {
        blk->bs->blk = NULL;
        bdrv_unref(blk->bs);
    }
    assert(bs->blk == NULL);

    blk->bs = bs;
    bs->blk = blk;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk;

    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Hide @blk.
 * @blk must not have been hidden already.
 * Make attached BlockDriverState, if any, anonymous.
 * Once hidden, @blk is invisible to all functions that don't receive
 * it as argument. For example, blk_by_name() won't return it.
 * Strictly for use by do_drive_del().
 * TODO get rid of it!
 */
void blk_hide_on_behalf_of_hmp_drive_del(BlockBackend *blk)
{
    QTAILQ_REMOVE(&blk_backends, blk, link);
    blk->name[0] = 0;
    if (blk->bs) {
        bdrv_make_anon(blk->bs);
    }
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (blk->dev) {
        return -EBUSY;
    }
    blk_ref(blk);
    blk->dev = dev;
    bdrv_iostatus_reset(blk->bs);
    return 0;
}

/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
{
    if (blk_attach_dev(blk, dev) < 0) {
        abort();
    }
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_unref(blk);
}
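
/*
 * Lifecycle sketch (illustrative only; @my_dev is a hypothetical device
 * model): blk_attach_dev() takes a reference that blk_detach_dev() gives
 * back, so the pairing below keeps the refcount balanced.
 *
 *     if (blk_attach_dev(blk, my_dev) < 0) {
 *         ... fail: another device model already owns @blk (-EBUSY) ...
 *     }
 *     ...
 *     blk_detach_dev(blk, my_dev);
 */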

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;
}
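
/*
 * Registration sketch (illustrative only; the member names are real
 * BlockDevOps callbacks, the my_* functions and my_device_state are
 * hypothetical):
 *
 *     static const BlockDevOps my_block_ops = {
 *         .change_media_cb  = my_change_media_cb,
 *         .eject_request_cb = my_eject_request_cb,
 *         .is_tray_open     = my_is_tray_open,
 *         .resize_cb        = my_resize_cb,
 *     };
 *
 *     blk_set_dev_ops(blk, &my_block_ops, my_device_state);
 */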

/*
 * Notify @blk's attached device model of media change.
 * If @load is true, notify of media load.
 * Else, notify of media eject.
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_closed = !blk_dev_is_tray_open(blk);

        blk->dev_ops->change_media_cb(blk->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            qapi_event_send_device_tray_moved(blk_name(blk),
                                              true, &error_abort);
        }
        if (load) {
            /* tray close */
            qapi_event_send_device_tray_moved(blk_name(blk),
                                              false, &error_abort);
        }
    }
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_tray_open) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
void blk_dev_resize_cb(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    bdrv_iostatus_enable(blk->bs);
}

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_inserted(blk)) {
        return -ENOMEDIUM;
    }

    len = blk_getlength(blk);
    if (len < 0) {
        return len;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (offset > len || len - offset < size) {
        return -EIO;
    }

    return 0;
}

static int blk_check_request(BlockBackend *blk, int64_t sector_num,
                             int nb_sectors)
{
    if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
                                  nb_sectors * BDRV_SECTOR_SIZE);
}
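
/*
 * Note on the bounds above (descriptive comment, not from the original
 * file): with BDRV_SECTOR_SIZE being 512, capping sector_num at
 * INT64_MAX / 512 keeps sector_num * BDRV_SECTOR_SIZE from overflowing
 * int64_t, and capping nb_sectors at INT_MAX / 512 keeps the byte count
 * within the INT_MAX limit that blk_check_byte_request() enforces on
 * @size.
 */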

int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
             int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read(blk->bs, sector_num, buf, nb_sectors);
}

int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
                         int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read_unthrottled(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
              int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}

static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

static BlockAIOCB *abort_aio_request(BlockBackend *blk, BlockCompletionFunc *cb,
                                     void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;
    QEMUBH *bh;

    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return &acb->common;
}

BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                 int nb_sectors, BdrvRequestFlags flags,
                                 BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_write_zeroes(blk->bs, sector_num, nb_sectors, flags,
                                 cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pread(blk->bs, offset, buf, count);
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pwrite(blk->bs, offset, buf, count);
}

int64_t blk_getlength(BlockBackend *blk)
{
    return bdrv_getlength(blk->bs);
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    bdrv_get_geometry(blk->bs, nb_sectors_ptr);
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    return bdrv_nb_sectors(blk->bs);
}

BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
                          QEMUIOVector *iov, int nb_sectors,
                          BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_readv(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}

BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
                           QEMUIOVector *iov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_writev(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}
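
/*
 * AIO usage sketch (illustrative only; my_read_done and my_state are
 * hypothetical): submit an asynchronous read and handle completion in a
 * callback. When the request is rejected up front, the callback still
 * runs, from the bottom half scheduled by abort_aio_request() above.
 *
 *     static void my_read_done(void *opaque, int ret)
 *     {
 *         struct MyState *s = opaque;
 *         if (ret < 0) {
 *             ... handle the error ...
 *         }
 *         ...
 *     }
 *
 *     blk_aio_readv(blk, sector_num, &qiov, nb_sectors,
 *                   my_read_done, my_state);
 */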

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_flush(blk->bs, cb, opaque);
}

BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                            int64_t sector_num, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_discard(blk->bs, sector_num, nb_sectors, cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
{
    int i, ret;

    for (i = 0; i < num_reqs; i++) {
        ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
        if (ret < 0) {
            return ret;
        }
    }

    return bdrv_aio_multiwrite(blk->bs, reqs, num_reqs);
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    return bdrv_ioctl(blk->bs, req, buf);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_ioctl(blk->bs, req, buf, cb, opaque);
}

int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_discard(blk->bs, sector_num, nb_sectors);
}

int blk_co_flush(BlockBackend *blk)
{
    return bdrv_co_flush(blk->bs);
}

int blk_flush(BlockBackend *blk)
{
    return bdrv_flush(blk->bs);
}

int blk_flush_all(void)
{
    return bdrv_flush_all();
}

void blk_drain(BlockBackend *blk)
{
    bdrv_drain(blk->bs);
}

void blk_drain_all(void)
{
    bdrv_drain_all();
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return bdrv_get_on_error(blk->bs, is_read);
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    return bdrv_get_error_action(blk->bs, is_read, error);
}

void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    bdrv_error_action(blk->bs, action, is_read, error);
}

int blk_is_read_only(BlockBackend *blk)
{
    return bdrv_is_read_only(blk->bs);
}

int blk_is_sg(BlockBackend *blk)
{
    return bdrv_is_sg(blk->bs);
}

int blk_enable_write_cache(BlockBackend *blk)
{
    return bdrv_enable_write_cache(blk->bs);
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    bdrv_set_enable_write_cache(blk->bs, wce);
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    bdrv_invalidate_cache(blk->bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    return blk->bs && bdrv_is_inserted(blk->bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    bdrv_lock_medium(blk->bs, locked);
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    bdrv_eject(blk->bs, eject_flag);
}

int blk_get_flags(BlockBackend *blk)
{
    return bdrv_get_flags(blk->bs);
}

int blk_get_max_transfer_length(BlockBackend *blk)
{
    return blk->bs->bl.max_transfer_length;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk->bs : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    return bdrv_op_is_blocked(blk->bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    bdrv_op_unblock(blk->bs, op, reason);
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    bdrv_op_block_all(blk->bs, reason);
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    bdrv_op_unblock_all(blk->bs, reason);
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_get_aio_context(blk->bs);
    } else {
        return qemu_get_aio_context();
    }
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    bdrv_set_aio_context(blk->bs, new_context);
}

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    bdrv_add_aio_context_notifier(blk->bs, attached_aio_context,
                                  detach_aio_context, opaque);
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    bdrv_remove_aio_context_notifier(blk->bs, attached_aio_context,
                                     detach_aio_context, opaque);
}
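
/*
 * Notifier sketch (illustrative only; the my_* names are hypothetical):
 * code that caches the backend's AioContext registers callbacks so it can
 * follow the context when it moves, and unregisters them on teardown with
 * the exact same arguments.
 *
 *     blk_add_aio_context_notifier(blk, my_attached_aio_context,
 *                                  my_detach_aio_context, my_state);
 *     ...
 *     blk_remove_aio_context_notifier(blk, my_attached_aio_context,
 *                                     my_detach_aio_context, my_state);
 */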

void blk_add_close_notifier(BlockBackend *blk, Notifier *notify)
{
    bdrv_add_close_notifier(blk->bs, notify);
}

void blk_io_plug(BlockBackend *blk)
{
    bdrv_io_plug(blk->bs);
}

void blk_io_unplug(BlockBackend *blk)
{
    bdrv_io_unplug(blk->bs);
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return bdrv_get_stats(blk->bs);
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}

int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                         const uint8_t *buf, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_compressed(blk->bs, sector_num, buf, nb_sectors);
}

int blk_truncate(BlockBackend *blk, int64_t offset)
{
    return bdrv_truncate(blk->bs, offset);
}

int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_discard(blk->bs, sector_num, nb_sectors);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    return bdrv_save_vmstate(blk->bs, buf, pos, size);
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    return bdrv_load_vmstate(blk->bs, buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    return bdrv_probe_blocksizes(blk->bs, bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    return bdrv_probe_geometry(blk->bs, geo);
}