/*
 * QEMU Block backends
 *
 * Copyright (C) 2014 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later.  See the COPYING.LIB file in the top-level directory.
 */

#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "sysemu/blockdev.h"
#include "qapi-event.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

struct BlockBackend {
    char *name;
    int refcnt;
    BlockDriverState *bs;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link; /* for blk_backends */

    void *dev;                  /* attached device model, if any */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    QEMUBH *bh;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);

/* All the BlockBackends (except for hidden ones) */
static QTAILQ_HEAD(, BlockBackend) blk_backends =
    QTAILQ_HEAD_INITIALIZER(blk_backends);

/*
 * Create a new BlockBackend with @name, with a reference count of one.
 * @name must not be null or empty.
 * Fail if a BlockBackend with this name already exists.
 * Store an error through @errp on failure, unless it's null.
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(const char *name, Error **errp)
{
    BlockBackend *blk;

    assert(name && name[0]);
    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return NULL;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return NULL;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return NULL;
    }

    blk = g_new0(BlockBackend, 1);
    blk->name = g_strdup(name);
    blk->refcnt = 1;
    QTAILQ_INSERT_TAIL(&blk_backends, blk, link);
    return blk;
}
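
/*
 * Usage sketch for blk_new() (illustration only: the caller, variable
 * names and error handling below are hypothetical, not part of this
 * file):
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new("drive0", &local_err);
 *     if (!blk) {
 *         error_free(local_err);      (or report it to the user)
 *         return;
 *     }
 *     ...
 *     blk_unref(blk);                 (drops the initial reference)
 */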

/*
 * Create a new BlockBackend with a new BlockDriverState attached.
 * Otherwise just like blk_new(), which see.
 */
BlockBackend *blk_new_with_bs(const char *name, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(name, errp);
    if (!blk) {
        return NULL;
    }

    bs = bdrv_new_root();
    blk->bs = bs;
    bs->blk = blk;
    return blk;
}

/*
 * Calls blk_new_with_bs() and then calls bdrv_open() on the BlockDriverState.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *name, const char *filename,
                           const char *reference, QDict *options, int flags,
                           Error **errp)
{
    BlockBackend *blk;
    int ret;

    blk = blk_new_with_bs(name, errp);
    if (!blk) {
        QDECREF(options);
        return NULL;
    }

    ret = bdrv_open(&blk->bs, filename, reference, options, flags, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    return blk;
}
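
/*
 * Usage sketch for blk_new_open() (hypothetical caller; the file name
 * and flags choice are made up for illustration):
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new_open("drive0", "/tmp/test.img", NULL,
 *                                      NULL, BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         ...@options has already been consumed by the block layer,
 *         only local_err needs handling here...
 *     }
 */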

static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->dev);
    if (blk->bs) {
        assert(blk->bs->blk == blk);
        blk->bs->blk = NULL;
        bdrv_unref(blk->bs);
        blk->bs = NULL;
    }
    /* Avoid double-remove after blk_hide_on_behalf_of_hmp_drive_del() */
    if (blk->name[0]) {
        QTAILQ_REMOVE(&blk_backends, blk, link);
    }
    g_free(blk->name);
    drive_info_del(blk->legacy_dinfo);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}

/*
 * Return the BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link) : QTAILQ_FIRST(&blk_backends);
}

/*
 * Return @blk's name, a non-null string.
 * Wart: the name is empty iff @blk has been hidden with
 * blk_hide_on_behalf_of_hmp_drive_del().
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name;
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk;

    assert(name);
    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->bs;
}

/*
 * Changes the BlockDriverState attached to @blk
 */
void blk_set_bs(BlockBackend *blk, BlockDriverState *bs)
{
    bdrv_ref(bs);

    if (blk->bs) {
        blk->bs->blk = NULL;
        bdrv_unref(blk->bs);
    }
    assert(bs->blk == NULL);

    blk->bs = bs;
    bs->blk = blk;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk;

    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Hide @blk.
 * @blk must not have been hidden already.
 * Make attached BlockDriverState, if any, anonymous.
 * Once hidden, @blk is invisible to all functions that don't receive
 * it as argument.  For example, blk_by_name() won't return it.
 * Strictly for use by do_drive_del().
 * TODO get rid of it!
 */
void blk_hide_on_behalf_of_hmp_drive_del(BlockBackend *blk)
{
    QTAILQ_REMOVE(&blk_backends, blk, link);
    blk->name[0] = 0;
    if (blk->bs) {
        bdrv_make_anon(blk->bs);
    }
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (blk->dev) {
        return -EBUSY;
    }
    blk_ref(blk);
    blk->dev = dev;
    bdrv_iostatus_reset(blk->bs);
    return 0;
}

/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
{
    if (blk_attach_dev(blk, dev) < 0) {
        abort();
    }
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    bdrv_set_guest_block_size(blk->bs, 512);
    blk_unref(blk);
}
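
/*
 * Attach/detach sketch (hypothetical device model code; @s stands for
 * the device model's state and is invented for illustration):
 *
 *     if (blk_attach_dev(blk, s) < 0) {
 *         ...another device model owns @blk, handle -EBUSY...
 *     }
 *     ...
 *     blk_detach_dev(blk, s);     (must pass the same @dev)
 *
 * Attaching takes a reference to @blk and detaching releases it, so the
 * backend stays alive for as long as a device model is attached.
 */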

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;
}
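
/*
 * Sketch of a device model registering callbacks (the ops table members
 * shown are the ones consumed by the functions below; the callback
 * names and @s are hypothetical):
 *
 *     static const BlockDevOps my_block_ops = {
 *         .change_media_cb  = my_change_media_cb,
 *         .eject_request_cb = my_eject_request_cb,
 *         .is_tray_open     = my_is_tray_open,
 *         .resize_cb        = my_resize_cb,
 *     };
 *
 *     blk_set_dev_ops(blk, &my_block_ops, s);
 */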

/*
 * Notify @blk's attached device model of media change.
 * If @load is true, notify of media load.
 * Else, notify of media eject.
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_closed = !blk_dev_is_tray_open(blk);

        blk->dev_ops->change_media_cb(blk->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            qapi_event_send_device_tray_moved(blk_name(blk),
                                              true, &error_abort);
        }
        if (load) {
            /* tray close */
            qapi_event_send_device_tray_moved(blk_name(blk),
                                              false, &error_abort);
        }
    }
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_tray_open) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
void blk_dev_resize_cb(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    bdrv_iostatus_enable(blk->bs);
}

/*
 * Check a byte-granularity request against @blk's current size.
 * Return 0 if the request is within bounds, else a negative errno.
 */
static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_inserted(blk)) {
        return -ENOMEDIUM;
    }

    len = blk_getlength(blk);
    if (len < 0) {
        return len;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (offset > len || len - offset < size) {
        return -EIO;
    }

    return 0;
}

/*
 * Like blk_check_byte_request(), but for a sector-granularity request.
 * Guards against overflow before converting sectors to bytes.
 */
static int blk_check_request(BlockBackend *blk, int64_t sector_num,
                             int nb_sectors)
{
    if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
                                  nb_sectors * BDRV_SECTOR_SIZE);
}

int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
             int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read(blk->bs, sector_num, buf, nb_sectors);
}

int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
                         int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read_unthrottled(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
              int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}

static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

static BlockAIOCB *abort_aio_request(BlockBackend *blk, BlockCompletionFunc *cb,
                                     void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;
    QEMUBH *bh;

    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return &acb->common;
}

BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                 int nb_sectors, BdrvRequestFlags flags,
                                 BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_write_zeroes(blk->bs, sector_num, nb_sectors, flags,
                                 cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pread(blk->bs, offset, buf, count);
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pwrite(blk->bs, offset, buf, count);
}
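
/*
 * Synchronous byte-level I/O sketch (hypothetical caller; buffer and
 * offset are made up for illustration):
 *
 *     uint8_t sector[BDRV_SECTOR_SIZE];
 *     int ret = blk_pread(blk, 0, sector, sizeof(sector));
 *     if (ret < 0) {
 *         ...ret is a negative errno, e.g. -EIO or -ENOMEDIUM...
 *     }
 */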

int64_t blk_getlength(BlockBackend *blk)
{
    return bdrv_getlength(blk->bs);
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    bdrv_get_geometry(blk->bs, nb_sectors_ptr);
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    return bdrv_nb_sectors(blk->bs);
}

BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
                          QEMUIOVector *iov, int nb_sectors,
                          BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_readv(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}

BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
                           QEMUIOVector *iov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_writev(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}
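
/*
 * Asynchronous read sketch (hypothetical caller; read_done(), buf and
 * @s are invented for illustration).  The completion callback runs in
 * @blk's AioContext; when the request fails the up-front checks, it is
 * invoked from a bottom half with the negative errno, never
 * re-entrantly:
 *
 *     static void read_done(void *opaque, int ret)
 *     {
 *         ...ret is 0 on success, negative errno on failure...
 *     }
 *
 *     QEMUIOVector qiov;
 *     qemu_iovec_init(&qiov, 1);
 *     qemu_iovec_add(&qiov, buf, BDRV_SECTOR_SIZE);
 *     blk_aio_readv(blk, 0, &qiov, 1, read_done, s);
 */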

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_flush(blk->bs, cb, opaque);
}

BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                            int64_t sector_num, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_discard(blk->bs, sector_num, nb_sectors, cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
{
    int i, ret;

    for (i = 0; i < num_reqs; i++) {
        ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
        if (ret < 0) {
            return ret;
        }
    }

    return bdrv_aio_multiwrite(blk->bs, reqs, num_reqs);
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    return bdrv_ioctl(blk->bs, req, buf);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_ioctl(blk->bs, req, buf, cb, opaque);
}

int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_discard(blk->bs, sector_num, nb_sectors);
}

int blk_co_flush(BlockBackend *blk)
{
    return bdrv_co_flush(blk->bs);
}

int blk_flush(BlockBackend *blk)
{
    return bdrv_flush(blk->bs);
}

int blk_flush_all(void)
{
    return bdrv_flush_all();
}

void blk_drain(BlockBackend *blk)
{
    bdrv_drain(blk->bs);
}

void blk_drain_all(void)
{
    bdrv_drain_all();
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return bdrv_get_on_error(blk->bs, is_read);
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    return bdrv_get_error_action(blk->bs, is_read, error);
}

void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    bdrv_error_action(blk->bs, action, is_read, error);
}

int blk_is_read_only(BlockBackend *blk)
{
    return bdrv_is_read_only(blk->bs);
}

int blk_is_sg(BlockBackend *blk)
{
    return bdrv_is_sg(blk->bs);
}

int blk_enable_write_cache(BlockBackend *blk)
{
    return bdrv_enable_write_cache(blk->bs);
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    bdrv_set_enable_write_cache(blk->bs, wce);
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    bdrv_invalidate_cache(blk->bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    return blk->bs && bdrv_is_inserted(blk->bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    bdrv_lock_medium(blk->bs, locked);
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    bdrv_eject(blk->bs, eject_flag);
}

int blk_get_flags(BlockBackend *blk)
{
    return bdrv_get_flags(blk->bs);
}

int blk_get_max_transfer_length(BlockBackend *blk)
{
    return blk->bs->bl.max_transfer_length;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    bdrv_set_guest_block_size(blk->bs, align);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk->bs : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    return bdrv_op_is_blocked(blk->bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    bdrv_op_unblock(blk->bs, op, reason);
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    bdrv_op_block_all(blk->bs, reason);
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    bdrv_op_unblock_all(blk->bs, reason);
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_get_aio_context(blk->bs);
    } else {
        return qemu_get_aio_context();
    }
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    bdrv_set_aio_context(blk->bs, new_context);
}
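
/*
 * Sketch of moving a backend into an IOThread's AioContext
 * (hypothetical caller; iothread_get_aio_context() is assumed to be the
 * usual accessor for an IOThread's context):
 *
 *     AioContext *ctx = iothread_get_aio_context(iothread);
 *     blk_set_aio_context(blk, ctx);
 *
 * After this, completion callbacks for @blk's requests run in that
 * context rather than the main loop.
 */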

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    bdrv_add_aio_context_notifier(blk->bs, attached_aio_context,
                                  detach_aio_context, opaque);
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    bdrv_remove_aio_context_notifier(blk->bs, attached_aio_context,
                                     detach_aio_context, opaque);
}

void blk_add_close_notifier(BlockBackend *blk, Notifier *notify)
{
    bdrv_add_close_notifier(blk->bs, notify);
}

void blk_io_plug(BlockBackend *blk)
{
    bdrv_io_plug(blk->bs);
}

void blk_io_unplug(BlockBackend *blk)
{
    bdrv_io_unplug(blk->bs);
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return bdrv_get_stats(blk->bs);
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}

int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                         const uint8_t *buf, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_compressed(blk->bs, sector_num, buf, nb_sectors);
}

int blk_truncate(BlockBackend *blk, int64_t offset)
{
    return bdrv_truncate(blk->bs, offset);
}

int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_discard(blk->bs, sector_num, nb_sectors);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    return bdrv_save_vmstate(blk->bs, buf, pos, size);
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    return bdrv_load_vmstate(blk->bs, buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    return bdrv_probe_blocksizes(blk->bs, bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    return bdrv_probe_geometry(blk->bs, geo);
}