/*
 * QEMU Block backends
 *
 * Copyright (C) 2014-2016 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later. See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi-event.h"
#include "qemu/id.h"
#include "trace.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

struct BlockBackend {
    char *name;
    int refcnt;
    BdrvChild *root;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
    BlockBackendPublic public;

    void *dev;                  /* attached device model, if any */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    bool enable_write_cache;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    bool allow_write_beyond_eof;

    NotifierList remove_bs_notifiers, insert_bs_notifiers;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    QEMUBH *bh;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);
static BlockBackend *bdrv_first_blk(BlockDriverState *bs);

/* All BlockBackends */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    QTAILQ_HEAD_INITIALIZER(block_backends);

/* All BlockBackends referenced by the monitor; these are the ones iterated
 * over by blk_next() */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);

static void blk_root_inherit_options(int *child_flags, QDict *child_options,
                                     int parent_flags, QDict *parent_options)
{
    /* We're not supposed to call this function for root nodes */
    abort();
}
static void blk_root_drained_begin(BdrvChild *child);
static void blk_root_drained_end(BdrvChild *child);

static void blk_root_change_media(BdrvChild *child, bool load);
static void blk_root_resize(BdrvChild *child);

static const char *blk_root_get_name(BdrvChild *child)
{
    return blk_name(child->opaque);
}

static const BdrvChildRole child_root = {
    .inherit_options = blk_root_inherit_options,

    .change_media    = blk_root_change_media,
    .resize          = blk_root_resize,
    .get_name        = blk_root_get_name,

    .drained_begin   = blk_root_drained_begin,
    .drained_end     = blk_root_drained_end,
};

/*
 * Create a new BlockBackend with a reference count of one.
 * This function cannot fail; it takes no Error ** parameter and
 * always returns a valid BlockBackend.
 */
BlockBackend *blk_new(void)
{
    BlockBackend *blk;

    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    blk_set_enable_write_cache(blk, true);

    qemu_co_queue_init(&blk->public.throttled_reqs[0]);
    qemu_co_queue_init(&blk->public.throttled_reqs[1]);

    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);

    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
    return blk;
}

/*
 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new();
    bs = bdrv_open(filename, reference, options, flags, errp);
    if (!bs) {
        blk_unref(blk);
        return NULL;
    }

    blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk);

    return blk;
}
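
/*
 * Usage sketch (illustrative only; no caller in this file does exactly
 * this): open an image read-write and release the handle when done.
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new_open("test.qcow2", NULL, NULL,
 *                                      BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *     } else {
 *         ... I/O via blk_pread()/blk_pwrite() ...
 *         blk_unref(blk);
 *     }
 */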

static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
    if (blk->root) {
        blk_remove_bs(blk);
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}

int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}

/*
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 * ones which are hidden (i.e. are not referenced by the monitor).
 */
static BlockBackend *blk_all_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
}

void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->root) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}

/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}

/* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
 * the monitor or attached to a BlockBackend */
BlockDriverState *bdrv_next(BdrvNextIterator *it)
{
    BlockDriverState *bs;

    /* First, return all root nodes of BlockBackends. In order to avoid
     * returning a BDS twice when multiple BBs refer to it, we only return it
     * if the BB is the first one in the parent list of the BDS. */
    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        do {
            it->blk = blk_all_next(it->blk);
            bs = it->blk ? blk_bs(it->blk) : NULL;
        } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk));

        if (bs) {
            return bs;
        }
        it->phase = BDRV_NEXT_MONITOR_OWNED;
    }

    /* Then return the monitor-owned BDSes without a BB attached. Ignore all
     * BDSes that are attached to a BlockBackend here; they have been handled
     * by the above block already */
    do {
        it->bs = bdrv_next_monitor_owned(it->bs);
        bs = it->bs;
    } while (bs && bdrv_has_blk(bs));

    return bs;
}

BlockDriverState *bdrv_first(BdrvNextIterator *it)
{
    *it = (BdrvNextIterator) {
        .phase = BDRV_NEXT_BACKEND_ROOTS,
    };

    return bdrv_next(it);
}
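
/*
 * The intended iteration idiom, mirroring the blk_next() loop documented
 * above (a sketch; the actual callers live elsewhere in the tree):
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         ... visits each top-level BDS exactly once ...
 *     }
 */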

/*
 * Add a BlockBackend into the list of backends referenced by the monitor, with
 * the given @name acting as the handle for the monitor.
 * Strictly for use by blockdev.c.
 *
 * @name must not be null or empty.
 *
 * Returns true on success and false on failure. In the latter case, an Error
 * object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(!blk->name);
    assert(name && name[0]);

    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }

    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}

/*
 * Remove a BlockBackend from the list of backends referenced by the monitor.
 * Strictly for use by blockdev.c.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    if (!blk->name) {
        return;
    }

    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    g_free(blk->name);
    blk->name = NULL;
}

/*
 * Return @blk's name, a non-null string.
 * Returns an empty string iff @blk is not referenced by the monitor.
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name ?: "";
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->root ? blk->root->bs : NULL;
}

static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
{
    BdrvChild *child;
    QLIST_FOREACH(child, &bs->parents, next_parent) {
        if (child->role == &child_root) {
            return child->opaque;
        }
    }

    return NULL;
}

/*
 * Returns true if @bs has an associated BlockBackend.
 */
bool bdrv_has_blk(BlockDriverState *bs)
{
    return bdrv_first_blk(bs) != NULL;
}

/*
 * Returns true if @bs has only BlockBackends as parents.
 */
bool bdrv_is_root_node(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role != &child_root) {
            return false;
        }
    }

    return true;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Returns a pointer to the publicly accessible fields of @blk.
 */
BlockBackendPublic *blk_get_public(BlockBackend *blk)
{
    return &blk->public;
}

/*
 * Returns a BlockBackend given the associated @public fields.
 */
BlockBackend *blk_by_public(BlockBackendPublic *public)
{
    return container_of(public, BlockBackend, public);
}

/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    notifier_list_notify(&blk->remove_bs_notifiers, blk);
    if (blk->public.throttle_state) {
        throttle_timers_detach_aio_context(&blk->public.throttle_timers);
    }

    blk_update_root_state(blk);

    bdrv_root_unref_child(blk->root);
    blk->root = NULL;
}

/*
 * Associates a new BlockDriverState with @blk.
 */
void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs)
{
    bdrv_ref(bs);
    blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk);

    notifier_list_notify(&blk->insert_bs_notifiers, blk);
    if (blk->public.throttle_state) {
        throttle_timers_attach_aio_context(
            &blk->public.throttle_timers, bdrv_get_aio_context(bs));
    }
}
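
/*
 * Together, blk_remove_bs() and blk_insert_bs() implement medium changes.
 * A sketch of the idiom (illustrative; @new_bs is a hypothetical node the
 * caller already holds its own reference to):
 *
 *     if (blk_bs(blk)) {
 *         blk_remove_bs(blk);
 *     }
 *     blk_insert_bs(blk, new_bs);  (takes its own reference via bdrv_ref())
 */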

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (blk->dev) {
        return -EBUSY;
    }
    blk_ref(blk);
    blk->dev = dev;
    blk_iostatus_reset(blk);
    return 0;
}

/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
{
    if (blk_attach_dev(blk, dev) < 0) {
        abort();
    }
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}

/*
 * Return the BlockBackend which has the device model @dev attached if it
 * exists, else null.
 *
 * @dev must not be null.
 */
BlockBackend *blk_by_dev(void *dev)
{
    BlockBackend *blk = NULL;

    assert(dev != NULL);
    while ((blk = blk_all_next(blk)) != NULL) {
        if (blk->dev == dev) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;
}
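
/*
 * A device model typically registers its callbacks once, at realize time.
 * Sketch (illustrative; the my_* callback names are hypothetical):
 *
 *     static const BlockDevOps my_block_ops = {
 *         .change_media_cb  = my_change_media_cb,
 *         .eject_request_cb = my_eject_request_cb,
 *         .is_tray_open     = my_is_tray_open,
 *         .resize_cb        = my_resize_cb,
 *     };
 *
 *     blk_set_dev_ops(blk, &my_block_ops, dev);
 */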

/*
 * Notify @blk's attached device model of media change.
 * If @load is true, notify of media load.
 * Else, notify of media eject.
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load);
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            qapi_event_send_device_tray_moved(blk_name(blk), tray_is_open,
                                              &error_abort);
        }
    }
}

static void blk_root_change_media(BdrvChild *child, bool load)
{
    blk_dev_change_media_cb(child->opaque, load);
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
static void blk_root_resize(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
           (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        BlockDriverState *bs = blk_bs(blk);
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs && bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}

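/*
 * Validate an I/O request against @blk: reject oversized (> INT_MAX) and
 * negative-offset requests with -EIO, fail with -ENOMEDIUM when no medium
 * is available, and, unless writes beyond EOF were explicitly allowed with
 * blk_set_allow_write_beyond_eof(), reject requests extending past the end
 * of the backend. Returns 0 if the request is acceptable, a negative errno
 * otherwise (including errors propagated from blk_getlength()).
 */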
static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }

        if (offset > len || len - offset < size) {
            return -EIO;
        }
    }

    return 0;
}

int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                               unsigned int bytes, QEMUIOVector *qiov,
                               BdrvRequestFlags flags)
{
    int ret;

    trace_blk_co_preadv(blk, blk_bs(blk), offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* throttling disk I/O */
    if (blk->public.throttle_state) {
        throttle_group_co_io_limits_intercept(blk, bytes, false);
    }

    return bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
}

int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                unsigned int bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
{
    int ret;

    trace_blk_co_pwritev(blk, blk_bs(blk), offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    /* throttling disk I/O */
    if (blk->public.throttle_state) {
        throttle_group_co_io_limits_intercept(blk, bytes, true);
    }

    if (!blk->enable_write_cache) {
        flags |= BDRV_REQ_FUA;
    }

    return bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags);
}

typedef struct BlkRwCo {
    BlockBackend *blk;
    int64_t offset;
    QEMUIOVector *qiov;
    int ret;
    BdrvRequestFlags flags;
} BlkRwCo;

static void blk_read_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;

    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, rwco->qiov->size,
                              rwco->qiov, rwco->flags);
}

static void blk_write_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;

    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, rwco->qiov->size,
                               rwco->qiov, rwco->flags);
}

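/*
 * Emulate a synchronous read or write: run @co_entry (blk_read_entry() or
 * blk_write_entry()) in a coroutine on a single-buffer QEMUIOVector, then
 * poll the backend's AioContext until the coroutine replaces the NOT_DONE
 * sentinel in rwco.ret with the actual return value.
 */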
static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
                   int64_t bytes, CoroutineEntry co_entry,
                   BdrvRequestFlags flags)
{
    AioContext *aio_context;
    QEMUIOVector qiov;
    struct iovec iov;
    Coroutine *co;
    BlkRwCo rwco;

    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = bytes,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .qiov   = &qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };

    co = qemu_coroutine_create(co_entry, &rwco);
    qemu_coroutine_enter(co);

    aio_context = blk_get_aio_context(blk);
    while (rwco.ret == NOT_DONE) {
        aio_poll(aio_context, true);
    }

    return rwco.ret;
}

int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
                          int count)
{
    int ret;

    ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    blk_root_drained_begin(blk->root);
    ret = blk_pread(blk, offset, buf, count);
    blk_root_drained_end(blk->root);
    return ret;
}

int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                      int count, BdrvRequestFlags flags)
{
    return blk_prw(blk, offset, NULL, count, blk_write_entry,
                   flags | BDRV_REQ_ZERO_WRITE);
}

int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
{
    return bdrv_make_zero(blk->root, flags);
}

static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;
    QEMUBH *bh;

    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return &acb->common;
}

typedef struct BlkAioEmAIOCB {
    BlockAIOCB common;
    BlkRwCo rwco;
    int bytes;
    bool has_returned;
    QEMUBH *bh;
} BlkAioEmAIOCB;

static const AIOCBInfo blk_aio_em_aiocb_info = {
    .aiocb_size = sizeof(BlkAioEmAIOCB),
};

static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
    if (acb->bh) {
        assert(acb->has_returned);
        qemu_bh_delete(acb->bh);
    }
    if (acb->has_returned) {
        acb->common.cb(acb->common.opaque, acb->rwco.ret);
        qemu_aio_unref(acb);
    }
}

static void blk_aio_complete_bh(void *opaque)
{
    blk_aio_complete(opaque);
}

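/*
 * Common AIO emulation: run @co_entry in a coroutine and complete through
 * @cb. If the request finishes before qemu_coroutine_enter() returns,
 * blk_aio_complete() has already run with has_returned still false and done
 * nothing; completion is then re-driven from a bottom half so that the
 * callback is never invoked synchronously from within this function.
 */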
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
                                QEMUIOVector *qiov, CoroutineEntry co_entry,
                                BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .qiov   = qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };
    acb->bytes = bytes;
    acb->bh = NULL;
    acb->has_returned = false;

    co = qemu_coroutine_create(co_entry, acb);
    qemu_coroutine_enter(co);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        acb->bh = aio_bh_new(blk_get_aio_context(blk), blk_aio_complete_bh,
                             acb);
        qemu_bh_schedule(acb->bh);
    }

    return &acb->common;
}

static void blk_aio_read_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    assert(rwco->qiov->size == acb->bytes);
    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
                              rwco->qiov, rwco->flags);
    blk_aio_complete(acb);
}

static void blk_aio_write_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    assert(!rwco->qiov || rwco->qiov->size == acb->bytes);
    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
                               rwco->qiov, rwco->flags);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                  int count, BdrvRequestFlags flags,
                                  BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry,
                        flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
    if (ret < 0) {
        return ret;
    }
    return count;
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
               BdrvRequestFlags flags)
{
    int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                      flags);
    if (ret < 0) {
        return ret;
    }
    return count;
}
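
/*
 * Synchronous usage sketch (illustrative): read the first sector into a
 * stack buffer and write it back unchanged. On success both calls return
 * @count; short transfers are never reported.
 *
 *     uint8_t buf[BDRV_SECTOR_SIZE];
 *
 *     if (blk_pread(blk, 0, buf, sizeof(buf)) < 0) {
 *         ... handle error ...
 *     }
 *     if (blk_pwrite(blk, 0, buf, sizeof(buf), 0) < 0) {
 *         ... handle error ...
 *     }
 */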

int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk_bs(blk));
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk_bs(blk)) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk_bs(blk));
}

BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
                           QEMUIOVector *qiov, BdrvRequestFlags flags,
                           BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_read_entry, flags, cb, opaque);
}

BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                            QEMUIOVector *qiov, BdrvRequestFlags flags,
                            BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_write_entry, flags, cb, opaque);
}
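
/*
 * Asynchronous usage sketch (illustrative; my_read_cb and my_opaque are
 * hypothetical). The callback runs in the backend's AioContext once the
 * request completes.
 *
 *     static void my_read_cb(void *opaque, int ret)
 *     {
 *         if (ret < 0) {
 *             ... handle error ...
 *         }
 *         ... consume the data ...
 *     }
 *
 *     qemu_iovec_init(&qiov, 1);
 *     qemu_iovec_add(&qiov, buf, len);
 *     blk_aio_preadv(blk, 0, &qiov, 0, my_read_cb, my_opaque);
 */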

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_flush(blk_bs(blk), cb, opaque);
}

BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
                             int64_t offset, int count,
                             BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return blk_abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_pdiscard(blk_bs(blk), offset, count, cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_ioctl(blk_bs(blk), req, buf);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    }

    return bdrv_aio_ioctl(blk_bs(blk), req, buf, cb, opaque);
}

int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pdiscard(blk_bs(blk), offset, count);
}

int blk_co_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk_bs(blk));
}

int blk_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_flush(blk_bs(blk));
}

void blk_drain(BlockBackend *blk)
{
    if (blk_bs(blk)) {
        bdrv_drain(blk_bs(blk));
    }
}

void blk_drain_all(void)
{
    bdrv_drain_all();
}

void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    case BLOCKDEV_ON_ERROR_AUTO:
    default:
        abort();
    }
}

static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), optype, action,
                                   blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error),
                                   &error_abort);
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects. First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event. Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop. In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}
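
/*
 * Device models pair the two helpers above. A sketch of the idiom
 * (illustrative, modeled on how emulated storage devices react to a
 * request that failed with errno @error):
 *
 *     BlockErrorAction action = blk_get_error_action(blk, is_read, error);
 *
 *     if (action == BLOCK_ERROR_ACTION_STOP) {
 *         ... queue the request for retry after the VM resumes ...
 *     }
 *     blk_error_action(blk, action, is_read, error);
 */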

int blk_is_read_only(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_is_read_only(bs);
    } else {
        return blk->root_state.read_only;
    }
}

int blk_is_sg(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return 0;
    }

    return bdrv_is_sg(bs);
}

int blk_enable_write_cache(BlockBackend *blk)
{
    return blk->enable_write_cache;
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    blk->enable_write_cache = wce;
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs && bdrv_is_inserted(bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_lock_medium(bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_eject(bs, eject_flag);
    }
}

int blk_get_flags(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_flags(bs);
    } else {
        return blk->root_state.open_flags;
    }
}

/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
uint32_t blk_get_max_transfer(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    uint32_t max = 0;

    if (bs) {
        max = bs->bl.max_transfer;
    }
    return MIN_NON_ZERO(max, INT_MAX);
}

int blk_get_max_iov(BlockBackend *blk)
{
    return blk->root->bs->bl.max_iov;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}

void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_op_is_blocked(bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock(bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_block_all(bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock_all(bs, reason);
    }
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_aio_context(bs);
    } else {
        return qemu_get_aio_context();
    }
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        if (blk->public.throttle_state) {
            throttle_timers_detach_aio_context(&blk->public.throttle_timers);
        }
        bdrv_set_aio_context(bs, new_context);
        if (blk->public.throttle_state) {
            throttle_timers_attach_aio_context(&blk->public.throttle_timers,
                                               new_context);
        }
    }
}

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_add_aio_context_notifier(bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }
}

void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}

void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}

void blk_io_plug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_plug(bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_unplug(bs);
    }
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                      int count, BdrvRequestFlags flags)
{
    return blk_co_pwritev(blk, offset, count, NULL,
                          flags | BDRV_REQ_ZERO_WRITE);
}

int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
                          int count)
{
    return blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                   BDRV_REQ_WRITE_COMPRESSED);
}

int blk_truncate(BlockBackend *blk, int64_t offset)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk_bs(blk), offset);
}

int blk_pdiscard(BlockBackend *blk, int64_t offset, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pdiscard(blk_bs(blk), offset, count);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    int ret;

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
    if (ret < 0) {
        return ret;
    }

    if (ret == size && !blk->enable_write_cache) {
        ret = bdrv_flush(blk_bs(blk));
    }

    return ret < 0 ? ret : size;
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk_bs(blk), geo);
}

/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->root);

    blk->root_state.open_flags    = blk->root->bs->open_flags;
    blk->root_state.read_only     = blk->root->bs->read_only;
    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
}

/*
 * Returns the detect-zeroes setting to be used for bdrv_open() of a
 * BlockDriverState which is supposed to inherit the root state.
 */
bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
{
    return blk->root_state.detect_zeroes;
}

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}

BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}

int blk_commit_all(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk) && blk->root->bs->backing) {
            int ret = bdrv_commit(blk->root->bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}


/* throttling disk I/O limits */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
{
    throttle_group_config(blk, cfg);
}

void blk_io_limits_disable(BlockBackend *blk)
{
    assert(blk->public.throttle_state);
    bdrv_drained_begin(blk_bs(blk));
    throttle_group_unregister_blk(blk);
    bdrv_drained_end(blk_bs(blk));
}

/* Should be called before blk_set_io_limits() if a limit is to be set */
void blk_io_limits_enable(BlockBackend *blk, const char *group)
{
    assert(!blk->public.throttle_state);
    throttle_group_register_blk(blk, group);
}

void blk_io_limits_update_group(BlockBackend *blk, const char *group)
{
    /* this BB is not part of any group */
    if (!blk->public.throttle_state) {
        return;
    }

    /* this BB is already part of the group we want */
    if (!g_strcmp0(throttle_group_get_name(blk), group)) {
        return;
    }

    /* need to change the group this BB belongs to */
    blk_io_limits_disable(blk);
    blk_io_limits_enable(blk, group);
}
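
/*
 * Usage sketch (illustrative; assumes a caller-built ThrottleConfig and the
 * throttle_config_init()/THROTTLE_BPS_TOTAL API from qemu/throttle.h). Here
 * total throughput is capped at 10 MB/s and the group name is arbitrary:
 *
 *     ThrottleConfig cfg;
 *
 *     throttle_config_init(&cfg);
 *     cfg.buckets[THROTTLE_BPS_TOTAL].avg = 10 * 1024 * 1024;
 *
 *     blk_io_limits_enable(blk, "group0");
 *     blk_set_io_limits(blk, &cfg);
 */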

static void blk_root_drained_begin(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    /* Note that blk->root may not be accessible here yet if we are just
     * attaching to a BlockDriverState that is drained. Use child instead. */

    if (blk->public.io_limits_disabled++ == 0) {
        throttle_group_restart_blk(blk);
    }
}

static void blk_root_drained_end(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    assert(blk->public.io_limits_disabled);
    --blk->public.io_limits_disabled;
}