/*
 * QEMU Block backends
 *
 * Copyright (C) 2014-2016 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later. See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block.h"
#include "qemu/id.h"
#include "qemu/option.h"
#include "trace.h"
#include "migration/misc.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

typedef struct BlockBackendAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);
    void *opaque;
    QLIST_ENTRY(BlockBackendAioNotifier) list;
} BlockBackendAioNotifier;

struct BlockBackend {
    char *name;
    int refcnt;
    BdrvChild *root;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
    BlockBackendPublic public;

    void *dev;                  /* attached device model, if any */
    bool legacy_dev;            /* true if dev is not a DeviceState */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    bool enable_write_cache;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    uint64_t perm;
    uint64_t shared_perm;
    bool disable_perm;

    bool allow_write_beyond_eof;

    NotifierList remove_bs_notifiers, insert_bs_notifiers;
    QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;

    int quiesce_counter;
    VMChangeStateEntry *vmsh;
    bool force_allow_inactivate;

    /* Number of in-flight aio requests. BlockDriverState also counts
     * in-flight requests but aio requests can exist even when blk->root is
     * NULL, so we cannot rely on its counter for that case.
     * Accessed with atomic ops.
     */
    unsigned int in_flight;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);
static BlockBackend *bdrv_first_blk(BlockDriverState *bs);

/* All BlockBackends */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    QTAILQ_HEAD_INITIALIZER(block_backends);

/* All BlockBackends referenced by the monitor and which are iterated through by
 * blk_next() */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);

static void blk_root_inherit_options(int *child_flags, QDict *child_options,
                                     int parent_flags, QDict *parent_options)
{
    /* We're not supposed to call this function for root nodes */
    abort();
}
static void blk_root_drained_begin(BdrvChild *child);
static bool blk_root_drained_poll(BdrvChild *child);
static void blk_root_drained_end(BdrvChild *child);

static void blk_root_change_media(BdrvChild *child, bool load);
static void blk_root_resize(BdrvChild *child);

static char *blk_root_get_parent_desc(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    char *dev_id;

    if (blk->name) {
        return g_strdup(blk->name);
    }

    dev_id = blk_get_attached_dev_id(blk);
    if (*dev_id) {
        return dev_id;
    } else {
        /* TODO Callback into the BB owner for something more detailed */
        g_free(dev_id);
        return g_strdup("a block device");
    }
}

static const char *blk_root_get_name(BdrvChild *child)
{
    return blk_name(child->opaque);
}

static void blk_vm_state_changed(void *opaque, int running, RunState state)
{
    Error *local_err = NULL;
    BlockBackend *blk = opaque;

    if (state == RUN_STATE_INMIGRATE) {
        return;
    }

    qemu_del_vm_change_state_handler(blk->vmsh);
    blk->vmsh = NULL;
    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
}

/*
 * Notifies the user of the BlockBackend that migration has completed. qdev
 * devices can tighten their permissions in response (specifically revoke
 * shared write permissions that we needed for storage migration).
 *
 * If an error is returned, the VM cannot be allowed to be resumed.
 */
static void blk_root_activate(BdrvChild *child, Error **errp)
{
    BlockBackend *blk = child->opaque;
    Error *local_err = NULL;

    if (!blk->disable_perm) {
        return;
    }

    blk->disable_perm = false;

    blk_set_perm(blk, blk->perm, BLK_PERM_ALL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
        return;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* Activation can happen when migration process is still active, for
         * example when nbd_server_add is called during non-shared storage
         * migration. Defer the shared_perm update to migration completion. */
        if (!blk->vmsh) {
            blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed,
                                                         blk);
        }
        return;
    }

    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        blk->disable_perm = true;
        return;
    }
}

void blk_set_force_allow_inactivate(BlockBackend *blk)
{
    blk->force_allow_inactivate = true;
}

static bool blk_can_inactivate(BlockBackend *blk)
{
    /* If it is a guest device, inactivate is ok. */
    if (blk->dev || blk_name(blk)[0]) {
        return true;
    }

    /* Inactivating means no more writes to the image can be done,
     * even if those writes would be changes invisible to the
     * guest. For block job BBs that satisfy this, we can just allow
     * it. This is the case for mirror job source, which is required
     * by libvirt non-shared block migration. */
    if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) {
        return true;
    }

    return blk->force_allow_inactivate;
}

static int blk_root_inactivate(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->disable_perm) {
        return 0;
    }

    if (!blk_can_inactivate(blk)) {
        return -EPERM;
    }

    blk->disable_perm = true;
    if (blk->root) {
        bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort);
    }

    return 0;
}

static void blk_root_attach(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    BlockBackendAioNotifier *notifier;

    trace_blk_root_attach(child, blk, child->bs);

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        bdrv_add_aio_context_notifier(child->bs,
                notifier->attached_aio_context,
                notifier->detach_aio_context,
                notifier->opaque);
    }
}

static void blk_root_detach(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    BlockBackendAioNotifier *notifier;

    trace_blk_root_detach(child, blk, child->bs);

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        bdrv_remove_aio_context_notifier(child->bs,
                notifier->attached_aio_context,
                notifier->detach_aio_context,
                notifier->opaque);
    }
}

static const BdrvChildRole child_root = {
    .inherit_options = blk_root_inherit_options,

    .change_media = blk_root_change_media,
    .resize = blk_root_resize,
    .get_name = blk_root_get_name,
    .get_parent_desc = blk_root_get_parent_desc,

    .drained_begin = blk_root_drained_begin,
    .drained_poll = blk_root_drained_poll,
    .drained_end = blk_root_drained_end,

    .activate = blk_root_activate,
    .inactivate = blk_root_inactivate,

    .attach = blk_root_attach,
    .detach = blk_root_detach,
};

/*
 * Create a new BlockBackend with a reference count of one.
 *
 * @perm is a bitmask of BLK_PERM_* constants which describes the permissions
 * to request for a block driver node that is attached to this BlockBackend.
 * @shared_perm is a bitmask which describes which permissions may be granted
 * to other users of the attached node.
 * Both sets of permissions can be changed later using blk_set_perm().
 *
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(uint64_t perm, uint64_t shared_perm)
{
    BlockBackend *blk;

    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    blk->perm = perm;
    blk->shared_perm = shared_perm;
    blk_set_enable_write_cache(blk, true);

    blk->on_read_error = BLOCKDEV_ON_ERROR_REPORT;
    blk->on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;

    block_acct_init(&blk->stats);

    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);
    QLIST_INIT(&blk->aio_notifiers);

    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
    return blk;
}
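
/*
 * A minimal caller sketch for blk_new() (illustrative only, not taken from
 * any specific user of this API): request read access, share everything,
 * and drop the initial reference when done.
 *
 *     BlockBackend *blk = blk_new(BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
 *     ...
 *     blk_unref(blk);
 */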

/*
 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    uint64_t perm = 0;

    /* blk_new_open() is mainly used in .bdrv_create implementations and the
     * tools where sharing isn't a concern because the BDS stays private, so we
     * just request permission according to the flags.
     *
     * The exceptions are xen_disk and blockdev_init(); in these cases, the
     * caller of blk_new_open() doesn't make use of the permissions, but they
     * shouldn't hurt either. We can still share everything here because the
     * guest devices will add their own blockers if they can't share. */
    if ((flags & BDRV_O_NO_IO) == 0) {
        perm |= BLK_PERM_CONSISTENT_READ;
        if (flags & BDRV_O_RDWR) {
            perm |= BLK_PERM_WRITE;
        }
    }
    if (flags & BDRV_O_RESIZE) {
        perm |= BLK_PERM_RESIZE;
    }

    blk = blk_new(perm, BLK_PERM_ALL);
    bs = bdrv_open(filename, reference, options, flags, errp);
    if (!bs) {
        blk_unref(blk);
        return NULL;
    }

    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                       perm, BLK_PERM_ALL, blk, errp);
    if (!blk->root) {
        bdrv_unref(bs);
        blk_unref(blk);
        return NULL;
    }

    return blk;
}
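
/*
 * Sketch of how a tool might use blk_new_open(); the filename and flags are
 * hypothetical, and error handling follows the usual Error convention:
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new_open("disk.qcow2", NULL, NULL,
 *                                      BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *         return -1;
 *     }
 *     ...
 *     blk_unref(blk);
 */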

static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
    if (blk->public.throttle_group_member.throttle_state) {
        blk_io_limits_disable(blk);
    }
    if (blk->root) {
        blk_remove_bs(blk);
    }
    if (blk->vmsh) {
        qemu_del_vm_change_state_handler(blk->vmsh);
        blk->vmsh = NULL;
    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->aio_notifiers));
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo);
}

int blk_get_refcnt(BlockBackend *blk)
{
    return blk ? blk->refcnt : 0;
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    assert(blk->refcnt > 0);
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (blk->refcnt > 1) {
            blk->refcnt--;
        } else {
            blk_drain(blk);
            /* blk_drain() cannot resurrect blk, nobody held a reference */
            assert(blk->refcnt == 1);
            blk->refcnt = 0;
            blk_delete(blk);
        }
    }
}

/*
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 * ones which are hidden (i.e. are not referenced by the monitor).
 */
BlockBackend *blk_all_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
}

void blk_remove_all_bs(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);
        if (blk->root) {
            blk_remove_bs(blk);
        }
        aio_context_release(ctx);
    }
}

/*
 * Return the monitor-owned BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, monitor_link)
               : QTAILQ_FIRST(&monitor_block_backends);
}

/* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
 * the monitor or attached to a BlockBackend */
BlockDriverState *bdrv_next(BdrvNextIterator *it)
{
    BlockDriverState *bs, *old_bs;

    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    /* First, return all root nodes of BlockBackends. In order to avoid
     * returning a BDS twice when multiple BBs refer to it, we only return it
     * if the BB is the first one in the parent list of the BDS. */
    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        BlockBackend *old_blk = it->blk;

        old_bs = old_blk ? blk_bs(old_blk) : NULL;

        do {
            it->blk = blk_all_next(it->blk);
            bs = it->blk ? blk_bs(it->blk) : NULL;
        } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk));

        if (it->blk) {
            blk_ref(it->blk);
        }
        blk_unref(old_blk);

        if (bs) {
            bdrv_ref(bs);
            bdrv_unref(old_bs);
            return bs;
        }
        it->phase = BDRV_NEXT_MONITOR_OWNED;
    } else {
        old_bs = it->bs;
    }

    /* Then return the monitor-owned BDSes without a BB attached. Ignore all
     * BDSes that are attached to a BlockBackend here; they have been handled
     * by the above block already */
    do {
        it->bs = bdrv_next_monitor_owned(it->bs);
        bs = it->bs;
    } while (bs && bdrv_has_blk(bs));

    if (bs) {
        bdrv_ref(bs);
    }
    bdrv_unref(old_bs);

    return bs;
}

static void bdrv_next_reset(BdrvNextIterator *it)
{
    *it = (BdrvNextIterator) {
        .phase = BDRV_NEXT_BACKEND_ROOTS,
    };
}

BlockDriverState *bdrv_first(BdrvNextIterator *it)
{
    bdrv_next_reset(it);
    return bdrv_next(it);
}

/* Must be called when aborting a bdrv_next() iteration before
 * bdrv_next() returns NULL */
void bdrv_next_cleanup(BdrvNextIterator *it)
{
    /* Must be called from the main loop */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        if (it->blk) {
            bdrv_unref(blk_bs(it->blk));
            blk_unref(it->blk);
        }
    } else {
        bdrv_unref(it->bs);
    }

    bdrv_next_reset(it);
}
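
/*
 * The intended iteration pattern (sketch; should_stop() is a hypothetical
 * predicate). bdrv_next_cleanup() is only required when bailing out before
 * bdrv_next() has returned NULL:
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         if (should_stop(bs)) {
 *             bdrv_next_cleanup(&it);
 *             break;
 *         }
 *     }
 */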

/*
 * Add a BlockBackend into the list of backends referenced by the monitor, with
 * the given @name acting as the handle for the monitor.
 * Strictly for use by blockdev.c.
 *
 * @name must not be null or empty.
 *
 * Returns true on success and false on failure. In the latter case, an Error
 * object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(!blk->name);
    assert(name && name[0]);

    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }

    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}

/*
 * Remove a BlockBackend from the list of backends referenced by the monitor.
 * Strictly for use by blockdev.c.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    if (!blk->name) {
        return;
    }

    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
    g_free(blk->name);
    blk->name = NULL;
}

/*
 * Return @blk's name, a non-null string.
 * Returns an empty string iff @blk is not referenced by the monitor.
 */
const char *blk_name(const BlockBackend *blk)
{
    return blk->name ?: "";
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk = NULL;

    assert(name);
    while ((blk = blk_next(blk)) != NULL) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->root ? blk->root->bs : NULL;
}

static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
{
    BdrvChild *child;
    QLIST_FOREACH(child, &bs->parents, next_parent) {
        if (child->role == &child_root) {
            return child->opaque;
        }
    }

    return NULL;
}

/*
 * Returns true if @bs has an associated BlockBackend.
 */
bool bdrv_has_blk(BlockDriverState *bs)
{
    return bdrv_first_blk(bs) != NULL;
}

/*
 * Returns true if @bs has only BlockBackends as parents.
 */
bool bdrv_is_root_node(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role != &child_root) {
            return false;
        }
    }

    return true;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_next(blk)) != NULL) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Returns a pointer to the publicly accessible fields of @blk.
 */
BlockBackendPublic *blk_get_public(BlockBackend *blk)
{
    return &blk->public;
}

/*
 * Returns a BlockBackend given the associated @public fields.
 */
BlockBackend *blk_by_public(BlockBackendPublic *public)
{
    return container_of(public, BlockBackend, public);
}

/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
{
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    BlockDriverState *bs;

    notifier_list_notify(&blk->remove_bs_notifiers, blk);
    if (tgm->throttle_state) {
        bs = blk_bs(blk);
        bdrv_drained_begin(bs);
        throttle_group_detach_aio_context(tgm);
        throttle_group_attach_aio_context(tgm, qemu_get_aio_context());
        bdrv_drained_end(bs);
    }

    blk_update_root_state(blk);

    /* bdrv_root_unref_child() will cause blk->root to become stale and may
     * switch to a completion coroutine later on. Let's drain all I/O here
     * to avoid that and a potential QEMU crash.
     */
    blk_drain(blk);
    bdrv_root_unref_child(blk->root);
    blk->root = NULL;
}

/*
 * Associates a new BlockDriverState with @blk.
 */
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
{
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                       blk->perm, blk->shared_perm, blk, errp);
    if (blk->root == NULL) {
        return -EPERM;
    }
    bdrv_ref(bs);

    notifier_list_notify(&blk->insert_bs_notifiers, blk);
    if (tgm->throttle_state) {
        throttle_group_detach_aio_context(tgm);
        throttle_group_attach_aio_context(tgm, bdrv_get_aio_context(bs));
    }

    return 0;
}

/*
 * Sets the permission bitmasks that the user of the BlockBackend needs.
 */
int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
                 Error **errp)
{
    int ret;

    if (blk->root && !blk->disable_perm) {
        ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp);
        if (ret < 0) {
            return ret;
        }
    }

    blk->perm = perm;
    blk->shared_perm = shared_perm;

    return 0;
}

void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
{
    *perm = blk->perm;
    *shared_perm = blk->shared_perm;
}
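
/*
 * Sketch of a permission update, in the spirit of what blk_root_activate()
 * does above: re-request the stored permissions, but stop sharing writes.
 * Purely illustrative; real callers pick masks that fit their use case.
 *
 *     uint64_t perm, shared_perm;
 *     Error *local_err = NULL;
 *
 *     blk_get_perm(blk, &perm, &shared_perm);
 *     if (blk_set_perm(blk, perm, shared_perm & ~BLK_PERM_WRITE,
 *                      &local_err) < 0) {
 *         error_report_err(local_err);
 *     }
 */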

static int blk_do_attach_dev(BlockBackend *blk, void *dev)
{
    if (blk->dev) {
        return -EBUSY;
    }

    /* While migration is still incoming, we don't need to apply the
     * permissions of guest device BlockBackends. We might still have a block
     * job or NBD server writing to the image for storage migration. */
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        blk->disable_perm = true;
    }

    blk_ref(blk);
    blk->dev = dev;
    blk->legacy_dev = false;
    blk_iostatus_reset(blk);

    return 0;
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
{
    return blk_do_attach_dev(blk, dev);
}

/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_legacy(BlockBackend *blk, void *dev)
{
    if (blk_do_attach_dev(blk, dev) < 0) {
        abort();
    }
    blk->legacy_dev = true;
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}

/* Return the qdev ID, or if no ID is assigned the QOM path, of the block
 * device attached to the BlockBackend. */
char *blk_get_attached_dev_id(BlockBackend *blk)
{
    DeviceState *dev;

    assert(!blk->legacy_dev);
    dev = blk->dev;

    if (!dev) {
        return g_strdup("");
    } else if (dev->id) {
        return g_strdup(dev->id);
    }

    return object_get_canonical_path(OBJECT(dev)) ?: g_strdup("");
}

/*
 * Return the BlockBackend which has the device model @dev attached if it
 * exists, else null.
 *
 * @dev must not be null.
 */
BlockBackend *blk_by_dev(void *dev)
{
    BlockBackend *blk = NULL;

    assert(dev != NULL);
    while ((blk = blk_all_next(blk)) != NULL) {
        if (blk->dev == dev) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    /* All drivers that use blk_set_dev_ops() are qdevified and we want to keep
     * it that way, so we can assume blk->dev, if present, is a DeviceState if
     * blk->dev_ops is set. Non-device users may use dev_ops without a device. */
    assert(!blk->legacy_dev);

    blk->dev_ops = ops;
    blk->dev_opaque = opaque;

    /* Are we currently quiesced? Should we enforce this right now? */
    if (blk->quiesce_counter && ops->drained_begin) {
        ops->drained_begin(opaque);
    }
}
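
/*
 * Sketch of a device model registering its callbacks (the mydev_* names are
 * hypothetical stand-ins for whatever the real device defines):
 *
 *     static const BlockDevOps mydev_block_ops = {
 *         .resize_cb     = mydev_resize_cb,
 *         .drained_begin = mydev_drained_begin,
 *         .drained_end   = mydev_drained_end,
 *     };
 *
 *     blk_set_dev_ops(blk, &mydev_block_ops, mydev_state);
 */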

/*
 * Notify @blk's attached device model of media change.
 *
 * If @load is true, notify of media load. This action can fail, meaning that
 * the medium cannot be loaded. @errp is set then.
 *
 * If @load is false, notify of media eject. This can never fail.
 *
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;
        Error *local_err = NULL;

        assert(!blk->legacy_dev);

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err);
        if (local_err) {
            assert(load == true);
            error_propagate(errp, local_err);
            return;
        }
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            char *id = blk_get_attached_dev_id(blk);
            qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open);
            g_free(id);
        }
    }
}

static void blk_root_change_media(BdrvChild *child, bool load)
{
    blk_dev_change_media_cb(child->opaque, load, NULL);
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Does @blk's attached device model have a tray?
 */
bool blk_dev_has_tray(BlockBackend *blk)
{
    return blk->dev_ops && blk->dev_ops->is_tray_open;
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk_dev_has_tray(blk)) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
static void blk_root_resize(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        BlockDriverState *bs = blk_bs(blk);
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs && bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
    blk->allow_write_beyond_eof = allow;
}

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }

        if (offset > len || len - offset < size) {
            return -EIO;
        }
    }

    return 0;
}

int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                               unsigned int bytes, QEMUIOVector *qiov,
                               BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs = blk_bs(blk);

    trace_blk_co_preadv(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);

    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                bytes, false);
    }

    ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
    bdrv_dec_in_flight(bs);
    return ret;
}

int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                unsigned int bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs = blk_bs(blk);

    trace_blk_co_pwritev(blk, bs, offset, bytes, flags);

    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    /* throttling disk I/O */
    if (blk->public.throttle_group_member.throttle_state) {
        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
                bytes, true);
    }

    if (!blk->enable_write_cache) {
        flags |= BDRV_REQ_FUA;
    }

    ret = bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags);
    bdrv_dec_in_flight(bs);
    return ret;
}

typedef struct BlkRwCo {
    BlockBackend *blk;
    int64_t offset;
    void *iobuf;
    int ret;
    BdrvRequestFlags flags;
} BlkRwCo;

static void blk_read_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, qiov->size,
                              qiov, rwco->flags);
}

static void blk_write_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, qiov->size,
                               qiov, rwco->flags);
}

static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
                   int64_t bytes, CoroutineEntry co_entry,
                   BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov;
    BlkRwCo rwco;

    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = bytes,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    rwco = (BlkRwCo) {
        .blk = blk,
        .offset = offset,
        .iobuf = &qiov,
        .flags = flags,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        co_entry(&rwco);
    } else {
        Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
        bdrv_coroutine_enter(blk_bs(blk), co);
        BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
    }

    return rwco.ret;
}

int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
                          int count)
{
    int ret;

    ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    blk_root_drained_begin(blk->root);
    ret = blk_pread(blk, offset, buf, count);
    blk_root_drained_end(blk->root);
    return ret;
}

int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                      int bytes, BdrvRequestFlags flags)
{
    return blk_prw(blk, offset, NULL, bytes, blk_write_entry,
                   flags | BDRV_REQ_ZERO_WRITE);
}

int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
{
    return bdrv_make_zero(blk->root, flags);
}

static void blk_inc_in_flight(BlockBackend *blk)
{
    atomic_inc(&blk->in_flight);
}

static void blk_dec_in_flight(BlockBackend *blk)
{
    atomic_dec(&blk->in_flight);
    aio_wait_kick();
}

static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;

    blk_dec_in_flight(acb->blk);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    aio_bh_schedule_oneshot(blk_get_aio_context(blk), error_callback_bh, acb);
    return &acb->common;
}

typedef struct BlkAioEmAIOCB {
    BlockAIOCB common;
    BlkRwCo rwco;
    int bytes;
    bool has_returned;
} BlkAioEmAIOCB;

static const AIOCBInfo blk_aio_em_aiocb_info = {
    .aiocb_size = sizeof(BlkAioEmAIOCB),
};

static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
    if (acb->has_returned) {
        acb->common.cb(acb->common.opaque, acb->rwco.ret);
        blk_dec_in_flight(acb->rwco.blk);
        qemu_aio_unref(acb);
    }
}

static void blk_aio_complete_bh(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    assert(acb->has_returned);
    blk_aio_complete(acb);
}

static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
                                void *iobuf, CoroutineEntry co_entry,
                                BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;

    blk_inc_in_flight(blk);
    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk = blk,
        .offset = offset,
        .iobuf = iobuf,
        .flags = flags,
        .ret = NOT_DONE,
    };
    acb->bytes = bytes;
    acb->has_returned = false;

    co = qemu_coroutine_create(co_entry, acb);
    bdrv_coroutine_enter(blk_bs(blk), co);

    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        aio_bh_schedule_oneshot(blk_get_aio_context(blk),
                                blk_aio_complete_bh, acb);
    }

    return &acb->common;
}

static void blk_aio_read_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    assert(qiov->size == acb->bytes);
    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
                              qiov, rwco->flags);
    blk_aio_complete(acb);
}

static void blk_aio_write_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;
    QEMUIOVector *qiov = rwco->iobuf;

    assert(!qiov || qiov->size == acb->bytes);
    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
                               qiov, rwco->flags);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                  int count, BdrvRequestFlags flags,
                                  BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry,
                        flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
    if (ret < 0) {
        return ret;
    }
    return count;
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
               BdrvRequestFlags flags)
{
    int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                      flags);
    if (ret < 0) {
        return ret;
    }
    return count;
}
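
/*
 * Sketch of synchronous byte-level I/O through the helpers above (the offset
 * and size are arbitrary). Both return the byte count on success and a
 * negative errno on failure:
 *
 *     uint8_t buf[512];
 *
 *     if (blk_pread(blk, 0, buf, sizeof(buf)) < 0) {
 *         ... handle read error ...
 *     }
 *     if (blk_pwrite(blk, 4096, buf, sizeof(buf), 0) < 0) {
 *         ... handle write error ...
 *     }
 */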

int64_t blk_getlength(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_getlength(blk_bs(blk));
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    if (!blk_bs(blk)) {
        *nb_sectors_ptr = 0;
    } else {
        bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
    }
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_nb_sectors(blk_bs(blk));
}

BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
                           QEMUIOVector *qiov, BdrvRequestFlags flags,
                           BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_read_entry, flags, cb, opaque);
}

BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                            QEMUIOVector *qiov, BdrvRequestFlags flags,
                            BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, qiov->size, qiov,
                        blk_aio_write_entry, flags, cb, opaque);
}
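
/*
 * Sketch of the asynchronous variant; my_read_done() is a hypothetical
 * completion callback that receives a negative errno in @ret on failure:
 *
 *     static void my_read_done(void *opaque, int ret)
 *     {
 *         ...
 *     }
 *
 *     qemu_iovec_init(&qiov, 1);
 *     qemu_iovec_add(&qiov, buf, len);
 *     blk_aio_preadv(blk, offset, &qiov, 0, my_read_done, NULL);
 */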

static void blk_aio_flush_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_flush(rwco->blk);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
}

static void blk_aio_pdiscard_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes);
    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
                             int64_t offset, int bytes,
                             BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
                        cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_ioctl(blk_bs(blk), req, buf);
}

static void blk_ioctl_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
                             qiov->iov[0].iov_base);
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
}

static void blk_aio_ioctl_entry(void *opaque)
{
    BlkAioEmAIOCB *acb = opaque;
    BlkRwCo *rwco = &acb->rwco;

    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset, rwco->iobuf);

    blk_aio_complete(acb);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque);
}

int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
    int ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_pdiscard(blk->root, offset, bytes);
}

int blk_co_flush(BlockBackend *blk)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_co_flush(blk_bs(blk));
}

static void blk_flush_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    rwco->ret = blk_co_flush(rwco->blk);
}

int blk_flush(BlockBackend *blk)
{
    return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0);
}

void blk_drain(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_drained_begin(bs);
    }

    /* We may have -ENOMEDIUM completions in flight */
    AIO_WAIT_WHILE(blk_get_aio_context(blk),
                   atomic_mb_read(&blk->in_flight) > 0);

    if (bs) {
        bdrv_drained_end(bs);
    }
}

void blk_drain_all(void)
{
    BlockBackend *blk = NULL;

    bdrv_drain_all_begin();

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *ctx = blk_get_aio_context(blk);

        aio_context_acquire(ctx);

        /* We may have -ENOMEDIUM completions in flight */
        AIO_WAIT_WHILE(ctx, atomic_mb_read(&blk->in_flight) > 0);

        aio_context_release(ctx);
    }

    bdrv_drain_all_end();
}

void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    case BLOCKDEV_ON_ERROR_AUTO:
    default:
        abort();
    }
}

static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;
    BlockDriverState *bs = blk_bs(blk);

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), !!bs,
                                   bs ? bdrv_get_node_name(bs) : NULL, optype,
                                   action, blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error));
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects. First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event. Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop. In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}

bool blk_is_read_only(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_is_read_only(bs);
    } else {
        return blk->root_state.read_only;
    }
}

bool blk_is_sg(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_is_sg(bs);
}

bool blk_enable_write_cache(BlockBackend *blk)
{
    return blk->enable_write_cache;
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    blk->enable_write_cache = wce;
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        error_setg(errp, "Device '%s' has no medium", blk->name);
        return;
    }

    bdrv_invalidate_cache(bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs && bdrv_is_inserted(bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_lock_medium(bs, locked);
    }
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    BlockDriverState *bs = blk_bs(blk);
    char *id;

    /* blk_eject is only called by qdevified devices */
    assert(!blk->legacy_dev);

    if (bs) {
        bdrv_eject(bs, eject_flag);
    }

    /* Whether or not we ejected on the backend,
     * the frontend experienced a tray event. */
    id = blk_get_attached_dev_id(blk);
    qapi_event_send_device_tray_moved(blk_name(blk), id,
                                      eject_flag);
    g_free(id);
}

int blk_get_flags(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        return bdrv_get_flags(bs);
    } else {
        return blk->root_state.open_flags;
    }
}

/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
uint32_t blk_get_max_transfer(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    uint32_t max = 0;

    if (bs) {
        max = bs->bl.max_transfer;
    }
    return MIN_NON_ZERO(max, INT_MAX);
}

int blk_get_max_iov(BlockBackend *blk)
{
    return blk->root->bs->bl.max_iov;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}

void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    BlockDriverState *bs = blk_bs(blk);

    if (!bs) {
        return false;
    }

    return bdrv_op_is_blocked(bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock(bs, op, reason);
    }
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_block_all(bs, reason);
    }
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_op_unblock_all(bs, reason);
    }
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    return bdrv_get_aio_context(blk_bs(blk));
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;

    if (bs) {
        if (tgm->throttle_state) {
            bdrv_drained_begin(bs);
            throttle_group_detach_aio_context(tgm);
            throttle_group_attach_aio_context(tgm, new_context);
            bdrv_drained_end(bs);
        }
        bdrv_set_aio_context(bs, new_context);
    }
}

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    BlockBackendAioNotifier *notifier;
    BlockDriverState *bs = blk_bs(blk);

    notifier = g_new(BlockBackendAioNotifier, 1);
    notifier->attached_aio_context = attached_aio_context;
    notifier->detach_aio_context = detach_aio_context;
    notifier->opaque = opaque;
    QLIST_INSERT_HEAD(&blk->aio_notifiers, notifier, list);

    if (bs) {
        bdrv_add_aio_context_notifier(bs, attached_aio_context,
                                      detach_aio_context, opaque);
    }
}
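
/*
 * Sketch of keeping per-device state in sync with AioContext changes; the
 * callbacks and opaque pointer are hypothetical. The same triple must be
 * passed to blk_remove_aio_context_notifier() below to unregister:
 *
 *     blk_add_aio_context_notifier(blk, mydev_attached_aio_context,
 *                                  mydev_detach_aio_context, mydev_state);
 *     ...
 *     blk_remove_aio_context_notifier(blk, mydev_attached_aio_context,
 *                                     mydev_detach_aio_context, mydev_state);
 */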

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    BlockBackendAioNotifier *notifier;
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
                                         detach_aio_context, opaque);
    }

    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
        if (notifier->attached_aio_context == attached_aio_context &&
            notifier->detach_aio_context == detach_aio_context &&
            notifier->opaque == opaque) {
            QLIST_REMOVE(notifier, list);
            g_free(notifier);
            return;
        }
    }

    abort();
}

void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->remove_bs_notifiers, notify);
}

void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
    notifier_list_add(&blk->insert_bs_notifiers, notify);
}

void blk_io_plug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_plug(bs);
    }
}

void blk_io_unplug(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_io_unplug(bs);
    }
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
                                      int bytes, BdrvRequestFlags flags)
{
    return blk_co_pwritev(blk, offset, bytes, NULL,
                          flags | BDRV_REQ_ZERO_WRITE);
}

int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
                          int count)
{
    return blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                   BDRV_REQ_WRITE_COMPRESSED);
}

int blk_truncate(BlockBackend *blk, int64_t offset, PreallocMode prealloc,
                 Error **errp)
{
    if (!blk_is_available(blk)) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }

    return bdrv_truncate(blk->root, offset, prealloc, errp);
}

static void blk_pdiscard_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    QEMUIOVector *qiov = rwco->iobuf;

    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, qiov->size);
}

int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
    return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    int ret;

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
    if (ret < 0) {
        return ret;
    }

    if (ret == size && !blk->enable_write_cache) {
        ret = bdrv_flush(blk_bs(blk));
    }

    return ret < 0 ? ret : size;
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    return bdrv_probe_geometry(blk_bs(blk), geo);
}

/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
{
    assert(blk->root);

    blk->root_state.open_flags = blk->root->bs->open_flags;
    blk->root_state.read_only = blk->root->bs->read_only;
    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
}

/*
 * Returns the detect-zeroes setting to be used for bdrv_open() of a
 * BlockDriverState which is supposed to inherit the root state.
 */
bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
{
    return blk->root_state.detect_zeroes;
}

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int bs_flags;

    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

    return bs_flags;
}

BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}

int blk_commit_all(void)
{
    BlockBackend *blk = NULL;

    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk) && blk->root->bs->backing) {
            int ret = bdrv_commit(blk->root->bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}


/* throttling disk I/O limits */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
{
    throttle_group_config(&blk->public.throttle_group_member, cfg);
}

void blk_io_limits_disable(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    assert(tgm->throttle_state);
    if (bs) {
        bdrv_drained_begin(bs);
    }
    throttle_group_unregister_tgm(tgm);
    if (bs) {
        bdrv_drained_end(bs);
    }
}

/* should be called before blk_set_io_limits if a limit is set */
void blk_io_limits_enable(BlockBackend *blk, const char *group)
{
    assert(!blk->public.throttle_group_member.throttle_state);
    throttle_group_register_tgm(&blk->public.throttle_group_member,
                                group, blk_get_aio_context(blk));
}

void blk_io_limits_update_group(BlockBackend *blk, const char *group)
{
    /* this BB is not part of any group */
    if (!blk->public.throttle_group_member.throttle_state) {
        return;
    }

    /* this BB is already part of the same group as the one we want */
    if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member),
                   group)) {
        return;
    }

    /* need to change the group this blk belongs to */
    blk_io_limits_disable(blk);
    blk_io_limits_enable(blk, group);
}
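
/*
 * Sketch of enabling throttling on a backend; the group name and the limit
 * are arbitrary. blk_io_limits_enable() must be called before
 * blk_set_io_limits(), per the comment above:
 *
 *     ThrottleConfig cfg;
 *
 *     throttle_config_init(&cfg);
 *     cfg.buckets[THROTTLE_OPS_TOTAL].avg = 100;
 *     blk_io_limits_enable(blk, "group0");
 *     blk_set_io_limits(blk, &cfg);
 */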

static void blk_root_drained_begin(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    if (++blk->quiesce_counter == 1) {
        if (blk->dev_ops && blk->dev_ops->drained_begin) {
            blk->dev_ops->drained_begin(blk->dev_opaque);
        }
    }

    /* Note that blk->root may not be accessible here yet if we are just
     * attaching to a BlockDriverState that is drained. Use child instead. */

    if (atomic_fetch_inc(&blk->public.throttle_group_member.io_limits_disabled) == 0) {
        throttle_group_restart_tgm(&blk->public.throttle_group_member);
    }
}

static bool blk_root_drained_poll(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    assert(blk->quiesce_counter);
    return !!blk->in_flight;
}

static void blk_root_drained_end(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
    assert(blk->quiesce_counter);

    assert(blk->public.throttle_group_member.io_limits_disabled);
    atomic_dec(&blk->public.throttle_group_member.io_limits_disabled);

    if (--blk->quiesce_counter == 0) {
        if (blk->dev_ops && blk->dev_ops->drained_end) {
            blk->dev_ops->drained_end(blk->dev_opaque);
        }
    }
}

void blk_register_buf(BlockBackend *blk, void *host, size_t size)
{
    bdrv_register_buf(blk_bs(blk), host, size);
}

void blk_unregister_buf(BlockBackend *blk, void *host)
{
    bdrv_unregister_buf(blk_bs(blk), host);
}

int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
                                   BlockBackend *blk_out, int64_t off_out,
                                   int bytes, BdrvRequestFlags read_flags,
                                   BdrvRequestFlags write_flags)
{
    int r;
    r = blk_check_byte_request(blk_in, off_in, bytes);
    if (r) {
        return r;
    }
    r = blk_check_byte_request(blk_out, off_out, bytes);
    if (r) {
        return r;
    }
    return bdrv_co_copy_range(blk_in->root, off_in,
                              blk_out->root, off_out,
                              bytes, read_flags, write_flags);
}