1 /*
2 * QEMU Block backends
3 *
4 * Copyright (C) 2014-2016 Red Hat, Inc.
5 *
6 * Authors:
7 * Markus Armbruster <armbru@redhat.com>,
8 *
9 * This work is licensed under the terms of the GNU LGPL, version 2.1
10 * or later. See the COPYING.LIB file in the top-level directory.
11 */
12
13 #include "qemu/osdep.h"
14 #include "sysemu/block-backend.h"
15 #include "block/block_int.h"
16 #include "block/blockjob.h"
17 #include "block/coroutines.h"
18 #include "block/throttle-groups.h"
19 #include "hw/qdev-core.h"
20 #include "sysemu/blockdev.h"
21 #include "sysemu/runstate.h"
22 #include "sysemu/replay.h"
23 #include "qapi/error.h"
24 #include "qapi/qapi-events-block.h"
25 #include "qemu/id.h"
26 #include "qemu/main-loop.h"
27 #include "qemu/option.h"
28 #include "trace.h"
29 #include "migration/misc.h"
30
31 /* Number of coroutines to reserve per attached device model */
32 #define COROUTINE_POOL_RESERVATION 64
33
34 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
35
36 typedef struct BlockBackendAioNotifier {
37 void (*attached_aio_context)(AioContext *new_context, void *opaque);
38 void (*detach_aio_context)(void *opaque);
39 void *opaque;
40 QLIST_ENTRY(BlockBackendAioNotifier) list;
41 } BlockBackendAioNotifier;
42
43 struct BlockBackend {
44 char *name;
45 int refcnt;
46 BdrvChild *root;
47 AioContext *ctx;
48 DriveInfo *legacy_dinfo; /* null unless created by drive_new() */
49 QTAILQ_ENTRY(BlockBackend) link; /* for block_backends */
50 QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
51 BlockBackendPublic public;
52
53 DeviceState *dev; /* attached device model, if any */
54 const BlockDevOps *dev_ops;
55 void *dev_opaque;
56
57 /* If the BDS tree is removed, some of its options are stored here (which
58 * can be used to restore those options in the new BDS on insert) */
59 BlockBackendRootState root_state;
60
61 bool enable_write_cache;
62
63 /* I/O stats (display with "info blockstats"). */
64 BlockAcctStats stats;
65
66 BlockdevOnError on_read_error, on_write_error;
67 bool iostatus_enabled;
68 BlockDeviceIoStatus iostatus;
69
70 uint64_t perm;
71 uint64_t shared_perm;
72 bool disable_perm;
73
74 bool allow_aio_context_change;
75 bool allow_write_beyond_eof;
76
77 /* Protected by BQL */
78 NotifierList remove_bs_notifiers, insert_bs_notifiers;
79 QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;
80
81 int quiesce_counter; /* atomic: written under BQL, read by other threads */
82 QemuMutex queued_requests_lock; /* protects queued_requests */
83 CoQueue queued_requests;
84 bool disable_request_queuing; /* atomic */
85
86 VMChangeStateEntry *vmsh;
87 bool force_allow_inactivate;
88
89 /* Number of in-flight aio requests. BlockDriverState also counts
90 * in-flight requests but aio requests can exist even when blk->root is
91 * NULL, so we cannot rely on its counter for that case.
92 * Accessed with atomic ops.
93 */
94 unsigned int in_flight;
95 };
96
97 typedef struct BlockBackendAIOCB {
98 BlockAIOCB common;
99 BlockBackend *blk;
100 int ret;
101 } BlockBackendAIOCB;
102
103 static const AIOCBInfo block_backend_aiocb_info = {
104 .aiocb_size = sizeof(BlockBackendAIOCB),
105 };
106
107 static void drive_info_del(DriveInfo *dinfo);
108 static BlockBackend *bdrv_first_blk(BlockDriverState *bs);
109
110 /* All BlockBackends. Protected by BQL. */
111 static QTAILQ_HEAD(, BlockBackend) block_backends =
112 QTAILQ_HEAD_INITIALIZER(block_backends);
113
114 /*
115 * All BlockBackends referenced by the monitor, i.e. those that blk_next()
116 * iterates over. Protected by BQL.
117 */
118 static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
119 QTAILQ_HEAD_INITIALIZER(monitor_block_backends);
120
121 static int coroutine_mixed_fn GRAPH_RDLOCK
122 blk_set_perm_locked(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
123 Error **errp);
124
125 static void blk_root_inherit_options(BdrvChildRole role, bool parent_is_format,
126 int *child_flags, QDict *child_options,
127 int parent_flags, QDict *parent_options)
128 {
129 /* We're not supposed to call this function for root nodes */
130 abort();
131 }
132 static void blk_root_drained_begin(BdrvChild *child);
133 static bool blk_root_drained_poll(BdrvChild *child);
134 static void blk_root_drained_end(BdrvChild *child);
135
136 static void blk_root_change_media(BdrvChild *child, bool load);
137 static void blk_root_resize(BdrvChild *child);
138
139 static bool blk_root_change_aio_ctx(BdrvChild *child, AioContext *ctx,
140 GHashTable *visited, Transaction *tran,
141 Error **errp);
142
143 static char *blk_root_get_parent_desc(BdrvChild *child)
144 {
145 BlockBackend *blk = child->opaque;
146 g_autofree char *dev_id = NULL;
147
148 if (blk->name) {
149 return g_strdup_printf("block device '%s'", blk->name);
150 }
151
152 dev_id = blk_get_attached_dev_id(blk);
153 if (*dev_id) {
154 return g_strdup_printf("block device '%s'", dev_id);
155 } else {
156 /* TODO Callback into the BB owner for something more detailed */
157 return g_strdup("an unnamed block device");
158 }
159 }
160
161 static const char *blk_root_get_name(BdrvChild *child)
162 {
163 return blk_name(child->opaque);
164 }
165
166 static void blk_vm_state_changed(void *opaque, bool running, RunState state)
167 {
168 Error *local_err = NULL;
169 BlockBackend *blk = opaque;
170
171 if (state == RUN_STATE_INMIGRATE) {
172 return;
173 }
174
175 qemu_del_vm_change_state_handler(blk->vmsh);
176 blk->vmsh = NULL;
177 blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
178 if (local_err) {
179 error_report_err(local_err);
180 }
181 }
182
183 /*
184 * Notifies the user of the BlockBackend that migration has completed. qdev
185 * devices can tighten their permissions in response (specifically revoke
186 * shared write permissions that we needed for storage migration).
187 *
188 * If an error is returned, the VM must not be resumed.
189 */
190 static void GRAPH_RDLOCK blk_root_activate(BdrvChild *child, Error **errp)
191 {
192 BlockBackend *blk = child->opaque;
193 Error *local_err = NULL;
194 uint64_t saved_shared_perm;
195
196 if (!blk->disable_perm) {
197 return;
198 }
199
200 blk->disable_perm = false;
201
202 /*
203 * blk->shared_perm contains the permissions we want to share once
204 * migration is really completely done. For now, we need to share
205 * all; but we also need to retain blk->shared_perm, which is
206 * overwritten by a successful blk_set_perm() call. Save it and
207 * restore it below.
208 */
209 saved_shared_perm = blk->shared_perm;
210
211 blk_set_perm_locked(blk, blk->perm, BLK_PERM_ALL, &local_err);
212 if (local_err) {
213 error_propagate(errp, local_err);
214 blk->disable_perm = true;
215 return;
216 }
217 blk->shared_perm = saved_shared_perm;
218
219 if (runstate_check(RUN_STATE_INMIGRATE)) {
220 /* Activation can happen while the migration process is still active, for
221 * example when nbd_server_add is called during non-shared storage
222 * migration. Defer the shared_perm update to migration completion. */
223 if (!blk->vmsh) {
224 blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed,
225 blk);
226 }
227 return;
228 }
229
230 blk_set_perm_locked(blk, blk->perm, blk->shared_perm, &local_err);
231 if (local_err) {
232 error_propagate(errp, local_err);
233 blk->disable_perm = true;
234 return;
235 }
236 }
237
238 void blk_set_force_allow_inactivate(BlockBackend *blk)
239 {
240 GLOBAL_STATE_CODE();
241 blk->force_allow_inactivate = true;
242 }
243
244 static bool blk_can_inactivate(BlockBackend *blk)
245 {
246 /* If it is a guest device, inactivate is ok. */
247 if (blk->dev || blk_name(blk)[0]) {
248 return true;
249 }
250
251 /* Inactivating means no more writes to the image can be done,
252 * even if those writes would be invisible to the guest. For
253 * block job BBs that satisfy this, we can just allow it. This
254 * is the case for the mirror job source, which is required by
255 * libvirt's non-shared block migration. */
256 if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) {
257 return true;
258 }
259
260 return blk->force_allow_inactivate;
261 }
262
263 static int GRAPH_RDLOCK blk_root_inactivate(BdrvChild *child)
264 {
265 BlockBackend *blk = child->opaque;
266
267 if (blk->disable_perm) {
268 return 0;
269 }
270
271 if (!blk_can_inactivate(blk)) {
272 return -EPERM;
273 }
274
275 blk->disable_perm = true;
276 if (blk->root) {
277 bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort);
278 }
279
280 return 0;
281 }
282
283 static void blk_root_attach(BdrvChild *child)
284 {
285 BlockBackend *blk = child->opaque;
286 BlockBackendAioNotifier *notifier;
287
288 trace_blk_root_attach(child, blk, child->bs);
289
290 QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
291 bdrv_add_aio_context_notifier(child->bs,
292 notifier->attached_aio_context,
293 notifier->detach_aio_context,
294 notifier->opaque);
295 }
296 }
297
298 static void blk_root_detach(BdrvChild *child)
299 {
300 BlockBackend *blk = child->opaque;
301 BlockBackendAioNotifier *notifier;
302
303 trace_blk_root_detach(child, blk, child->bs);
304
305 QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
306 bdrv_remove_aio_context_notifier(child->bs,
307 notifier->attached_aio_context,
308 notifier->detach_aio_context,
309 notifier->opaque);
310 }
311 }
312
313 static AioContext *blk_root_get_parent_aio_context(BdrvChild *c)
314 {
315 BlockBackend *blk = c->opaque;
316 IO_CODE();
317
318 return blk_get_aio_context(blk);
319 }
320
321 static const BdrvChildClass child_root = {
322 .inherit_options = blk_root_inherit_options,
323
324 .change_media = blk_root_change_media,
325 .resize = blk_root_resize,
326 .get_name = blk_root_get_name,
327 .get_parent_desc = blk_root_get_parent_desc,
328
329 .drained_begin = blk_root_drained_begin,
330 .drained_poll = blk_root_drained_poll,
331 .drained_end = blk_root_drained_end,
332
333 .activate = blk_root_activate,
334 .inactivate = blk_root_inactivate,
335
336 .attach = blk_root_attach,
337 .detach = blk_root_detach,
338
339 .change_aio_ctx = blk_root_change_aio_ctx,
340
341 .get_parent_aio_context = blk_root_get_parent_aio_context,
342 };
343
344 /*
345 * Create a new BlockBackend with a reference count of one.
346 *
347 * @perm is a bitmask of BLK_PERM_* constants which describes the permissions
348 * to request for a block driver node that is attached to this BlockBackend.
349 * @shared_perm is a bitmask which describes which permissions may be granted
350 * to other users of the attached node.
351 * Both sets of permissions can be changed later using blk_set_perm().
352 *
353 * Return the new BlockBackend on success, null on failure.
354 */
355 BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
356 {
357 BlockBackend *blk;
358
359 GLOBAL_STATE_CODE();
360
361 blk = g_new0(BlockBackend, 1);
362 blk->refcnt = 1;
363 blk->ctx = ctx;
364 blk->perm = perm;
365 blk->shared_perm = shared_perm;
366 blk_set_enable_write_cache(blk, true);
367
368 blk->on_read_error = BLOCKDEV_ON_ERROR_REPORT;
369 blk->on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;
370
371 block_acct_init(&blk->stats);
372
373 qemu_mutex_init(&blk->queued_requests_lock);
374 qemu_co_queue_init(&blk->queued_requests);
375 notifier_list_init(&blk->remove_bs_notifiers);
376 notifier_list_init(&blk->insert_bs_notifiers);
377 QLIST_INIT(&blk->aio_notifiers);
378
379 QTAILQ_INSERT_TAIL(&block_backends, blk, link);
380 return blk;
381 }
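/*
 * Example (an illustrative sketch, not part of this file): creating an
 * anonymous BlockBackend that needs consistent reads and writes while
 * sharing everything with other users of the node:
 *
 *     BlockBackend *blk;
 *
 *     blk = blk_new(qemu_get_aio_context(),
 *                   BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
 *                   BLK_PERM_ALL);
 *     ...
 *     blk_unref(blk);
 */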
382
383 /*
384 * Create a new BlockBackend connected to an existing BlockDriverState.
385 *
386 * @perm is a bitmask of BLK_PERM_* constants which describes the
387 * permissions to request for @bs that is attached to this
388 * BlockBackend. @shared_perm is a bitmask which describes which
389 * permissions may be granted to other users of the attached node.
390 * Both sets of permissions can be changed later using blk_set_perm().
391 *
392 * Return the new BlockBackend on success, null on failure.
393 *
394 * Callers must hold the AioContext lock of @bs.
395 */
396 BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm,
397 uint64_t shared_perm, Error **errp)
398 {
399 BlockBackend *blk = blk_new(bdrv_get_aio_context(bs), perm, shared_perm);
400
401 GLOBAL_STATE_CODE();
402
403 if (blk_insert_bs(blk, bs, errp) < 0) {
404 blk_unref(blk);
405 return NULL;
406 }
407 return blk;
408 }
409
410 /*
411 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
412 * By default, the new BlockBackend is in the main AioContext, but if the
413 * parameters connect it with any existing node in a different AioContext, it
414 * may end up there instead.
415 *
416 * Just as with bdrv_open(), after having called this function the reference to
417 * @options belongs to the block layer (even on failure).
418 *
419 * Called without holding an AioContext lock.
420 *
421 * TODO: Remove @filename and @flags; it should be possible to specify a whole
422 * BDS tree just by specifying the @options QDict (or @reference,
423 * alternatively). At the time of adding this function, this is not possible,
424 * though, so callers of this function have to be able to specify @filename and
425 * @flags.
426 */
427 BlockBackend *blk_new_open(const char *filename, const char *reference,
428 QDict *options, int flags, Error **errp)
429 {
430 BlockBackend *blk;
431 BlockDriverState *bs;
432 AioContext *ctx;
433 uint64_t perm = 0;
434 uint64_t shared = BLK_PERM_ALL;
435
436 GLOBAL_STATE_CODE();
437
438 /*
439 * blk_new_open() is mainly used in .bdrv_create implementations and the
440 * tools where sharing isn't a major concern because the BDS stays private
441 * and the file is generally not supposed to be used by a second process,
442 * so we just request permission according to the flags.
443 *
444 * The exceptions are xen_disk and blockdev_init(); in these cases, the
445 * caller of blk_new_open() doesn't make use of the permissions, but they
446 * shouldn't hurt either. We can still share everything here because the
447 * guest devices will add their own blockers if they can't share.
448 */
449 if ((flags & BDRV_O_NO_IO) == 0) {
450 perm |= BLK_PERM_CONSISTENT_READ;
451 if (flags & BDRV_O_RDWR) {
452 perm |= BLK_PERM_WRITE;
453 }
454 }
455 if (flags & BDRV_O_RESIZE) {
456 perm |= BLK_PERM_RESIZE;
457 }
458 if (flags & BDRV_O_NO_SHARE) {
459 shared = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED;
460 }
461
462 aio_context_acquire(qemu_get_aio_context());
463 bs = bdrv_open(filename, reference, options, flags, errp);
464 aio_context_release(qemu_get_aio_context());
465 if (!bs) {
466 return NULL;
467 }
468
469 /* bdrv_open() could have moved bs to a different AioContext */
470 ctx = bdrv_get_aio_context(bs);
471 blk = blk_new(ctx, perm, shared);
472 blk->perm = perm;
473 blk->shared_perm = shared;
474
475 aio_context_acquire(ctx);
476 blk_insert_bs(blk, bs, errp);
477 bdrv_unref(bs);
478 aio_context_release(ctx);
479
480 if (!blk->root) {
481 blk_unref(blk);
482 return NULL;
483 }
484
485 return blk;
486 }
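/*
 * Example (a hedged sketch of typical tool usage; the filename and error
 * handling are illustrative only):
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk;
 *
 *     blk = blk_new_open("disk.qcow2", NULL, NULL, BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *         return -1;
 *     }
 */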
487
488 static void blk_delete(BlockBackend *blk)
489 {
490 assert(!blk->refcnt);
491 assert(!blk->name);
492 assert(!blk->dev);
493 if (blk->public.throttle_group_member.throttle_state) {
494 blk_io_limits_disable(blk);
495 }
496 if (blk->root) {
497 blk_remove_bs(blk);
498 }
499 if (blk->vmsh) {
500 qemu_del_vm_change_state_handler(blk->vmsh);
501 blk->vmsh = NULL;
502 }
503 assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
504 assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
505 assert(QLIST_EMPTY(&blk->aio_notifiers));
506 assert(qemu_co_queue_empty(&blk->queued_requests));
507 qemu_mutex_destroy(&blk->queued_requests_lock);
508 QTAILQ_REMOVE(&block_backends, blk, link);
509 drive_info_del(blk->legacy_dinfo);
510 block_acct_cleanup(&blk->stats);
511 g_free(blk);
512 }
513
514 static void drive_info_del(DriveInfo *dinfo)
515 {
516 if (!dinfo) {
517 return;
518 }
519 qemu_opts_del(dinfo->opts);
520 g_free(dinfo);
521 }
522
523 int blk_get_refcnt(BlockBackend *blk)
524 {
525 GLOBAL_STATE_CODE();
526 return blk ? blk->refcnt : 0;
527 }
528
529 /*
530 * Increment @blk's reference count.
531 * @blk must not be null.
532 */
533 void blk_ref(BlockBackend *blk)
534 {
535 assert(blk->refcnt > 0);
536 GLOBAL_STATE_CODE();
537 blk->refcnt++;
538 }
539
540 /*
541 * Decrement @blk's reference count.
542 * If this drops it to zero, destroy @blk.
543 * For convenience, do nothing if @blk is null.
544 */
545 void blk_unref(BlockBackend *blk)
546 {
547 GLOBAL_STATE_CODE();
548 if (blk) {
549 assert(blk->refcnt > 0);
550 if (blk->refcnt > 1) {
551 blk->refcnt--;
552 } else {
553 blk_drain(blk);
554 /* blk_drain() cannot resurrect blk, nobody held a reference */
555 assert(blk->refcnt == 1);
556 blk->refcnt = 0;
557 blk_delete(blk);
558 }
559 }
560 }
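/*
 * Illustrative sketch of the reference counting contract: creation via
 * blk_new*() returns with refcnt == 1, and every blk_ref() must be balanced
 * by exactly one blk_unref() under the BQL; do_something() is hypothetical:
 *
 *     blk_ref(blk);        // keep blk alive across the operation
 *     do_something(blk);
 *     blk_unref(blk);      // may trigger blk_delete() on the last reference
 */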
561
562 /*
563 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
564 * ones which are hidden (i.e. are not referenced by the monitor).
565 */
566 BlockBackend *blk_all_next(BlockBackend *blk)
567 {
568 GLOBAL_STATE_CODE();
569 return blk ? QTAILQ_NEXT(blk, link)
570 : QTAILQ_FIRST(&block_backends);
571 }
572
573 void blk_remove_all_bs(void)
574 {
575 BlockBackend *blk = NULL;
576
577 GLOBAL_STATE_CODE();
578
579 while ((blk = blk_all_next(blk)) != NULL) {
580 AioContext *ctx = blk_get_aio_context(blk);
581
582 aio_context_acquire(ctx);
583 if (blk->root) {
584 blk_remove_bs(blk);
585 }
586 aio_context_release(ctx);
587 }
588 }
589
590 /*
591 * Return the monitor-owned BlockBackend after @blk.
592 * If @blk is null, return the first one.
593 * Else, return @blk's next sibling, which may be null.
594 *
595 * To iterate over all BlockBackends, do
596 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
597 * ...
598 * }
599 */
600 BlockBackend *blk_next(BlockBackend *blk)
601 {
602 GLOBAL_STATE_CODE();
603 return blk ? QTAILQ_NEXT(blk, monitor_link)
604 : QTAILQ_FIRST(&monitor_block_backends);
605 }
606
607 /* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
608 * the monitor or attached to a BlockBackend */
609 BlockDriverState *bdrv_next(BdrvNextIterator *it)
610 {
611 BlockDriverState *bs, *old_bs;
612
613 /* Must be called from the main loop */
614 assert(qemu_get_current_aio_context() == qemu_get_aio_context());
615
616 /* First, return all root nodes of BlockBackends. In order to avoid
617 * returning a BDS twice when multiple BBs refer to it, we only return it
618 * if the BB is the first one in the parent list of the BDS. */
619 if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
620 BlockBackend *old_blk = it->blk;
621
622 old_bs = old_blk ? blk_bs(old_blk) : NULL;
623
624 do {
625 it->blk = blk_all_next(it->blk);
626 bs = it->blk ? blk_bs(it->blk) : NULL;
627 } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk));
628
629 if (it->blk) {
630 blk_ref(it->blk);
631 }
632 blk_unref(old_blk);
633
634 if (bs) {
635 bdrv_ref(bs);
636 bdrv_unref(old_bs);
637 return bs;
638 }
639 it->phase = BDRV_NEXT_MONITOR_OWNED;
640 } else {
641 old_bs = it->bs;
642 }
643
644 /* Then return the monitor-owned BDSes without a BB attached. Ignore all
645 * BDSes that are attached to a BlockBackend here; they have been handled
646 * by the above block already */
647 do {
648 it->bs = bdrv_next_monitor_owned(it->bs);
649 bs = it->bs;
650 } while (bs && bdrv_has_blk(bs));
651
652 if (bs) {
653 bdrv_ref(bs);
654 }
655 bdrv_unref(old_bs);
656
657 return bs;
658 }
659
660 static void bdrv_next_reset(BdrvNextIterator *it)
661 {
662 *it = (BdrvNextIterator) {
663 .phase = BDRV_NEXT_BACKEND_ROOTS,
664 };
665 }
666
667 BlockDriverState *bdrv_first(BdrvNextIterator *it)
668 {
669 GLOBAL_STATE_CODE();
670 bdrv_next_reset(it);
671 return bdrv_next(it);
672 }
673
674 /* Must be called when aborting a bdrv_next() iteration before
675 * bdrv_next() returns NULL */
676 void bdrv_next_cleanup(BdrvNextIterator *it)
677 {
678 /* Must be called from the main loop */
679 assert(qemu_get_current_aio_context() == qemu_get_aio_context());
680
681 if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
682 if (it->blk) {
683 bdrv_unref(blk_bs(it->blk));
684 blk_unref(it->blk);
685 }
686 } else {
687 bdrv_unref(it->bs);
688 }
689
690 bdrv_next_reset(it);
691 }
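/*
 * Illustrative sketch of a full bdrv_first()/bdrv_next() loop, including
 * the early-abort case that requires bdrv_next_cleanup(); wants_bs() is a
 * hypothetical predicate:
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         if (wants_bs(bs)) {
 *             bdrv_next_cleanup(&it);   // drop the iterator's references
 *             break;
 *         }
 *     }
 */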
692
693 /*
694 * Add a BlockBackend into the list of backends referenced by the monitor, with
695 * the given @name acting as the handle for the monitor.
696 * Strictly for use by blockdev.c.
697 *
698 * @name must not be null or empty.
699 *
700 * Returns true on success and false on failure. In the latter case, an Error
701 * object is returned through @errp.
702 */
703 bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
704 {
705 assert(!blk->name);
706 assert(name && name[0]);
707 GLOBAL_STATE_CODE();
708
709 if (!id_wellformed(name)) {
710 error_setg(errp, "Invalid device name");
711 return false;
712 }
713 if (blk_by_name(name)) {
714 error_setg(errp, "Device with id '%s' already exists", name);
715 return false;
716 }
717 if (bdrv_find_node(name)) {
718 error_setg(errp,
719 "Device name '%s' conflicts with an existing node name",
720 name);
721 return false;
722 }
723
724 blk->name = g_strdup(name);
725 QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
726 return true;
727 }
728
729 /*
730 * Remove a BlockBackend from the list of backends referenced by the monitor.
731 * Strictly for use by blockdev.c.
732 */
733 void monitor_remove_blk(BlockBackend *blk)
734 {
735 GLOBAL_STATE_CODE();
736
737 if (!blk->name) {
738 return;
739 }
740
741 QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
742 g_free(blk->name);
743 blk->name = NULL;
744 }
745
746 /*
747 * Return @blk's name, a non-null string.
748 * Returns an empty string iff @blk is not referenced by the monitor.
749 */
750 const char *blk_name(const BlockBackend *blk)
751 {
752 IO_CODE();
753 return blk->name ?: "";
754 }
755
756 /*
757 * Return the BlockBackend with name @name if it exists, else null.
758 * @name must not be null.
759 */
760 BlockBackend *blk_by_name(const char *name)
761 {
762 BlockBackend *blk = NULL;
763
764 GLOBAL_STATE_CODE();
765 assert(name);
766 while ((blk = blk_next(blk)) != NULL) {
767 if (!strcmp(name, blk->name)) {
768 return blk;
769 }
770 }
771 return NULL;
772 }
773
774 /*
775 * Return the BlockDriverState attached to @blk if any, else null.
776 */
777 BlockDriverState *blk_bs(BlockBackend *blk)
778 {
779 IO_CODE();
780 return blk->root ? blk->root->bs : NULL;
781 }
782
783 static BlockBackend * GRAPH_RDLOCK bdrv_first_blk(BlockDriverState *bs)
784 {
785 BdrvChild *child;
786
787 GLOBAL_STATE_CODE();
788 assert_bdrv_graph_readable();
789
790 QLIST_FOREACH(child, &bs->parents, next_parent) {
791 if (child->klass == &child_root) {
792 return child->opaque;
793 }
794 }
795
796 return NULL;
797 }
798
799 /*
800 * Returns true if @bs has an associated BlockBackend.
801 */
802 bool bdrv_has_blk(BlockDriverState *bs)
803 {
804 GLOBAL_STATE_CODE();
805 return bdrv_first_blk(bs) != NULL;
806 }
807
808 /*
809 * Returns true if @bs has only BlockBackends as parents.
810 */
811 bool bdrv_is_root_node(BlockDriverState *bs)
812 {
813 BdrvChild *c;
814
815 GLOBAL_STATE_CODE();
816 assert_bdrv_graph_readable();
817
818 QLIST_FOREACH(c, &bs->parents, next_parent) {
819 if (c->klass != &child_root) {
820 return false;
821 }
822 }
823
824 return true;
825 }
826
827 /*
828 * Return @blk's DriveInfo if any, else null.
829 */
830 DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
831 {
832 GLOBAL_STATE_CODE();
833 return blk->legacy_dinfo;
834 }
835
836 /*
837 * Set @blk's DriveInfo to @dinfo, and return it.
838 * @blk must not have a DriveInfo set already.
839 * No other BlockBackend may have the same DriveInfo set.
840 */
841 DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
842 {
843 assert(!blk->legacy_dinfo);
844 GLOBAL_STATE_CODE();
845 return blk->legacy_dinfo = dinfo;
846 }
847
848 /*
849 * Return the BlockBackend with DriveInfo @dinfo.
850 * It must exist.
851 */
852 BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
853 {
854 BlockBackend *blk = NULL;
855 GLOBAL_STATE_CODE();
856
857 while ((blk = blk_next(blk)) != NULL) {
858 if (blk->legacy_dinfo == dinfo) {
859 return blk;
860 }
861 }
862 abort();
863 }
864
865 /*
866 * Returns a pointer to the publicly accessible fields of @blk.
867 */
868 BlockBackendPublic *blk_get_public(BlockBackend *blk)
869 {
870 GLOBAL_STATE_CODE();
871 return &blk->public;
872 }
873
874 /*
875 * Returns a BlockBackend given the associated @public fields.
876 */
877 BlockBackend *blk_by_public(BlockBackendPublic *public)
878 {
879 GLOBAL_STATE_CODE();
880 return container_of(public, BlockBackend, public);
881 }
882
883 /*
884 * Disassociates the currently associated BlockDriverState from @blk.
885 *
886 * The caller must hold the AioContext lock for the BlockBackend.
887 */
888 void blk_remove_bs(BlockBackend *blk)
889 {
890 ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
891 BdrvChild *root;
892 AioContext *ctx;
893
894 GLOBAL_STATE_CODE();
895
896 notifier_list_notify(&blk->remove_bs_notifiers, blk);
897 if (tgm->throttle_state) {
898 BlockDriverState *bs = blk_bs(blk);
899
900 /*
901 * Take a ref in case blk_bs() changes across bdrv_drained_begin(), for
902 * example, if a temporary filter node is removed by a blockjob.
903 */
904 bdrv_ref(bs);
905 bdrv_drained_begin(bs);
906 throttle_group_detach_aio_context(tgm);
907 throttle_group_attach_aio_context(tgm, qemu_get_aio_context());
908 bdrv_drained_end(bs);
909 bdrv_unref(bs);
910 }
911
912 blk_update_root_state(blk);
913
914 /* bdrv_root_unref_child() will cause blk->root to become stale and may
915 * switch to a completion coroutine later on. Let's drain all I/O here
916 * to avoid that and a potential QEMU crash.
917 */
918 blk_drain(blk);
919 root = blk->root;
920 blk->root = NULL;
921
922 ctx = bdrv_get_aio_context(root->bs);
923 bdrv_graph_wrlock(root->bs);
924 bdrv_root_unref_child(root);
925 bdrv_graph_wrunlock_ctx(ctx);
926 }
927
928 /*
929 * Associates a new BlockDriverState with @blk.
930 *
931 * Callers must hold the AioContext lock of @bs.
932 */
933 int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
934 {
935 ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
936 AioContext *ctx = bdrv_get_aio_context(bs);
937
938 GLOBAL_STATE_CODE();
939 bdrv_ref(bs);
940 bdrv_graph_wrlock(bs);
941 blk->root = bdrv_root_attach_child(bs, "root", &child_root,
942 BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
943 blk->perm, blk->shared_perm,
944 blk, errp);
945 bdrv_graph_wrunlock_ctx(ctx);
946 if (blk->root == NULL) {
947 return -EPERM;
948 }
949
950 notifier_list_notify(&blk->insert_bs_notifiers, blk);
951 if (tgm->throttle_state) {
952 throttle_group_detach_aio_context(tgm);
953 throttle_group_attach_aio_context(tgm, bdrv_get_aio_context(bs));
954 }
955
956 return 0;
957 }
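/*
 * Hedged sketch of swapping the root node of a BlockBackend with the two
 * functions above, assuming the caller holds the locks they document:
 *
 *     if (blk_bs(blk)) {
 *         blk_remove_bs(blk);              // drains and detaches the old root
 *     }
 *     if (blk_insert_bs(blk, new_bs, errp) < 0) {
 *         return -EPERM;                   // permissions were not granted
 *     }
 */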
958
959 /*
960 * Change BlockDriverState associated with @blk.
961 */
962 int blk_replace_bs(BlockBackend *blk, BlockDriverState *new_bs, Error **errp)
963 {
964 GLOBAL_STATE_CODE();
965 return bdrv_replace_child_bs(blk->root, new_bs, errp);
966 }
967
968 /*
969 * Sets the permission bitmasks that the user of the BlockBackend needs.
970 */
971 static int coroutine_mixed_fn GRAPH_RDLOCK
972 blk_set_perm_locked(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
973 Error **errp)
974 {
975 int ret;
976 GLOBAL_STATE_CODE();
977
978 if (blk->root && !blk->disable_perm) {
979 ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp);
980 if (ret < 0) {
981 return ret;
982 }
983 }
984
985 blk->perm = perm;
986 blk->shared_perm = shared_perm;
987
988 return 0;
989 }
990
991 int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
992 Error **errp)
993 {
994 GLOBAL_STATE_CODE();
995 GRAPH_RDLOCK_GUARD_MAINLOOP();
996
997 return blk_set_perm_locked(blk, perm, shared_perm, errp);
998 }
999
1000 void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
1001 {
1002 GLOBAL_STATE_CODE();
1003 *perm = blk->perm;
1004 *shared_perm = blk->shared_perm;
1005 }
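/*
 * Illustrative sketch: revoking the shared write permission while keeping
 * everything else; blk_get_perm() reads back exactly what a successful
 * blk_set_perm() stored:
 *
 *     uint64_t perm, shared;
 *
 *     blk_get_perm(blk, &perm, &shared);
 *     if (blk_set_perm(blk, perm, shared & ~BLK_PERM_WRITE, errp) < 0) {
 *         return -1;   // another user still needs to write
 *     }
 */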
1006
1007 /*
1008 * Attach device model @dev to @blk.
1009 * Return 0 on success, -EBUSY when a device model is attached already.
1010 */
1011 int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
1012 {
1013 GLOBAL_STATE_CODE();
1014 if (blk->dev) {
1015 return -EBUSY;
1016 }
1017
1018 /* While migration is still incoming, we don't need to apply the
1019 * permissions of guest device BlockBackends. We might still have a block
1020 * job or NBD server writing to the image for storage migration. */
1021 if (runstate_check(RUN_STATE_INMIGRATE)) {
1022 blk->disable_perm = true;
1023 }
1024
1025 blk_ref(blk);
1026 blk->dev = dev;
1027 blk_iostatus_reset(blk);
1028
1029 return 0;
1030 }
1031
1032 /*
1033 * Detach device model @dev from @blk.
1034 * @dev must be currently attached to @blk.
1035 */
1036 void blk_detach_dev(BlockBackend *blk, DeviceState *dev)
1037 {
1038 assert(blk->dev == dev);
1039 GLOBAL_STATE_CODE();
1040 blk->dev = NULL;
1041 blk->dev_ops = NULL;
1042 blk->dev_opaque = NULL;
1043 blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort);
1044 blk_unref(blk);
1045 }
1046
1047 /*
1048 * Return the device model attached to @blk if any, else null.
1049 */
1050 DeviceState *blk_get_attached_dev(BlockBackend *blk)
1051 {
1052 GLOBAL_STATE_CODE();
1053 return blk->dev;
1054 }
1055
1056 /* Return the qdev ID or, if no ID is assigned, the QOM path of the block
1057 * device attached to the BlockBackend. */
1058 char *blk_get_attached_dev_id(BlockBackend *blk)
1059 {
1060 DeviceState *dev = blk->dev;
1061 IO_CODE();
1062
1063 if (!dev) {
1064 return g_strdup("");
1065 } else if (dev->id) {
1066 return g_strdup(dev->id);
1067 }
1068
1069 return object_get_canonical_path(OBJECT(dev)) ?: g_strdup("");
1070 }
1071
1072 /*
1073 * Return the BlockBackend which has the device model @dev attached if it
1074 * exists, else null.
1075 *
1076 * @dev must not be null.
1077 */
1078 BlockBackend *blk_by_dev(void *dev)
1079 {
1080 BlockBackend *blk = NULL;
1081
1082 GLOBAL_STATE_CODE();
1083
1084 assert(dev != NULL);
1085 while ((blk = blk_all_next(blk)) != NULL) {
1086 if (blk->dev == dev) {
1087 return blk;
1088 }
1089 }
1090 return NULL;
1091 }
1092
1093 /*
1094 * Set @blk's device model callbacks to @ops.
1095 * @opaque is the opaque argument to pass to the callbacks.
1096 * This is for use by device models.
1097 */
1098 void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
1099 void *opaque)
1100 {
1101 GLOBAL_STATE_CODE();
1102 blk->dev_ops = ops;
1103 blk->dev_opaque = opaque;
1104
1105 /* If we are currently quiesced, let the new dev_ops know right away */
1106 if (qatomic_read(&blk->quiesce_counter) && ops && ops->drained_begin) {
1107 ops->drained_begin(opaque);
1108 }
1109 }
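/*
 * Hedged sketch of how a device model typically wires itself up;
 * my_dev_ops and my_drained_begin() are hypothetical:
 *
 *     static const BlockDevOps my_dev_ops = {
 *         .drained_begin = my_drained_begin,
 *     };
 *
 *     if (blk_attach_dev(blk, DEVICE(dev)) < 0) {
 *         return;   // -EBUSY: a device model is already attached
 *     }
 *     blk_set_dev_ops(blk, &my_dev_ops, dev);
 */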
1110
1111 /*
1112 * Notify @blk's attached device model of media change.
1113 *
1114 * If @load is true, notify of media load. This action can fail, meaning that
1115 * the medium cannot be loaded; in that case, @errp is set.
1116 *
1117 * If @load is false, notify of media eject. This can never fail.
1118 *
1119 * Also send DEVICE_TRAY_MOVED events as appropriate.
1120 */
1121 void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp)
1122 {
1123 GLOBAL_STATE_CODE();
1124 if (blk->dev_ops && blk->dev_ops->change_media_cb) {
1125 bool tray_was_open, tray_is_open;
1126 Error *local_err = NULL;
1127
1128 tray_was_open = blk_dev_is_tray_open(blk);
1129 blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err);
1130 if (local_err) {
1131 assert(load == true);
1132 error_propagate(errp, local_err);
1133 return;
1134 }
1135 tray_is_open = blk_dev_is_tray_open(blk);
1136
1137 if (tray_was_open != tray_is_open) {
1138 char *id = blk_get_attached_dev_id(blk);
1139 qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open);
1140 g_free(id);
1141 }
1142 }
1143 }
1144
1145 static void blk_root_change_media(BdrvChild *child, bool load)
1146 {
1147 blk_dev_change_media_cb(child->opaque, load, NULL);
1148 }
1149
1150 /*
1151 * Does @blk's attached device model have removable media?
1152 * %true if no device model is attached.
1153 */
1154 bool blk_dev_has_removable_media(BlockBackend *blk)
1155 {
1156 GLOBAL_STATE_CODE();
1157 return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
1158 }
1159
1160 /*
1161 * Does @blk's attached device model have a tray?
1162 */
1163 bool blk_dev_has_tray(BlockBackend *blk)
1164 {
1165 IO_CODE();
1166 return blk->dev_ops && blk->dev_ops->is_tray_open;
1167 }
1168
1169 /*
1170 * Notify @blk's attached device model of a media eject request.
1171 * If @force is true, the medium is about to be yanked out forcefully.
1172 */
1173 void blk_dev_eject_request(BlockBackend *blk, bool force)
1174 {
1175 GLOBAL_STATE_CODE();
1176 if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
1177 blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
1178 }
1179 }
1180
1181 /*
1182 * Does @blk's attached device model have a tray, and is it open?
1183 */
1184 bool blk_dev_is_tray_open(BlockBackend *blk)
1185 {
1186 IO_CODE();
1187 if (blk_dev_has_tray(blk)) {
1188 return blk->dev_ops->is_tray_open(blk->dev_opaque);
1189 }
1190 return false;
1191 }
1192
1193 /*
1194 * Does @blk's attached device model have the medium locked?
1195 * %false if the device model has no such lock.
1196 */
1197 bool blk_dev_is_medium_locked(BlockBackend *blk)
1198 {
1199 GLOBAL_STATE_CODE();
1200 if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
1201 return blk->dev_ops->is_medium_locked(blk->dev_opaque);
1202 }
1203 return false;
1204 }
1205
1206 /*
1207 * Notify @blk's attached device model of a backend size change.
1208 */
1209 static void blk_root_resize(BdrvChild *child)
1210 {
1211 BlockBackend *blk = child->opaque;
1212
1213 if (blk->dev_ops && blk->dev_ops->resize_cb) {
1214 blk->dev_ops->resize_cb(blk->dev_opaque);
1215 }
1216 }
1217
1218 void blk_iostatus_enable(BlockBackend *blk)
1219 {
1220 GLOBAL_STATE_CODE();
1221 blk->iostatus_enabled = true;
1222 blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
1223 }
1224
1225 /* The I/O status is only enabled if the drive explicitly
1226 * enables it _and_ the VM is configured to stop on errors */
1227 bool blk_iostatus_is_enabled(const BlockBackend *blk)
1228 {
1229 IO_CODE();
1230 return (blk->iostatus_enabled &&
1231 (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
1232 blk->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
1233 blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
1234 }
1235
1236 BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
1237 {
1238 GLOBAL_STATE_CODE();
1239 return blk->iostatus;
1240 }
1241
1242 void blk_iostatus_disable(BlockBackend *blk)
1243 {
1244 GLOBAL_STATE_CODE();
1245 blk->iostatus_enabled = false;
1246 }
1247
1248 void blk_iostatus_reset(BlockBackend *blk)
1249 {
1250 GLOBAL_STATE_CODE();
1251 if (blk_iostatus_is_enabled(blk)) {
1252 blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
1253 }
1254 }
1255
1256 void blk_iostatus_set_err(BlockBackend *blk, int error)
1257 {
1258 IO_CODE();
1259 assert(blk_iostatus_is_enabled(blk));
1260 if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
1261 blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
1262 BLOCK_DEVICE_IO_STATUS_FAILED;
1263 }
1264 }
1265
1266 void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
1267 {
1268 IO_CODE();
1269 blk->allow_write_beyond_eof = allow;
1270 }
1271
1272 void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow)
1273 {
1274 IO_CODE();
1275 blk->allow_aio_context_change = allow;
1276 }
1277
1278 void blk_set_disable_request_queuing(BlockBackend *blk, bool disable)
1279 {
1280 IO_CODE();
1281 qatomic_set(&blk->disable_request_queuing, disable);
1282 }
1283
1284 static int coroutine_fn GRAPH_RDLOCK
1285 blk_check_byte_request(BlockBackend *blk, int64_t offset, int64_t bytes)
1286 {
1287 int64_t len;
1288
1289 if (bytes < 0) {
1290 return -EIO;
1291 }
1292
1293 if (!blk_co_is_available(blk)) {
1294 return -ENOMEDIUM;
1295 }
1296
1297 if (offset < 0) {
1298 return -EIO;
1299 }
1300
1301 if (!blk->allow_write_beyond_eof) {
1302 len = bdrv_co_getlength(blk_bs(blk));
1303 if (len < 0) {
1304 return len;
1305 }
1306
1307 if (offset > len || len - offset < bytes) {
1308 return -EIO;
1309 }
1310 }
1311
1312 return 0;
1313 }
1314
1315 /* Are we currently in a drained section? */
1316 bool blk_in_drain(BlockBackend *blk)
1317 {
1318 GLOBAL_STATE_CODE(); /* change to IO_OR_GS_CODE(), if necessary */
1319 return qatomic_read(&blk->quiesce_counter);
1320 }
1321
1322 /* To be called between exactly one pair of blk_inc/dec_in_flight() */
1323 static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
1324 {
1325 assert(blk->in_flight > 0);
1326
1327 if (qatomic_read(&blk->quiesce_counter) &&
1328 !qatomic_read(&blk->disable_request_queuing)) {
1329 /*
1330 * Take lock before decrementing in flight counter so main loop thread
1331 * waits for us to enqueue ourselves before it can leave the drained
1332 * section.
1333 */
1334 qemu_mutex_lock(&blk->queued_requests_lock);
1335 blk_dec_in_flight(blk);
1336 qemu_co_queue_wait(&blk->queued_requests, &blk->queued_requests_lock);
1337 blk_inc_in_flight(blk);
1338 qemu_mutex_unlock(&blk->queued_requests_lock);
1339 }
1340 }
1341
1342 /* To be called between exactly one pair of blk_inc/dec_in_flight() */
1343 static int coroutine_fn
1344 blk_co_do_preadv_part(BlockBackend *blk, int64_t offset, int64_t bytes,
1345 QEMUIOVector *qiov, size_t qiov_offset,
1346 BdrvRequestFlags flags)
1347 {
1348 int ret;
1349 BlockDriverState *bs;
1350 IO_CODE();
1351
1352 blk_wait_while_drained(blk);
1353 GRAPH_RDLOCK_GUARD();
1354
1355 /* Call blk_bs() only after waiting, the graph may have changed */
1356 bs = blk_bs(blk);
1357 trace_blk_co_preadv(blk, bs, offset, bytes, flags);
1358
1359 ret = blk_check_byte_request(blk, offset, bytes);
1360 if (ret < 0) {
1361 return ret;
1362 }
1363
1364 bdrv_inc_in_flight(bs);
1365
1366 /* throttling disk I/O */
1367 if (blk->public.throttle_group_member.throttle_state) {
1368 throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
1369 bytes, THROTTLE_READ);
1370 }
1371
1372 ret = bdrv_co_preadv_part(blk->root, offset, bytes, qiov, qiov_offset,
1373 flags);
1374 bdrv_dec_in_flight(bs);
1375 return ret;
1376 }
1377
1378 int coroutine_fn blk_co_pread(BlockBackend *blk, int64_t offset, int64_t bytes,
1379 void *buf, BdrvRequestFlags flags)
1380 {
1381 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
1382 IO_OR_GS_CODE();
1383
1384 assert(bytes <= SIZE_MAX);
1385
1386 return blk_co_preadv(blk, offset, bytes, &qiov, flags);
1387 }
1388
1389 int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
1390 int64_t bytes, QEMUIOVector *qiov,
1391 BdrvRequestFlags flags)
1392 {
1393 int ret;
1394 IO_OR_GS_CODE();
1395
1396 blk_inc_in_flight(blk);
1397 ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, 0, flags);
1398 blk_dec_in_flight(blk);
1399
1400 return ret;
1401 }
1402
1403 int coroutine_fn blk_co_preadv_part(BlockBackend *blk, int64_t offset,
1404 int64_t bytes, QEMUIOVector *qiov,
1405 size_t qiov_offset, BdrvRequestFlags flags)
1406 {
1407 int ret;
1408 IO_OR_GS_CODE();
1409
1410 blk_inc_in_flight(blk);
1411 ret = blk_co_do_preadv_part(blk, offset, bytes, qiov, qiov_offset, flags);
1412 blk_dec_in_flight(blk);
1413
1414 return ret;
1415 }
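/*
 * Illustrative sketch (coroutine context): reading 4 KiB from offset 0
 * with the byte-buffer convenience wrapper; buf is a hypothetical buffer:
 *
 *     uint8_t buf[4096];
 *     int ret;
 *
 *     ret = blk_co_pread(blk, 0, sizeof(buf), buf, 0);
 *     if (ret < 0) {
 *         return ret;   // e.g. -EIO or -ENOMEDIUM
 *     }
 */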
1416
1417 /* To be called between exactly one pair of blk_inc/dec_in_flight() */
1418 static int coroutine_fn
1419 blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
1420 QEMUIOVector *qiov, size_t qiov_offset,
1421 BdrvRequestFlags flags)
1422 {
1423 int ret;
1424 BlockDriverState *bs;
1425 IO_CODE();
1426
1427 blk_wait_while_drained(blk);
1428 GRAPH_RDLOCK_GUARD();
1429
1430 /* Call blk_bs() only after waiting, the graph may have changed */
1431 bs = blk_bs(blk);
1432 trace_blk_co_pwritev(blk, bs, offset, bytes, flags);
1433
1434 ret = blk_check_byte_request(blk, offset, bytes);
1435 if (ret < 0) {
1436 return ret;
1437 }
1438
1439 bdrv_inc_in_flight(bs);
1440 /* throttling disk I/O */
1441 if (blk->public.throttle_group_member.throttle_state) {
1442 throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
1443 bytes, THROTTLE_WRITE);
1444 }
1445
1446 if (!blk->enable_write_cache) {
1447 flags |= BDRV_REQ_FUA;
1448 }
1449
1450 ret = bdrv_co_pwritev_part(blk->root, offset, bytes, qiov, qiov_offset,
1451 flags);
1452 bdrv_dec_in_flight(bs);
1453 return ret;
1454 }
1455
1456 int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
1457 int64_t bytes,
1458 QEMUIOVector *qiov, size_t qiov_offset,
1459 BdrvRequestFlags flags)
1460 {
1461 int ret;
1462 IO_OR_GS_CODE();
1463
1464 blk_inc_in_flight(blk);
1465 ret = blk_co_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags);
1466 blk_dec_in_flight(blk);
1467
1468 return ret;
1469 }
1470
1471 int coroutine_fn blk_co_pwrite(BlockBackend *blk, int64_t offset, int64_t bytes,
1472 const void *buf, BdrvRequestFlags flags)
1473 {
1474 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
1475 IO_OR_GS_CODE();
1476
1477 assert(bytes <= SIZE_MAX);
1478
1479 return blk_co_pwritev(blk, offset, bytes, &qiov, flags);
1480 }
1481
1482 int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
1483 int64_t bytes, QEMUIOVector *qiov,
1484 BdrvRequestFlags flags)
1485 {
1486 IO_OR_GS_CODE();
1487 return blk_co_pwritev_part(blk, offset, bytes, qiov, 0, flags);
1488 }
1489
1490 int coroutine_fn blk_co_block_status_above(BlockBackend *blk,
1491 BlockDriverState *base,
1492 int64_t offset, int64_t bytes,
1493 int64_t *pnum, int64_t *map,
1494 BlockDriverState **file)
1495 {
1496 IO_CODE();
1497 GRAPH_RDLOCK_GUARD();
1498 return bdrv_co_block_status_above(blk_bs(blk), base, offset, bytes, pnum,
1499 map, file);
1500 }
1501
1502 int coroutine_fn blk_co_is_allocated_above(BlockBackend *blk,
1503 BlockDriverState *base,
1504 bool include_base, int64_t offset,
1505 int64_t bytes, int64_t *pnum)
1506 {
1507 IO_CODE();
1508 GRAPH_RDLOCK_GUARD();
1509 return bdrv_co_is_allocated_above(blk_bs(blk), base, include_base, offset,
1510 bytes, pnum);
1511 }
1512
1513 typedef struct BlkRwCo {
1514 BlockBackend *blk;
1515 int64_t offset;
1516 void *iobuf;
1517 int ret;
1518 BdrvRequestFlags flags;
1519 } BlkRwCo;
1520
1521 int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
1522 {
1523 GLOBAL_STATE_CODE();
1524 return bdrv_make_zero(blk->root, flags);
1525 }
1526
1527 void blk_inc_in_flight(BlockBackend *blk)
1528 {
1529 IO_CODE();
1530 qatomic_inc(&blk->in_flight);
1531 }
1532
1533 void blk_dec_in_flight(BlockBackend *blk)
1534 {
1535 IO_CODE();
1536 qatomic_dec(&blk->in_flight);
1537 aio_wait_kick();
1538 }
1539
1540 static void error_callback_bh(void *opaque)
1541 {
1542 struct BlockBackendAIOCB *acb = opaque;
1543
1544 blk_dec_in_flight(acb->blk);
1545 acb->common.cb(acb->common.opaque, acb->ret);
1546 qemu_aio_unref(acb);
1547 }
1548
1549 BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
1550 BlockCompletionFunc *cb,
1551 void *opaque, int ret)
1552 {
1553 struct BlockBackendAIOCB *acb;
1554 IO_CODE();
1555
1556 blk_inc_in_flight(blk);
1557 acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
1558 acb->blk = blk;
1559 acb->ret = ret;
1560
1561 replay_bh_schedule_oneshot_event(qemu_get_current_aio_context(),
1562 error_callback_bh, acb);
1563 return &acb->common;
1564 }
1565
1566 typedef struct BlkAioEmAIOCB {
1567 BlockAIOCB common;
1568 BlkRwCo rwco;
1569 int64_t bytes;
1570 bool has_returned;
1571 } BlkAioEmAIOCB;
1572
1573 static const AIOCBInfo blk_aio_em_aiocb_info = {
1574 .aiocb_size = sizeof(BlkAioEmAIOCB),
1575 };
1576
1577 static void blk_aio_complete(BlkAioEmAIOCB *acb)
1578 {
1579 if (acb->has_returned) {
1580 acb->common.cb(acb->common.opaque, acb->rwco.ret);
1581 blk_dec_in_flight(acb->rwco.blk);
1582 qemu_aio_unref(acb);
1583 }
1584 }
1585
1586 static void blk_aio_complete_bh(void *opaque)
1587 {
1588 BlkAioEmAIOCB *acb = opaque;
1589 assert(acb->has_returned);
1590 blk_aio_complete(acb);
1591 }
1592
1593 static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset,
1594 int64_t bytes,
1595 void *iobuf, CoroutineEntry co_entry,
1596 BdrvRequestFlags flags,
1597 BlockCompletionFunc *cb, void *opaque)
1598 {
1599 BlkAioEmAIOCB *acb;
1600 Coroutine *co;
1601
1602 blk_inc_in_flight(blk);
1603 acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
1604 acb->rwco = (BlkRwCo) {
1605 .blk = blk,
1606 .offset = offset,
1607 .iobuf = iobuf,
1608 .flags = flags,
1609 .ret = NOT_DONE,
1610 };
1611 acb->bytes = bytes;
1612 acb->has_returned = false;
1613
1614 co = qemu_coroutine_create(co_entry, acb);
1615 aio_co_enter(qemu_get_current_aio_context(), co);
1616
1617 acb->has_returned = true;
1618 if (acb->rwco.ret != NOT_DONE) {
1619 replay_bh_schedule_oneshot_event(qemu_get_current_aio_context(),
1620 blk_aio_complete_bh, acb);
1621 }
1622
1623 return &acb->common;
1624 }
1625
1626 static void coroutine_fn blk_aio_read_entry(void *opaque)
1627 {
1628 BlkAioEmAIOCB *acb = opaque;
1629 BlkRwCo *rwco = &acb->rwco;
1630 QEMUIOVector *qiov = rwco->iobuf;
1631
1632 assert(qiov->size == acb->bytes);
1633 rwco->ret = blk_co_do_preadv_part(rwco->blk, rwco->offset, acb->bytes, qiov,
1634 0, rwco->flags);
1635 blk_aio_complete(acb);
1636 }
1637
1638 static void coroutine_fn blk_aio_write_entry(void *opaque)
1639 {
1640 BlkAioEmAIOCB *acb = opaque;
1641 BlkRwCo *rwco = &acb->rwco;
1642 QEMUIOVector *qiov = rwco->iobuf;
1643
1644 assert(!qiov || qiov->size == acb->bytes);
1645 rwco->ret = blk_co_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes,
1646 qiov, 0, rwco->flags);
1647 blk_aio_complete(acb);
1648 }
1649
1650 BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
1651 int64_t bytes, BdrvRequestFlags flags,
1652 BlockCompletionFunc *cb, void *opaque)
1653 {
1654 IO_CODE();
1655 return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_write_entry,
1656 flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
1657 }
1658
1659 int64_t coroutine_fn blk_co_getlength(BlockBackend *blk)
1660 {
1661 IO_CODE();
1662 GRAPH_RDLOCK_GUARD();
1663
1664 if (!blk_co_is_available(blk)) {
1665 return -ENOMEDIUM;
1666 }
1667
1668 return bdrv_co_getlength(blk_bs(blk));
1669 }
1670
1671 int64_t coroutine_fn blk_co_nb_sectors(BlockBackend *blk)
1672 {
1673 BlockDriverState *bs = blk_bs(blk);
1674
1675 IO_CODE();
1676 GRAPH_RDLOCK_GUARD();
1677
1678 if (!bs) {
1679 return -ENOMEDIUM;
1680 } else {
1681 return bdrv_co_nb_sectors(bs);
1682 }
1683 }
1684
1685 /*
1686 * This wrapper is written by hand because this function is in the hot I/O path,
1687 * via blk_get_geometry.
1688 */
1689 int64_t coroutine_mixed_fn blk_nb_sectors(BlockBackend *blk)
1690 {
1691 BlockDriverState *bs = blk_bs(blk);
1692
1693 IO_CODE();
1694
1695 if (!bs) {
1696 return -ENOMEDIUM;
1697 } else {
1698 return bdrv_nb_sectors(bs);
1699 }
1700 }
1701
1702 /* Return 0 as the number of sectors if no device is present or on error */
1703 void coroutine_fn blk_co_get_geometry(BlockBackend *blk,
1704 uint64_t *nb_sectors_ptr)
1705 {
1706 int64_t ret = blk_co_nb_sectors(blk);
1707 *nb_sectors_ptr = ret < 0 ? 0 : ret;
1708 }
1709
1710 /*
1711 * This wrapper is written by hand because this function is in the hot I/O path.
1712 */
1713 void coroutine_mixed_fn blk_get_geometry(BlockBackend *blk,
1714 uint64_t *nb_sectors_ptr)
1715 {
1716 int64_t ret = blk_nb_sectors(blk);
1717 *nb_sectors_ptr = ret < 0 ? 0 : ret;
1718 }
1719
1720 BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
1721 QEMUIOVector *qiov, BdrvRequestFlags flags,
1722 BlockCompletionFunc *cb, void *opaque)
1723 {
1724 IO_CODE();
1725 assert((uint64_t)qiov->size <= INT64_MAX);
1726 return blk_aio_prwv(blk, offset, qiov->size, qiov,
1727 blk_aio_read_entry, flags, cb, opaque);
1728 }
1729
1730 BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
1731 QEMUIOVector *qiov, BdrvRequestFlags flags,
1732 BlockCompletionFunc *cb, void *opaque)
1733 {
1734 IO_CODE();
1735 assert((uint64_t)qiov->size <= INT64_MAX);
1736 return blk_aio_prwv(blk, offset, qiov->size, qiov,
1737 blk_aio_write_entry, flags, cb, opaque);
1738 }
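/*
 * Hedged sketch of the asynchronous interface built on blk_aio_prwv();
 * my_write_done() is a hypothetical completion callback invoked in the
 * AioContext the request was submitted from:
 *
 *     static void my_write_done(void *opaque, int ret)
 *     {
 *         // ret is 0 on success, -errno on failure
 *     }
 *
 *     blk_aio_pwritev(blk, offset, &qiov, 0, my_write_done, opaque);
 */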
1739
1740 void blk_aio_cancel(BlockAIOCB *acb)
1741 {
1742 GLOBAL_STATE_CODE();
1743 bdrv_aio_cancel(acb);
1744 }
1745
1746 void blk_aio_cancel_async(BlockAIOCB *acb)
1747 {
1748 IO_CODE();
1749 bdrv_aio_cancel_async(acb);
1750 }
1751
1752 /* To be called between exactly one pair of blk_inc/dec_in_flight() */
1753 static int coroutine_fn
1754 blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
1755 {
1756 IO_CODE();
1757
1758 blk_wait_while_drained(blk);
1759 GRAPH_RDLOCK_GUARD();
1760
1761 if (!blk_co_is_available(blk)) {
1762 return -ENOMEDIUM;
1763 }
1764
1765 return bdrv_co_ioctl(blk_bs(blk), req, buf);
1766 }
1767
1768 int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req,
1769 void *buf)
1770 {
1771 int ret;
1772 IO_OR_GS_CODE();
1773
1774 blk_inc_in_flight(blk);
1775 ret = blk_co_do_ioctl(blk, req, buf);
1776 blk_dec_in_flight(blk);
1777
1778 return ret;
1779 }
1780
1781 static void coroutine_fn blk_aio_ioctl_entry(void *opaque)
1782 {
1783 BlkAioEmAIOCB *acb = opaque;
1784 BlkRwCo *rwco = &acb->rwco;
1785
1786 rwco->ret = blk_co_do_ioctl(rwco->blk, rwco->offset, rwco->iobuf);
1787
1788 blk_aio_complete(acb);
1789 }
1790
1791 BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
1792 BlockCompletionFunc *cb, void *opaque)
1793 {
1794 IO_CODE();
1795 return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque);
1796 }
1797
1798 /* To be called between exactly one pair of blk_inc/dec_in_flight() */
1799 static int coroutine_fn
1800 blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes)
1801 {
1802 int ret;
1803 IO_CODE();
1804
1805 blk_wait_while_drained(blk);
1806 GRAPH_RDLOCK_GUARD();
1807
1808 ret = blk_check_byte_request(blk, offset, bytes);
1809 if (ret < 0) {
1810 return ret;
1811 }
1812
1813 return bdrv_co_pdiscard(blk->root, offset, bytes);
1814 }
1815
1816 static void coroutine_fn blk_aio_pdiscard_entry(void *opaque)
1817 {
1818 BlkAioEmAIOCB *acb = opaque;
1819 BlkRwCo *rwco = &acb->rwco;
1820
1821 rwco->ret = blk_co_do_pdiscard(rwco->blk, rwco->offset, acb->bytes);
1822 blk_aio_complete(acb);
1823 }
1824
1825 BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
1826 int64_t offset, int64_t bytes,
1827 BlockCompletionFunc *cb, void *opaque)
1828 {
1829 IO_CODE();
1830 return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
1831 cb, opaque);
1832 }
1833
1834 int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset,
1835 int64_t bytes)
1836 {
1837 int ret;
1838 IO_OR_GS_CODE();
1839
1840 blk_inc_in_flight(blk);
1841 ret = blk_co_do_pdiscard(blk, offset, bytes);
1842 blk_dec_in_flight(blk);
1843
1844 return ret;
1845 }
1846
1847 /* To be called between exactly one pair of blk_inc/dec_in_flight() */
1848 static int coroutine_fn blk_co_do_flush(BlockBackend *blk)
1849 {
1850 IO_CODE();
1851 blk_wait_while_drained(blk);
1852 GRAPH_RDLOCK_GUARD();
1853
1854 if (!blk_co_is_available(blk)) {
1855 return -ENOMEDIUM;
1856 }
1857
1858 return bdrv_co_flush(blk_bs(blk));
1859 }
1860
1861 static void coroutine_fn blk_aio_flush_entry(void *opaque)
1862 {
1863 BlkAioEmAIOCB *acb = opaque;
1864 BlkRwCo *rwco = &acb->rwco;
1865
1866 rwco->ret = blk_co_do_flush(rwco->blk);
1867 blk_aio_complete(acb);
1868 }
1869
1870 BlockAIOCB *blk_aio_flush(BlockBackend *blk,
1871 BlockCompletionFunc *cb, void *opaque)
1872 {
1873 IO_CODE();
1874 return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
1875 }
1876
1877 int coroutine_fn blk_co_flush(BlockBackend *blk)
1878 {
1879 int ret;
1880 IO_OR_GS_CODE();
1881
1882 blk_inc_in_flight(blk);
1883 ret = blk_co_do_flush(blk);
1884 blk_dec_in_flight(blk);
1885
1886 return ret;
1887 }
1888
1889 static void coroutine_fn blk_aio_zone_report_entry(void *opaque)
1890 {
1891 BlkAioEmAIOCB *acb = opaque;
1892 BlkRwCo *rwco = &acb->rwco;
1893
1894 rwco->ret = blk_co_zone_report(rwco->blk, rwco->offset,
1895 (unsigned int*)(uintptr_t)acb->bytes,
1896 rwco->iobuf);
1897 blk_aio_complete(acb);
1898 }
1899
1900 BlockAIOCB *blk_aio_zone_report(BlockBackend *blk, int64_t offset,
1901 unsigned int *nr_zones,
1902 BlockZoneDescriptor *zones,
1903 BlockCompletionFunc *cb, void *opaque)
1904 {
1905 BlkAioEmAIOCB *acb;
1906 Coroutine *co;
1907 IO_CODE();
1908
1909 blk_inc_in_flight(blk);
1910 acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
1911 acb->rwco = (BlkRwCo) {
1912 .blk = blk,
1913 .offset = offset,
1914 .iobuf = zones,
1915 .ret = NOT_DONE,
1916 };
1917 acb->bytes = (int64_t)(uintptr_t)nr_zones;
1918 acb->has_returned = false;
1919
1920 co = qemu_coroutine_create(blk_aio_zone_report_entry, acb);
1921 aio_co_enter(qemu_get_current_aio_context(), co);
1922
1923 acb->has_returned = true;
1924 if (acb->rwco.ret != NOT_DONE) {
1925 replay_bh_schedule_oneshot_event(qemu_get_current_aio_context(),
1926 blk_aio_complete_bh, acb);
1927 }
1928
1929 return &acb->common;
1930 }
1931
1932 static void coroutine_fn blk_aio_zone_mgmt_entry(void *opaque)
1933 {
1934 BlkAioEmAIOCB *acb = opaque;
1935 BlkRwCo *rwco = &acb->rwco;
1936
1937 rwco->ret = blk_co_zone_mgmt(rwco->blk,
1938 (BlockZoneOp)(uintptr_t)rwco->iobuf,
1939 rwco->offset, acb->bytes);
1940 blk_aio_complete(acb);
1941 }
1942
1943 BlockAIOCB *blk_aio_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
1944 int64_t offset, int64_t len,
1945 BlockCompletionFunc *cb, void *opaque)
{
1946 BlkAioEmAIOCB *acb;
1947 Coroutine *co;
1948 IO_CODE();
1949
1950 blk_inc_in_flight(blk);
1951 acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
1952 acb->rwco = (BlkRwCo) {
1953 .blk = blk,
1954 .offset = offset,
1955 .iobuf = (void *)(uintptr_t)op,
1956 .ret = NOT_DONE,
1957 };
1958 acb->bytes = len;
1959 acb->has_returned = false;
1960
1961 co = qemu_coroutine_create(blk_aio_zone_mgmt_entry, acb);
1962 aio_co_enter(qemu_get_current_aio_context(), co);
1963
1964 acb->has_returned = true;
1965 if (acb->rwco.ret != NOT_DONE) {
1966 replay_bh_schedule_oneshot_event(qemu_get_current_aio_context(),
1967 blk_aio_complete_bh, acb);
1968 }
1969
1970 return &acb->common;
1971 }
1972
1973 static void coroutine_fn blk_aio_zone_append_entry(void *opaque)
1974 {
1975 BlkAioEmAIOCB *acb = opaque;
1976 BlkRwCo *rwco = &acb->rwco;
1977
1978 rwco->ret = blk_co_zone_append(rwco->blk, (int64_t *)(uintptr_t)acb->bytes,
1979 rwco->iobuf, rwco->flags);
1980 blk_aio_complete(acb);
1981 }
1982
1983 BlockAIOCB *blk_aio_zone_append(BlockBackend *blk, int64_t *offset,
1984 QEMUIOVector *qiov, BdrvRequestFlags flags,
1985 BlockCompletionFunc *cb, void *opaque)
{
1986 BlkAioEmAIOCB *acb;
1987 Coroutine *co;
1988 IO_CODE();
1989
1990 blk_inc_in_flight(blk);
1991 acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
1992 acb->rwco = (BlkRwCo) {
1993 .blk = blk,
1994 .ret = NOT_DONE,
1995 .flags = flags,
1996 .iobuf = qiov,
1997 };
1998 acb->bytes = (int64_t)(uintptr_t)offset;
1999 acb->has_returned = false;
2000
2001 co = qemu_coroutine_create(blk_aio_zone_append_entry, acb);
2002 aio_co_enter(qemu_get_current_aio_context(), co);
2003 acb->has_returned = true;
2004 if (acb->rwco.ret != NOT_DONE) {
2005 replay_bh_schedule_oneshot_event(qemu_get_current_aio_context(),
2006 blk_aio_complete_bh, acb);
2007 }
2008
2009 return &acb->common;
2010 }
2011
2012 /*
2013 * Send a zone_report command.
2014 * offset is a byte offset from the start of the device. No alignment
2015 * required for offset.
2016 * nr_zones is an IN/OUT parameter: IN, the maximum number of zones to report; OUT, the number actually reported.
2017 */
2018 int coroutine_fn blk_co_zone_report(BlockBackend *blk, int64_t offset,
2019 unsigned int *nr_zones,
2020 BlockZoneDescriptor *zones)
2021 {
2022 int ret;
2023 IO_CODE();
2024
2025 blk_inc_in_flight(blk); /* increase before waiting */
2026 blk_wait_while_drained(blk);
2027 GRAPH_RDLOCK_GUARD();
2028 if (!blk_is_available(blk)) {
2029 blk_dec_in_flight(blk);
2030 return -ENOMEDIUM;
2031 }
2032 ret = bdrv_co_zone_report(blk_bs(blk), offset, nr_zones, zones);
2033 blk_dec_in_flight(blk);
2034 return ret;
2035 }
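/*
 * Illustrative sketch (coroutine context) of the IN/OUT contract of
 * nr_zones described above:
 *
 *     BlockZoneDescriptor zones[16];
 *     unsigned int nr_zones = ARRAY_SIZE(zones);   // IN: capacity
 *     int ret;
 *
 *     ret = blk_co_zone_report(blk, 0, &nr_zones, zones);
 *     // on success, nr_zones holds the number of entries actually filled
 */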
2036
2037 /*
2038 * Send a zone_management command.
2039 * op is the zone operation;
2040 * offset is the byte offset from the start of the zoned device;
2041 * len is the maximum number of bytes the command should operate on. It
2042 * should be aligned with the device zone size.
2043 */
2044 int coroutine_fn blk_co_zone_mgmt(BlockBackend *blk, BlockZoneOp op,
2045 int64_t offset, int64_t len)
2046 {
2047 int ret;
2048 IO_CODE();
2049
2050 blk_inc_in_flight(blk);
2051 blk_wait_while_drained(blk);
2052 GRAPH_RDLOCK_GUARD();
2053
2054 ret = blk_check_byte_request(blk, offset, len);
2055 if (ret < 0) {
2056 blk_dec_in_flight(blk);
2057 return ret;
2058 }
2059
2060 ret = bdrv_co_zone_mgmt(blk_bs(blk), op, offset, len);
2061 blk_dec_in_flight(blk);
2062 return ret;
2063 }
2064
2065 /*
2066 * Send a zone_append command. *offset is in/out: the zone start offset on
2067 * input; updated to the position actually written on output. */
2068 int coroutine_fn blk_co_zone_append(BlockBackend *blk, int64_t *offset,
2069 QEMUIOVector *qiov, BdrvRequestFlags flags)
2070 {
2071 int ret;
2072 IO_CODE();
2073
2074 blk_inc_in_flight(blk);
2075 blk_wait_while_drained(blk);
2076 GRAPH_RDLOCK_GUARD();
2077 if (!blk_is_available(blk)) {
2078 blk_dec_in_flight(blk);
2079 return -ENOMEDIUM;
2080 }
2081
2082 ret = bdrv_co_zone_append(blk_bs(blk), offset, qiov, flags);
2083 blk_dec_in_flight(blk);
2084 return ret;
2085 }
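/*
 * Illustrative only: appending a buffer to a zone and reading back the
 * position the device chose. Caller and buffer management are hypothetical.
 */
#if 0
static int coroutine_fn example_zone_append(BlockBackend *blk,
                                            int64_t zone_start,
                                            void *buf, size_t len)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, len);
    int64_t offset = zone_start; /* IN: start of the target zone */
    int ret = blk_co_zone_append(blk, &offset, &qiov, 0);
    /* OUT: on success, offset holds where the data actually landed */
    return ret;
}
#endif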
2086
2087 void blk_drain(BlockBackend *blk)
2088 {
2089 BlockDriverState *bs = blk_bs(blk);
2090 GLOBAL_STATE_CODE();
2091
2092 if (bs) {
2093 bdrv_ref(bs);
2094 bdrv_drained_begin(bs);
2095 }
2096
2097 /* We may have -ENOMEDIUM completions in flight */
2098 AIO_WAIT_WHILE(blk_get_aio_context(blk),
2099 qatomic_read(&blk->in_flight) > 0);
2100
2101 if (bs) {
2102 bdrv_drained_end(bs);
2103 bdrv_unref(bs);
2104 }
2105 }
2106
2107 void blk_drain_all(void)
2108 {
2109 BlockBackend *blk = NULL;
2110
2111 GLOBAL_STATE_CODE();
2112
2113 bdrv_drain_all_begin();
2114
2115 while ((blk = blk_all_next(blk)) != NULL) {
2116 /* We may have -ENOMEDIUM completions in flight */
2117 AIO_WAIT_WHILE_UNLOCKED(NULL, qatomic_read(&blk->in_flight) > 0);
2118 }
2119
2120 bdrv_drain_all_end();
2121 }
2122
2123 void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
2124 BlockdevOnError on_write_error)
2125 {
2126 GLOBAL_STATE_CODE();
2127 blk->on_read_error = on_read_error;
2128 blk->on_write_error = on_write_error;
2129 }
2130
2131 BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
2132 {
2133 IO_CODE();
2134 return is_read ? blk->on_read_error : blk->on_write_error;
2135 }
2136
2137 BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
2138 int error)
2139 {
2140 BlockdevOnError on_err = blk_get_on_error(blk, is_read);
2141 IO_CODE();
2142
2143 switch (on_err) {
2144 case BLOCKDEV_ON_ERROR_ENOSPC:
2145 return (error == ENOSPC) ?
2146 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
2147 case BLOCKDEV_ON_ERROR_STOP:
2148 return BLOCK_ERROR_ACTION_STOP;
2149 case BLOCKDEV_ON_ERROR_REPORT:
2150 return BLOCK_ERROR_ACTION_REPORT;
2151 case BLOCKDEV_ON_ERROR_IGNORE:
2152 return BLOCK_ERROR_ACTION_IGNORE;
2153 case BLOCKDEV_ON_ERROR_AUTO:
2154 default:
2155 abort();
2156 }
2157 }
2158
2159 static void send_qmp_error_event(BlockBackend *blk,
2160 BlockErrorAction action,
2161 bool is_read, int error)
2162 {
2163 IoOperationType optype;
2164 BlockDriverState *bs = blk_bs(blk);
2165
2166 optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
2167 qapi_event_send_block_io_error(blk_name(blk),
2168 bs ? bdrv_get_node_name(bs) : NULL, optype,
2169 action, blk_iostatus_is_enabled(blk),
2170 error == ENOSPC, strerror(error));
2171 }
2172
2173 /* This is done by device models because, while the block layer knows
2174 * about the error, it does not know whether an operation comes from
2175 * the device or the block layer (from a job, for example).
2176 */
2177 void blk_error_action(BlockBackend *blk, BlockErrorAction action,
2178 bool is_read, int error)
2179 {
2180 assert(error >= 0);
2181 IO_CODE();
2182
2183 if (action == BLOCK_ERROR_ACTION_STOP) {
2184 /* First set the iostatus, so that "info block" returns an iostatus
2185 * that matches the events raised so far (an additional error iostatus
2186 * is fine, but not a lost one).
2187 */
2188 blk_iostatus_set_err(blk, error);
2189
2190 /* Then raise the request to stop the VM and the event.
2191 * qemu_system_vmstop_request_prepare has two effects. First,
2192 * it ensures that the STOP event always comes after the
2193 * BLOCK_IO_ERROR event. Second, it ensures that even if management
2194 * can observe the BLOCK_IO_ERROR event and do a "cont" before the STOP
2195 * event is issued, the VM will not stop. In this case, vm_start()
2196 * also ensures that the STOP/RESUME pair of events is emitted.
2197 */
2198 qemu_system_vmstop_request_prepare();
2199 send_qmp_error_event(blk, action, is_read, error);
2200 qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
2201 } else {
2202 send_qmp_error_event(blk, action, is_read, error);
2203 }
2204 }
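/*
 * How a device model typically combines blk_get_error_action() and
 * blk_error_action() on a failed request; a sketch only, the retry
 * handling mentioned in the comment is hypothetical.
 */
#if 0
static void example_handle_write_error(BlockBackend *blk, int error)
{
    /* error must be a positive errno value, e.g. EIO or ENOSPC */
    BlockErrorAction action = blk_get_error_action(blk, false, error);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* a real device model would queue the request for retry here */
    }
    blk_error_action(blk, action, false, error);
}
#endif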
2205
2206 /*
2207 * Returns true if the BlockBackend can support taking write permissions
2208 * (because its root node is not read-only).
2209 */
2210 bool blk_supports_write_perm(BlockBackend *blk)
2211 {
2212 BlockDriverState *bs = blk_bs(blk);
2213 GLOBAL_STATE_CODE();
2214
2215 if (bs) {
2216 return !bdrv_is_read_only(bs);
2217 } else {
2218 return blk->root_state.open_flags & BDRV_O_RDWR;
2219 }
2220 }
2221
2222 /*
2223 * Returns true if the BlockBackend can be written to in its current
2224 * configuration (i.e. if write permission has been requested)
2225 */
2226 bool blk_is_writable(BlockBackend *blk)
2227 {
2228 IO_CODE();
2229 return blk->perm & BLK_PERM_WRITE;
2230 }
2231
2232 bool blk_is_sg(BlockBackend *blk)
2233 {
2234 BlockDriverState *bs = blk_bs(blk);
2235 GLOBAL_STATE_CODE();
2236
2237 if (!bs) {
2238 return false;
2239 }
2240
2241 return bdrv_is_sg(bs);
2242 }
2243
2244 bool blk_enable_write_cache(BlockBackend *blk)
2245 {
2246 IO_CODE();
2247 return blk->enable_write_cache;
2248 }
2249
2250 void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
2251 {
2252 IO_CODE();
2253 blk->enable_write_cache = wce;
2254 }
2255
2256 void blk_activate(BlockBackend *blk, Error **errp)
2257 {
2258 BlockDriverState *bs = blk_bs(blk);
2259 GLOBAL_STATE_CODE();
2260
2261 if (!bs) {
2262 error_setg(errp, "Device '%s' has no medium", blk->name);
2263 return;
2264 }
2265
2266 /*
2267 * Migration code can call this function in coroutine context, so leave
2268 * coroutine context if necessary.
2269 */
2270 if (qemu_in_coroutine()) {
2271 bdrv_co_activate(bs, errp);
2272 } else {
2273 GRAPH_RDLOCK_GUARD_MAINLOOP();
2274 bdrv_activate(bs, errp);
2275 }
2276 }
2277
2278 bool coroutine_fn blk_co_is_inserted(BlockBackend *blk)
2279 {
2280 BlockDriverState *bs = blk_bs(blk);
2281 IO_CODE();
2282 assert_bdrv_graph_readable();
2283
2284 return bs && bdrv_co_is_inserted(bs);
2285 }
2286
2287 bool coroutine_fn blk_co_is_available(BlockBackend *blk)
2288 {
2289 IO_CODE();
2290 return blk_co_is_inserted(blk) && !blk_dev_is_tray_open(blk);
2291 }
2292
2293 void coroutine_fn blk_co_lock_medium(BlockBackend *blk, bool locked)
2294 {
2295 BlockDriverState *bs = blk_bs(blk);
2296 IO_CODE();
2297 GRAPH_RDLOCK_GUARD();
2298
2299 if (bs) {
2300 bdrv_co_lock_medium(bs, locked);
2301 }
2302 }
2303
2304 void coroutine_fn blk_co_eject(BlockBackend *blk, bool eject_flag)
2305 {
2306 BlockDriverState *bs = blk_bs(blk);
2307 char *id;
2308 IO_CODE();
2309 GRAPH_RDLOCK_GUARD();
2310
2311 if (bs) {
2312 bdrv_co_eject(bs, eject_flag);
2313 }
2314
2315 /* Whether or not we ejected on the backend,
2316 * the frontend experienced a tray event. */
2317 id = blk_get_attached_dev_id(blk);
2318 qapi_event_send_device_tray_moved(blk_name(blk), id,
2319 eject_flag);
2320 g_free(id);
2321 }
2322
2323 int blk_get_flags(BlockBackend *blk)
2324 {
2325 BlockDriverState *bs = blk_bs(blk);
2326 GLOBAL_STATE_CODE();
2327
2328 if (bs) {
2329 return bdrv_get_flags(bs);
2330 } else {
2331 return blk->root_state.open_flags;
2332 }
2333 }
2334
2335 /* Returns the minimum request alignment, in bytes; guaranteed nonzero */
2336 uint32_t blk_get_request_alignment(BlockBackend *blk)
2337 {
2338 BlockDriverState *bs = blk_bs(blk);
2339 IO_CODE();
2340 return bs ? bs->bl.request_alignment : BDRV_SECTOR_SIZE;
2341 }
2342
2343 /* Returns the maximum hardware transfer length, in bytes; guaranteed nonzero */
2344 uint64_t blk_get_max_hw_transfer(BlockBackend *blk)
2345 {
2346 BlockDriverState *bs = blk_bs(blk);
2347 uint64_t max = INT_MAX;
2348 IO_CODE();
2349
2350 if (bs) {
2351 max = MIN_NON_ZERO(max, bs->bl.max_hw_transfer);
2352 max = MIN_NON_ZERO(max, bs->bl.max_transfer);
2353 }
2354 return ROUND_DOWN(max, blk_get_request_alignment(blk));
2355 }
2356
2357 /* Returns the maximum transfer length, in bytes; guaranteed nonzero */
2358 uint32_t blk_get_max_transfer(BlockBackend *blk)
2359 {
2360 BlockDriverState *bs = blk_bs(blk);
2361 uint32_t max = INT_MAX;
2362 IO_CODE();
2363
2364 if (bs) {
2365 max = MIN_NON_ZERO(max, bs->bl.max_transfer);
2366 }
2367 return ROUND_DOWN(max, blk_get_request_alignment(blk));
2368 }
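/*
 * Sketch of how a caller might clamp a request to the limits returned by
 * the helpers above; the function name is hypothetical.
 */
#if 0
static uint32_t example_clamp_request(BlockBackend *blk, uint32_t bytes)
{
    uint32_t align = blk_get_request_alignment(blk);
    uint32_t max = blk_get_max_transfer(blk);

    bytes = MIN(bytes, max);              /* respect the transfer limit */
    return QEMU_ALIGN_DOWN(bytes, align); /* and keep the result aligned */
}
#endif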
2369
2370 int blk_get_max_hw_iov(BlockBackend *blk)
2371 {
2372 IO_CODE();
2373 return MIN_NON_ZERO(blk->root->bs->bl.max_hw_iov,
2374 blk->root->bs->bl.max_iov);
2375 }
2376
2377 int blk_get_max_iov(BlockBackend *blk)
2378 {
2379 IO_CODE();
2380 return blk->root->bs->bl.max_iov;
2381 }
2382
2383 void *blk_try_blockalign(BlockBackend *blk, size_t size)
2384 {
2385 IO_CODE();
2386 return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
2387 }
2388
2389 void *blk_blockalign(BlockBackend *blk, size_t size)
2390 {
2391 IO_CODE();
2392 return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
2393 }
2394
2395 bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
2396 {
2397 BlockDriverState *bs = blk_bs(blk);
2398 GLOBAL_STATE_CODE();
2399 GRAPH_RDLOCK_GUARD_MAINLOOP();
2400
2401 if (!bs) {
2402 return false;
2403 }
2404
2405 return bdrv_op_is_blocked(bs, op, errp);
2406 }
2407
2408 void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
2409 {
2410 BlockDriverState *bs = blk_bs(blk);
2411 GLOBAL_STATE_CODE();
2412
2413 if (bs) {
2414 bdrv_op_unblock(bs, op, reason);
2415 }
2416 }
2417
2418 void blk_op_block_all(BlockBackend *blk, Error *reason)
2419 {
2420 BlockDriverState *bs = blk_bs(blk);
2421 GLOBAL_STATE_CODE();
2422
2423 if (bs) {
2424 bdrv_op_block_all(bs, reason);
2425 }
2426 }
2427
2428 void blk_op_unblock_all(BlockBackend *blk, Error *reason)
2429 {
2430 BlockDriverState *bs = blk_bs(blk);
2431 GLOBAL_STATE_CODE();
2432
2433 if (bs) {
2434 bdrv_op_unblock_all(bs, reason);
2435 }
2436 }
2437
2438 AioContext *blk_get_aio_context(BlockBackend *blk)
2439 {
2440 BlockDriverState *bs;
2441 IO_CODE();
2442
2443 if (!blk) {
2444 return qemu_get_aio_context();
2445 }
2446
2447 bs = blk_bs(blk);
2448 if (bs) {
2449 AioContext *ctx = bdrv_get_aio_context(bs);
2450 assert(ctx == blk->ctx);
2451 }
2452
2453 return blk->ctx;
2454 }
2455
2456 int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
2457 Error **errp)
2458 {
2459 bool old_allow_change;
2460 BlockDriverState *bs = blk_bs(blk);
2461 int ret;
2462
2463 GLOBAL_STATE_CODE();
2464
2465 if (!bs) {
2466 blk->ctx = new_context;
2467 return 0;
2468 }
2469
2470 bdrv_ref(bs);
2471
2472 old_allow_change = blk->allow_aio_context_change;
2473 blk->allow_aio_context_change = true;
2474
2475 ret = bdrv_try_change_aio_context(bs, new_context, NULL, errp);
2476
2477 blk->allow_aio_context_change = old_allow_change;
2478
2479 bdrv_unref(bs);
2480 return ret;
2481 }
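/*
 * A hedged example of moving a backend into an IOThread's AioContext;
 * assumes the caller holds the BQL and that the IOThread already exists
 * (iothread_get_aio_context() comes from sysemu/iothread.h).
 */
#if 0
static int example_move_to_iothread(BlockBackend *blk, IOThread *iothread,
                                    Error **errp)
{
    AioContext *ctx = iothread_get_aio_context(iothread);
    return blk_set_aio_context(blk, ctx, errp);
}
#endif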
2482
2483 typedef struct BdrvStateBlkRootContext {
2484 AioContext *new_ctx;
2485 BlockBackend *blk;
2486 } BdrvStateBlkRootContext;
2487
2488 static void blk_root_set_aio_ctx_commit(void *opaque)
2489 {
2490 BdrvStateBlkRootContext *s = opaque;
2491 BlockBackend *blk = s->blk;
2492 AioContext *new_context = s->new_ctx;
2493 ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
2494
2495 blk->ctx = new_context;
2496 if (tgm->throttle_state) {
2497 throttle_group_detach_aio_context(tgm);
2498 throttle_group_attach_aio_context(tgm, new_context);
2499 }
2500 }
2501
2502 static TransactionActionDrv set_blk_root_context = {
2503 .commit = blk_root_set_aio_ctx_commit,
2504 .clean = g_free,
2505 };
2506
2507 static bool blk_root_change_aio_ctx(BdrvChild *child, AioContext *ctx,
2508 GHashTable *visited, Transaction *tran,
2509 Error **errp)
2510 {
2511 BlockBackend *blk = child->opaque;
2512 BdrvStateBlkRootContext *s;
2513
2514 if (!blk->allow_aio_context_change) {
2515 /*
2516 * Manually created BlockBackends (those with a name) that are not
2517 * attached to anything can change their AioContext without updating
2518 * their user; return an error for others.
2519 */
2520 if (!blk->name || blk->dev) {
2521 /* TODO Add BB name/QOM path */
2522 error_setg(errp, "Cannot change iothread of active block backend");
2523 return false;
2524 }
2525 }
2526
2527 s = g_new(BdrvStateBlkRootContext, 1);
2528 *s = (BdrvStateBlkRootContext) {
2529 .new_ctx = ctx,
2530 .blk = blk,
2531 };
2532
2533 tran_add(tran, &set_blk_root_context, s);
2534 return true;
2535 }
2536
2537 void blk_add_aio_context_notifier(BlockBackend *blk,
2538 void (*attached_aio_context)(AioContext *new_context, void *opaque),
2539 void (*detach_aio_context)(void *opaque), void *opaque)
2540 {
2541 BlockBackendAioNotifier *notifier;
2542 BlockDriverState *bs = blk_bs(blk);
2543 GLOBAL_STATE_CODE();
2544
2545 notifier = g_new(BlockBackendAioNotifier, 1);
2546 notifier->attached_aio_context = attached_aio_context;
2547 notifier->detach_aio_context = detach_aio_context;
2548 notifier->opaque = opaque;
2549 QLIST_INSERT_HEAD(&blk->aio_notifiers, notifier, list);
2550
2551 if (bs) {
2552 bdrv_add_aio_context_notifier(bs, attached_aio_context,
2553 detach_aio_context, opaque);
2554 }
2555 }
2556
2557 void blk_remove_aio_context_notifier(BlockBackend *blk,
2558 void (*attached_aio_context)(AioContext *,
2559 void *),
2560 void (*detach_aio_context)(void *),
2561 void *opaque)
2562 {
2563 BlockBackendAioNotifier *notifier;
2564 BlockDriverState *bs = blk_bs(blk);
2565
2566 GLOBAL_STATE_CODE();
2567
2568 if (bs) {
2569 bdrv_remove_aio_context_notifier(bs, attached_aio_context,
2570 detach_aio_context, opaque);
2571 }
2572
2573 QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
2574 if (notifier->attached_aio_context == attached_aio_context &&
2575 notifier->detach_aio_context == detach_aio_context &&
2576 notifier->opaque == opaque) {
2577 QLIST_REMOVE(notifier, list);
2578 g_free(notifier);
2579 return;
2580 }
2581 }
2582
2583 abort();
2584 }
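/*
 * Sketch of the pairing contract: removal must pass the exact triple used
 * at registration, or the abort() above fires. Functions are hypothetical.
 */
#if 0
static void example_attached(AioContext *new_context, void *opaque) { }
static void example_detach(void *opaque) { }

static void example_track_aio_context(BlockBackend *blk, void *opaque)
{
    blk_add_aio_context_notifier(blk, example_attached, example_detach,
                                 opaque);
    /* ... later, tear down with the same (fn, fn, opaque) triple: */
    blk_remove_aio_context_notifier(blk, example_attached, example_detach,
                                    opaque);
}
#endif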
2585
2586 void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
2587 {
2588 GLOBAL_STATE_CODE();
2589 notifier_list_add(&blk->remove_bs_notifiers, notify);
2590 }
2591
2592 void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
2593 {
2594 GLOBAL_STATE_CODE();
2595 notifier_list_add(&blk->insert_bs_notifiers, notify);
2596 }
2597
2598 BlockAcctStats *blk_get_stats(BlockBackend *blk)
2599 {
2600 IO_CODE();
2601 return &blk->stats;
2602 }
2603
2604 void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
2605 BlockCompletionFunc *cb, void *opaque)
2606 {
2607 IO_CODE();
2608 return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
2609 }
2610
2611 int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
2612 int64_t bytes, BdrvRequestFlags flags)
2613 {
2614 IO_OR_GS_CODE();
2615 return blk_co_pwritev(blk, offset, bytes, NULL,
2616 flags | BDRV_REQ_ZERO_WRITE);
2617 }
2618
2619 int coroutine_fn blk_co_pwrite_compressed(BlockBackend *blk, int64_t offset,
2620 int64_t bytes, const void *buf)
2621 {
2622 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
2623 IO_OR_GS_CODE();
2624 return blk_co_pwritev_part(blk, offset, bytes, &qiov, 0,
2625 BDRV_REQ_WRITE_COMPRESSED);
2626 }
2627
2628 int coroutine_fn blk_co_truncate(BlockBackend *blk, int64_t offset, bool exact,
2629 PreallocMode prealloc, BdrvRequestFlags flags,
2630 Error **errp)
2631 {
2632 IO_OR_GS_CODE();
2633 GRAPH_RDLOCK_GUARD();
2634 if (!blk_co_is_available(blk)) {
2635 error_setg(errp, "No medium inserted");
2636 return -ENOMEDIUM;
2637 }
2638
2639 return bdrv_co_truncate(blk->root, offset, exact, prealloc, flags, errp);
2640 }
2641
2642 int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
2643 int64_t pos, int size)
2644 {
2645 int ret;
2646 GLOBAL_STATE_CODE();
2647
2648 if (!blk_is_available(blk)) {
2649 return -ENOMEDIUM;
2650 }
2651
2652 ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
2653 if (ret < 0) {
2654 return ret;
2655 }
2656
2657 if (ret == size && !blk->enable_write_cache) {
2658 ret = bdrv_flush(blk_bs(blk));
2659 }
2660
2661 return ret < 0 ? ret : size;
2662 }
2663
2664 int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
2665 {
2666 GLOBAL_STATE_CODE();
2667 if (!blk_is_available(blk)) {
2668 return -ENOMEDIUM;
2669 }
2670
2671 return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
2672 }
2673
2674 int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
2675 {
2676 GLOBAL_STATE_CODE();
2677 GRAPH_RDLOCK_GUARD_MAINLOOP();
2678
2679 if (!blk_is_available(blk)) {
2680 return -ENOMEDIUM;
2681 }
2682
2683 return bdrv_probe_blocksizes(blk_bs(blk), bsz);
2684 }
2685
2686 int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
2687 {
2688 GLOBAL_STATE_CODE();
2689 if (!blk_is_available(blk)) {
2690 return -ENOMEDIUM;
2691 }
2692
2693 return bdrv_probe_geometry(blk_bs(blk), geo);
2694 }
2695
2696 /*
2697 * Updates the BlockBackendRootState object with data from the currently
2698 * attached BlockDriverState.
2699 */
2700 void blk_update_root_state(BlockBackend *blk)
2701 {
2702 GLOBAL_STATE_CODE();
2703 assert(blk->root);
2704
2705 blk->root_state.open_flags = blk->root->bs->open_flags;
2706 blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
2707 }
2708
2709 /*
2710 * Returns the detect-zeroes setting to be used for bdrv_open() of a
2711 * BlockDriverState which is supposed to inherit the root state.
2712 */
2713 bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
2714 {
2715 GLOBAL_STATE_CODE();
2716 return blk->root_state.detect_zeroes;
2717 }
2718
2719 /*
2720 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
2721 * supposed to inherit the root state.
2722 */
2723 int blk_get_open_flags_from_root_state(BlockBackend *blk)
2724 {
2725 GLOBAL_STATE_CODE();
2726 return blk->root_state.open_flags;
2727 }
2728
2729 BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
2730 {
2731 GLOBAL_STATE_CODE();
2732 return &blk->root_state;
2733 }
2734
2735 int blk_commit_all(void)
2736 {
2737 BlockBackend *blk = NULL;
2738 GLOBAL_STATE_CODE();
2739 GRAPH_RDLOCK_GUARD_MAINLOOP();
2740
2741 while ((blk = blk_all_next(blk)) != NULL) {
2742 AioContext *aio_context = blk_get_aio_context(blk);
2743 BlockDriverState *unfiltered_bs = bdrv_skip_filters(blk_bs(blk));
2744
2745 aio_context_acquire(aio_context);
2746 if (blk_is_inserted(blk) && bdrv_cow_child(unfiltered_bs)) {
2747 int ret;
2748
2749 ret = bdrv_commit(unfiltered_bs);
2750 if (ret < 0) {
2751 aio_context_release(aio_context);
2752 return ret;
2753 }
2754 }
2755 aio_context_release(aio_context);
2756 }
2757 return 0;
2758 }
2759
2760
2761 /* throttling disk I/O limits */
2762 void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
2763 {
2764 GLOBAL_STATE_CODE();
2765 throttle_group_config(&blk->public.throttle_group_member, cfg);
2766 }
2767
2768 void blk_io_limits_disable(BlockBackend *blk)
2769 {
2770 BlockDriverState *bs = blk_bs(blk);
2771 ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
2772 assert(tgm->throttle_state);
2773 GLOBAL_STATE_CODE();
2774 if (bs) {
2775 bdrv_ref(bs);
2776 bdrv_drained_begin(bs);
2777 }
2778 throttle_group_unregister_tgm(tgm);
2779 if (bs) {
2780 bdrv_drained_end(bs);
2781 bdrv_unref(bs);
2782 }
2783 }
2784
2785 /* must be called before blk_set_io_limits() if a limit is to be set */
2786 void blk_io_limits_enable(BlockBackend *blk, const char *group)
2787 {
2788 assert(!blk->public.throttle_group_member.throttle_state);
2789 GLOBAL_STATE_CODE();
2790 throttle_group_register_tgm(&blk->public.throttle_group_member,
2791 group, blk_get_aio_context(blk));
2792 }
2793
2794 void blk_io_limits_update_group(BlockBackend *blk, const char *group)
2795 {
2796 GLOBAL_STATE_CODE();
2797 /* this BB is not part of any group */
2798 if (!blk->public.throttle_group_member.throttle_state) {
2799 return;
2800 }
2801
2802 /* this BB is already in the group we want */
2803 if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member),
2804 group)) {
2805 return;
2806 }
2807
2808 /* need to change the group this BB belongs to */
2809 blk_io_limits_disable(blk);
2810 blk_io_limits_enable(blk, group);
2811 }
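/*
 * Putting the ordering rule above into code: register with a group first,
 * then apply the limits. The group name and numbers are made up.
 */
#if 0
static void example_enable_throttling(BlockBackend *blk)
{
    ThrottleConfig cfg;

    blk_io_limits_enable(blk, "example-group"); /* join the group first */

    throttle_config_init(&cfg);
    cfg.buckets[THROTTLE_OPS_TOTAL].avg = 1000; /* cap at ~1000 IOPS */
    blk_set_io_limits(blk, &cfg);
}
#endif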
2812
2813 static void blk_root_drained_begin(BdrvChild *child)
2814 {
2815 BlockBackend *blk = child->opaque;
2816 ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
2817
2818 if (qatomic_fetch_inc(&blk->quiesce_counter) == 0) {
2819 if (blk->dev_ops && blk->dev_ops->drained_begin) {
2820 blk->dev_ops->drained_begin(blk->dev_opaque);
2821 }
2822 }
2823
2824 /* Note that blk->root may not be accessible here yet if we are just
2825 * attaching to a BlockDriverState that is drained. Use child instead. */
2826
2827 if (qatomic_fetch_inc(&tgm->io_limits_disabled) == 0) {
2828 throttle_group_restart_tgm(tgm);
2829 }
2830 }
2831
2832 static bool blk_root_drained_poll(BdrvChild *child)
2833 {
2834 BlockBackend *blk = child->opaque;
2835 bool busy = false;
2836 assert(qatomic_read(&blk->quiesce_counter));
2837
2838 if (blk->dev_ops && blk->dev_ops->drained_poll) {
2839 busy = blk->dev_ops->drained_poll(blk->dev_opaque);
2840 }
2841 return busy || !!blk->in_flight;
2842 }
2843
2844 static void blk_root_drained_end(BdrvChild *child)
2845 {
2846 BlockBackend *blk = child->opaque;
2847 assert(qatomic_read(&blk->quiesce_counter));
2848
2849 assert(blk->public.throttle_group_member.io_limits_disabled);
2850 qatomic_dec(&blk->public.throttle_group_member.io_limits_disabled);
2851
2852 if (qatomic_fetch_dec(&blk->quiesce_counter) == 1) {
2853 if (blk->dev_ops && blk->dev_ops->drained_end) {
2854 blk->dev_ops->drained_end(blk->dev_opaque);
2855 }
2856 qemu_mutex_lock(&blk->queued_requests_lock);
2857 while (qemu_co_enter_next(&blk->queued_requests,
2858 &blk->queued_requests_lock)) {
2859 /* Resume all queued requests */
2860 }
2861 qemu_mutex_unlock(&blk->queued_requests_lock);
2862 }
2863 }
2864
2865 bool blk_register_buf(BlockBackend *blk, void *host, size_t size, Error **errp)
2866 {
2867 BlockDriverState *bs = blk_bs(blk);
2868
2869 GLOBAL_STATE_CODE();
2870
2871 if (bs) {
2872 return bdrv_register_buf(bs, host, size, errp);
2873 }
2874 return true;
2875 }
2876
2877 void blk_unregister_buf(BlockBackend *blk, void *host, size_t size)
2878 {
2879 BlockDriverState *bs = blk_bs(blk);
2880
2881 GLOBAL_STATE_CODE();
2882
2883 if (bs) {
2884 bdrv_unregister_buf(bs, host, size);
2885 }
2886 }
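/*
 * Sketch of the register/unregister pairing for pre-registered I/O buffers;
 * registration is a performance hint and may fail without being fatal.
 */
#if 0
static void example_register_io_buffer(BlockBackend *blk, void *buf,
                                       size_t len, Error **errp)
{
    if (!blk_register_buf(blk, buf, len, errp)) {
        return; /* fall back to unregistered I/O */
    }
    /* ... issue I/O on buf ... */
    blk_unregister_buf(blk, buf, len);
}
#endif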
2887
2888 int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
2889 BlockBackend *blk_out, int64_t off_out,
2890 int64_t bytes, BdrvRequestFlags read_flags,
2891 BdrvRequestFlags write_flags)
2892 {
2893 int r;
2894 IO_CODE();
2895 GRAPH_RDLOCK_GUARD();
2896
2897 r = blk_check_byte_request(blk_in, off_in, bytes);
2898 if (r) {
2899 return r;
2900 }
2901 r = blk_check_byte_request(blk_out, off_out, bytes);
2902 if (r) {
2903 return r;
2904 }
2905
2906 return bdrv_co_copy_range(blk_in->root, off_in,
2907 blk_out->root, off_out,
2908 bytes, read_flags, write_flags);
2909 }
2910
2911 const BdrvChild *blk_root(BlockBackend *blk)
2912 {
2913 GLOBAL_STATE_CODE();
2914 return blk->root;
2915 }
2916
2917 int blk_make_empty(BlockBackend *blk, Error **errp)
2918 {
2919 GLOBAL_STATE_CODE();
2920 GRAPH_RDLOCK_GUARD_MAINLOOP();
2921
2922 if (!blk_is_available(blk)) {
2923 error_setg(errp, "No medium inserted");
2924 return -ENOMEDIUM;
2925 }
2926
2927 return bdrv_make_empty(blk->root, errp);
2928 }