#include "block/qdict.h"
#include "qemu/error-report.h"
#include "module_block.h"
+#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
return bdrv_drain_poll(bs, false, NULL, false);
}
+/*
+ * BdrvChild callback run when a drained section around the child ends.
+ * The reworked variant does not poll by itself; it records pending work
+ * via *drained_end_counter so the outermost bdrv_drained_end() caller
+ * can poll once for the whole subtree.  NOTE(review): the no-poll
+ * semantics are inferred from the _no_poll name and counter parameter —
+ * confirm against bdrv_drained_end_no_poll().
+ */
-static void bdrv_child_cb_drained_end(BdrvChild *child)
+static void bdrv_child_cb_drained_end(BdrvChild *child,
+ int *drained_end_counter)
{
BlockDriverState *bs = child->opaque;
- bdrv_drained_end(bs);
+ bdrv_drained_end_no_poll(bs, drained_end_counter);
}
static void bdrv_child_cb_attach(BdrvChild *child)
uint64_t perm, uint64_t shared,
uint64_t *nperm, uint64_t *nshared)
{
- if (c == NULL) {
- *nperm = perm & DEFAULT_PERM_PASSTHROUGH;
- *nshared = (shared & DEFAULT_PERM_PASSTHROUGH) | DEFAULT_PERM_UNCHANGED;
- return;
- }
-
- *nperm = (perm & DEFAULT_PERM_PASSTHROUGH) |
- (c->perm & DEFAULT_PERM_UNCHANGED);
- *nshared = (shared & DEFAULT_PERM_PASSTHROUGH) |
- (c->shared_perm & DEFAULT_PERM_UNCHANGED);
+ *nperm = perm & DEFAULT_PERM_PASSTHROUGH;
+ *nshared = (shared & DEFAULT_PERM_PASSTHROUGH) | DEFAULT_PERM_UNCHANGED;
}
void bdrv_format_default_perms(BlockDriverState *bs, BdrvChild *c,
BlockDriverState *new_bs)
{
BlockDriverState *old_bs = child->bs;
- int i;
+ int new_bs_quiesce_counter;
+ int drain_saldo;
assert(!child->frozen);
if (old_bs && new_bs) {
assert(bdrv_get_aio_context(old_bs) == bdrv_get_aio_context(new_bs));
}
+
+ new_bs_quiesce_counter = (new_bs ? new_bs->quiesce_counter : 0);
+ drain_saldo = new_bs_quiesce_counter - child->parent_quiesce_counter;
+
+ /*
+ * If the new child node is drained but the old one was not, flush
+ * all outstanding requests to the old child node.
+ */
+ while (drain_saldo > 0 && child->role->drained_begin) {
+ bdrv_parent_drained_begin_single(child, true);
+ drain_saldo--;
+ }
+
if (old_bs) {
/* Detach first so that the recursive drain sections coming from @child
* are already gone and we only end the drain sections that came from
if (child->role->detach) {
child->role->detach(child);
}
- if (old_bs->quiesce_counter && child->role->drained_end) {
- int num = old_bs->quiesce_counter;
- if (child->role->parent_is_bds) {
- num -= bdrv_drain_all_count;
- }
- assert(num >= 0);
- for (i = 0; i < num; i++) {
- child->role->drained_end(child);
- }
- }
QLIST_REMOVE(child, next_parent);
}
if (new_bs) {
QLIST_INSERT_HEAD(&new_bs->parents, child, next_parent);
- if (new_bs->quiesce_counter && child->role->drained_begin) {
- int num = new_bs->quiesce_counter;
- if (child->role->parent_is_bds) {
- num -= bdrv_drain_all_count;
- }
- assert(num >= 0);
- for (i = 0; i < num; i++) {
- bdrv_parent_drained_begin_single(child, true);
- }
- }
+
+ /*
+ * Detaching the old node may have led to the new node's
+ * quiesce_counter having been decreased. Not a problem, we
+ * just need to recognize this here and then invoke
+ * drained_end appropriately more often.
+ */
+ assert(new_bs->quiesce_counter <= new_bs_quiesce_counter);
+ drain_saldo += new_bs->quiesce_counter - new_bs_quiesce_counter;
/* Attach only after starting new drained sections, so that recursive
* drain sections coming from @child don't get an extra .drained_begin
child->role->attach(child);
}
}
+
+ /*
+ * If the old child node was drained but the new one is not, allow
+ * requests to come in only after the new node has been attached.
+ */
+ while (drain_saldo < 0 && child->role->drained_end) {
+ bdrv_parent_drained_end_single(child);
+ drain_saldo++;
+ }
}
/*
{
BdrvChild *c, *next;
GSList *list = NULL, *p;
- uint64_t old_perm, old_shared;
uint64_t perm = 0, shared = BLK_PERM_ALL;
int ret;
bdrv_unref(from);
}
- bdrv_get_cumulative_perm(to, &old_perm, &old_shared);
- bdrv_set_perm(to, old_perm | perm, old_shared | shared);
+ bdrv_get_cumulative_perm(to, &perm, &shared);
+ bdrv_set_perm(to, perm, shared);
out:
g_slist_free(list);
int ret = -EIO;
bdrv_ref(top);
+ bdrv_subtree_drained_begin(top);
if (!top->drv || !base->drv) {
goto exit;
ret = 0;
exit:
+ bdrv_subtree_drained_end(top);
bdrv_unref(top);
return ret;
}
return 0;
}
+/*
+ * Return non-zero if the area added by growing the image with truncate
+ * is guaranteed to read back as zeroes; return 0 otherwise.
+ *
+ * Deliberately conservative: every case that cannot be proven safe
+ * falls through to 0.
+ */
+int bdrv_has_zero_init_truncate(BlockDriverState *bs)
+{
+ if (!bs->drv) {
+ /* No driver attached: nothing can be guaranteed */
+ return 0;
+ }
+
+ if (bs->backing) {
+ /* Depends on the backing image length, but better safe than sorry */
+ return 0;
+ }
+ if (bs->drv->bdrv_has_zero_init_truncate) {
+ /* The driver itself knows best */
+ return bs->drv->bdrv_has_zero_init_truncate(bs);
+ }
+ if (bs->file && bs->drv->is_filter) {
+ /* Filters delegate the question to the node they filter */
+ return bdrv_has_zero_init_truncate(bs->file->bs);
+ }
+
+ /* safe default */
+ return 0;
+}
+
bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
{
BlockDriverInfo bdi;
for (bm = bdrv_dirty_bitmap_next(bs, NULL); bm;
bm = bdrv_dirty_bitmap_next(bs, bm))
{
- bdrv_dirty_bitmap_set_migration(bm, false);
+ bdrv_dirty_bitmap_skip_store(bm, false);
}
ret = refresh_total_sectors(bs, bs->total_sectors);
* Changes the AioContext used for fd handlers, timers, and BHs by this
* BlockDriverState and all its children and parents.
*
+ * Must be called from the main AioContext.
+ *
* The caller must own the AioContext lock for the old AioContext of bs, but it
* must not own the AioContext lock for new_context (unless new_context is the
* same as the current context of bs).
void bdrv_set_aio_context_ignore(BlockDriverState *bs,
AioContext *new_context, GSList **ignore)
{
+ AioContext *old_context = bdrv_get_aio_context(bs);
BdrvChild *child;
- if (bdrv_get_aio_context(bs) == new_context) {
+ /* This function may only run in the main loop (see header comment) */
+ g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
+
+ if (old_context == new_context) {
return;
}
bdrv_detach_aio_context(bs);
- /* This function executes in the old AioContext so acquire the new one in
- * case it runs in a different thread.
- */
- aio_context_acquire(new_context);
+ /* Acquire the new context, if necessary */
+ if (qemu_get_aio_context() != new_context) {
+ aio_context_acquire(new_context);
+ }
+
bdrv_attach_aio_context(bs, new_context);
+
+ /*
+ * If this function was recursively called from
+ * bdrv_set_aio_context_ignore(), there may be nodes in the
+ * subtree that have not yet been moved to the new AioContext.
+ * Release the old one so bdrv_drained_end() can poll them.
+ */
+ if (qemu_get_aio_context() != old_context) {
+ aio_context_release(old_context);
+ }
+
bdrv_drained_end(bs);
- aio_context_release(new_context);
+
+ /*
+ * Restore the caller's locking state: re-take the old context that
+ * was released above and drop the new one if we took it here, so
+ * the caller returns holding exactly the locks it entered with.
+ */
+ if (qemu_get_aio_context() != old_context) {
+ aio_context_acquire(old_context);
+ }
+ if (qemu_get_aio_context() != new_context) {
+ aio_context_release(new_context);
+ }
}
static bool bdrv_parent_can_set_aio_context(BdrvChild *c, AioContext *ctx,