return 0;
}
-static int bdrv_test_change_backing_file(BlockDriverState *bs,
- const char *backing_file,
- const char *backing_fmt)
+static int coroutine_fn bdrv_test_co_change_backing_file(BlockDriverState *bs,
+ const char *backing_file,
+ const char *backing_fmt)
{
return 0;
}
.bdrv_child_perm = bdrv_default_perms,
- .bdrv_change_backing_file = bdrv_test_change_backing_file,
+ .bdrv_co_change_backing_file = bdrv_test_co_change_backing_file,
};
static void aio_ret_cb(void *opaque, int ret)
static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs)
{
- if (drain_type != BDRV_DRAIN_ALL) {
- aio_context_acquire(bdrv_get_aio_context(bs));
- }
do_drain_begin(drain_type, bs);
- if (drain_type != BDRV_DRAIN_ALL) {
- aio_context_release(bdrv_get_aio_context(bs));
- }
}
static BlockBackend * no_coroutine_fn test_setup(void)
static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs)
{
- if (drain_type != BDRV_DRAIN_ALL) {
- aio_context_acquire(bdrv_get_aio_context(bs));
- }
do_drain_end(drain_type, bs);
- if (drain_type != BDRV_DRAIN_ALL) {
- aio_context_release(bdrv_get_aio_context(bs));
- }
}
-static void test_drv_cb_common(BlockBackend *blk, enum drain_type drain_type,
- bool recursive)
+/*
+ * Locking the block graph would be a bit cumbersome here because this function
+ * is called both in coroutine and non-coroutine context. We know this is a test
+ * and nothing else is running, so don't bother with TSA.
+ */
+static void coroutine_mixed_fn TSA_NO_TSA
+test_drv_cb_common(BlockBackend *blk, enum drain_type drain_type,
+ bool recursive)
{
BlockDriverState *bs = blk_bs(blk);
BlockDriverState *backing = bs->backing->bs;
blk_unref(blk);
}
-static void test_quiesce_common(BlockBackend *blk, enum drain_type drain_type,
- bool recursive)
+/*
+ * Locking the block graph would be a bit cumbersome here because this function
+ * is called both in coroutine and non-coroutine context. We know this is a test
+ * and nothing else is running, so don't bother with TSA.
+ */
+static void coroutine_mixed_fn TSA_NO_TSA
+test_quiesce_common(BlockBackend *blk, enum drain_type drain_type,
+ bool recursive)
{
BlockDriverState *bs = blk_bs(blk);
BlockDriverState *backing = bs->backing->bs;
{
struct test_iothread_data *data = opaque;
- /* Test that the AioContext is not yet locked in a random BH that is
- * executed during drain, otherwise this would deadlock. */
- aio_context_acquire(bdrv_get_aio_context(data->bs));
bdrv_flush(data->bs);
- aio_context_release(bdrv_get_aio_context(data->bs));
+ bdrv_dec_in_flight(data->bs); /* incremented by test_iothread_common() */
}
/*
blk_set_disable_request_queuing(blk, true);
blk_set_aio_context(blk, ctx_a, &error_abort);
- aio_context_acquire(ctx_a);
s->bh_indirection_ctx = ctx_b;
g_assert(acb != NULL);
g_assert_cmpint(aio_ret, ==, -EINPROGRESS);
- aio_context_release(ctx_a);
-
data = (struct test_iothread_data) {
.bs = bs,
.drain_type = drain_type,
switch (drain_thread) {
case 0:
- if (drain_type != BDRV_DRAIN_ALL) {
- aio_context_acquire(ctx_a);
- }
-
+ /*
+ * Increment in_flight so that do_drain_begin() waits for
+ * test_iothread_main_thread_bh(). This prevents the race between
+ * test_iothread_main_thread_bh() in IOThread a and do_drain_begin() in
+ * this thread. test_iothread_main_thread_bh() decrements in_flight.
+ */
+ bdrv_inc_in_flight(bs);
aio_bh_schedule_oneshot(ctx_a, test_iothread_main_thread_bh, &data);
/* The request is running in IOThread a. Draining its block device
do_drain_begin(drain_type, bs);
g_assert_cmpint(bs->in_flight, ==, 0);
- if (drain_type != BDRV_DRAIN_ALL) {
- aio_context_release(ctx_a);
- }
qemu_event_wait(&done_event);
- if (drain_type != BDRV_DRAIN_ALL) {
- aio_context_acquire(ctx_a);
- }
g_assert_cmpint(aio_ret, ==, 0);
do_drain_end(drain_type, bs);
-
- if (drain_type != BDRV_DRAIN_ALL) {
- aio_context_release(ctx_a);
- }
break;
case 1:
co = qemu_coroutine_create(test_iothread_drain_co_entry, &data);
g_assert_not_reached();
}
- aio_context_acquire(ctx_a);
blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
- aio_context_release(ctx_a);
bdrv_unref(bs);
blk_unref(blk);
BlockJob *job;
TestBlockJob *tjob;
IOThread *iothread = NULL;
- AioContext *ctx;
int ret;
src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
}
if (use_iothread) {
+ AioContext *ctx;
+
iothread = iothread_new();
ctx = iothread_get_aio_context(iothread);
blk_set_aio_context(blk_src, ctx, &error_abort);
- } else {
- ctx = qemu_get_aio_context();
}
target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
blk_insert_bs(blk_target, target, &error_abort);
blk_set_allow_aio_context_change(blk_target, true);
- aio_context_acquire(ctx);
tjob = block_job_create("job0", &test_job_driver, NULL, src,
0, BLK_PERM_ALL,
0, 0, NULL, NULL, &error_abort);
tjob->bs = src;
job = &tjob->common;
+
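+ /* Adding the job's child to the graph requires the writer lock */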
+ bdrv_graph_wrlock();
block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
+ bdrv_graph_wrunlock();
switch (result) {
case TEST_JOB_SUCCESS:
tjob->prepare_ret = -EIO;
break;
}
- aio_context_release(ctx);
job_start(&job->job);
}
g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));
- aio_context_acquire(ctx);
if (use_iothread) {
blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
}
- aio_context_release(ctx);
blk_unref(blk_src);
blk_unref(blk_target);
static void bdrv_test_top_close(BlockDriverState *bs)
{
BdrvChild *c, *next_c;
+
+ bdrv_graph_wrlock();
QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
bdrv_unref_child(bs, c);
}
+ bdrv_graph_wrunlock();
}
static int coroutine_fn GRAPH_RDLOCK
blk_co_unref(blk);
} else {
BdrvChild *c, *next_c;
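+ /*
+ * The reader lock only protects iterating bs->children; drop it across
+ * bdrv_co_unref_child(), which modifies the graph itself.
+ */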
+ bdrv_graph_co_rdlock();
QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
- bdrv_unref_child(bs, c);
+ bdrv_graph_co_rdunlock();
+ bdrv_co_unref_child(bs, c);
+ bdrv_graph_co_rdlock();
}
+ bdrv_graph_co_rdunlock();
}
dbdd->done = true;
null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
&error_abort);
+ bdrv_graph_wrlock();
bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds,
BDRV_CHILD_DATA, &error_abort);
+ bdrv_graph_wrunlock();
/* This child will be the one to pass requests through to, and
* it will stall until a drain occurs */
&error_abort);
child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
/* Takes our reference to child_bs */
+ bdrv_graph_wrlock();
tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child",
&child_of_bds,
BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
&error_abort);
+ bdrv_graph_wrunlock();
/* This child is just there to be deleted
* (for detach_instead_of_delete == true) */
null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
&error_abort);
+ bdrv_graph_wrlock();
bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA,
&error_abort);
+ bdrv_graph_wrunlock();
blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
blk_insert_bs(blk, bs, &error_abort);
};
static struct detach_by_parent_data detach_by_parent_data;
-static void detach_indirect_bh(void *opaque)
+static void no_coroutine_fn detach_indirect_bh(void *opaque)
{
struct detach_by_parent_data *data = opaque;
bdrv_dec_in_flight(data->child_b->bs);
+
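+ /*
+ * Detaching child_b and attaching c both change the graph; do them in a
+ * single write-locked section.
+ */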
+ bdrv_graph_wrlock();
bdrv_unref_child(data->parent_b, data->child_b);
bdrv_ref(data->c);
data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C",
&child_of_bds, BDRV_CHILD_DATA,
&error_abort);
+ bdrv_graph_wrunlock();
}
-static void detach_by_parent_aio_cb(void *opaque, int ret)
+static void coroutine_mixed_fn detach_by_parent_aio_cb(void *opaque, int ret)
{
struct detach_by_parent_data *data = &detach_by_parent_data;
g_assert_cmpint(ret, ==, 0);
if (data->by_parent_cb) {
bdrv_inc_in_flight(data->child_b->bs);
- detach_indirect_bh(data);
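+ /*
+ * This callback can run in coroutine context, but changing the graph
+ * requires bdrv_graph_wrlock(), which must not be called from a
+ * coroutine. Defer the graph change to a BH instead.
+ */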
+ aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
+ detach_indirect_bh, &detach_by_parent_data);
}
}
-static void detach_by_driver_cb_drained_begin(BdrvChild *child)
+static void GRAPH_RDLOCK detach_by_driver_cb_drained_begin(BdrvChild *child)
{
struct detach_by_parent_data *data = &detach_by_parent_data;
* state is messed up, but if it is only polled in the single
* BDRV_POLL_WHILE() at the end of the drain, this should work fine.
*/
-static void test_detach_indirect(bool by_parent_cb)
+static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb)
{
BlockBackend *blk;
BlockDriverState *parent_a, *parent_b, *a, *b, *c;
/* Set child relationships */
bdrv_ref(b);
bdrv_ref(a);
+ bdrv_graph_wrlock();
child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds,
BDRV_CHILD_DATA, &error_abort);
child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_of_bds,
bdrv_attach_child(parent_a, a, "PA-A",
by_parent_cb ? &child_of_bds : &detach_by_driver_cb_class,
BDRV_CHILD_DATA, &error_abort);
+ bdrv_graph_wrunlock();
g_assert_cmpint(parent_a->refcnt, ==, 1);
g_assert_cmpint(parent_b->refcnt, ==, 1);
g_assert_cmpint(base->in_flight, ==, 0);
bdrv_append(overlay, base, &error_abort);
+
g_assert_cmpint(base->in_flight, ==, 0);
g_assert_cmpint(overlay->in_flight, ==, 0);
bdrv_drained_begin(bs);
bdrv_try_change_aio_context(bs, ctx_a, NULL, &error_abort);
-
- aio_context_acquire(ctx_a);
bdrv_drained_end(bs);
bdrv_drained_begin(bs);
bdrv_try_change_aio_context(bs, ctx_b, NULL, &error_abort);
- aio_context_release(ctx_a);
- aio_context_acquire(ctx_b);
bdrv_try_change_aio_context(bs, qemu_get_aio_context(), NULL, &error_abort);
- aio_context_release(ctx_b);
bdrv_drained_end(bs);
bdrv_unref(bs);
* Establish the chain last, so the chain links are the first
* elements in the BDS.parents lists
*/
+ bdrv_graph_wrlock();
for (i = 0; i < 3; i++) {
if (i) {
/* Takes the reference to chain[i - 1] */
&chain_child_class, BDRV_CHILD_COW, &error_abort);
}
}
+ bdrv_graph_wrunlock();
job = block_job_create("job", &test_simple_job_driver, NULL, job_node,
0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort);
{
BDRVReplaceTestState *s = bs->opaque;
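+ /*
+ * This runs in the main loop, where the graph cannot change concurrently;
+ * the guard satisfies the TSA reader-lock annotations for the accesses
+ * below.
+ */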
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
if (!s->setup_completed) {
return;
}
new_child_bs->total_sectors = 1;
bdrv_ref(old_child_bs);
+ bdrv_graph_wrlock();
bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds,
BDRV_CHILD_COW, &error_abort);
+ bdrv_graph_wrunlock();
parent_s->setup_completed = true;
for (i = 0; i < old_drain_count; i++) {
parent_s->was_undrained = false;
g_assert(parent_bs->quiesce_counter == old_drain_count);
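+ /*
+ * bdrv_replace_node() is a graph change: drain both nodes and hold the
+ * writer lock across the replacement.
+ */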
+ bdrv_drained_begin(old_child_bs);
+ bdrv_drained_begin(new_child_bs);
+ bdrv_graph_wrlock();
bdrv_replace_node(old_child_bs, new_child_bs, &error_abort);
+ bdrv_graph_wrunlock();
+ bdrv_drained_end(new_child_bs);
+ bdrv_drained_end(old_child_bs);
g_assert(parent_bs->quiesce_counter == new_drain_count);
if (!old_drain_count && !new_drain_count) {