BlockDriverState *bs;
AioContext *aio_context;
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
bs = bdrv_lookup_bs(name, name, errp);
if (bs == NULL) {
return NULL;
SnapshotInfo *info = NULL;
int ret;
+ GLOBAL_STATE_CODE();
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
bs = qmp_get_root_bs(device, errp);
if (!bs) {
return NULL;
AioContext *aio_context;
int ret1;
+ GLOBAL_STATE_CODE();
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
tran_add(tran, &internal_snapshot_drv, state);
device = internal->device;
AioContext *aio_context;
Error *local_error = NULL;
+ GLOBAL_STATE_CODE();
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
if (!state->created) {
return;
}
AioContext *aio_context;
uint64_t perm, shared;
+ /* TODO We'll eventually have to take a writer lock in this function */
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
tran_add(tran, &external_snapshot_drv, state);
/* 'blockdev-snapshot' and 'blockdev-snapshot-sync' have similar
bool set_backing_hd = false;
int ret;
+ GLOBAL_STATE_CODE();
+
tran_add(tran, &drive_backup_drv, state);
if (!backup->has_mode) {
}
/* Early check to avoid creating target */
+ bdrv_graph_rdlock_main_loop();
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
+ bdrv_graph_rdunlock_main_loop();
goto out;
}
+ bdrv_graph_rdunlock_main_loop();
flags = bs->open_flags | BDRV_O_RDWR;
BlockDriverState *explicit_backing =
bdrv_skip_implicit_filters(source);
+ bdrv_graph_rdlock_main_loop();
bdrv_refresh_filename(explicit_backing);
+ bdrv_graph_rdunlock_main_loop();
+
bdrv_img_create(backup->target, format,
explicit_backing->filename,
explicit_backing->drv->format_name, NULL,
return;
}
+ bdrv_graph_co_rdlock();
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) {
error_setg(errp, QERR_DEVICE_IN_USE, device);
+ bdrv_graph_co_rdunlock();
return;
}
+ bdrv_graph_co_rdunlock();
blk = blk_co_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL, errp);
if (!blk) {
Error *local_err = NULL;
int job_flags = JOB_DEFAULT;
+ GLOBAL_STATE_CODE();
+
if (base && base_node) {
error_setg(errp, "'base' and 'base-node' cannot be specified "
"at the same time");
goto out;
}
assert(bdrv_get_aio_context(base_bs) == aio_context);
+
+ bdrv_graph_rdlock_main_loop();
bdrv_refresh_filename(base_bs);
+ bdrv_graph_rdunlock_main_loop();
}
if (bottom) {
* Check for op blockers in the whole chain between bs and base (or bottom)
*/
iter_end = bottom ? bdrv_filter_or_cow_bs(bottom_bs) : base_bs;
+ bdrv_graph_rdlock_main_loop();
for (iter = bs; iter && iter != iter_end;
iter = bdrv_filter_or_cow_bs(iter))
{
if (bdrv_op_is_blocked(iter, BLOCK_OP_TYPE_STREAM, errp)) {
+ bdrv_graph_rdunlock_main_loop();
goto out;
}
}
+ bdrv_graph_rdunlock_main_loop();
/* if we are streaming the entire chain, the result will have no backing
* file, and specifying one is therefore an error */
int job_flags = JOB_DEFAULT;
uint64_t top_perm, top_shared;
+ /* TODO We'll eventually have to take a writer lock in this function */
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
if (!has_speed) {
speed = 0;
}
/*
 * QMP handler for x-debug-query-block-graph: returns a debug dump of the
 * block driver graph by delegating directly to bdrv_get_xdbg_block_graph().
 * On failure, *errp is set by the callee and the return value is presumably
 * NULL — TODO confirm against bdrv_get_xdbg_block_graph()'s contract.
 *
 * NOTE(review): the '+' lines below are diff additions from a graph-locking
 * patch series; the guard takes the block-graph reader lock for the rest of
 * the function scope (main-loop variant), protecting the graph traversal
 * performed by the callee.
 */
XDbgBlockGraph *qmp_x_debug_query_block_graph(Error **errp)
{
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
return bdrv_get_xdbg_block_graph(errp);
}
}
/* Early check to avoid creating target */
+ bdrv_graph_rdlock_main_loop();
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_MIRROR_SOURCE, errp)) {
+ bdrv_graph_rdunlock_main_loop();
return;
}
+ bdrv_graph_rdunlock_main_loop();
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
break;
case NEW_IMAGE_MODE_ABSOLUTE_PATHS:
/* create new image with backing file */
+ bdrv_graph_rdlock_main_loop();
bdrv_refresh_filename(explicit_backing);
+ bdrv_graph_rdunlock_main_loop();
+
bdrv_img_create(arg->target, format,
explicit_backing->filename,
explicit_backing->drv->format_name,
/* even though we are not necessarily operating on bs, we need it to
* determine if block ops are currently prohibited on the chain */
+ bdrv_graph_rdlock_main_loop();
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_CHANGE, errp)) {
+ bdrv_graph_rdunlock_main_loop();
goto out;
}
+ bdrv_graph_rdunlock_main_loop();
/* final sanity check */
if (!bdrv_chain_contains(bs, image_bs)) {
BlockDriverState *bs;
GLOBAL_STATE_CODE();
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
bs = bdrv_find_node(node_name);
if (!bs) {
aio_context_release(aio_context);
}
-static BdrvChild *bdrv_find_child(BlockDriverState *parent_bs,
- const char *child_name)
+static BdrvChild * GRAPH_RDLOCK
+bdrv_find_child(BlockDriverState *parent_bs, const char *child_name)
{
BdrvChild *child;
BlockDriverState *parent_bs, *new_bs = NULL;
BdrvChild *p_child;
+ bdrv_graph_wrlock(NULL);
+
parent_bs = bdrv_lookup_bs(parent, parent, errp);
if (!parent_bs) {
- return;
+ goto out;
}
if (!child == !node) {
} else {
error_setg(errp, "Either child or node must be specified");
}
- return;
+ goto out;
}
if (child) {
if (!p_child) {
error_setg(errp, "Node '%s' does not have child '%s'",
parent, child);
- return;
+ goto out;
}
bdrv_del_child(parent_bs, p_child, errp);
}
new_bs = bdrv_find_node(node);
if (!new_bs) {
error_setg(errp, "Node '%s' not found", node);
- return;
+ goto out;
}
bdrv_add_child(parent_bs, new_bs, errp);
}
+
+out:
+ bdrv_graph_wrunlock();
}
BlockJobInfoList *qmp_query_block_jobs(Error **errp)
AioContext *new_context;
BlockDriverState *bs;
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
bs = bdrv_find_node(node_name);
if (!bs) {
error_setg(errp, "Failed to find node with node-name='%s'", node_name);