X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=blockjob.c;h=9ca942ba01673c69a1879fd4a898e3662eeb4104;hb=48cefd94c748cf8d5bb3ffda53bdc8e503882a3f;hp=bf7ef48f988b2920b222e9874f1fb4be44ec5fe0;hpb=850a8242a5303ceddff5d6700ee9d15307bf1b9f;p=mirror_qemu.git

diff --git a/blockjob.c b/blockjob.c
index bf7ef48f98..9ca942ba01 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -81,10 +81,6 @@ BlockJob *block_job_get(const char *id)
     }
 }
 
-static void block_job_attached_aio_context(AioContext *new_context,
-                                           void *opaque);
-static void block_job_detach_aio_context(void *opaque);
-
 void block_job_free(Job *job)
 {
     BlockJob *bjob = container_of(job, BlockJob, job);
@@ -92,28 +88,10 @@ void block_job_free(Job *job)
 
     bs->job = NULL;
     block_job_remove_all_bdrv(bjob);
-    blk_remove_aio_context_notifier(bjob->blk,
-                                    block_job_attached_aio_context,
-                                    block_job_detach_aio_context, bjob);
     blk_unref(bjob->blk);
     error_free(bjob->blocker);
 }
 
-static void block_job_attached_aio_context(AioContext *new_context,
-                                           void *opaque)
-{
-    BlockJob *job = opaque;
-    const JobDriver *drv = job->job.driver;
-    BlockJobDriver *bjdrv = container_of(drv, BlockJobDriver, job_driver);
-
-    job->job.aio_context = new_context;
-    if (bjdrv->attached_aio_context) {
-        bjdrv->attached_aio_context(job, new_context);
-    }
-
-    job_resume(&job->job);
-}
-
 void block_job_drain(Job *job)
 {
     BlockJob *bjob = container_of(job, BlockJob, job);
@@ -126,23 +104,6 @@ void block_job_drain(Job *job)
     }
 }
 
-static void block_job_detach_aio_context(void *opaque)
-{
-    BlockJob *job = opaque;
-
-    /* In case the job terminates during aio_poll()... */
-    job_ref(&job->job);
-
-    job_pause(&job->job);
-
-    while (!job->job.paused && !job_is_completed(&job->job)) {
-        job_drain(&job->job);
-    }
-
-    job->job.aio_context = NULL;
-    job_unref(&job->job);
-}
-
 static char *child_job_get_parent_desc(BdrvChild *c)
 {
     BlockJob *job = c->opaque;
@@ -164,7 +125,7 @@ static bool child_job_drained_poll(BdrvChild *c)
     /* An inactive or completed job doesn't have any pending requests. Jobs
      * with !job->busy are either already paused or have a pause point after
      * being reentered, so no job driver code will run before they pause. */
-    if (!job->busy || job_is_completed(job) || job->deferred_to_main_loop) {
+    if (!job->busy || job_is_completed(job)) {
         return false;
     }
 
@@ -183,11 +144,46 @@ static void child_job_drained_end(BdrvChild *c)
     job_resume(&job->job);
 }
 
+static bool child_job_can_set_aio_ctx(BdrvChild *c, AioContext *ctx,
+                                      GSList **ignore, Error **errp)
+{
+    BlockJob *job = c->opaque;
+    GSList *l;
+
+    for (l = job->nodes; l; l = l->next) {
+        BdrvChild *sibling = l->data;
+        if (!bdrv_child_can_set_aio_context(sibling, ctx, ignore, errp)) {
+            return false;
+        }
+    }
+    return true;
+}
+
+static void child_job_set_aio_ctx(BdrvChild *c, AioContext *ctx,
+                                  GSList **ignore)
+{
+    BlockJob *job = c->opaque;
+    GSList *l;
+
+    for (l = job->nodes; l; l = l->next) {
+        BdrvChild *sibling = l->data;
+        if (g_slist_find(*ignore, sibling)) {
+            continue;
+        }
+        *ignore = g_slist_prepend(*ignore, sibling);
+        bdrv_set_aio_context_ignore(sibling->bs, ctx, ignore);
+    }
+
+    job->job.aio_context = ctx;
+}
+
 static const BdrvChildRole child_job = {
     .get_parent_desc    = child_job_get_parent_desc,
     .drained_begin      = child_job_drained_begin,
     .drained_poll       = child_job_drained_poll,
     .drained_end        = child_job_drained_end,
+    .can_set_aio_ctx    = child_job_can_set_aio_ctx,
+    .set_aio_ctx        = child_job_set_aio_ctx,
     .stay_at_node       = true,
 };
 
@@ -221,6 +217,11 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
     return 0;
 }
 
+static void block_job_on_idle(Notifier *n, void *opaque)
+{
+    aio_wait_kick();
+}
+
 bool block_job_is_internal(BlockJob *job)
 {
     return (job->job.id == NULL);
@@ -416,6 +417,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
     job->finalize_completed_notifier.notify = block_job_event_completed;
     job->pending_notifier.notify = block_job_event_pending;
     job->ready_notifier.notify = block_job_event_ready;
+    job->idle_notifier.notify = block_job_on_idle;
 
     notifier_list_add(&job->job.on_finalize_cancelled,
                       &job->finalize_cancelled_notifier);
@@ -423,6 +425,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       &job->finalize_completed_notifier);
     notifier_list_add(&job->job.on_pending, &job->pending_notifier);
    notifier_list_add(&job->job.on_ready, &job->ready_notifier);
+    notifier_list_add(&job->job.on_idle, &job->idle_notifier);
 
     error_setg(&job->blocker, "block device is in use by block job: %s",
                job_type_str(&job->job));
@@ -431,8 +434,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
 
     bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);
 
-    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
-                                 block_job_detach_aio_context, job);
+    blk_set_allow_aio_context_change(blk, true);
 
     /* Only set speed when necessary to avoid NotSupported error */
     if (speed != 0) {
@@ -494,9 +496,11 @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                         action);
     }
     if (action == BLOCK_ERROR_ACTION_STOP) {
-        job_pause(&job->job);
-        /* make the pause user visible, which will be resumed from QMP. */
-        job->job.user_paused = true;
+        if (!job->job.user_paused) {
+            job_pause(&job->job);
+            /* make the pause user visible, which will be resumed from QMP. */
+            job->job.user_paused = true;
+        }
         block_job_iostatus_set_err(job, error);
     }
     return action;