X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=blockjob.c;h=25fe8e625d29e551a59b6789512caab5ec9b2920;hb=416af8564f4a30f207269307616a9e0b3f3c6570;hp=4868453d7498f188e9480defc0ab51bc838b0531;hpb=d7e2fe4aac8b74bbfe82b2309536528b4dbe0d34;p=mirror_qemu.git

diff --git a/blockjob.c b/blockjob.c
index 4868453d74..25fe8e625d 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -24,6 +24,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "block/aio-wait.h"
 #include "block/block.h"
 #include "block/blockjob_int.h"
 #include "block/block_int.h"
@@ -32,25 +33,9 @@
 #include "qapi/error.h"
 #include "qapi/qapi-events-block-core.h"
 #include "qapi/qmp/qerror.h"
-#include "qemu/coroutine.h"
 #include "qemu/main-loop.h"
 #include "qemu/timer.h"
 
-/*
- * The block job API is composed of two categories of functions.
- *
- * The first includes functions used by the monitor. The monitor is
- * peculiar in that it accesses the block job list with block_job_get, and
- * therefore needs consistency across block_job_get and the actual operation
- * (e.g. block_job_set_speed). The consistency is achieved with
- * aio_context_acquire/release. These functions are declared in blockjob.h.
- *
- * The second includes functions used by the block job drivers and sometimes
- * by the core block layer. These do not care about locking, because the
- * whole coroutine runs under the AioContext lock, and are declared in
- * blockjob_int.h.
- */
-
 static bool is_block_job(Job *job)
 {
     return job_type(job) == JOB_TYPE_BACKUP ||
@@ -59,21 +44,21 @@ static bool is_block_job(Job *job)
            job_type(job) == JOB_TYPE_STREAM;
 }
 
-BlockJob *block_job_next(BlockJob *bjob)
+BlockJob *block_job_next_locked(BlockJob *bjob)
 {
     Job *job = bjob ? &bjob->job : NULL;
     GLOBAL_STATE_CODE();
 
     do {
-        job = job_next(job);
+        job = job_next_locked(job);
     } while (job && !is_block_job(job));
 
     return job ? container_of(job, BlockJob, job) : NULL;
 }
 
-BlockJob *block_job_get(const char *id)
+BlockJob *block_job_get_locked(const char *id)
 {
-    Job *job = job_get(id);
+    Job *job = job_get_locked(id);
     GLOBAL_STATE_CODE();
 
     if (job && is_block_job(job)) {
@@ -83,6 +68,12 @@ BlockJob *block_job_get(const char *id)
     }
 }
 
+BlockJob *block_job_get(const char *id)
+{
+    JOB_LOCK_GUARD();
+    return block_job_get_locked(id);
+}
+
 void block_job_free(Job *job)
 {
     BlockJob *bjob = container_of(job, BlockJob, job);
@@ -114,8 +105,10 @@ static bool child_job_drained_poll(BdrvChild *c)
     /* An inactive or completed job doesn't have any pending requests. Jobs
      * with !job->busy are either already paused or have a pause point after
      * being reentered, so no job driver code will run before they pause. */
-    if (!job->busy || job_is_completed(job)) {
-        return false;
+    WITH_JOB_LOCK_GUARD() {
+        if (!job->busy || job_is_completed_locked(job)) {
+            return false;
+        }
     }
 
     /* Otherwise, assume that it isn't fully stopped yet, but allow the job to
@@ -127,48 +120,61 @@ static bool child_job_drained_poll(BdrvChild *c)
     }
 }
 
-static void child_job_drained_end(BdrvChild *c, int *drained_end_counter)
+static void child_job_drained_end(BdrvChild *c)
 {
     BlockJob *job = c->opaque;
     job_resume(&job->job);
 }
 
-static bool child_job_can_set_aio_ctx(BdrvChild *c, AioContext *ctx,
-                                      GSList **ignore, Error **errp)
+typedef struct BdrvStateChildJobContext {
+    AioContext *new_ctx;
+    BlockJob *job;
+} BdrvStateChildJobContext;
+
+static void child_job_set_aio_ctx_commit(void *opaque)
 {
-    BlockJob *job = c->opaque;
-    GSList *l;
+    BdrvStateChildJobContext *s = opaque;
+    BlockJob *job = s->job;
 
-    for (l = job->nodes; l; l = l->next) {
-        BdrvChild *sibling = l->data;
-        if (!bdrv_child_can_set_aio_context(sibling, ctx, ignore, errp)) {
-            return false;
-        }
-    }
-    return true;
+    job_set_aio_context(&job->job, s->new_ctx);
 }
 
-static void child_job_set_aio_ctx(BdrvChild *c, AioContext *ctx,
-                                  GSList **ignore)
+static TransactionActionDrv change_child_job_context = {
+    .commit = child_job_set_aio_ctx_commit,
+    .clean = g_free,
+};
+
+static bool child_job_change_aio_ctx(BdrvChild *c, AioContext *ctx,
+                                     GHashTable *visited, Transaction *tran,
+                                     Error **errp)
 {
     BlockJob *job = c->opaque;
+    BdrvStateChildJobContext *s;
     GSList *l;
 
     for (l = job->nodes; l; l = l->next) {
         BdrvChild *sibling = l->data;
-        if (g_slist_find(*ignore, sibling)) {
-            continue;
+        if (!bdrv_child_change_aio_context(sibling, ctx, visited,
+                                           tran, errp)) {
+            return false;
         }
-        *ignore = g_slist_prepend(*ignore, sibling);
-        bdrv_set_aio_context_ignore(sibling->bs, ctx, ignore);
     }
-    job->job.aio_context = ctx;
+
+    s = g_new(BdrvStateChildJobContext, 1);
+    *s = (BdrvStateChildJobContext) {
+        .new_ctx = ctx,
+        .job = job,
+    };
+
+    tran_add(tran, &change_child_job_context, s);
+    return true;
 }
 
 static AioContext *child_job_get_parent_aio_context(BdrvChild *c)
 {
     BlockJob *job = c->opaque;
-    GLOBAL_STATE_CODE();
+    IO_CODE();
+    JOB_LOCK_GUARD();
 
     return job->job.aio_context;
 }
@@ -178,8 +184,7 @@ static const BdrvChildClass child_job = {
     .drained_begin      = child_job_drained_begin,
     .drained_poll       = child_job_drained_poll,
     .drained_end        = child_job_drained_end,
-    .can_set_aio_ctx    = child_job_can_set_aio_ctx,
-    .set_aio_ctx        = child_job_set_aio_ctx,
+    .change_aio_ctx     = child_job_change_aio_ctx,
    .stay_at_node       = true,
     .get_parent_aio_context = child_job_get_parent_aio_context,
 };
@@ -225,20 +230,27 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                        uint64_t perm, uint64_t shared_perm, Error **errp)
 {
     BdrvChild *c;
+    AioContext *ctx = bdrv_get_aio_context(bs);
     bool need_context_ops;
     GLOBAL_STATE_CODE();
 
     bdrv_ref(bs);
 
-    need_context_ops = bdrv_get_aio_context(bs) != job->job.aio_context;
+    need_context_ops = ctx != job->job.aio_context;
 
-    if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) {
-        aio_context_release(job->job.aio_context);
+    if (need_context_ops) {
+        if (job->job.aio_context != qemu_get_aio_context()) {
+            aio_context_release(job->job.aio_context);
+        }
+        aio_context_acquire(ctx);
     }
     c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job,
                                errp);
-    if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) {
-        aio_context_acquire(job->job.aio_context);
+    if (need_context_ops) {
+        aio_context_release(ctx);
+        if (job->job.aio_context != qemu_get_aio_context()) {
+            aio_context_acquire(job->job.aio_context);
+        }
     }
     if (c == NULL) {
         return -EPERM;
@@ -250,7 +262,8 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
     return 0;
 }
 
-static void block_job_on_idle(Notifier *n, void *opaque)
+/* Called with job_mutex lock held. */
+static void block_job_on_idle_locked(Notifier *n, void *opaque)
 {
     aio_wait_kick();
 }
@@ -271,14 +284,14 @@ static bool job_timer_pending(Job *job)
     return timer_pending(&job->sleep_timer);
 }
 
-bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
+bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp)
 {
     const BlockJobDriver *drv = block_job_driver(job);
     int64_t old_speed = job->speed;
 
     GLOBAL_STATE_CODE();
 
-    if (job_apply_verb(&job->job, JOB_VERB_SET_SPEED, errp) < 0) {
+    if (job_apply_verb_locked(&job->job, JOB_VERB_SET_SPEED, errp) < 0) {
         return false;
     }
     if (speed < 0) {
@@ -292,7 +305,9 @@ bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
     job->speed = speed;
 
     if (drv->set_speed) {
+        job_unlock();
         drv->set_speed(job, speed);
+        job_lock();
     }
 
     if (speed && speed <= old_speed) {
@@ -300,18 +315,42 @@ bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
     }
 
     /* kick only if a timer is pending */
-    job_enter_cond(&job->job, job_timer_pending);
+    job_enter_cond_locked(&job->job, job_timer_pending);
 
     return true;
 }
 
-int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n)
+static bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
+{
+    JOB_LOCK_GUARD();
+    return block_job_set_speed_locked(job, speed, errp);
+}
+
+void block_job_ratelimit_processed_bytes(BlockJob *job, uint64_t n)
 {
     IO_CODE();
-    return ratelimit_calculate_delay(&job->limit, n);
+    ratelimit_calculate_delay(&job->limit, n);
+}
+
+void block_job_ratelimit_sleep(BlockJob *job)
+{
+    uint64_t delay_ns;
+
+    /*
+     * Sleep at least once. If the job is reentered early, keep waiting until
+     * we've waited for the full time that is necessary to keep the job at the
+     * right speed.
+     *
+     * Make sure to recalculate the delay after each (possibly interrupted)
+     * sleep because the speed can change while the job has yielded.
+     */
+    do {
+        delay_ns = ratelimit_calculate_delay(&job->limit, 0);
+        job_sleep_ns(&job->job, delay_ns);
+    } while (delay_ns && !job_is_cancelled(&job->job));
 }
 
-BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
+BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp)
 {
     BlockJobInfo *info;
     uint64_t progress_current, progress_total;
@@ -329,18 +368,17 @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
     info = g_new0(BlockJobInfo, 1);
     info->type      = g_strdup(job_type_str(&job->job));
     info->device    = g_strdup(job->job.id);
-    info->busy      = qatomic_read(&job->job.busy);
+    info->busy      = job->job.busy;
     info->paused    = job->job.pause_count > 0;
     info->offset    = progress_current;
     info->len       = progress_total;
     info->speed     = job->speed;
     info->io_status = job->iostatus;
-    info->ready     = job_is_ready(&job->job),
+    info->ready     = job_is_ready_locked(&job->job),
     info->status    = job->job.status;
     info->auto_finalize = job->job.auto_finalize;
     info->auto_dismiss  = job->job.auto_dismiss;
     if (job->job.ret) {
-        info->has_error = true;
         info->error = job->job.err ?
                         g_strdup(error_get_pretty(job->job.err)) :
                         g_strdup(strerror(-job->job.ret));
@@ -348,7 +386,8 @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
     return info;
 }
 
-static void block_job_iostatus_set_err(BlockJob *job, int error)
+/* Called with job lock held */
+static void block_job_iostatus_set_err_locked(BlockJob *job, int error)
 {
     if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
         job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
@@ -356,7 +395,8 @@ static void block_job_iostatus_set_err(BlockJob *job, int error)
     }
 }
 
-static void block_job_event_cancelled(Notifier *n, void *opaque)
+/* Called with job_mutex lock held. */
+static void block_job_event_cancelled_locked(Notifier *n, void *opaque)
 {
     BlockJob *job = opaque;
     uint64_t progress_current, progress_total;
@@ -375,7 +415,8 @@ static void block_job_event_cancelled(Notifier *n, void *opaque)
                                         job->speed);
 }
 
-static void block_job_event_completed(Notifier *n, void *opaque)
+/* Called with job_mutex lock held. */
+static void block_job_event_completed_locked(Notifier *n, void *opaque)
 {
     BlockJob *job = opaque;
     const char *msg = NULL;
@@ -397,11 +438,11 @@ static void block_job_event_completed(Notifier *n, void *opaque)
                                         progress_total,
                                         progress_current,
                                         job->speed,
-                                        !!msg,
                                         msg);
 }
 
-static void block_job_event_pending(Notifier *n, void *opaque)
+/* Called with job_mutex lock held. */
+static void block_job_event_pending_locked(Notifier *n, void *opaque)
 {
     BlockJob *job = opaque;
 
@@ -413,7 +454,8 @@ static void block_job_event_pending(Notifier *n, void *opaque)
                                       job->job.id);
 }
 
-static void block_job_event_ready(Notifier *n, void *opaque)
+/* Called with job_mutex lock held. */
+static void block_job_event_ready_locked(Notifier *n, void *opaque)
 {
     BlockJob *job = opaque;
     uint64_t progress_current, progress_total;
@@ -433,11 +475,6 @@
 }
 
 
-/*
- * API for block job drivers and the block layer. These functions are
- * declared in blockjob_int.h.
- */
-
 void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                        JobTxn *txn, BlockDriverState *bs, uint64_t perm,
                        uint64_t shared_perm, int64_t speed, int flags,
@@ -463,19 +500,21 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
 
     ratelimit_init(&job->limit);
 
-    job->finalize_cancelled_notifier.notify = block_job_event_cancelled;
-    job->finalize_completed_notifier.notify = block_job_event_completed;
-    job->pending_notifier.notify = block_job_event_pending;
-    job->ready_notifier.notify = block_job_event_ready;
-    job->idle_notifier.notify = block_job_on_idle;
-
-    notifier_list_add(&job->job.on_finalize_cancelled,
-                      &job->finalize_cancelled_notifier);
-    notifier_list_add(&job->job.on_finalize_completed,
-                      &job->finalize_completed_notifier);
-    notifier_list_add(&job->job.on_pending, &job->pending_notifier);
-    notifier_list_add(&job->job.on_ready, &job->ready_notifier);
-    notifier_list_add(&job->job.on_idle, &job->idle_notifier);
+    job->finalize_cancelled_notifier.notify = block_job_event_cancelled_locked;
+    job->finalize_completed_notifier.notify = block_job_event_completed_locked;
+    job->pending_notifier.notify = block_job_event_pending_locked;
+    job->ready_notifier.notify = block_job_event_ready_locked;
+    job->idle_notifier.notify = block_job_on_idle_locked;
+
+    WITH_JOB_LOCK_GUARD() {
+        notifier_list_add(&job->job.on_finalize_cancelled,
+                          &job->finalize_cancelled_notifier);
+        notifier_list_add(&job->job.on_finalize_completed,
+                          &job->finalize_completed_notifier);
+        notifier_list_add(&job->job.on_pending, &job->pending_notifier);
+        notifier_list_add(&job->job.on_ready, &job->ready_notifier);
+        notifier_list_add(&job->job.on_idle, &job->idle_notifier);
+    }
 
     error_setg(&job->blocker, "block device is in use by block job: %s",
                job_type_str(&job->job));
@@ -498,7 +537,7 @@ fail:
     return NULL;
 }
 
-void block_job_iostatus_reset(BlockJob *job)
+void block_job_iostatus_reset_locked(BlockJob *job)
 {
     GLOBAL_STATE_CODE();
     if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
@@ -508,6 +547,12 @@ void block_job_iostatus_reset(BlockJob *job)
     job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
 }
 
+static void block_job_iostatus_reset(BlockJob *job)
+{
+    JOB_LOCK_GUARD();
+    block_job_iostatus_reset_locked(job);
+}
+
 void block_job_user_resume(Job *job)
 {
     BlockJob *bjob = container_of(job, BlockJob, job);
@@ -546,12 +591,17 @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                         action);
     }
     if (action == BLOCK_ERROR_ACTION_STOP) {
-        if (!job->job.user_paused) {
-            job_pause(&job->job);
-            /* make the pause user visible, which will be resumed from QMP. */
-            job->job.user_paused = true;
+        WITH_JOB_LOCK_GUARD() {
+            if (!job->job.user_paused) {
+                job_pause_locked(&job->job);
+                /*
+                 * make the pause user visible, which will be
+                 * resumed from QMP.
+                 */
+                job->job.user_paused = true;
+            }
+            block_job_iostatus_set_err_locked(job, error);
         }
-        block_job_iostatus_set_err(job, error);
     }
     return action;
 }
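
Note on the locking convention this diff converges on: public entry points such as
block_job_get(), block_job_set_speed() and block_job_iostatus_reset() become thin
wrappers that take the job lock (JOB_LOCK_GUARD / WITH_JOB_LOCK_GUARD) and call a
*_locked variant, while the *_locked functions expect job_mutex to already be held.
A minimal caller-side sketch of that pattern, not part of the patch itself; the
helper name example_set_job_speed and its error message are invented for
illustration, and it assumes the *_locked functions are visible to the caller via
the usual block job headers:

    #include "qemu/osdep.h"
    #include "block/blockjob.h"
    #include "qapi/error.h"

    /*
     * Look up a job and update its speed inside a single job_mutex critical
     * section, so the lookup and the update see a consistent job list.
     */
    static bool example_set_job_speed(const char *id, int64_t speed, Error **errp)
    {
        bool ok = false;

        WITH_JOB_LOCK_GUARD() {
            BlockJob *job = block_job_get_locked(id);
            if (job) {
                ok = block_job_set_speed_locked(job, speed, errp);
            } else {
                error_setg(errp, "block job '%s' not found", id);
            }
        }
        return ok;
    }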