/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/trace.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qerror.h"
#include "qemu/coroutine.h"
#include "qemu/id.h"
#include "qemu/timer.h"
/* Right now, this mutex is only needed to synchronize accesses to job->busy
 * and job->sleep_timer, such as concurrent calls to block_job_do_yield and
 * block_job_enter. */
static QemuMutex block_job_mutex;
/* BlockJob State Transition Table */
bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = {
                                          /* U, C, R, P, Y, S, X, E, N */
    /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0},
    /* C: */ [BLOCK_JOB_STATUS_CREATED]   = {0, 0, 1, 0, 0, 0, 1, 0, 1},
    /* R: */ [BLOCK_JOB_STATUS_RUNNING]   = {0, 0, 0, 1, 1, 0, 1, 1, 0},
    /* P: */ [BLOCK_JOB_STATUS_PAUSED]    = {0, 0, 1, 0, 0, 0, 0, 0, 0},
    /* Y: */ [BLOCK_JOB_STATUS_READY]     = {0, 0, 0, 0, 0, 1, 1, 1, 0},
    /* S: */ [BLOCK_JOB_STATUS_STANDBY]   = {0, 0, 0, 0, 1, 0, 0, 0, 0},
    /* X: */ [BLOCK_JOB_STATUS_ABORTING]  = {0, 0, 0, 0, 0, 0, 1, 1, 0},
    /* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 1},
    /* N: */ [BLOCK_JOB_STATUS_NULL]      = {0, 0, 0, 0, 0, 0, 0, 0, 0},
};
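
/* Read as BlockJobSTT[current][next]: for example, the CREATED row above
 * allows transitions to RUNNING, ABORTING and NULL, so a freshly created job
 * may start, fail early, or be torn down, but it can never jump straight to
 * PAUSED or CONCLUDED. */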
bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = {
                                          /* U, C, R, P, Y, S, X, E, N */
    [BLOCK_JOB_VERB_CANCEL]               = {0, 1, 1, 1, 1, 1, 0, 0, 0},
    [BLOCK_JOB_VERB_PAUSE]                = {0, 1, 1, 1, 1, 1, 0, 0, 0},
    [BLOCK_JOB_VERB_RESUME]               = {0, 1, 1, 1, 1, 1, 0, 0, 0},
    [BLOCK_JOB_VERB_SET_SPEED]            = {0, 1, 1, 1, 1, 1, 0, 0, 0},
    [BLOCK_JOB_VERB_COMPLETE]             = {0, 0, 0, 0, 1, 0, 0, 0, 0},
    [BLOCK_JOB_VERB_DISMISS]              = {0, 0, 0, 0, 0, 0, 0, 1, 0},
};
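
/* Read as BlockJobVerbTable[verb][current status]: for example, COMPLETE is
 * only honoured while the job is READY, DISMISS only once it has CONCLUDED,
 * while CANCEL, PAUSE, RESUME and SET_SPEED are accepted in any state from
 * CREATED through STANDBY. */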
static void block_job_state_transition(BlockJob *job, BlockJobStatus s1)
{
    BlockJobStatus s0 = job->status;
    assert(s1 >= 0 && s1 <= BLOCK_JOB_STATUS__MAX);
    trace_block_job_state_transition(job, job->ret, BlockJobSTT[s0][s1] ?
                                     "allowed" : "disallowed",
                                     qapi_enum_lookup(&BlockJobStatus_lookup,
                                                      s0),
                                     qapi_enum_lookup(&BlockJobStatus_lookup,
                                                      s1));
    assert(BlockJobSTT[s0][s1]);
    job->status = s1;
}
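
/* Check @bv against the job's current status using BlockJobVerbTable.
 * Returns 0 if the verb is allowed; otherwise sets @errp and returns an
 * error. */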
static int block_job_apply_verb(BlockJob *job, BlockJobVerb bv, Error **errp)
{
    assert(bv >= 0 && bv <= BLOCK_JOB_VERB__MAX);
    trace_block_job_apply_verb(job, qapi_enum_lookup(&BlockJobStatus_lookup,
                                                     job->status),
                               qapi_enum_lookup(&BlockJobVerb_lookup, bv),
                               BlockJobVerbTable[bv][job->status] ?
                               "allowed" : "prohibited");
    if (BlockJobVerbTable[bv][job->status]) {
        return 0;
    }
    error_setg(errp, "Job '%s' in state '%s' cannot accept command verb '%s'",
               job->id, qapi_enum_lookup(&BlockJobStatus_lookup, job->status),
               qapi_enum_lookup(&BlockJobVerb_lookup, bv));
    return -EPERM;
}
static void block_job_lock(void)
{
    qemu_mutex_lock(&block_job_mutex);
}

static void block_job_unlock(void)
{
    qemu_mutex_unlock(&block_job_mutex);
}

static void __attribute__((__constructor__)) block_job_init(void)
{
    qemu_mutex_init(&block_job_mutex);
}

static void block_job_event_cancelled(BlockJob *job);
static void block_job_event_completed(BlockJob *job, const char *msg);
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));
/* Transactional group of block jobs */
struct BlockJobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    QLIST_HEAD(, BlockJob) jobs;

    /* Reference count */
    int refcnt;
};

static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);

/*
 * The block job API is composed of two categories of functions.
 *
 * The first includes functions used by the monitor.  The monitor is
 * peculiar in that it accesses the block job list with block_job_get, and
 * therefore needs consistency across block_job_get and the actual operation
 * (e.g. block_job_set_speed).  The consistency is achieved with
 * aio_context_acquire/release.  These functions are declared in blockjob.h.
 *
 * The second includes functions used by the block job drivers and sometimes
 * by the core block layer.  These do not care about locking, because the
 * whole coroutine runs under the AioContext lock, and are declared in
 * blockjob_int.h.
 */
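
/*
 * Monitor-side callers typically follow the pattern below (a sketch only,
 * not code from this file; "device" and "speed" are illustrative):
 *
 *     BlockJob *job = block_job_get(device);
 *     AioContext *ctx = blk_get_aio_context(job->blk);
 *     aio_context_acquire(ctx);
 *     block_job_set_speed(job, speed, errp);
 *     aio_context_release(ctx);
 */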
BlockJob *block_job_next(BlockJob *job)
{
    if (!job) {
        return QLIST_FIRST(&block_jobs);
    }
    return QLIST_NEXT(job, job_list);
}

BlockJob *block_job_get(const char *id)
{
    BlockJob *job;

    QLIST_FOREACH(job, &block_jobs, job_list) {
        if (job->id && !strcmp(id, job->id)) {
            return job;
        }
    }

    return NULL;
}

BlockJobTxn *block_job_txn_new(void)
{
    BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}

static void block_job_txn_ref(BlockJobTxn *txn)
{
    txn->refcnt++;
}

void block_job_txn_unref(BlockJobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}
void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    block_job_txn_ref(txn);
}
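
/* Pausing is reference-counted: each block_job_pause() call must be paired
 * with a block_job_resume() call.  The job only actually stops running at
 * its next block_job_pause_point(). */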
static void block_job_pause(BlockJob *job)
{
    job->pause_count++;
}

static void block_job_resume(BlockJob *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }
    block_job_enter(job);
}

void block_job_ref(BlockJob *job)
{
    ++job->refcnt;
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque);
static void block_job_detach_aio_context(void *opaque);
void block_job_unref(BlockJob *job)
{
    if (--job->refcnt == 0) {
        assert(job->status == BLOCK_JOB_STATUS_NULL);
        BlockDriverState *bs = blk_bs(job->blk);
        QLIST_REMOVE(job, job_list);
        bs->job = NULL;
        block_job_remove_all_bdrv(job);
        blk_remove_aio_context_notifier(job->blk,
                                        block_job_attached_aio_context,
                                        block_job_detach_aio_context, job);
        blk_unref(job->blk);
        error_free(job->blocker);
        g_free(job->id);
        assert(!timer_pending(&job->sleep_timer));
        g_free(job);
    }
}
static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;

    if (job->driver->attached_aio_context) {
        job->driver->attached_aio_context(job, new_context);
    }

    block_job_resume(job);
}
static void block_job_drain(BlockJob *job)
{
    /* If the job is not busy (!job->busy), this kicks it into the next
     * pause point. */
    block_job_enter(job);

    blk_drain(job->blk);
    if (job->driver->drain) {
        job->driver->drain(job);
    }
}
static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    block_job_ref(job);

    block_job_pause(job);

    while (!job->paused && !job->completed) {
        block_job_drain(job);
    }

    block_job_unref(job);
}
static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'",
                           BlockJobType_str(job->driver->job_type),
                           job->id);
}
static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_pause(job);
}

static void child_job_drained_end(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_resume(job);
}

static const BdrvChildRole child_job = {
    .get_parent_desc    = child_job_get_parent_desc,
    .drained_begin      = child_job_drained_begin,
    .drained_end        = child_job_drained_end,
    .stay_at_node       = true,
};
void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;
    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}
int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
                               job, errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_ref(bs);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}
bool block_job_is_internal(BlockJob *job)
{
    return (job->id == NULL);
}

static bool block_job_started(BlockJob *job)
{
    return job->co;
}
/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn block_job_co_entry(void *opaque)
{
    BlockJob *job = opaque;

    assert(job && job->driver && job->driver->start);
    block_job_pause_point(job);
    job->driver->start(job);
}

static void block_job_sleep_timer_cb(void *opaque)
{
    BlockJob *job = opaque;

    block_job_enter(job);
}
void block_job_start(BlockJob *job)
{
    assert(job && !block_job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(block_job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    block_job_state_transition(job, BLOCK_JOB_STATUS_RUNNING);
    bdrv_coroutine_enter(blk_bs(job->blk), job->co);
}
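
/* Final part of a job's life cycle: move the job to the NULL state and drop
 * the last internal reference.  After this the BlockJob may be freed and must
 * not be touched again. */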
static void block_job_decommission(BlockJob *job)
{
    assert(job);
    job->completed = true;
    job->busy = false;
    job->paused = false;
    job->deferred_to_main_loop = true;
    block_job_state_transition(job, BLOCK_JOB_STATUS_NULL);
    block_job_unref(job);
}
static void block_job_do_dismiss(BlockJob *job)
{
    block_job_decommission(job);
}

static void block_job_conclude(BlockJob *job)
{
    block_job_state_transition(job, BLOCK_JOB_STATUS_CONCLUDED);
    if (job->auto_dismiss || !block_job_started(job)) {
        block_job_do_dismiss(job);
    }
}
static void block_job_update_rc(BlockJob *job)
{
    if (!job->ret && block_job_is_cancelled(job)) {
        job->ret = -ECANCELED;
    }
    if (job->ret) {
        block_job_state_transition(job, BLOCK_JOB_STATUS_ABORTING);
    }
}
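
/* Complete one job: on success run the driver's optional .commit callback,
 * on failure its .abort callback, and in either case .clean; then invoke the
 * completion callback, emit the QMP event and conclude the job. */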
static void block_job_completed_single(BlockJob *job)
{
    assert(job->completed);

    /* Ensure abort is called for late-transactional failures */
    block_job_update_rc(job);

    if (!job->ret) {
        if (job->driver->commit) {
            job->driver->commit(job);
        }
    } else {
        if (job->driver->abort) {
            job->driver->abort(job);
        }
    }
    if (job->driver->clean) {
        job->driver->clean(job);
    }

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (block_job_started(job)) {
        if (block_job_is_cancelled(job)) {
            block_job_event_cancelled(job);
        } else {
            const char *msg = NULL;
            if (job->ret < 0) {
                msg = strerror(-job->ret);
            }
            block_job_event_completed(job, msg);
        }
    }

    QLIST_REMOVE(job, txn_list);
    block_job_txn_unref(job->txn);
    block_job_conclude(job);
}
static void block_job_cancel_async(BlockJob *job)
{
    if (job->iostatus != BLOCK_DEVICE_IO_STATUS_OK) {
        block_job_iostatus_reset(job);
    }
    if (job->user_paused) {
        /* Do not call block_job_enter here, the caller will handle it. */
        job->user_paused = false;
        job->pause_count--;
    }
    job->cancelled = true;
}
static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    Error *local_err = NULL;
    int ret;

    assert(blk_bs(job->blk)->job == job);

    block_job_ref(job);

    if (finish) {
        finish(job, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        block_job_unref(job);
        return -EBUSY;
    }
    /* block_job_drain calls block_job_enter, and it should be enough to
     * induce progress until the job completes or moves to the main thread. */
    while (!job->deferred_to_main_loop && !job->completed) {
        block_job_drain(job);
    }
    while (!job->completed) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
    block_job_unref(job);
    return ret;
}
static void block_job_completed_txn_abort(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    block_job_txn_ref(txn);

    /* We are the first failed job. Cancel other jobs. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
    }

    /* Other jobs are effectively cancelled by us, set the status for
     * them; this job, however, may or may not be cancelled, depending
     * on the caller, so leave it. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job != job) {
            block_job_cancel_async(other_job);
        }
    }
    while (!QLIST_EMPTY(&txn->jobs)) {
        other_job = QLIST_FIRST(&txn->jobs);
        ctx = blk_get_aio_context(other_job->blk);
        if (!other_job->completed) {
            assert(other_job->cancelled);
            block_job_finish_sync(other_job, NULL, NULL);
        }
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }

    block_job_txn_unref(txn);
}
static void block_job_completed_txn_success(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;
    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!other_job->completed) {
            return;
        }
    }
    /* We are the last completed job, commit the transaction. */
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
        assert(other_job->ret == 0);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}
/* Assumes the block_job_mutex is held */
static bool block_job_timer_pending(BlockJob *job)
{
    return timer_pending(&job->sleep_timer);
}
void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;
    int64_t old_speed = job->speed;

    if (!job->driver->set_speed) {
        error_setg(errp, QERR_UNSUPPORTED);
        return;
    }
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_SET_SPEED, errp)) {
        return;
    }
    job->driver->set_speed(job, speed, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    job->speed = speed;
    if (speed && speed <= old_speed) {
        return;
    }

    /* kick only if a timer is pending */
    block_job_enter_cond(job, block_job_timer_pending);
}
void block_job_complete(BlockJob *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_COMPLETE, errp)) {
        return;
    }
    if (job->pause_count || job->cancelled || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}
void block_job_dismiss(BlockJob **jobptr, Error **errp)
{
    BlockJob *job = *jobptr;
    /* similarly to _complete, this is QMP-interface only. */
    assert(job->id);
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_DISMISS, errp)) {
        return;
    }

    block_job_do_dismiss(job);
    *jobptr = NULL;
}
void block_job_user_pause(BlockJob *job, Error **errp)
{
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_PAUSE, errp)) {
        return;
    }
    if (job->user_paused) {
        error_setg(errp, "Job is already paused");
        return;
    }
    job->user_paused = true;
    block_job_pause(job);
}

bool block_job_user_paused(BlockJob *job)
{
    return job->user_paused;
}
void block_job_user_resume(BlockJob *job, Error **errp)
{
    assert(job);
    if (!job->user_paused || job->pause_count <= 0) {
        error_setg(errp, "Can't resume a job that was not paused");
        return;
    }
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_RESUME, errp)) {
        return;
    }
    block_job_iostatus_reset(job);
    job->user_paused = false;
    block_job_resume(job);
}
void block_job_cancel(BlockJob *job)
{
    if (job->status == BLOCK_JOB_STATUS_CONCLUDED) {
        block_job_do_dismiss(job);
    } else if (block_job_started(job)) {
        block_job_cancel_async(job);
        block_job_enter(job);
    } else {
        block_job_completed(job, -ECANCELED);
    }
}
void block_job_user_cancel(BlockJob *job, Error **errp)
{
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_CANCEL, errp)) {
        return;
    }
    block_job_cancel(job);
}
/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
 * used with block_job_finish_sync() without the need for (rather nasty)
 * function pointer casts there. */
static void block_job_cancel_err(BlockJob *job, Error **errp)
{
    block_job_cancel(job);
}

int block_job_cancel_sync(BlockJob *job)
{
    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
}
void block_job_cancel_sync_all(void)
{
    BlockJob *job;
    AioContext *aio_context;

    while ((job = QLIST_FIRST(&block_jobs))) {
        aio_context = blk_get_aio_context(job->blk);
        aio_context_acquire(aio_context);
        block_job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}
int block_job_complete_sync(BlockJob *job, Error **errp)
{
    return block_job_finish_sync(job, &block_job_complete, errp);
}
BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type      = g_strdup(BlockJobType_str(job->driver->job_type));
    info->device    = g_strdup(job->id);
    info->len       = job->len;
    info->busy      = atomic_read(&job->busy);
    info->paused    = job->pause_count > 0;
    info->offset    = job->offset;
    info->speed     = job->speed;
    info->io_status = job->iostatus;
    info->ready     = job->ready;
    info->status    = job->status;
    return info;
}
static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
static void block_job_event_cancelled(BlockJob *job)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        &error_abort);
}

static void block_job_event_completed(BlockJob *job, const char *msg)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_completed(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        !!msg,
                                        msg,
                                        &error_abort);
}
/*
 * API for block job drivers and the block layer.  These functions are
 * declared in blockjob_int.h.
 */
void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       BlockJobTxn *txn, BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
        if (!*job_id) {
            error_setg(errp, "An explicit job ID is required for this node");
            return NULL;
        }
    }

    if (job_id) {
        if (flags & BLOCK_JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal block job");
            return NULL;
        }

        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }

        if (block_job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    }

    blk = blk_new(perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = g_malloc0(driver->instance_size);
    job->driver        = driver;
    job->id            = g_strdup(job_id);
    job->blk           = blk;
    job->cb            = cb;
    job->opaque        = opaque;
    job->busy          = false;
    job->paused        = true;
    job->pause_count   = 1;
    job->refcnt        = 1;
    job->auto_dismiss  = !(flags & BLOCK_JOB_MANUAL_DISMISS);
    block_job_state_transition(job, BLOCK_JOB_STATUS_CREATED);
    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
                   QEMU_CLOCK_REALTIME, SCALE_NS,
                   block_job_sleep_timer_cb, job);

    error_setg(&job->blocker, "block device is in use by block job: %s",
               BlockJobType_str(driver->job_type));
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    QLIST_INSERT_HEAD(&block_jobs, job, job_list);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            block_job_early_fail(job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }

    /* Single jobs are modeled as single-job transactions for sake of
     * consolidating the job management logic */
    if (!txn) {
        txn = block_job_txn_new();
        block_job_txn_add_job(txn, job);
        block_job_txn_unref(txn);
    } else {
        block_job_txn_add_job(txn, job);
    }

    return job;
}
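
/*
 * Driver-side sketch of creating and starting a job (names such as MyJob,
 * my_job_driver and the "common" field are illustrative, not defined here):
 *
 *     MyJob *s = block_job_create(job_id, &my_job_driver, txn, bs,
 *                                 perm, shared_perm, speed, flags,
 *                                 cb, opaque, errp);
 *     if (!s) {
 *         return;
 *     }
 *     ... initialise driver-specific fields of s ...
 *     block_job_start(&s->common);
 */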
void block_job_pause_all(void)
{
    BlockJob *job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_ref(job);
        block_job_pause(job);
        aio_context_release(aio_context);
    }
}
void block_job_early_fail(BlockJob *job)
{
    assert(job->status == BLOCK_JOB_STATUS_CREATED);
    block_job_decommission(job);
}
void block_job_completed(BlockJob *job, int ret)
{
    assert(job && job->txn && !job->completed);
    assert(blk_bs(job->blk)->job == job);
    job->completed = true;
    job->ret = ret;
    block_job_update_rc(job);
    trace_block_job_completed(job, ret, job->ret);
    if (job->ret) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_completed_txn_success(job);
    }
}
static bool block_job_should_pause(BlockJob *job)
{
    return job->pause_count > 0;
}
/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
 * Reentering the job coroutine with block_job_enter() before the timer has
 * expired is allowed and cancels the timer.
 *
 * If @ns is (uint64_t) -1, no timer is scheduled and block_job_enter() must be
 * called explicitly. */
static void block_job_do_yield(BlockJob *job, uint64_t ns)
{
    block_job_lock();
    if (ns != -1) {
        timer_mod(&job->sleep_timer, ns);
    }
    job->busy = false;
    block_job_unlock();
    qemu_coroutine_yield();

    /* Set by block_job_enter before re-entering the coroutine.  */
    assert(job->busy);
}
void coroutine_fn block_job_pause_point(BlockJob *job)
{
    assert(job && block_job_started(job));

    if (!block_job_should_pause(job)) {
        return;
    }
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
        BlockJobStatus status = job->status;
        block_job_state_transition(job, status == BLOCK_JOB_STATUS_READY ?
                                        BLOCK_JOB_STATUS_STANDBY :
                                        BLOCK_JOB_STATUS_PAUSED);
        job->paused = true;
        block_job_do_yield(job, -1);
        job->paused = false;
        block_job_state_transition(job, status);
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}
void block_job_resume_all(void)
{
    BlockJob *job, *next;

    QLIST_FOREACH_SAFE(job, &block_jobs, job_list, next) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        block_job_unref(job);
        aio_context_release(aio_context);
    }
}
/*
 * Conditionally enter a block_job pending a call to fn() while
 * under the block_job_lock critical section.
 */
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job))
{
    if (!block_job_started(job)) {
        return;
    }
    if (job->deferred_to_main_loop) {
        return;
    }

    block_job_lock();
    if (job->busy) {
        block_job_unlock();
        return;
    }

    if (fn && !fn(job)) {
        block_job_unlock();
        return;
    }

    assert(!job->deferred_to_main_loop);
    timer_del(&job->sleep_timer);
    job->busy = true;
    block_job_unlock();
    aio_co_wake(job->co);
}
void block_job_enter(BlockJob *job)
{
    block_job_enter_cond(job, NULL);
}

bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}
void block_job_sleep_ns(BlockJob *job, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
    }

    block_job_pause_point(job);
}
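
/*
 * Typical use from a job's .start coroutine (sketch; "delay_ns" and the loop
 * body are illustrative):
 *
 *     while (more_work) {
 *         ... process one chunk ...
 *         block_job_sleep_ns(job, delay_ns);
 *         if (block_job_is_cancelled(job)) {
 *             break;
 *         }
 *     }
 */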
void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, -1);
    }

    block_job_pause_point(job);
}
void block_job_iostatus_reset(BlockJob *job)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->user_paused && job->pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
void block_job_event_ready(BlockJob *job)
{
    block_job_state_transition(job, BLOCK_JOB_STATUS_READY);
    job->ready = true;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job->driver->job_type,
                                    job->id,
                                    job->len,
                                    job->offset,
                                    job->speed, &error_abort);
}
BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action, &error_abort);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        block_job_pause(job);
        /* make the pause user visible, which will be resumed from QMP. */
        job->user_paused = true;
        block_job_iostatus_set_err(job, error);
    }
    return action;
}
typedef struct {
    BlockJob *job;
    AioContext *aio_context;
    BlockJobDeferToMainLoopFn *fn;
    void *opaque;
} BlockJobDeferToMainLoopData;
static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;
    AioContext *aio_context;

    /* Prevent race with block_job_defer_to_main_loop() */
    aio_context_acquire(data->aio_context);

    /* Fetch BDS AioContext again, in case it has changed */
    aio_context = blk_get_aio_context(data->job->blk);
    if (aio_context != data->aio_context) {
        aio_context_acquire(aio_context);
    }

    data->fn(data->job, data->opaque);

    if (aio_context != data->aio_context) {
        aio_context_release(aio_context);
    }

    aio_context_release(data->aio_context);

    g_free(data);
}
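
/* Schedule @fn to run in the main loop with the job's AioContext acquired.
 * Job drivers use this to finish up (e.g. call block_job_completed()) outside
 * of their own coroutine; job->deferred_to_main_loop is set so that
 * block_job_enter() and block_job_finish_sync() know the job has moved on. */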
void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->aio_context = blk_get_aio_context(job->blk);
    data->fn = fn;
    data->opaque = opaque;
    job->deferred_to_main_loop = true;

    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            block_job_defer_to_main_loop_bh, data);
}