/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/trace.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qerror.h"
#include "qemu/coroutine.h"
#include "qemu/id.h"
#include "qemu/timer.h"
/* Right now, this mutex is only needed to synchronize accesses to job->busy
 * and job->sleep_timer, such as concurrent calls to block_job_do_yield and
 * block_job_enter. */
static QemuMutex block_job_mutex;
/* BlockJob State Transition Table */
bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = {
                                          /* U, C, R, P, Y, S, W, X, E, N */
    /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
    /* C: */ [BLOCK_JOB_STATUS_CREATED]   = {0, 0, 1, 0, 0, 0, 0, 1, 0, 1},
    /* R: */ [BLOCK_JOB_STATUS_RUNNING]   = {0, 0, 0, 1, 1, 0, 1, 1, 0, 0},
    /* P: */ [BLOCK_JOB_STATUS_PAUSED]    = {0, 0, 1, 0, 0, 0, 0, 0, 0, 0},
    /* Y: */ [BLOCK_JOB_STATUS_READY]     = {0, 0, 0, 0, 0, 1, 1, 1, 0, 0},
    /* S: */ [BLOCK_JOB_STATUS_STANDBY]   = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0},
    /* W: */ [BLOCK_JOB_STATUS_WAITING]   = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
    /* X: */ [BLOCK_JOB_STATUS_ABORTING]  = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
    /* E: */ [BLOCK_JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
    /* N: */ [BLOCK_JOB_STATUS_NULL]      = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
};

bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = {
                                          /* U, C, R, P, Y, S, W, X, E, N */
    [BLOCK_JOB_VERB_CANCEL]               = {0, 1, 1, 1, 1, 1, 1, 0, 0, 0},
    [BLOCK_JOB_VERB_PAUSE]                = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0},
    [BLOCK_JOB_VERB_RESUME]               = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0},
    [BLOCK_JOB_VERB_SET_SPEED]            = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0},
    [BLOCK_JOB_VERB_COMPLETE]             = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0},
    [BLOCK_JOB_VERB_DISMISS]              = {0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
};
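
/*
 * Example of reading these tables (illustrative, not normative): a
 * straightforward successful job moves CREATED -> RUNNING -> READY ->
 * WAITING -> CONCLUDED -> NULL, and each hop corresponds to a 1 in
 * BlockJobSTT.  Likewise,
 * BlockJobVerbTable[BLOCK_JOB_VERB_PAUSE][BLOCK_JOB_STATUS_READY] is 1,
 * so pausing a ready job is accepted, while
 * BlockJobVerbTable[BLOCK_JOB_VERB_COMPLETE][BLOCK_JOB_STATUS_RUNNING] is 0,
 * so block-job-complete is rejected until the job transitions to READY.
 */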
static void block_job_state_transition(BlockJob *job, BlockJobStatus s1)
{
    BlockJobStatus s0 = job->status;
    assert(s1 >= 0 && s1 <= BLOCK_JOB_STATUS__MAX);
    trace_block_job_state_transition(job, job->ret, BlockJobSTT[s0][s1] ?
                                     "allowed" : "disallowed",
                                     qapi_enum_lookup(&BlockJobStatus_lookup,
                                                      s0),
                                     qapi_enum_lookup(&BlockJobStatus_lookup,
                                                      s1));
    assert(BlockJobSTT[s0][s1]);
    job->status = s1;
}
static int block_job_apply_verb(BlockJob *job, BlockJobVerb bv, Error **errp)
{
    assert(bv >= 0 && bv <= BLOCK_JOB_VERB__MAX);
    trace_block_job_apply_verb(job, qapi_enum_lookup(&BlockJobStatus_lookup,
                                                     job->status),
                               qapi_enum_lookup(&BlockJobVerb_lookup, bv),
                               BlockJobVerbTable[bv][job->status] ?
                               "allowed" : "prohibited");
    if (BlockJobVerbTable[bv][job->status]) {
        return 0;
    }
    error_setg(errp, "Job '%s' in state '%s' cannot accept command verb '%s'",
               job->id, qapi_enum_lookup(&BlockJobStatus_lookup, job->status),
               qapi_enum_lookup(&BlockJobVerb_lookup, bv));
    return -EPERM;
}
static void block_job_lock(void)
{
    qemu_mutex_lock(&block_job_mutex);
}

static void block_job_unlock(void)
{
    qemu_mutex_unlock(&block_job_mutex);
}

static void __attribute__((__constructor__)) block_job_init(void)
{
    qemu_mutex_init(&block_job_mutex);
}
static void block_job_event_cancelled(BlockJob *job);
static void block_job_event_completed(BlockJob *job, const char *msg);
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));
/* Transactional group of block jobs */
struct BlockJobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    /* List of jobs */
    QLIST_HEAD(, BlockJob) jobs;

    /* Reference count */
    int refcnt;
};

static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);
/*
 * The block job API is composed of two categories of functions.
 *
 * The first includes functions used by the monitor.  The monitor is
 * peculiar in that it accesses the block job list with block_job_get, and
 * therefore needs consistency across block_job_get and the actual operation
 * (e.g. block_job_set_speed).  The consistency is achieved with
 * aio_context_acquire/release.  These functions are declared in blockjob.h.
 *
 * The second includes functions used by the block job drivers and sometimes
 * by the core block layer.  These do not care about locking, because the
 * whole coroutine runs under the AioContext lock, and are declared in
 * blockjob_int.h.
 */
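
/*
 * A sketch of the monitor-side pattern described above (hypothetical
 * caller, not code from this file): the lookup and the operation must
 * happen under the same AioContext lock so they observe a consistent job.
 *
 *     BlockJob *job = block_job_get("job0");
 *     if (job) {
 *         AioContext *ctx = blk_get_aio_context(job->blk);
 *         aio_context_acquire(ctx);
 *         block_job_set_speed(job, 10 * 1024 * 1024, &err);
 *         aio_context_release(ctx);
 *     }
 */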
BlockJob *block_job_next(BlockJob *job)
{
    if (!job) {
        return QLIST_FIRST(&block_jobs);
    }
    return QLIST_NEXT(job, job_list);
}
BlockJob *block_job_get(const char *id)
{
    BlockJob *job;

    QLIST_FOREACH(job, &block_jobs, job_list) {
        if (job->id && !strcmp(id, job->id)) {
            return job;
        }
    }

    return NULL;
}
BlockJobTxn *block_job_txn_new(void)
{
    BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}
static void block_job_txn_ref(BlockJobTxn *txn)
{
    txn->refcnt++;
}

void block_job_txn_unref(BlockJobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}
void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    block_job_txn_ref(txn);
}
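
/*
 * Illustrative transaction use (hypothetical caller, for exposition):
 * jobs created against the same BlockJobTxn complete or fail as a group,
 * e.g. two jobs that must either both commit or both abort:
 *
 *     BlockJobTxn *txn = block_job_txn_new();
 *     create_job_one(..., txn, ...);   hypothetical creation helpers
 *     create_job_two(..., txn, ...);
 *     block_job_txn_unref(txn);
 *
 * Each member job takes a reference to the txn, so it stays alive until
 * the last member job is concluded.
 */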
static void block_job_pause(BlockJob *job)
{
    job->pause_count++;
}

static void block_job_resume(BlockJob *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }
    block_job_enter(job);
}
void block_job_ref(BlockJob *job)
{
    ++job->refcnt;
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque);
static void block_job_detach_aio_context(void *opaque);
void block_job_unref(BlockJob *job)
{
    if (--job->refcnt == 0) {
        assert(job->status == BLOCK_JOB_STATUS_NULL);
        BlockDriverState *bs = blk_bs(job->blk);
        QLIST_REMOVE(job, job_list);
        bs->job = NULL;
        block_job_remove_all_bdrv(job);
        blk_remove_aio_context_notifier(job->blk,
                                        block_job_attached_aio_context,
                                        block_job_detach_aio_context, job);
        blk_unref(job->blk);
        error_free(job->blocker);
        g_free(job->id);
        assert(!timer_pending(&job->sleep_timer));
        g_free(job);
    }
}
static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;

    if (job->driver->attached_aio_context) {
        job->driver->attached_aio_context(job, new_context);
    }

    block_job_resume(job);
}
static void block_job_drain(BlockJob *job)
{
    /* If job is !job->busy this kicks it into the next pause point. */
    block_job_enter(job);

    blk_drain(job->blk);
    if (job->driver->drain) {
        job->driver->drain(job);
    }
}
static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    block_job_ref(job);

    block_job_pause(job);

    while (!job->paused && !job->completed) {
        block_job_drain(job);
    }

    block_job_unref(job);
}
static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'",
                           BlockJobType_str(job->driver->job_type),
                           job->id);
}
static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_pause(job);
}

static void child_job_drained_end(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_resume(job);
}
static const BdrvChildRole child_job = {
    .get_parent_desc    = child_job_get_parent_desc,
    .drained_begin      = child_job_drained_begin,
    .drained_end        = child_job_drained_end,
    .stay_at_node       = true,
};
void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;
    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}
int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
                               job, errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_ref(bs);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}
bool block_job_is_internal(BlockJob *job)
{
    return (job->id == NULL);
}

static bool block_job_started(BlockJob *job)
{
    return job->co;
}
/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn block_job_co_entry(void *opaque)
{
    BlockJob *job = opaque;

    assert(job && job->driver && job->driver->start);
    block_job_pause_point(job);
    job->driver->start(job);
}
static void block_job_sleep_timer_cb(void *opaque)
{
    BlockJob *job = opaque;

    block_job_enter(job);
}
void block_job_start(BlockJob *job)
{
    assert(job && !block_job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(block_job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    block_job_state_transition(job, BLOCK_JOB_STATUS_RUNNING);
    bdrv_coroutine_enter(blk_bs(job->blk), job->co);
}
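
/*
 * Illustrative driver-side shape (hypothetical driver, for exposition):
 * the .start callback entered above typically loops over the device,
 * throttling itself with block_job_sleep_ns() and finishing via
 * block_job_defer_to_main_loop():
 *
 *     static void coroutine_fn example_job_run(void *opaque)
 *     {
 *         ExampleJob *s = opaque;    works when BlockJob is the first field
 *
 *         while (!block_job_is_cancelled(&s->common) && !s->done) {
 *             process one chunk of I/O, advance s->common.offset
 *             block_job_sleep_ns(&s->common, delay_ns);
 *         }
 *         block_job_defer_to_main_loop(&s->common, example_job_complete, s);
 *     }
 */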
static void block_job_decommission(BlockJob *job)
{
    assert(job);
    job->completed = true;
    job->busy = false;
    job->paused = false;
    job->deferred_to_main_loop = true;
    block_job_state_transition(job, BLOCK_JOB_STATUS_NULL);
    block_job_unref(job);
}
static void block_job_do_dismiss(BlockJob *job)
{
    block_job_decommission(job);
}
static void block_job_conclude(BlockJob *job)
{
    block_job_state_transition(job, BLOCK_JOB_STATUS_CONCLUDED);
    if (job->auto_dismiss || !block_job_started(job)) {
        block_job_do_dismiss(job);
    }
}
static void block_job_update_rc(BlockJob *job)
{
    if (!job->ret && block_job_is_cancelled(job)) {
        job->ret = -ECANCELED;
    }
    if (job->ret) {
        block_job_state_transition(job, BLOCK_JOB_STATUS_ABORTING);
    }
}
static int block_job_prepare(BlockJob *job)
{
    if (job->ret == 0 && job->driver->prepare) {
        job->ret = job->driver->prepare(job);
    }
    return job->ret;
}
static void block_job_commit(BlockJob *job)
{
    assert(!job->ret);
    if (job->driver->commit) {
        job->driver->commit(job);
    }
}

static void block_job_abort(BlockJob *job)
{
    assert(job->ret);
    if (job->driver->abort) {
        job->driver->abort(job);
    }
}

static void block_job_clean(BlockJob *job)
{
    if (job->driver->clean) {
        job->driver->clean(job);
    }
}
static int block_job_completed_single(BlockJob *job)
{
    assert(job->completed);

    /* Ensure abort is called for late-transactional failures */
    block_job_update_rc(job);

    if (!job->ret) {
        block_job_commit(job);
    } else {
        block_job_abort(job);
    }
    block_job_clean(job);

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (block_job_started(job)) {
        if (block_job_is_cancelled(job)) {
            block_job_event_cancelled(job);
        } else {
            const char *msg = NULL;
            if (job->ret < 0) {
                msg = strerror(-job->ret);
            }
            block_job_event_completed(job, msg);
        }
    }

    QLIST_REMOVE(job, txn_list);
    block_job_txn_unref(job->txn);
    block_job_conclude(job);
    return 0;
}
static void block_job_cancel_async(BlockJob *job)
{
    if (job->iostatus != BLOCK_DEVICE_IO_STATUS_OK) {
        block_job_iostatus_reset(job);
    }
    if (job->user_paused) {
        /* Do not call block_job_enter here, the caller will handle it. */
        job->user_paused = false;
        job->pause_count--;
    }
    job->cancelled = true;
}
static int block_job_txn_apply(BlockJobTxn *txn, int fn(BlockJob *))
{
    AioContext *ctx;
    BlockJob *job, *next;
    int rc = 0;

    QLIST_FOREACH_SAFE(job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(job->blk);
        aio_context_acquire(ctx);
        rc = fn(job);
        aio_context_release(ctx);
        if (rc) {
            break;
        }
    }
    return rc;
}
static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    Error *local_err = NULL;
    int ret;

    assert(blk_bs(job->blk)->job == job);

    block_job_ref(job);

    if (finish) {
        finish(job, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        block_job_unref(job);
        return -EBUSY;
    }
    /* block_job_drain calls block_job_enter, and it should be enough to
     * induce progress until the job completes or moves to the main thread.
     */
    while (!job->deferred_to_main_loop && !job->completed) {
        block_job_drain(job);
    }
    while (!job->completed) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
    block_job_unref(job);
    return ret;
}
static void block_job_completed_txn_abort(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    block_job_txn_ref(txn);

    /* We are the first failed job. Cancel other jobs. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
    }

    /* Other jobs are effectively cancelled by us, set the status for
     * them; this job, however, may or may not be cancelled, depending
     * on the caller, so leave it. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job != job) {
            block_job_cancel_async(other_job);
        }
    }
    while (!QLIST_EMPTY(&txn->jobs)) {
        other_job = QLIST_FIRST(&txn->jobs);
        ctx = blk_get_aio_context(other_job->blk);
        if (!other_job->completed) {
            assert(other_job->cancelled);
            block_job_finish_sync(other_job, NULL, NULL);
        }
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }

    block_job_txn_unref(txn);
}
static void block_job_completed_txn_success(BlockJob *job)
{
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job;
    int rc = 0;

    block_job_state_transition(job, BLOCK_JOB_STATUS_WAITING);

    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!other_job->completed) {
            return;
        }
        assert(other_job->ret == 0);
    }

    /* Jobs may require some prep-work to complete without failure */
    rc = block_job_txn_apply(txn, block_job_prepare);
    if (rc) {
        block_job_completed_txn_abort(job);
        return;
    }

    /* We are the last completed job, commit the transaction. */
    block_job_txn_apply(txn, block_job_completed_single);
}
/* Assumes the block_job_mutex is held */
static bool block_job_timer_pending(BlockJob *job)
{
    return timer_pending(&job->sleep_timer);
}
void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;
    int64_t old_speed = job->speed;

    if (!job->driver->set_speed) {
        error_setg(errp, QERR_UNSUPPORTED);
        return;
    }
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_SET_SPEED, errp)) {
        return;
    }
    job->driver->set_speed(job, speed, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    job->speed = speed;
    if (speed && speed <= old_speed) {
        return;
    }

    /* kick only if a timer is pending */
    block_job_enter_cond(job, block_job_timer_pending);
}
void block_job_complete(BlockJob *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_COMPLETE, errp)) {
        return;
    }
    if (job->pause_count || job->cancelled || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}
void block_job_dismiss(BlockJob **jobptr, Error **errp)
{
    BlockJob *job = *jobptr;
    /* similarly to _complete, this is QMP-interface only. */
    assert(job->id);
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_DISMISS, errp)) {
        return;
    }

    block_job_do_dismiss(job);
    *jobptr = NULL;
}
void block_job_user_pause(BlockJob *job, Error **errp)
{
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_PAUSE, errp)) {
        return;
    }
    if (job->user_paused) {
        error_setg(errp, "Job is already paused");
        return;
    }
    job->user_paused = true;
    block_job_pause(job);
}
bool block_job_user_paused(BlockJob *job)
{
    return job->user_paused;
}
void block_job_user_resume(BlockJob *job, Error **errp)
{
    assert(job);
    if (!job->user_paused || job->pause_count <= 0) {
        error_setg(errp, "Can't resume a job that was not paused");
        return;
    }
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_RESUME, errp)) {
        return;
    }
    block_job_iostatus_reset(job);
    job->user_paused = false;
    block_job_resume(job);
}
void block_job_cancel(BlockJob *job)
{
    if (job->status == BLOCK_JOB_STATUS_CONCLUDED) {
        block_job_do_dismiss(job);
    } else if (block_job_started(job)) {
        block_job_cancel_async(job);
        block_job_enter(job);
    } else {
        block_job_completed(job, -ECANCELED);
    }
}
void block_job_user_cancel(BlockJob *job, Error **errp)
{
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_CANCEL, errp)) {
        return;
    }
    block_job_cancel(job);
}
/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
 * used with block_job_finish_sync() without the need for (rather nasty)
 * function pointer casts there. */
static void block_job_cancel_err(BlockJob *job, Error **errp)
{
    block_job_cancel(job);
}
int block_job_cancel_sync(BlockJob *job)
{
    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
}
void block_job_cancel_sync_all(void)
{
    BlockJob *job;
    AioContext *aio_context;

    while ((job = QLIST_FIRST(&block_jobs))) {
        aio_context = blk_get_aio_context(job->blk);
        aio_context_acquire(aio_context);
        block_job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}
int block_job_complete_sync(BlockJob *job, Error **errp)
{
    return block_job_finish_sync(job, &block_job_complete, errp);
}
BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type      = g_strdup(BlockJobType_str(job->driver->job_type));
    info->device    = g_strdup(job->id);
    info->len       = job->len;
    info->busy      = atomic_read(&job->busy);
    info->paused    = job->pause_count > 0;
    info->offset    = job->offset;
    info->speed     = job->speed;
    info->io_status = job->iostatus;
    info->ready     = job->ready;
    info->status    = job->status;
    return info;
}
static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
static void block_job_event_cancelled(BlockJob *job)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        &error_abort);
}
static void block_job_event_completed(BlockJob *job, const char *msg)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_completed(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        !!msg,
                                        msg,
                                        &error_abort);
}
/*
 * API for block job drivers and the block layer.  These functions are
 * declared in blockjob_int.h.
 */
void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       BlockJobTxn *txn, BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
        if (!*job_id) {
            error_setg(errp, "An explicit job ID is required for this node");
            return NULL;
        }
    }

    if (job_id) {
        if (flags & BLOCK_JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal block job");
            return NULL;
        }

        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }

        if (block_job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    }

    blk = blk_new(perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = g_malloc0(driver->instance_size);
    job->driver        = driver;
    job->id            = g_strdup(job_id);
    job->blk           = blk;
    job->cb            = cb;
    job->opaque        = opaque;
    job->busy          = false;
    job->paused        = true;
    job->pause_count   = 1;
    job->refcnt        = 1;
    job->auto_dismiss  = !(flags & BLOCK_JOB_MANUAL_DISMISS);
    block_job_state_transition(job, BLOCK_JOB_STATUS_CREATED);
    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
                   QEMU_CLOCK_REALTIME, SCALE_NS,
                   block_job_sleep_timer_cb, job);

    error_setg(&job->blocker, "block device is in use by block job: %s",
               BlockJobType_str(driver->job_type));
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    QLIST_INSERT_HEAD(&block_jobs, job, job_list);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            block_job_early_fail(job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }

    /* Single jobs are modeled as single-job transactions for sake of
     * consolidating the job management logic */
    if (!txn) {
        txn = block_job_txn_new();
        block_job_txn_add_job(txn, job);
        block_job_txn_unref(txn);
    } else {
        block_job_txn_add_job(txn, job);
    }

    return job;
}
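
/*
 * Typical creation sequence (hypothetical caller, for exposition): a
 * driver wraps block_job_create() and the caller then starts the job,
 * which drops the initial pause (pause_count == 1) and enters the
 * coroutine:
 *
 *     ExampleJob *s = block_job_create("job0", &example_job_driver, NULL,
 *                                      bs, perm, shared_perm, speed,
 *                                      BLOCK_JOB_DEFAULT, cb, opaque, errp);
 *     if (s) {
 *         block_job_start(&s->common);
 *     }
 */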
void block_job_pause_all(void)
{
    BlockJob *job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_ref(job);
        block_job_pause(job);
        aio_context_release(aio_context);
    }
}
void block_job_early_fail(BlockJob *job)
{
    assert(job->status == BLOCK_JOB_STATUS_CREATED);
    block_job_decommission(job);
}
void block_job_completed(BlockJob *job, int ret)
{
    assert(job && job->txn && !job->completed);
    assert(blk_bs(job->blk)->job == job);
    job->completed = true;
    job->ret = ret;
    block_job_update_rc(job);
    trace_block_job_completed(job, ret, job->ret);
    if (job->ret) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_completed_txn_success(job);
    }
}
static bool block_job_should_pause(BlockJob *job)
{
    return job->pause_count > 0;
}
/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
 * Reentering the job coroutine with block_job_enter() before the timer has
 * expired is allowed and cancels the timer.
 *
 * If @ns is (uint64_t) -1, no timer is scheduled and block_job_enter() must be
 * called explicitly. */
static void block_job_do_yield(BlockJob *job, uint64_t ns)
{
    block_job_lock();
    if (ns != -1) {
        timer_mod(&job->sleep_timer, ns);
    }
    job->busy = false;
    block_job_unlock();
    qemu_coroutine_yield();

    /* Set by block_job_enter before re-entering the coroutine.  */
    assert(job->busy);
}
void coroutine_fn block_job_pause_point(BlockJob *job)
{
    assert(job && block_job_started(job));

    if (!block_job_should_pause(job)) {
        return;
    }
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
        BlockJobStatus status = job->status;
        block_job_state_transition(job, status == BLOCK_JOB_STATUS_READY ? \
                                        BLOCK_JOB_STATUS_STANDBY :           \
                                        BLOCK_JOB_STATUS_PAUSED);
        job->paused = true;
        block_job_do_yield(job, -1);
        job->paused = false;
        block_job_state_transition(job, status);
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}
void block_job_resume_all(void)
{
    BlockJob *job, *next;

    QLIST_FOREACH_SAFE(job, &block_jobs, job_list, next) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        block_job_unref(job);
        aio_context_release(aio_context);
    }
}
/*
 * Conditionally enter a block_job pending a call to fn() while
 * under the block_job_lock critical section.
 */
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job))
{
    if (!block_job_started(job)) {
        return;
    }
    if (job->deferred_to_main_loop) {
        return;
    }

    block_job_lock();
    if (job->busy) {
        block_job_unlock();
        return;
    }

    if (fn && !fn(job)) {
        block_job_unlock();
        return;
    }

    assert(!job->deferred_to_main_loop);
    timer_del(&job->sleep_timer);
    job->busy = true;
    block_job_unlock();
    aio_co_wake(job->co);
}
void block_job_enter(BlockJob *job)
{
    block_job_enter_cond(job, NULL);
}

bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}
void block_job_sleep_ns(BlockJob *job, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
    }

    block_job_pause_point(job);
}
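
/*
 * Illustrative timing (exposition, not code): a job calling
 * block_job_sleep_ns(job, delay_ns) yields with job->busy == false and the
 * sleep timer armed.  Either the timer fires and block_job_sleep_timer_cb()
 * re-enters the coroutine, or block_job_enter() arrives first, deletes the
 * timer under block_job_mutex and wakes the coroutine early.  In both cases
 * job->busy is set back to true before the coroutine resumes, which is what
 * the assert in block_job_do_yield() checks.
 */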
void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, -1);
    }

    block_job_pause_point(job);
}
void block_job_iostatus_reset(BlockJob *job)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->user_paused && job->pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
void block_job_event_ready(BlockJob *job)
{
    block_job_state_transition(job, BLOCK_JOB_STATUS_READY);
    job->ready = true;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job->driver->job_type,
                                    job->id,
                                    job->len,
                                    job->offset,
                                    job->speed, &error_abort);
}
BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action, &error_abort);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        block_job_pause(job);
        /* make the pause user visible, which will be resumed from QMP. */
        job->user_paused = true;
        block_job_iostatus_set_err(job, error);
    }
    return action;
}
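
/*
 * Illustrative driver-side use (hypothetical, for exposition): on a failed
 * request a driver maps its configured on-error policy to an action and
 * reacts accordingly:
 *
 *     action = block_job_error_action(&s->common, s->on_error, is_read,
 *                                     -ret);
 *     if (action == BLOCK_ERROR_ACTION_REPORT) {
 *         record ret as the job's error and stop the loop
 *     }
 *
 * BLOCK_ERROR_ACTION_STOP leaves the job user-paused with its iostatus
 * set; a later block-job-resume retries the operation.
 */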
typedef struct {
    BlockJob *job;
    AioContext *aio_context;
    BlockJobDeferToMainLoopFn *fn;
    void *opaque;
} BlockJobDeferToMainLoopData;
static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;
    AioContext *aio_context;

    /* Prevent race with block_job_defer_to_main_loop() */
    aio_context_acquire(data->aio_context);

    /* Fetch BDS AioContext again, in case it has changed */
    aio_context = blk_get_aio_context(data->job->blk);
    if (aio_context != data->aio_context) {
        aio_context_acquire(aio_context);
    }

    data->fn(data->job, data->opaque);

    if (aio_context != data->aio_context) {
        aio_context_release(aio_context);
    }

    aio_context_release(data->aio_context);

    g_free(data);
}
void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->aio_context = blk_get_aio_context(job->blk);
    data->fn = fn;
    data->opaque = opaque;
    job->deferred_to_main_loop = true;

    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            block_job_defer_to_main_loop_bh, data);
}