/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/trace.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qerror.h"
#include "qemu/coroutine.h"
#include "qemu/id.h"
#include "qemu/timer.h"
/* Right now, this mutex is only needed to synchronize accesses to job->busy
 * and job->sleep_timer, such as concurrent calls to block_job_do_yield and
 * block_job_enter. */
static QemuMutex block_job_mutex;
/* BlockJob State Transition Table */
bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = {
                                          /* U, C, R, P, Y, S, X */
    /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0},
    /* C: */ [BLOCK_JOB_STATUS_CREATED]   = {0, 0, 1, 0, 0, 0, 1},
    /* R: */ [BLOCK_JOB_STATUS_RUNNING]   = {0, 0, 0, 1, 1, 0, 1},
    /* P: */ [BLOCK_JOB_STATUS_PAUSED]    = {0, 0, 1, 0, 0, 0, 0},
    /* Y: */ [BLOCK_JOB_STATUS_READY]     = {0, 0, 0, 0, 0, 1, 1},
    /* S: */ [BLOCK_JOB_STATUS_STANDBY]   = {0, 0, 0, 0, 1, 0, 0},
    /* X: */ [BLOCK_JOB_STATUS_ABORTING]  = {0, 0, 0, 0, 0, 0, 0},
};
bool BlockJobVerbTable[BLOCK_JOB_VERB__MAX][BLOCK_JOB_STATUS__MAX] = {
                                          /* U, C, R, P, Y, S, X */
    [BLOCK_JOB_VERB_CANCEL]               = {0, 1, 1, 1, 1, 1, 0},
    [BLOCK_JOB_VERB_PAUSE]                = {0, 1, 1, 1, 1, 1, 0},
    [BLOCK_JOB_VERB_RESUME]               = {0, 1, 1, 1, 1, 1, 0},
    [BLOCK_JOB_VERB_SET_SPEED]            = {0, 1, 1, 1, 1, 1, 0},
    [BLOCK_JOB_VERB_COMPLETE]             = {0, 0, 0, 0, 1, 0, 0},
};
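
/*
 * Illustrative reading of the two tables above (a sketch, not used by the
 * code): rows are indexed by the current state (or by the verb), columns by
 * the state being entered (or the state the verb is applied in).  For
 * example, BlockJobSTT[BLOCK_JOB_STATUS_CREATED][BLOCK_JOB_STATUS_RUNNING]
 * is 1, so a freshly created job may start running, while
 * BlockJobVerbTable[BLOCK_JOB_VERB_COMPLETE][BLOCK_JOB_STATUS_RUNNING] is 0,
 * so 'complete' is only accepted once the job has reached READY.
 */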
static void block_job_state_transition(BlockJob *job, BlockJobStatus s1)
{
    BlockJobStatus s0 = job->status;
    assert(s1 >= 0 && s1 <= BLOCK_JOB_STATUS__MAX);
    trace_block_job_state_transition(job, job->ret, BlockJobSTT[s0][s1] ?
                                     "allowed" : "disallowed",
                                     qapi_enum_lookup(&BlockJobStatus_lookup,
                                                      s0),
                                     qapi_enum_lookup(&BlockJobStatus_lookup,
                                                      s1));
    assert(BlockJobSTT[s0][s1]);
    job->status = s1;
}
static int block_job_apply_verb(BlockJob *job, BlockJobVerb bv, Error **errp)
{
    assert(bv >= 0 && bv <= BLOCK_JOB_VERB__MAX);
    trace_block_job_apply_verb(job, qapi_enum_lookup(&BlockJobStatus_lookup,
                                                     job->status),
                               qapi_enum_lookup(&BlockJobVerb_lookup, bv),
                               BlockJobVerbTable[bv][job->status] ?
                               "allowed" : "prohibited");
    if (BlockJobVerbTable[bv][job->status]) {
        return 0;
    }
    error_setg(errp, "Job '%s' in state '%s' cannot accept command verb '%s'",
               job->id, qapi_enum_lookup(&BlockJobStatus_lookup, job->status),
               qapi_enum_lookup(&BlockJobVerb_lookup, bv));
    return -EPERM;
}
static void block_job_lock(void)
{
    qemu_mutex_lock(&block_job_mutex);
}

static void block_job_unlock(void)
{
    qemu_mutex_unlock(&block_job_mutex);
}

static void __attribute__((__constructor__)) block_job_init(void)
{
    qemu_mutex_init(&block_job_mutex);
}
static void block_job_event_cancelled(BlockJob *job);
static void block_job_event_completed(BlockJob *job, const char *msg);
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));
/* Transactional group of block jobs */
struct BlockJobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    /* List of jobs */
    QLIST_HEAD(, BlockJob) jobs;

    /* Reference count */
    int refcnt;
};

static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);
/*
 * The block job API is composed of two categories of functions.
 *
 * The first includes functions used by the monitor.  The monitor is
 * peculiar in that it accesses the block job list with block_job_get, and
 * therefore needs consistency across block_job_get and the actual operation
 * (e.g. block_job_set_speed).  The consistency is achieved with
 * aio_context_acquire/release.  These functions are declared in blockjob.h.
 *
 * The second includes functions used by the block job drivers and sometimes
 * by the core block layer.  These do not care about locking, because the
 * whole coroutine runs under the AioContext lock, and are declared in
 * blockjob_int.h.
 */
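
/*
 * Illustrative monitor-side usage (a sketch under the conventions described
 * above, not code from this file; 'device', 'speed' and 'err' are
 * hypothetical locals):
 *
 *     Error *err = NULL;
 *     BlockJob *job = block_job_get(device);
 *     if (job) {
 *         AioContext *ctx = blk_get_aio_context(job->blk);
 *         aio_context_acquire(ctx);
 *         block_job_set_speed(job, speed, &err);
 *         aio_context_release(ctx);
 *     }
 */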
BlockJob *block_job_next(BlockJob *job)
{
    if (!job) {
        return QLIST_FIRST(&block_jobs);
    }
    return QLIST_NEXT(job, job_list);
}
BlockJob *block_job_get(const char *id)
{
    BlockJob *job;

    QLIST_FOREACH(job, &block_jobs, job_list) {
        if (job->id && !strcmp(id, job->id)) {
            return job;
        }
    }

    return NULL;
}
BlockJobTxn *block_job_txn_new(void)
{
    BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}
static void block_job_txn_ref(BlockJobTxn *txn)
{
    txn->refcnt++;
}
void block_job_txn_unref(BlockJobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}
void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    block_job_txn_ref(txn);
}
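
/*
 * Illustrative transaction setup (a sketch, not code from this file; job1 and
 * job2 are hypothetical jobs): the owner creates the transaction, adds each
 * job, then drops its own reference.  The member jobs keep the transaction
 * alive until the last of them completes.
 *
 *     BlockJobTxn *txn = block_job_txn_new();
 *     block_job_txn_add_job(txn, job1);
 *     block_job_txn_add_job(txn, job2);
 *     block_job_txn_unref(txn);
 */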
static void block_job_pause(BlockJob *job)
{
    job->pause_count++;
}

static void block_job_resume(BlockJob *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }
    block_job_enter(job);
}
void block_job_ref(BlockJob *job)
{
    ++job->refcnt;
}
static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque);
static void block_job_detach_aio_context(void *opaque);
void block_job_unref(BlockJob *job)
{
    if (--job->refcnt == 0) {
        BlockDriverState *bs = blk_bs(job->blk);
        QLIST_REMOVE(job, job_list);
        bs->job = NULL;
        block_job_remove_all_bdrv(job);
        blk_remove_aio_context_notifier(job->blk,
                                        block_job_attached_aio_context,
                                        block_job_detach_aio_context, job);
        blk_unref(job->blk);
        error_free(job->blocker);
        g_free(job->id);
        assert(!timer_pending(&job->sleep_timer));
        g_free(job);
    }
}
static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;

    if (job->driver->attached_aio_context) {
        job->driver->attached_aio_context(job, new_context);
    }

    block_job_resume(job);
}
static void block_job_drain(BlockJob *job)
{
    /* If job is !job->busy this kicks it into the next pause point. */
    block_job_enter(job);

    blk_drain(job->blk);
    if (job->driver->drain) {
        job->driver->drain(job);
    }
}
static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    block_job_ref(job);

    block_job_pause(job);

    while (!job->paused && !job->completed) {
        block_job_drain(job);
    }

    block_job_unref(job);
}
static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'",
                           BlockJobType_str(job->driver->job_type),
                           job->id);
}
static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_pause(job);
}

static void child_job_drained_end(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_resume(job);
}
static const BdrvChildRole child_job = {
    .get_parent_desc    = child_job_get_parent_desc,
    .drained_begin      = child_job_drained_begin,
    .drained_end        = child_job_drained_end,
    .stay_at_node       = true,
};
void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;
    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}
int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
                               job, errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_ref(bs);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}
bool block_job_is_internal(BlockJob *job)
{
    return (job->id == NULL);
}
static bool block_job_started(BlockJob *job)
{
    return job->co;
}
/*
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn block_job_co_entry(void *opaque)
{
    BlockJob *job = opaque;

    assert(job && job->driver && job->driver->start);
    block_job_pause_point(job);
    job->driver->start(job);
}
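
/*
 * Illustrative driver loop (a sketch of a hypothetical .start implementation,
 * not code from this file; do_one_chunk() and more_work() are made-up
 * helpers): a driver honours pause and cancel requests by calling
 * block_job_pause_point() at a safe point in every iteration.
 *
 *     static void coroutine_fn example_job_start(BlockJob *job)
 *     {
 *         while (!block_job_is_cancelled(job) && more_work(job)) {
 *             do_one_chunk(job);
 *             block_job_pause_point(job);
 *         }
 *         // completion is then deferred to the main loop, see the sketch
 *         // next to block_job_defer_to_main_loop() at the end of this file
 *     }
 */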
static void block_job_sleep_timer_cb(void *opaque)
{
    BlockJob *job = opaque;

    block_job_enter(job);
}
void block_job_start(BlockJob *job)
{
    assert(job && !block_job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(block_job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    block_job_state_transition(job, BLOCK_JOB_STATUS_RUNNING);
    bdrv_coroutine_enter(blk_bs(job->blk), job->co);
}
static void block_job_completed_single(BlockJob *job)
{
    assert(job->completed);

    if (job->ret || block_job_is_cancelled(job)) {
        block_job_state_transition(job, BLOCK_JOB_STATUS_ABORTING);
    }

    if (!job->ret) {
        if (job->driver->commit) {
            job->driver->commit(job);
        }
    } else {
        if (job->driver->abort) {
            job->driver->abort(job);
        }
    }
    if (job->driver->clean) {
        job->driver->clean(job);
    }

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (block_job_started(job)) {
        if (block_job_is_cancelled(job)) {
            block_job_event_cancelled(job);
        } else {
            const char *msg = NULL;
            if (job->ret < 0) {
                msg = strerror(-job->ret);
            }
            block_job_event_completed(job, msg);
        }
    }

    QLIST_REMOVE(job, txn_list);
    block_job_txn_unref(job->txn);
    block_job_unref(job);
}
static void block_job_cancel_async(BlockJob *job)
{
    if (job->iostatus != BLOCK_DEVICE_IO_STATUS_OK) {
        block_job_iostatus_reset(job);
    }
    if (job->user_paused) {
        /* Do not call block_job_enter here, the caller will handle it. */
        job->user_paused = false;
        job->pause_count--;
    }
    job->cancelled = true;
}
static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    Error *local_err = NULL;
    int ret;

    assert(blk_bs(job->blk)->job == job);

    block_job_ref(job);

    if (finish) {
        finish(job, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        block_job_unref(job);
        return -EBUSY;
    }
    /* block_job_drain calls block_job_enter, and it should be enough to
     * induce progress until the job completes or moves to the main thread.
     */
    while (!job->deferred_to_main_loop && !job->completed) {
        block_job_drain(job);
    }
    while (!job->completed) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
    block_job_unref(job);
    return ret;
}
static void block_job_completed_txn_abort(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    block_job_txn_ref(txn);

    /* We are the first failed job. Cancel other jobs. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
    }

    /* Other jobs are effectively cancelled by us, set the status for
     * them; this job, however, may or may not be cancelled, depending
     * on the caller, so leave it. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job != job) {
            block_job_cancel_async(other_job);
        }
    }
    while (!QLIST_EMPTY(&txn->jobs)) {
        other_job = QLIST_FIRST(&txn->jobs);
        ctx = blk_get_aio_context(other_job->blk);
        if (!other_job->completed) {
            assert(other_job->cancelled);
            block_job_finish_sync(other_job, NULL, NULL);
        }
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }

    block_job_txn_unref(txn);
}
static void block_job_completed_txn_success(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;
    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!other_job->completed) {
            return;
        }
    }
    /* We are the last completed job, commit the transaction. */
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
        assert(other_job->ret == 0);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}
/* Assumes the block_job_mutex is held */
static bool block_job_timer_pending(BlockJob *job)
{
    return timer_pending(&job->sleep_timer);
}
void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;
    int64_t old_speed = job->speed;

    if (!job->driver->set_speed) {
        error_setg(errp, QERR_UNSUPPORTED);
        return;
    }
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_SET_SPEED, errp)) {
        return;
    }

    job->driver->set_speed(job, speed, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    job->speed = speed;
    if (speed && speed <= old_speed) {
        return;
    }

    /* kick only if a timer is pending */
    block_job_enter_cond(job, block_job_timer_pending);
}
void block_job_complete(BlockJob *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_COMPLETE, errp)) {
        return;
    }
    if (job->pause_count || job->cancelled || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}
void block_job_user_pause(BlockJob *job, Error **errp)
{
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_PAUSE, errp)) {
        return;
    }
    if (job->user_paused) {
        error_setg(errp, "Job is already paused");
        return;
    }
    job->user_paused = true;
    block_job_pause(job);
}
bool block_job_user_paused(BlockJob *job)
{
    return job->user_paused;
}
void block_job_user_resume(BlockJob *job, Error **errp)
{
    assert(job);
    if (!job->user_paused || job->pause_count <= 0) {
        error_setg(errp, "Can't resume a job that was not paused");
        return;
    }
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_RESUME, errp)) {
        return;
    }
    block_job_iostatus_reset(job);
    job->user_paused = false;
    block_job_resume(job);
}
void block_job_cancel(BlockJob *job)
{
    if (block_job_started(job)) {
        block_job_cancel_async(job);
        block_job_enter(job);
    } else {
        block_job_completed(job, -ECANCELED);
    }
}
void block_job_user_cancel(BlockJob *job, Error **errp)
{
    if (block_job_apply_verb(job, BLOCK_JOB_VERB_CANCEL, errp)) {
        return;
    }
    block_job_cancel(job);
}
/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
 * used with block_job_finish_sync() without the need for (rather nasty)
 * function pointer casts there. */
static void block_job_cancel_err(BlockJob *job, Error **errp)
{
    block_job_cancel(job);
}
int block_job_cancel_sync(BlockJob *job)
{
    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
}
void block_job_cancel_sync_all(void)
{
    BlockJob *job;
    AioContext *aio_context;

    while ((job = QLIST_FIRST(&block_jobs))) {
        aio_context = blk_get_aio_context(job->blk);
        aio_context_acquire(aio_context);
        block_job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}
int block_job_complete_sync(BlockJob *job, Error **errp)
{
    return block_job_finish_sync(job, &block_job_complete, errp);
}
BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type      = g_strdup(BlockJobType_str(job->driver->job_type));
    info->device    = g_strdup(job->id);
    info->len       = job->len;
    info->busy      = atomic_read(&job->busy);
    info->paused    = job->pause_count > 0;
    info->offset    = job->offset;
    info->speed     = job->speed;
    info->io_status = job->iostatus;
    info->ready     = job->ready;
    info->status    = job->status;
    return info;
}
static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}
static void block_job_event_cancelled(BlockJob *job)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        &error_abort);
}
static void block_job_event_completed(BlockJob *job, const char *msg)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_completed(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        !!msg,
                                        msg,
                                        &error_abort);
}
/*
 * API for block job drivers and the block layer.  These functions are
 * declared in blockjob_int.h.
 */
void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       BlockJobTxn *txn, BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
        if (!*job_id) {
            error_setg(errp, "An explicit job ID is required for this node");
            return NULL;
        }
    }

    if (job_id) {
        if (flags & BLOCK_JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal block job");
            return NULL;
        }

        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }

        if (block_job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    }

    blk = blk_new(perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = g_malloc0(driver->instance_size);
    job->driver        = driver;
    job->id            = g_strdup(job_id);
    job->blk           = blk;
    job->cb            = cb;
    job->opaque        = opaque;
    job->busy          = false;
    job->paused        = true;
    job->pause_count   = 1;
    job->refcnt        = 1;
    block_job_state_transition(job, BLOCK_JOB_STATUS_CREATED);
    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
                   QEMU_CLOCK_REALTIME, SCALE_NS,
                   block_job_sleep_timer_cb, job);

    error_setg(&job->blocker, "block device is in use by block job: %s",
               BlockJobType_str(driver->job_type));
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    QLIST_INSERT_HEAD(&block_jobs, job, job_list);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            block_job_unref(job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }

    /* Single jobs are modeled as single-job transactions for sake of
     * consolidating the job management logic */
    if (!txn) {
        txn = block_job_txn_new();
        block_job_txn_add_job(txn, job);
        block_job_txn_unref(txn);
    } else {
        block_job_txn_add_job(txn, job);
    }

    return job;
}
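
/*
 * Illustrative creation path (a sketch, not code from this file;
 * ExampleBlockJob and example_job_driver are hypothetical): a driver's public
 * entry point typically allocates the job with block_job_create() and later
 * kicks it off with block_job_start().
 *
 *     ExampleBlockJob *s;
 *
 *     s = block_job_create(job_id, &example_job_driver, txn, bs,
 *                          BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL,
 *                          speed, BLOCK_JOB_DEFAULT, cb, opaque, errp);
 *     if (!s) {
 *         return;
 *     }
 *     // ... fill in driver-specific fields ...
 *     block_job_start(&s->common);
 */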
void block_job_pause_all(void)
{
    BlockJob *job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_ref(job);
        block_job_pause(job);
        aio_context_release(aio_context);
    }
}
void block_job_early_fail(BlockJob *job)
{
    block_job_unref(job);
}
void block_job_completed(BlockJob *job, int ret)
{
    assert(job && job->txn && !job->completed);
    assert(blk_bs(job->blk)->job == job);
    job->completed = true;
    job->ret = ret;
    if (ret < 0 || block_job_is_cancelled(job)) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_completed_txn_success(job);
    }
}
static bool block_job_should_pause(BlockJob *job)
{
    return job->pause_count > 0;
}
/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
 * Reentering the job coroutine with block_job_enter() before the timer has
 * expired is allowed and cancels the timer.
 *
 * If @ns is (uint64_t) -1, no timer is scheduled and block_job_enter() must be
 * called explicitly. */
static void block_job_do_yield(BlockJob *job, uint64_t ns)
{
    block_job_lock();
    if (ns != -1) {
        timer_mod(&job->sleep_timer, ns);
    }
    job->busy = false;
    block_job_unlock();
    qemu_coroutine_yield();

    /* Set by block_job_enter before re-entering the coroutine.  */
    assert(job->busy);
}
void coroutine_fn block_job_pause_point(BlockJob *job)
{
    assert(job && block_job_started(job));

    if (!block_job_should_pause(job)) {
        return;
    }
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
        BlockJobStatus status = job->status;
        block_job_state_transition(job, status == BLOCK_JOB_STATUS_READY ? \
                                   BLOCK_JOB_STATUS_STANDBY :           \
                                   BLOCK_JOB_STATUS_PAUSED);
        job->paused = true;
        block_job_do_yield(job, -1);
        job->paused = false;
        block_job_state_transition(job, status);
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}
void block_job_resume_all(void)
{
    BlockJob *job, *next;

    QLIST_FOREACH_SAFE(job, &block_jobs, job_list, next) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        block_job_unref(job);
        aio_context_release(aio_context);
    }
}
/*
 * Conditionally enter a block_job pending a call to fn() while
 * under the block_job_lock critical section.
 */
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job))
{
    if (!block_job_started(job)) {
        return;
    }
    if (job->deferred_to_main_loop) {
        return;
    }

    block_job_lock();
    if (job->busy) {
        block_job_unlock();
        return;
    }

    if (fn && !fn(job)) {
        block_job_unlock();
        return;
    }

    assert(!job->deferred_to_main_loop);
    timer_del(&job->sleep_timer);
    job->busy = true;
    block_job_unlock();
    aio_co_wake(job->co);
}
void block_job_enter(BlockJob *job)
{
    block_job_enter_cond(job, NULL);
}
bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}
void block_job_sleep_ns(BlockJob *job, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
    }

    block_job_pause_point(job);
}
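
/*
 * Illustrative throttling (a sketch, not code from this file; compute_delay()
 * is a made-up helper): a rate-limited driver sleeps between chunks so that
 * pause and cancel requests are serviced while it waits.
 *
 *     int64_t delay_ns = compute_delay(job);
 *     block_job_sleep_ns(job, delay_ns);
 *     if (block_job_is_cancelled(job)) {
 *         return;
 *     }
 */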
void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, -1);
    }

    block_job_pause_point(job);
}
void block_job_iostatus_reset(BlockJob *job)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->user_paused && job->pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
void block_job_event_ready(BlockJob *job)
{
    block_job_state_transition(job, BLOCK_JOB_STATUS_READY);
    job->ready = true;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job->driver->job_type,
                                    job->id,
                                    job->len,
                                    job->offset,
                                    job->speed, &error_abort);
}
BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action, &error_abort);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        block_job_pause(job);
        /* make the pause user visible, which will be resumed from QMP. */
        job->user_paused = true;
        block_job_iostatus_set_err(job, error);
    }
    return action;
}
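
/*
 * Illustrative error handling (a sketch, not code from this file): on an I/O
 * error a driver asks block_job_error_action() what to do and only fails the
 * job when the answer is to report the error.
 *
 *     BlockErrorAction action = block_job_error_action(job, on_error,
 *                                                       is_read, -ret);
 *     if (action == BLOCK_ERROR_ACTION_REPORT) {
 *         return ret;
 *     }
 *     // STOP has already paused the job; IGNORE lets the driver retry.
 */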
typedef struct {
    BlockJob *job;
    AioContext *aio_context;
    BlockJobDeferToMainLoopFn *fn;
    void *opaque;
} BlockJobDeferToMainLoopData;

static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;
    AioContext *aio_context;

    /* Prevent race with block_job_defer_to_main_loop() */
    aio_context_acquire(data->aio_context);

    /* Fetch BDS AioContext again, in case it has changed */
    aio_context = blk_get_aio_context(data->job->blk);
    if (aio_context != data->aio_context) {
        aio_context_acquire(aio_context);
    }

    data->fn(data->job, data->opaque);

    if (aio_context != data->aio_context) {
        aio_context_release(aio_context);
    }

    aio_context_release(data->aio_context);

    g_free(data);
}
void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->aio_context = blk_get_aio_context(job->blk);
    data->fn = fn;
    data->opaque = opaque;
    job->deferred_to_main_loop = true;

    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            block_job_defer_to_main_loop_bh, data);
}
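
/*
 * Illustrative completion path (a sketch, not code from this file;
 * example_complete() and the 'ret' payload are hypothetical): a job coroutine
 * finishes by bouncing into the main loop, where the callback may safely call
 * block_job_completed() and other graph-modifying functions.
 *
 *     static void example_complete(BlockJob *job, void *opaque)
 *     {
 *         int ret = *(int *)opaque;
 *         block_job_completed(job, ret);
 *     }
 *
 *     // at the end of the driver's .start coroutine:
 *     block_job_defer_to_main_loop(job, example_complete, &ret);
 */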