/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/trace.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qerror.h"
#include "qemu/coroutine.h"
#include "qemu/timer.h"

/* Right now, this mutex is only needed to synchronize accesses to job->busy
 * and job->sleep_timer, such as concurrent calls to block_job_do_yield and
 * block_job_enter. */
static QemuMutex block_job_mutex;

/* BlockJob State Transition Table */
bool BlockJobSTT[BLOCK_JOB_STATUS__MAX][BLOCK_JOB_STATUS__MAX] = {
                                          /* U, C, R, P, Y, S */
    /* U: */ [BLOCK_JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0},
    /* C: */ [BLOCK_JOB_STATUS_CREATED]   = {0, 0, 1, 0, 0, 0},
    /* R: */ [BLOCK_JOB_STATUS_RUNNING]   = {0, 0, 0, 1, 1, 0},
    /* P: */ [BLOCK_JOB_STATUS_PAUSED]    = {0, 0, 1, 0, 0, 0},
    /* Y: */ [BLOCK_JOB_STATUS_READY]     = {0, 0, 0, 0, 0, 1},
    /* S: */ [BLOCK_JOB_STATUS_STANDBY]   = {0, 0, 0, 0, 1, 0},
};
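
/*
 * Each row lists the states a job in that row's state may legally move to:
 * e.g. RUNNING may become PAUSED or READY, but can never return to CREATED.
 * block_job_state_transition() below consults this table before every
 * transition, so an illegal move trips its assertion:
 *
 *     assert(BlockJobSTT[s0][s1]);
 */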

static void block_job_state_transition(BlockJob *job, BlockJobStatus s1)
{
    BlockJobStatus s0 = job->status;
    assert(s1 >= 0 && s1 < BLOCK_JOB_STATUS__MAX);
    trace_block_job_state_transition(job, job->ret, BlockJobSTT[s0][s1] ?
                                     "allowed" : "disallowed",
                                     qapi_enum_lookup(&BlockJobStatus_lookup,
                                                      s0),
                                     qapi_enum_lookup(&BlockJobStatus_lookup,
                                                      s1));
    assert(BlockJobSTT[s0][s1]);
    job->status = s1;
}

static void block_job_lock(void)
{
    qemu_mutex_lock(&block_job_mutex);
}

static void block_job_unlock(void)
{
    qemu_mutex_unlock(&block_job_mutex);
}

static void __attribute__((__constructor__)) block_job_init(void)
{
    qemu_mutex_init(&block_job_mutex);
}

static void block_job_event_cancelled(BlockJob *job);
static void block_job_event_completed(BlockJob *job, const char *msg);
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));

/* Transactional group of block jobs */
struct BlockJobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    /* List of jobs */
    QLIST_HEAD(, BlockJob) jobs;

    /* Reference count */
    int refcnt;
};

static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);

/*
 * The block job API is composed of two categories of functions.
 *
 * The first includes functions used by the monitor.  The monitor is
 * peculiar in that it accesses the block job list with block_job_get, and
 * therefore needs consistency across block_job_get and the actual operation
 * (e.g. block_job_set_speed).  The consistency is achieved with
 * aio_context_acquire/release.  These functions are declared in blockjob.h.
 *
 * The second includes functions used by the block job drivers and sometimes
 * by the core block layer.  These do not care about locking, because the
 * whole coroutine runs under the AioContext lock, and are declared in
 * blockjob_int.h.
 */
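
/*
 * Illustrative sketch only (not part of this file): a monitor-side caller is
 * expected to bracket the lookup and the operation with the AioContext lock.
 * "id", "speed" and "errp" are hypothetical locals:
 *
 *     BlockJob *job = block_job_get(id);
 *     if (job) {
 *         AioContext *ctx = blk_get_aio_context(job->blk);
 *         aio_context_acquire(ctx);
 *         block_job_set_speed(job, speed, errp);
 *         aio_context_release(ctx);
 *     }
 */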

BlockJob *block_job_next(BlockJob *job)
{
    if (!job) {
        return QLIST_FIRST(&block_jobs);
    }
    return QLIST_NEXT(job, job_list);
}

BlockJob *block_job_get(const char *id)
{
    BlockJob *job;

    QLIST_FOREACH(job, &block_jobs, job_list) {
        if (job->id && !strcmp(id, job->id)) {
            return job;
        }
    }

    return NULL;
}

BlockJobTxn *block_job_txn_new(void)
{
    BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}

static void block_job_txn_ref(BlockJobTxn *txn)
{
    txn->refcnt++;
}

void block_job_txn_unref(BlockJobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}

void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    block_job_txn_ref(txn);
}
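
/*
 * Illustrative sketch only: callers that want several jobs to complete or
 * abort as a unit create one transaction and add each job to it ("job1" and
 * "job2" are hypothetical):
 *
 *     BlockJobTxn *txn = block_job_txn_new();
 *     block_job_txn_add_job(txn, job1);
 *     block_job_txn_add_job(txn, job2);
 *     block_job_txn_unref(txn);
 *
 * Every job added takes its own reference, so dropping the creator's
 * reference here still keeps the transaction alive until the last job in it
 * has completed.
 */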

static void block_job_pause(BlockJob *job)
{
    job->pause_count++;
}

static void block_job_resume(BlockJob *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }
    block_job_enter(job);
}

void block_job_ref(BlockJob *job)
{
    ++job->refcnt;
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque);
static void block_job_detach_aio_context(void *opaque);

void block_job_unref(BlockJob *job)
{
    if (--job->refcnt == 0) {
        BlockDriverState *bs = blk_bs(job->blk);
        QLIST_REMOVE(job, job_list);
        bs->job = NULL;
        block_job_remove_all_bdrv(job);
        blk_remove_aio_context_notifier(job->blk,
                                        block_job_attached_aio_context,
                                        block_job_detach_aio_context, job);
        blk_unref(job->blk);
        error_free(job->blocker);
        g_free(job->id);
        assert(!timer_pending(&job->sleep_timer));
        g_free(job);
    }
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;

    if (job->driver->attached_aio_context) {
        job->driver->attached_aio_context(job, new_context);
    }

    block_job_resume(job);
}

static void block_job_drain(BlockJob *job)
{
    /* If the job is not busy, this kicks it into its next pause point. */
    block_job_enter(job);

    blk_drain(job->blk);
    if (job->driver->drain) {
        job->driver->drain(job);
    }
}

static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    block_job_ref(job);

    block_job_pause(job);

    while (!job->paused && !job->completed) {
        block_job_drain(job);
    }

    block_job_unref(job);
}

static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'",
                           BlockJobType_str(job->driver->job_type),
                           job->id);
}

static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_pause(job);
}

static void child_job_drained_end(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_resume(job);
}

static const BdrvChildRole child_job = {
    .get_parent_desc    = child_job_get_parent_desc,
    .drained_begin      = child_job_drained_begin,
    .drained_end        = child_job_drained_end,
    .stay_at_node       = true,
};

void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;

    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}

int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
                               job, errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}

bool block_job_is_internal(BlockJob *job)
{
    return (job->id == NULL);
}

static bool block_job_started(BlockJob *job)
{
    return job->co;
}

/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn block_job_co_entry(void *opaque)
{
    BlockJob *job = opaque;

    assert(job && job->driver && job->driver->start);
    block_job_pause_point(job);
    job->driver->start(job);
}
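
/*
 * Illustrative sketch only: the shape of a hypothetical driver's .start
 * coroutine.  "hypothetical_job_run", "HypotheticalJob", "do_one_chunk" and
 * the fields of "s" are made-up names:
 *
 *     static void coroutine_fn hypothetical_job_run(BlockJob *job)
 *     {
 *         HypotheticalJob *s = container_of(job, HypotheticalJob, common);
 *         int ret = 0;
 *
 *         while (!block_job_is_cancelled(job) && !s->done) {
 *             ret = do_one_chunk(s);
 *             if (ret < 0) {
 *                 break;
 *             }
 *             block_job_sleep_ns(job, s->delay_ns);
 *         }
 *         block_job_defer_to_main_loop(job, hypothetical_complete, s);
 *     }
 *
 * block_job_sleep_ns() doubles as a pause point, which is why drivers are
 * expected to call it (or block_job_pause_point()) between chunks of work.
 */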

static void block_job_sleep_timer_cb(void *opaque)
{
    BlockJob *job = opaque;

    block_job_enter(job);
}

void block_job_start(BlockJob *job)
{
    assert(job && !block_job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(block_job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    block_job_state_transition(job, BLOCK_JOB_STATUS_RUNNING);
    bdrv_coroutine_enter(blk_bs(job->blk), job->co);
}

static void block_job_completed_single(BlockJob *job)
{
    assert(job->completed);

    if (!job->ret) {
        if (job->driver->commit) {
            job->driver->commit(job);
        }
    } else {
        if (job->driver->abort) {
            job->driver->abort(job);
        }
    }
    if (job->driver->clean) {
        job->driver->clean(job);
    }

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (block_job_started(job)) {
        if (block_job_is_cancelled(job)) {
            block_job_event_cancelled(job);
        } else {
            const char *msg = NULL;
            if (job->ret < 0) {
                msg = strerror(-job->ret);
            }
            block_job_event_completed(job, msg);
        }
    }

    QLIST_REMOVE(job, txn_list);
    block_job_txn_unref(job->txn);
    block_job_unref(job);
}

static void block_job_cancel_async(BlockJob *job)
{
    if (job->iostatus != BLOCK_DEVICE_IO_STATUS_OK) {
        block_job_iostatus_reset(job);
    }
    if (job->user_paused) {
        /* Do not call block_job_enter here, the caller will handle it. */
        job->user_paused = false;
        job->pause_count--;
    }
    job->cancelled = true;
}

static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    Error *local_err = NULL;
    int ret;

    assert(blk_bs(job->blk)->job == job);

    block_job_ref(job);

    if (finish) {
        finish(job, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        block_job_unref(job);
        return -EBUSY;
    }
    /* block_job_drain calls block_job_enter, and it should be enough to
     * induce progress until the job completes or moves to the main thread. */
    while (!job->deferred_to_main_loop && !job->completed) {
        block_job_drain(job);
    }
    while (!job->completed) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
    block_job_unref(job);
    return ret;
}

static void block_job_completed_txn_abort(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    block_job_txn_ref(txn);

    /* We are the first failed job. Cancel other jobs. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
    }

    /* Other jobs are effectively cancelled by us, set the status for
     * them; this job, however, may or may not be cancelled, depending
     * on the caller, so leave it. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job != job) {
            block_job_cancel_async(other_job);
        }
    }

    while (!QLIST_EMPTY(&txn->jobs)) {
        other_job = QLIST_FIRST(&txn->jobs);
        ctx = blk_get_aio_context(other_job->blk);
        if (!other_job->completed) {
            assert(other_job->cancelled);
            block_job_finish_sync(other_job, NULL, NULL);
        }
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }

    block_job_txn_unref(txn);
}

static void block_job_completed_txn_success(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;

    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!other_job->completed) {
            return;
        }
    }

    /* We are the last completed job, commit the transaction. */
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
        assert(other_job->ret == 0);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}

/* Assumes the block_job_mutex is held */
static bool block_job_timer_pending(BlockJob *job)
{
    return timer_pending(&job->sleep_timer);
}

void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;
    int64_t old_speed = job->speed;

    if (!job->driver->set_speed) {
        error_setg(errp, QERR_UNSUPPORTED);
        return;
    }
    job->driver->set_speed(job, speed, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    job->speed = speed;
    if (speed && speed <= old_speed) {
        return;
    }

    /* kick only if a timer is pending */
    block_job_enter_cond(job, block_job_timer_pending);
}

void block_job_complete(BlockJob *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (job->pause_count || job->cancelled ||
        !block_job_started(job) || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}

void block_job_user_pause(BlockJob *job)
{
    job->user_paused = true;
    block_job_pause(job);
}

bool block_job_user_paused(BlockJob *job)
{
    return job->user_paused;
}

void block_job_user_resume(BlockJob *job)
{
    if (job && job->user_paused && job->pause_count > 0) {
        block_job_iostatus_reset(job);
        job->user_paused = false;
        block_job_resume(job);
    }
}

void block_job_cancel(BlockJob *job)
{
    if (block_job_started(job)) {
        block_job_cancel_async(job);
        block_job_enter(job);
    } else {
        block_job_completed(job, -ECANCELED);
    }
}

/* A wrapper around block_job_cancel() taking an Error ** parameter so it may
 * be used with block_job_finish_sync() without the need for (rather nasty)
 * function pointer casts there. */
static void block_job_cancel_err(BlockJob *job, Error **errp)
{
    block_job_cancel(job);
}

int block_job_cancel_sync(BlockJob *job)
{
    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
}

void block_job_cancel_sync_all(void)
{
    BlockJob *job;
    AioContext *aio_context;

    while ((job = QLIST_FIRST(&block_jobs))) {
        aio_context = blk_get_aio_context(job->blk);
        aio_context_acquire(aio_context);
        block_job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}

int block_job_complete_sync(BlockJob *job, Error **errp)
{
    return block_job_finish_sync(job, &block_job_complete, errp);
}

BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type      = g_strdup(BlockJobType_str(job->driver->job_type));
    info->device    = g_strdup(job->id);
    info->len       = job->len;
    info->busy      = atomic_read(&job->busy);
    info->paused    = job->pause_count > 0;
    info->offset    = job->offset;
    info->speed     = job->speed;
    info->io_status = job->iostatus;
    info->ready     = job->ready;
    info->status    = job->status;
    return info;
}

static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

static void block_job_event_cancelled(BlockJob *job)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        &error_abort);
}

static void block_job_event_completed(BlockJob *job, const char *msg)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_completed(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        !!msg,
                                        msg,
                                        &error_abort);
}
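
/*
 * Illustrative sketch only: the QMP event a management client would observe
 * for a completed job (the field values here are made up):
 *
 *     {"event": "BLOCK_JOB_COMPLETED",
 *      "data": {"type": "stream", "device": "job0",
 *               "len": 1073741824, "offset": 1073741824, "speed": 0},
 *      "timestamp": {"seconds": 1519995894, "microseconds": 461741}}
 *
 * On failure, an additional "error" member carries the strerror() text
 * passed in as @msg above.
 */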

/*
 * API for block job drivers and the block layer.  These functions are
 * declared in blockjob_int.h.
 */

void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       BlockJobTxn *txn, BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
        if (!*job_id) {
            error_setg(errp, "An explicit job ID is required for this node");
            return NULL;
        }
    }

    if (job_id) {
        if (flags & BLOCK_JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal block job");
            return NULL;
        }

        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }

        if (block_job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    }

    blk = blk_new(perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = g_malloc0(driver->instance_size);
    job->driver        = driver;
    job->id            = g_strdup(job_id);
    job->blk           = blk;
    job->cb            = cb;
    job->opaque        = opaque;
    job->busy          = false;
    job->paused        = true;
    job->pause_count   = 1;
    job->refcnt        = 1;
    block_job_state_transition(job, BLOCK_JOB_STATUS_CREATED);
    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
                   QEMU_CLOCK_REALTIME, SCALE_NS,
                   block_job_sleep_timer_cb, job);

    error_setg(&job->blocker, "block device is in use by block job: %s",
               BlockJobType_str(driver->job_type));
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    QLIST_INSERT_HEAD(&block_jobs, job, job_list);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            block_job_unref(job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }

    /* Single jobs are modeled as single-job transactions for the sake of
     * consolidating the job management logic */
    if (!txn) {
        txn = block_job_txn_new();
        block_job_txn_add_job(txn, job);
        block_job_txn_unref(txn);
    } else {
        block_job_txn_add_job(txn, job);
    }

    return job;
}
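
/*
 * Illustrative sketch only: how a hypothetical driver front-end would use
 * this.  The job comes back in the CREATED state with pause_count == 1, so
 * it does nothing until block_job_start() moves it to RUNNING
 * ("HypotheticalJob" and "hypothetical_job_driver" are made-up names):
 *
 *     HypotheticalJob *s;
 *
 *     s = block_job_create("job0", &hypothetical_job_driver, NULL, bs,
 *                          0, BLK_PERM_ALL, 0, BLOCK_JOB_DEFAULT,
 *                          NULL, NULL, errp);
 *     if (s) {
 *         block_job_start(&s->common);
 *     }
 */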

void block_job_pause_all(void)
{
    BlockJob *job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_ref(job);
        block_job_pause(job);
        aio_context_release(aio_context);
    }
}

void block_job_early_fail(BlockJob *job)
{
    block_job_unref(job);
}

void block_job_completed(BlockJob *job, int ret)
{
    assert(job && job->txn && !job->completed);
    assert(blk_bs(job->blk)->job == job);
    job->completed = true;
    job->ret = ret;
    if (ret < 0 || block_job_is_cancelled(job)) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_completed_txn_success(job);
    }
}

static bool block_job_should_pause(BlockJob *job)
{
    return job->pause_count > 0;
}

/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
 * Reentering the job coroutine with block_job_enter() before the timer has
 * expired is allowed and cancels the timer.
 *
 * If @ns is (uint64_t) -1, no timer is scheduled and block_job_enter() must be
 * called explicitly. */
static void block_job_do_yield(BlockJob *job, uint64_t ns)
{
    block_job_lock();
    if (ns != -1) {
        timer_mod(&job->sleep_timer, ns);
    }
    job->busy = false;
    block_job_unlock();
    qemu_coroutine_yield();

    /* Set by block_job_enter before re-entering the coroutine.  */
    assert(job->busy);
}
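
/*
 * Illustrative note: a sleeping job can be woken early, so for a driver
 *
 *     block_job_sleep_ns(job, 100000000);
 *
 * the 100 ms is an upper bound, not a guarantee: the coroutine also resumes
 * when someone calls block_job_enter() (e.g. block_job_set_speed() kicking a
 * throttled job, or a drain request).  Either path sets job->busy back to
 * true before the coroutine runs again, which is what the assertion after
 * qemu_coroutine_yield() above checks.
 */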

void coroutine_fn block_job_pause_point(BlockJob *job)
{
    assert(job && block_job_started(job));

    if (!block_job_should_pause(job)) {
        return;
    }
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
        BlockJobStatus status = job->status;
        block_job_state_transition(job, status == BLOCK_JOB_STATUS_READY ?
                                        BLOCK_JOB_STATUS_STANDBY :
                                        BLOCK_JOB_STATUS_PAUSED);
        job->paused = true;
        block_job_do_yield(job, -1);
        job->paused = false;
        block_job_state_transition(job, status);
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}

void block_job_resume_all(void)
{
    BlockJob *job, *next;

    QLIST_FOREACH_SAFE(job, &block_jobs, job_list, next) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        block_job_unref(job);
        aio_context_release(aio_context);
    }
}

/*
 * Conditionally enter the job coroutine: the job is entered only if fn()
 * returns true.  fn() is evaluated inside the block_job_lock critical
 * section, so it may safely inspect fields such as job->busy and
 * job->sleep_timer.
 */
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job))
{
    if (!block_job_started(job)) {
        return;
    }
    if (job->deferred_to_main_loop) {
        return;
    }

    block_job_lock();
    if (job->busy) {
        block_job_unlock();
        return;
    }

    if (fn && !fn(job)) {
        block_job_unlock();
        return;
    }

    assert(!job->deferred_to_main_loop);
    timer_del(&job->sleep_timer);
    job->busy = true;
    block_job_unlock();
    aio_co_wake(job->co);
}

void block_job_enter(BlockJob *job)
{
    block_job_enter_cond(job, NULL);
}

bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}

void block_job_sleep_ns(BlockJob *job, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
    }

    block_job_pause_point(job);
}

void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, -1);
    }

    block_job_pause_point(job);
}

void block_job_iostatus_reset(BlockJob *job)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->user_paused && job->pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

void block_job_event_ready(BlockJob *job)
{
    block_job_state_transition(job, BLOCK_JOB_STATUS_READY);
    job->ready = true;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job->driver->job_type,
                                    job->id,
                                    job->len,
                                    job->offset,
                                    job->speed, &error_abort);
}

BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action, &error_abort);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* Make the pause user-visible; it will be resumed from QMP. */
        block_job_user_pause(job);
        block_job_iostatus_set_err(job, error);
    }
    return action;
}
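
/*
 * Illustrative sketch only: how a driver's I/O loop typically consumes the
 * returned action ("s", "on_error" and "ret" are hypothetical):
 *
 *     if (ret < 0) {
 *         BlockErrorAction action =
 *             block_job_error_action(&s->common, on_error, is_read, -ret);
 *         if (action == BLOCK_ERROR_ACTION_REPORT) {
 *             return ret;
 *         }
 *     }
 *
 * With BLOCK_ERROR_ACTION_IGNORE the driver retries or skips the request;
 * with BLOCK_ERROR_ACTION_STOP the job has already been user-paused above
 * and will sit at its next pause point until resumed from QMP.
 */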

typedef struct {
    BlockJob *job;
    AioContext *aio_context;
    BlockJobDeferToMainLoopFn *fn;
    void *opaque;
} BlockJobDeferToMainLoopData;

static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;
    AioContext *aio_context;

    /* Prevent race with block_job_defer_to_main_loop() */
    aio_context_acquire(data->aio_context);

    /* Fetch BDS AioContext again, in case it has changed */
    aio_context = blk_get_aio_context(data->job->blk);
    if (aio_context != data->aio_context) {
        aio_context_acquire(aio_context);
    }

    data->fn(data->job, data->opaque);

    if (aio_context != data->aio_context) {
        aio_context_release(aio_context);
    }

    aio_context_release(data->aio_context);

    g_free(data);
}

void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->aio_context = blk_get_aio_context(job->blk);
    data->fn = fn;
    data->opaque = opaque;
    job->deferred_to_main_loop = true;

    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            block_job_defer_to_main_loop_bh, data);
}
);