/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qjson.h"
#include "qemu/coroutine.h"
#include "qemu/id.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"

static void block_job_event_cancelled(BlockJob *job);
static void block_job_event_completed(BlockJob *job, const char *msg);

/* Transactional group of block jobs */
struct BlockJobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    /* List of jobs */
    QLIST_HEAD(, BlockJob) jobs;

    /* Reference count */
    int refcnt;
};

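/*
 * Usage sketch (illustrative only, not compiled): a caller such as
 * qmp_transaction() groups several jobs so that they either all commit
 * or all abort together.  create_one_job() is a hypothetical helper
 * standing in for a real job front-end (e.g. backup_job_create()) that
 * is assumed to pass the transaction on to block_job_txn_add_job().
 *
 *     BlockJobTxn *txn = block_job_txn_new();
 *     BlockJob *job1 = create_one_job(bs1, txn, errp);
 *     BlockJob *job2 = create_one_job(bs2, txn, errp);
 *
 *     // Drop the caller's reference; the jobs keep the transaction
 *     // alive until the last of them completes, see
 *     // block_job_completed_single().
 *     block_job_txn_unref(txn);
 */
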
static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);

/*
 * The block job API is composed of two categories of functions.
 *
 * The first includes functions used by the monitor. The monitor is
 * peculiar in that it accesses the block job list with block_job_get, and
 * therefore needs consistency across block_job_get and the actual operation
 * (e.g. block_job_set_speed). The consistency is achieved with
 * aio_context_acquire/release. These functions are declared in blockjob.h.
 *
 * The second includes functions used by the block job drivers and sometimes
 * by the core block layer. These do not care about locking, because the
 * whole coroutine runs under the AioContext lock, and are declared in
 * blockjob_int.h.
 */

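/*
 * Monitor-side usage sketch (illustrative only, not compiled): the lookup
 * and the operation happen under one aio_context_acquire/release pair, as
 * described above.  This mirrors what the QMP handlers in blockdev.c do;
 * the exact error message and variable names here are assumptions.
 *
 *     BlockJob *job = block_job_get(device);
 *     AioContext *aio_context;
 *
 *     if (!job) {
 *         error_setg(errp, "Block job '%s' not found", device);
 *         return;
 *     }
 *     aio_context = blk_get_aio_context(job->blk);
 *     aio_context_acquire(aio_context);
 *     block_job_set_speed(job, speed, errp);
 *     aio_context_release(aio_context);
 */
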
BlockJob *block_job_next(BlockJob *job)
{
    if (!job) {
        return QLIST_FIRST(&block_jobs);
    }
    return QLIST_NEXT(job, job_list);
}

BlockJob *block_job_get(const char *id)
{
    BlockJob *job;

    QLIST_FOREACH(job, &block_jobs, job_list) {
        if (job->id && !strcmp(id, job->id)) {
            return job;
        }
    }

    return NULL;
}

static void block_job_pause(BlockJob *job)
{
    job->pause_count++;
}

static void block_job_resume(BlockJob *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }
    block_job_enter(job);
}

static void block_job_ref(BlockJob *job)
{
    ++job->refcnt;
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque);
static void block_job_detach_aio_context(void *opaque);

static void block_job_unref(BlockJob *job)
{
    if (--job->refcnt == 0) {
        BlockDriverState *bs = blk_bs(job->blk);
        bs->job = NULL;
        block_job_remove_all_bdrv(job);
        blk_remove_aio_context_notifier(job->blk,
                                        block_job_attached_aio_context,
                                        block_job_detach_aio_context, job);
        blk_unref(job->blk);
        error_free(job->blocker);
        g_free(job->id);
        QLIST_REMOVE(job, job_list);
        g_free(job);
    }
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;

    if (job->driver->attached_aio_context) {
        job->driver->attached_aio_context(job, new_context);
    }

    block_job_resume(job);
}

static void block_job_drain(BlockJob *job)
{
    /* If job is !job->busy this kicks it into the next pause point. */
    block_job_enter(job);

    blk_drain(job->blk);
    if (job->driver->drain) {
        job->driver->drain(job);
    }
}

static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    block_job_ref(job);

    block_job_pause(job);

    while (!job->paused && !job->completed) {
        block_job_drain(job);
    }

    block_job_unref(job);
}

static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'",
                           BlockJobType_lookup[job->driver->job_type],
                           job->id);
}

static const BdrvChildRole child_job = {
    .get_parent_desc = child_job_get_parent_desc,
    .stay_at_node = true,
};

static void block_job_drained_begin(void *opaque)
{
    BlockJob *job = opaque;
    block_job_pause(job);
}

static void block_job_drained_end(void *opaque)
{
    BlockJob *job = opaque;
    block_job_resume(job);
}

static const BlockDevOps block_job_dev_ops = {
    .drained_begin = block_job_drained_begin,
    .drained_end = block_job_drained_end,
};

void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;
    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}

int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
                               job, errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_ref(bs);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}

bool block_job_is_internal(BlockJob *job)
{
    return (job->id == NULL);
}

static bool block_job_started(BlockJob *job)
{
    return job->co;
}

/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn block_job_co_entry(void *opaque)
{
    BlockJob *job = opaque;

    assert(job && job->driver && job->driver->start);
    block_job_pause_point(job);
    job->driver->start(job);
}

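/*
 * Driver-side sketch (illustrative only, not compiled): a hypothetical
 * .start callback that honours the pause/cancel protocol described above.
 * more_work_left(), do_one_chunk_of_io() and delay_ns are assumptions
 * standing in for driver-specific logic; the relevant part is the regular
 * call into block_job_sleep_ns() (which itself calls
 * block_job_pause_point()) and the cancellation check.
 *
 *     static void coroutine_fn example_job_start(BlockJob *job)
 *     {
 *         while (!block_job_is_cancelled(job) && more_work_left(job)) {
 *             block_job_sleep_ns(job, QEMU_CLOCK_REALTIME, delay_ns);
 *             if (do_one_chunk_of_io(job) < 0) {
 *                 break;
 *             }
 *         }
 *         // completion is bounced to the main loop, see
 *         // block_job_defer_to_main_loop() below
 *     }
 */
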
void block_job_start(BlockJob *job)
{
    assert(job && !block_job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(block_job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    bdrv_coroutine_enter(blk_bs(job->blk), job->co);
}

static void block_job_completed_single(BlockJob *job)
{
    if (!job->ret) {
        if (job->driver->commit) {
            job->driver->commit(job);
        }
    } else {
        if (job->driver->abort) {
            job->driver->abort(job);
        }
    }
    if (job->driver->clean) {
        job->driver->clean(job);
    }

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (block_job_started(job)) {
        if (block_job_is_cancelled(job)) {
            block_job_event_cancelled(job);
        } else {
            const char *msg = NULL;
            if (job->ret < 0) {
                msg = strerror(-job->ret);
            }
            block_job_event_completed(job, msg);
        }
    }

    if (job->txn) {
        QLIST_REMOVE(job, txn_list);
        block_job_txn_unref(job->txn);
    }
    block_job_unref(job);
}

static void block_job_cancel_async(BlockJob *job)
{
    if (job->iostatus != BLOCK_DEVICE_IO_STATUS_OK) {
        block_job_iostatus_reset(job);
    }
    if (job->user_paused) {
        /* Do not call block_job_enter here, the caller will handle it. */
        job->user_paused = false;
        job->pause_count--;
    }
    job->cancelled = true;
}

static void block_job_completed_txn_abort(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    /* We are the first failed job. Cancel other jobs. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
    }
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job == job || other_job->completed) {
            /* Other jobs are "effectively" cancelled by us, set the status for
             * them; this job, however, may or may not be cancelled, depending
             * on the caller, so leave it. */
            if (other_job != job) {
                block_job_cancel_async(other_job);
            }
            continue;
        }
        block_job_cancel_sync(other_job);
        assert(other_job->completed);
    }
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}

static void block_job_completed_txn_success(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;
    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!other_job->completed) {
            return;
        }
    }
    /* We are the last completed job, commit the transaction. */
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
        assert(other_job->ret == 0);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}

void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;

    if (!job->driver->set_speed) {
        error_setg(errp, QERR_UNSUPPORTED);
        return;
    }
    job->driver->set_speed(job, speed, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    job->speed = speed;
}

void block_job_complete(BlockJob *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (job->pause_count || job->cancelled ||
        !block_job_started(job) || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}

void block_job_user_pause(BlockJob *job)
{
    job->user_paused = true;
    block_job_pause(job);
}

bool block_job_user_paused(BlockJob *job)
{
    return job->user_paused;
}

void block_job_user_resume(BlockJob *job)
{
    if (job && job->user_paused && job->pause_count > 0) {
        block_job_iostatus_reset(job);
        job->user_paused = false;
        block_job_resume(job);
    }
}

void block_job_cancel(BlockJob *job)
{
    if (block_job_started(job)) {
        block_job_cancel_async(job);
        block_job_enter(job);
    } else {
        block_job_completed(job, -ECANCELED);
    }
}

static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    Error *local_err = NULL;
    int ret;

    assert(blk_bs(job->blk)->job == job);

    block_job_ref(job);

    finish(job, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        block_job_unref(job);
        return -EBUSY;
    }
    /* block_job_drain calls block_job_enter, and it should be enough to
     * induce progress until the job completes or moves to the main thread.
     */
    while (!job->deferred_to_main_loop && !job->completed) {
        block_job_drain(job);
    }
    while (!job->completed) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
    block_job_unref(job);
    return ret;
}

/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
 * used with block_job_finish_sync() without the need for (rather nasty)
 * function pointer casts there. */
static void block_job_cancel_err(BlockJob *job, Error **errp)
{
    block_job_cancel(job);
}

int block_job_cancel_sync(BlockJob *job)
{
    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
}

void block_job_cancel_sync_all(void)
{
    BlockJob *job;
    AioContext *aio_context;

    while ((job = QLIST_FIRST(&block_jobs))) {
        aio_context = blk_get_aio_context(job->blk);
        aio_context_acquire(aio_context);
        block_job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}

int block_job_complete_sync(BlockJob *job, Error **errp)
{
    return block_job_finish_sync(job, &block_job_complete, errp);
}

BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type = g_strdup(BlockJobType_lookup[job->driver->job_type]);
    info->device = g_strdup(job->id);
    info->len = job->len;
    info->busy = job->busy;
    info->paused = job->pause_count > 0;
    info->offset = job->offset;
    info->speed = job->speed;
    info->io_status = job->iostatus;
    info->ready = job->ready;
    return info;
}

static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

static void block_job_event_cancelled(BlockJob *job)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        &error_abort);
}

static void block_job_event_completed(BlockJob *job, const char *msg)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_completed(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        !!msg,
                                        msg,
                                        &error_abort);
}

/*
 * API for block job drivers and the block layer. These functions are
 * declared in blockjob_int.h.
 */

void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
        if (!*job_id) {
            error_setg(errp, "An explicit job ID is required for this node");
            return NULL;
        }
    }

    if (job_id) {
        if (flags & BLOCK_JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal block job");
            return NULL;
        }

        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }

        if (block_job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    }

    blk = blk_new(perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = g_malloc0(driver->instance_size);
    job->driver = driver;
    job->id = g_strdup(job_id);
    job->blk = blk;
    job->cb = cb;
    job->opaque = opaque;
    job->busy = false;
    job->paused = true;
    job->pause_count = 1;
    job->refcnt = 1;

    error_setg(&job->blocker, "block device is in use by block job: %s",
               BlockJobType_lookup[driver->job_type]);
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    blk_set_dev_ops(blk, &block_job_dev_ops, job);
    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    QLIST_INSERT_HEAD(&block_jobs, job, job_list);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            block_job_unref(job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }
    return job;
}

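/*
 * Creation sketch (illustrative only, not compiled): how a job front-end
 * (e.g. code called from blockdev.c) would typically pair
 * block_job_create() with block_job_start().  ExampleBlockJob and
 * example_job_driver are hypothetical, the BLOCK_JOB_DEFAULT flag is
 * assumed from blockjob.h, and the permission arguments are placeholders;
 * real drivers define a BlockJobDriver with .instance_size, .job_type and
 * .start set, and request only the permissions they need.
 *
 *     ExampleBlockJob *s;
 *
 *     s = block_job_create(job_id, &example_job_driver, bs,
 *                          0, BLK_PERM_ALL, speed, BLOCK_JOB_DEFAULT,
 *                          NULL, NULL, errp);
 *     if (!s) {
 *         return;
 *     }
 *     // ... driver-specific setup of s ...
 *     block_job_start(&s->common);
 */
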
void block_job_pause_all(void)
{
    BlockJob *job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_pause(job);
        aio_context_release(aio_context);
    }
}

void block_job_early_fail(BlockJob *job)
{
    block_job_unref(job);
}

void block_job_completed(BlockJob *job, int ret)
{
    assert(blk_bs(job->blk)->job == job);
    assert(!job->completed);
    job->completed = true;
    job->ret = ret;
    if (!job->txn) {
        block_job_completed_single(job);
    } else if (ret < 0 || block_job_is_cancelled(job)) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_completed_txn_success(job);
    }
}

static bool block_job_should_pause(BlockJob *job)
{
    return job->pause_count > 0;
}

void coroutine_fn block_job_pause_point(BlockJob *job)
{
    assert(job && block_job_started(job));

    if (!block_job_should_pause(job)) {
        return;
    }
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
        job->paused = true;
        job->busy = false;
        qemu_coroutine_yield(); /* wait for block_job_resume() */
        job->busy = true;
        job->paused = false;
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}

void block_job_resume_all(void)
{
    BlockJob *job = NULL;
    while ((job = block_job_next(job))) {
        AioContext *aio_context = blk_get_aio_context(job->blk);

        aio_context_acquire(aio_context);
        block_job_resume(job);
        aio_context_release(aio_context);
    }
}

void block_job_enter(BlockJob *job)
{
    if (job->co && !job->busy) {
        bdrv_coroutine_enter(blk_bs(job->blk), job->co);
    }
}

bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}

void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;
    if (!block_job_should_pause(job)) {
        co_aio_sleep_ns(blk_get_aio_context(job->blk), type, ns);
    }
    job->busy = true;

    block_job_pause_point(job);
}

void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;
    if (!block_job_should_pause(job)) {
        qemu_coroutine_yield();
    }
    job->busy = true;

    block_job_pause_point(job);
}

void block_job_iostatus_reset(BlockJob *job)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->user_paused && job->pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

void block_job_event_ready(BlockJob *job)
{
    job->ready = true;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job->driver->job_type,
                                    job->id,
                                    job->len,
                                    job->offset,
                                    job->speed, &error_abort);
}

BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action, &error_abort);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* make the pause user visible, which will be resumed from QMP. */
        block_job_user_pause(job);
        block_job_iostatus_set_err(job, error);
    }
    return action;
}
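
/*
 * Driver-side sketch (illustrative only, not compiled): how a job's I/O
 * path typically reacts to a failed request.  s->on_error and the retry
 * handling are assumptions standing in for driver-specific logic (compare
 * the error handling in block/mirror.c or block/backup.c); note that the
 * errno value is passed in positive, hence -ret.
 *
 *     BlockErrorAction action;
 *
 *     action = block_job_error_action(&s->common, s->on_error,
 *                                     is_read, -ret);
 *     if (action == BLOCK_ERROR_ACTION_REPORT) {
 *         // fail the job; the error ends up in job->ret
 *         return ret;
 *     }
 *     // STOP has already paused the job and set its iostatus;
 *     // with IGNORE or STOP the request is retried later.
 */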

typedef struct {
    BlockJob *job;
    AioContext *aio_context;
    BlockJobDeferToMainLoopFn *fn;
    void *opaque;
} BlockJobDeferToMainLoopData;

static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;
    AioContext *aio_context;

    /* Prevent race with block_job_defer_to_main_loop() */
    aio_context_acquire(data->aio_context);

    /* Fetch BDS AioContext again, in case it has changed */
    aio_context = blk_get_aio_context(data->job->blk);
    if (aio_context != data->aio_context) {
        aio_context_acquire(aio_context);
    }

    data->job->deferred_to_main_loop = false;
    data->fn(data->job, data->opaque);

    if (aio_context != data->aio_context) {
        aio_context_release(aio_context);
    }

    aio_context_release(data->aio_context);

    g_free(data);
}

void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->aio_context = blk_get_aio_context(job->blk);
    data->fn = fn;
    data->opaque = opaque;
    job->deferred_to_main_loop = true;

    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            block_job_defer_to_main_loop_bh, data);
}

BlockJobTxn *block_job_txn_new(void)
{
    BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}

static void block_job_txn_ref(BlockJobTxn *txn)
{
    txn->refcnt++;
}

void block_job_txn_unref(BlockJobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}

void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    block_job_txn_ref(txn);
}