/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/aio-wait.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/trace.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qerror.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"

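/*
 * Narrow a generic Job down to the job types that are implemented on top of
 * BlockJob (backup, commit, mirror, stream); the iterators below skip any
 * other entries in the job list.
 */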
static bool is_block_job(Job *job)
{
    return job_type(job) == JOB_TYPE_BACKUP ||
           job_type(job) == JOB_TYPE_COMMIT ||
           job_type(job) == JOB_TYPE_MIRROR ||
           job_type(job) == JOB_TYPE_STREAM;
}

BlockJob *block_job_next_locked(BlockJob *bjob)
{
    Job *job = bjob ? &bjob->job : NULL;
    GLOBAL_STATE_CODE();

    do {
        job = job_next_locked(job);
    } while (job && !is_block_job(job));

    return job ? container_of(job, BlockJob, job) : NULL;
}

BlockJob *block_job_get_locked(const char *id)
{
    Job *job = job_get_locked(id);
    GLOBAL_STATE_CODE();

    if (job && is_block_job(job)) {
        return container_of(job, BlockJob, job);
    } else {
        return NULL;
    }
}

BlockJob *block_job_get(const char *id)
{
    JOB_LOCK_GUARD();
    return block_job_get_locked(id);
}

void block_job_free(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    GLOBAL_STATE_CODE();

    block_job_remove_all_bdrv(bjob);
    ratelimit_destroy(&bjob->limit);
    error_free(bjob->blocker);
}

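/*
 * BdrvChild callbacks: a block job acts as a parent of the nodes it works on
 * (attached through the child_job class below), so draining one of those
 * nodes pauses the job and ending the drained section resumes it.
 */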
static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'", job_type_str(&job->job), job->job.id);
}

static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    job_pause(&job->job);
}

static bool child_job_drained_poll(BdrvChild *c)
{
    BlockJob *bjob = c->opaque;
    Job *job = &bjob->job;
    const BlockJobDriver *drv = block_job_driver(bjob);

    /* An inactive or completed job doesn't have any pending requests. Jobs
     * with !job->busy are either already paused or have a pause point after
     * being reentered, so no job driver code will run before they pause. */
    WITH_JOB_LOCK_GUARD() {
        if (!job->busy || job_is_completed_locked(job)) {
            return false;
        }
    }

    /* Otherwise, assume that it isn't fully stopped yet, but allow the job to
     * override this assumption. */
    if (drv->drained_poll) {
        return drv->drained_poll(bjob);
    } else {
        return true;
    }
}

static void child_job_drained_end(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    job_resume(&job->job);
}

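/*
 * Changing the AioContext of a block job is done as a Transaction: every node
 * attached to the job must first agree to the switch, and the job's own
 * context is only updated in the commit phase.
 */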
typedef struct BdrvStateChildJobContext {
    AioContext *new_ctx;
    BlockJob *job;
} BdrvStateChildJobContext;

static void child_job_set_aio_ctx_commit(void *opaque)
{
    BdrvStateChildJobContext *s = opaque;
    BlockJob *job = s->job;

    job_set_aio_context(&job->job, s->new_ctx);
}

static TransactionActionDrv change_child_job_context = {
    .commit = child_job_set_aio_ctx_commit,
    .clean = g_free,
};

static bool child_job_change_aio_ctx(BdrvChild *c, AioContext *ctx,
                                     GHashTable *visited, Transaction *tran,
                                     Error **errp)
{
    BlockJob *job = c->opaque;
    BdrvStateChildJobContext *s;
    GSList *l;

    for (l = job->nodes; l; l = l->next) {
        BdrvChild *sibling = l->data;
        if (!bdrv_child_change_aio_context(sibling, ctx, visited,
                                           tran, errp)) {
            return false;
        }
    }

    s = g_new(BdrvStateChildJobContext, 1);
    *s = (BdrvStateChildJobContext) {
        .new_ctx = ctx,
        .job = job,
    };

    tran_add(tran, &change_child_job_context, s);
    return true;
}

static AioContext *child_job_get_parent_aio_context(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    IO_CODE();
    JOB_LOCK_GUARD();

    return job->job.aio_context;
}

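/* BdrvChildClass for the nodes that block_job_add_bdrv() attaches to a job */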
static const BdrvChildClass child_job = {
    .get_parent_desc    = child_job_get_parent_desc,
    .drained_begin      = child_job_drained_begin,
    .drained_poll       = child_job_drained_poll,
    .drained_end        = child_job_drained_end,
    .change_aio_ctx     = child_job_change_aio_ctx,
    .stay_at_node       = true,
    .get_parent_aio_context = child_job_get_parent_aio_context,
};

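/*
 * Detach all nodes from the job: lift the operation blockers that were set
 * up for them and drop the BdrvChild references.
 */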
void block_job_remove_all_bdrv(BlockJob *job)
{
    GLOBAL_STATE_CODE();
    /*
     * bdrv_root_unref_child() may reach child_job_[can_]set_aio_ctx(),
     * which will also traverse job->nodes, so consume the list one by
     * one to make sure that such a concurrent access does not attempt
     * to process an already freed BdrvChild.
     */
    aio_context_release(job->job.aio_context);
    bdrv_graph_wrlock(NULL);
    aio_context_acquire(job->job.aio_context);
    while (job->nodes) {
        GSList *l = job->nodes;
        BdrvChild *c = l->data;

        job->nodes = l->next;

        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);

        g_slist_free_1(l);
    }
    bdrv_graph_wrunlock_ctx(job->job.aio_context);
}

bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs)
{
    GSList *el;
    GLOBAL_STATE_CODE();

    for (el = job->nodes; el; el = el->next) {
        BdrvChild *c = el->data;
        if (c->bs == bs) {
            return true;
        }
    }

    return false;
}

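/*
 * Attach @bs to the job with the given permissions, temporarily switching
 * AioContext locks when the node lives in a different context than the job,
 * and block all operations on the node for as long as the job owns it.
 */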
int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;
    AioContext *ctx = bdrv_get_aio_context(bs);
    bool need_context_ops;
    GLOBAL_STATE_CODE();

    bdrv_ref(bs);

    need_context_ops = ctx != job->job.aio_context;

    if (need_context_ops) {
        if (job->job.aio_context != qemu_get_aio_context()) {
            aio_context_release(job->job.aio_context);
        }
        aio_context_acquire(ctx);
    }
    c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job,
                               errp);
    if (need_context_ops) {
        aio_context_release(ctx);
        if (job->job.aio_context != qemu_get_aio_context()) {
            aio_context_acquire(job->job.aio_context);
        }
    }
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}

/* Called with job_mutex lock held. */
static void block_job_on_idle_locked(Notifier *n, void *opaque)
{
    aio_wait_kick();
}

bool block_job_is_internal(BlockJob *job)
{
    return (job->job.id == NULL);
}

const BlockJobDriver *block_job_driver(BlockJob *job)
{
    return container_of(job->job.driver, BlockJobDriver, job_driver);
}

/* Assumes the job_mutex is held */
static bool job_timer_pending(Job *job)
{
    return timer_pending(&job->sleep_timer);
}

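/*
 * Update the job's rate limit. If the new speed is unlimited (0) or higher
 * than before, kick a job that is currently sleeping on its timer so that
 * the new limit takes effect immediately.
 */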
bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp)
{
    const BlockJobDriver *drv = block_job_driver(job);
    int64_t old_speed = job->speed;

    GLOBAL_STATE_CODE();

    if (job_apply_verb_locked(&job->job, JOB_VERB_SET_SPEED, errp) < 0) {
        return false;
    }
    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "speed",
                   "a non-negative value");
        return false;
    }

    ratelimit_set_speed(&job->limit, speed, BLOCK_JOB_SLICE_TIME);

    job->speed = speed;

    if (drv->set_speed) {
        job_unlock();
        drv->set_speed(job, speed);
        job_lock();
    }

    if (speed && speed <= old_speed) {
        return true;
    }

    /* kick only if a timer is pending */
    job_enter_cond_locked(&job->job, job_timer_pending);

    return true;
}

static bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    JOB_LOCK_GUARD();
    return block_job_set_speed_locked(job, speed, errp);
}

void block_job_change_locked(BlockJob *job, BlockJobChangeOptions *opts,
                             Error **errp)
{
    const BlockJobDriver *drv = block_job_driver(job);

    GLOBAL_STATE_CODE();

    if (job_apply_verb_locked(&job->job, JOB_VERB_CHANGE, errp)) {
        return;
    }

    if (drv->change) {
        job_unlock();
        drv->change(job, opts, errp);
        job_lock();
    } else {
        error_setg(errp, "Job type does not support change");
    }
}

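/*
 * Rate limiting helpers for job drivers: account the amount of data that has
 * been processed and sleep for whatever delay the rate limit currently
 * requires.
 */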
void block_job_ratelimit_processed_bytes(BlockJob *job, uint64_t n)
{
    IO_CODE();
    ratelimit_calculate_delay(&job->limit, n);
}

void block_job_ratelimit_sleep(BlockJob *job)
{
    uint64_t delay_ns;

    /*
     * Sleep at least once. If the job is reentered early, keep waiting until
     * we've waited for the full time that is necessary to keep the job at the
     * right speed.
     *
     * Make sure to recalculate the delay after each (possibly interrupted)
     * sleep because the speed can change while the job has yielded.
     */
    do {
        delay_ns = ratelimit_calculate_delay(&job->limit, 0);
        job_sleep_ns(&job->job, delay_ns);
    } while (delay_ns && !job_is_cancelled(&job->job));
}

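/*
 * Fill in a BlockJobInfo for @job. Internal jobs (those without an ID) are
 * not visible to the monitor and cannot be queried.
 */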
BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;
    uint64_t progress_current, progress_total;
    const BlockJobDriver *drv = block_job_driver(job);

    GLOBAL_STATE_CODE();

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    info = g_new0(BlockJobInfo, 1);
    info->type = job_type(&job->job);
    info->device = g_strdup(job->job.id);
    info->busy = job->job.busy;
    info->paused = job->job.pause_count > 0;
    info->offset = progress_current;
    info->len = progress_total;
    info->speed = job->speed;
    info->io_status = job->iostatus;
    info->ready = job_is_ready_locked(&job->job);
    info->status = job->job.status;
    info->auto_finalize = job->job.auto_finalize;
    info->auto_dismiss = job->job.auto_dismiss;
    if (job->job.ret) {
        info->error = job->job.err ?
                        g_strdup(error_get_pretty(job->job.err)) :
                        g_strdup(strerror(-job->job.ret));
    }
    if (drv->query) {
        job_unlock();
        drv->query(job, info);
        job_lock();
    }
    return info;
}

/* Called with job lock held */
static void block_job_iostatus_set_err_locked(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

/* Called with job_mutex lock held. */
static void block_job_event_cancelled_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    uint64_t progress_current, progress_total;

    if (block_job_is_internal(job)) {
        return;
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    qapi_event_send_block_job_cancelled(job_type(&job->job),
                                        job->job.id,
                                        progress_total,
                                        progress_current,
                                        job->speed);
}

/* Called with job_mutex lock held. */
static void block_job_event_completed_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    const char *msg = NULL;
    uint64_t progress_current, progress_total;

    if (block_job_is_internal(job)) {
        return;
    }

    if (job->job.ret < 0) {
        msg = error_get_pretty(job->job.err);
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    qapi_event_send_block_job_completed(job_type(&job->job),
                                        job->job.id,
                                        progress_total,
                                        progress_current,
                                        job->speed,
                                        msg);
}

/* Called with job_mutex lock held. */
static void block_job_event_pending_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_pending(job_type(&job->job),
                                      job->job.id);
}

/* Called with job_mutex lock held. */
static void block_job_event_ready_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    uint64_t progress_current, progress_total;

    if (block_job_is_internal(job)) {
        return;
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    qapi_event_send_block_job_ready(job_type(&job->job),
                                    job->job.id,
                                    progress_total,
                                    progress_current,
                                    job->speed);
}


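/*
 * Create a new block job: allocate the Job through job_create(), register
 * the notifiers that emit the QMP BLOCK_JOB_* events, set up the op blocker
 * and attach @bs as the job's "main node". On failure the half-constructed
 * job is torn down with job_early_fail().
 */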
void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       JobTxn *txn, BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockJob *job;
    int ret;
    GLOBAL_STATE_CODE();

    bdrv_graph_wrlock(bs);

    if (job_id == NULL && !(flags & JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
    }

    job = job_create(job_id, &driver->job_driver, txn, bdrv_get_aio_context(bs),
                     flags, cb, opaque, errp);
    if (job == NULL) {
        bdrv_graph_wrunlock(bs);
        return NULL;
    }

    assert(is_block_job(&job->job));
    assert(job->job.driver->free == &block_job_free);
    assert(job->job.driver->user_resume == &block_job_user_resume);

    ratelimit_init(&job->limit);

    job->finalize_cancelled_notifier.notify = block_job_event_cancelled_locked;
    job->finalize_completed_notifier.notify = block_job_event_completed_locked;
    job->pending_notifier.notify = block_job_event_pending_locked;
    job->ready_notifier.notify = block_job_event_ready_locked;
    job->idle_notifier.notify = block_job_on_idle_locked;

    WITH_JOB_LOCK_GUARD() {
        notifier_list_add(&job->job.on_finalize_cancelled,
                          &job->finalize_cancelled_notifier);
        notifier_list_add(&job->job.on_finalize_completed,
                          &job->finalize_completed_notifier);
        notifier_list_add(&job->job.on_pending, &job->pending_notifier);
        notifier_list_add(&job->job.on_ready, &job->ready_notifier);
        notifier_list_add(&job->job.on_idle, &job->idle_notifier);
    }

    error_setg(&job->blocker, "block device is in use by block job: %s",
               job_type_str(&job->job));

    ret = block_job_add_bdrv(job, "main node", bs, perm, shared_perm, errp);
    if (ret < 0) {
        goto fail;
    }

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    if (!block_job_set_speed(job, speed, errp)) {
        goto fail;
    }

    bdrv_graph_wrunlock(bs);
    return job;

fail:
    bdrv_graph_wrunlock(bs);
    job_early_fail(&job->job);
    return NULL;
}

void block_job_iostatus_reset_locked(BlockJob *job)
{
    GLOBAL_STATE_CODE();
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->job.user_paused && job->job.pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

static void block_job_iostatus_reset(BlockJob *job)
{
    JOB_LOCK_GUARD();
    block_job_iostatus_reset_locked(job);
}

void block_job_user_resume(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    GLOBAL_STATE_CODE();
    block_job_iostatus_reset(bjob);
}

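/*
 * Map the configured on-error policy to an action for this particular error,
 * emit the BLOCK_JOB_ERROR event for user-visible jobs and, for
 * BLOCK_ERROR_ACTION_STOP, pause the job as if the user had paused it and
 * record the error in its iostatus.
 */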
BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;
    IO_CODE();

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->job.id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        WITH_JOB_LOCK_GUARD() {
            if (!job->job.user_paused) {
                job_pause_locked(&job->job);
                /*
                 * make the pause user visible, which will be
                 * resumed from QMP.
                 */
                job->job.user_paused = true;
            }
            block_job_iostatus_set_err_locked(job, error);
        }
    }
    return action;
}

AioContext *block_job_get_aio_context(BlockJob *job)
{
    GLOBAL_STATE_CODE();
    return job->job.aio_context;
}