]> git.proxmox.com Git - mirror_qemu.git/blame - job.c
job: Add job_dismiss()
[mirror_qemu.git] / job.c
CommitLineData
33e9e9bd
KW
1/*
2 * Background jobs (long-running operations)
3 *
4 * Copyright (c) 2011 IBM Corp.
5 * Copyright (c) 2012, 2018 Red Hat, Inc.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
24 */
25
26#include "qemu/osdep.h"
27#include "qemu-common.h"
28#include "qapi/error.h"
29#include "qemu/job.h"
30#include "qemu/id.h"
1908a559 31#include "qemu/main-loop.h"
a50c2ab8 32#include "trace-root.h"
33e9e9bd 33
e7c1d78b
KW
/* Global list of all existing jobs, in creation order (newest first).
 * NOTE(review): accessed without taking job_mutex by job_next()/job_get();
 * presumably callers hold the BQL — confirm before touching from other
 * threads. */
static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs);
a50c2ab8
KW
/* Job State Transition Table.
 * First index: current status (rows, see the row labels).
 * Second index: requested destination status (columns, see the legend line).
 * A 1 means the transition is allowed; job_state_transition() asserts on 0. */
bool JobSTT[JOB_STATUS__MAX][JOB_STATUS__MAX] = {
                                    /* U, C, R, P, Y, S, W, D, X, E, N */
    /* U: */ [JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
    /* C: */ [JOB_STATUS_CREATED]   = {0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1},
    /* R: */ [JOB_STATUS_RUNNING]   = {0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0},
    /* P: */ [JOB_STATUS_PAUSED]    = {0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
    /* Y: */ [JOB_STATUS_READY]     = {0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0},
    /* S: */ [JOB_STATUS_STANDBY]   = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
    /* W: */ [JOB_STATUS_WAITING]   = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0},
    /* D: */ [JOB_STATUS_PENDING]   = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
    /* X: */ [JOB_STATUS_ABORTING]  = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
    /* E: */ [JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
    /* N: */ [JOB_STATUS_NULL]      = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
};

/* Per-verb permission table: first index is the QMP verb, second index is the
 * job's current status; 1 means the verb is accepted in that state (checked
 * by job_apply_verb()). */
bool JobVerbTable[JOB_VERB__MAX][JOB_STATUS__MAX] = {
                                    /* U, C, R, P, Y, S, W, D, X, E, N */
    [JOB_VERB_CANCEL]             = {0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0},
    [JOB_VERB_PAUSE]              = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_RESUME]             = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_SET_SPEED]          = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_COMPLETE]           = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
    [JOB_VERB_FINALIZE]           = {0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
    [JOB_VERB_DISMISS]            = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
};
62
7eaa8fb5
KW
/* Transactional group of jobs: all member jobs finalize (or abort) together.
 * Single jobs are wrapped in a one-job transaction by job_create(). */
struct JobTxn {

    /* Is this txn being cancelled? Set once by the first failing job so that
     * other members do not re-enter the abort path. */
    bool aborting;

    /* List of jobs (linked through Job.txn_list) */
    QLIST_HEAD(, Job) jobs;

    /* Reference count: one per member job plus one per external holder */
    int refcnt;
};
75
da01ff7f
KW
/* Right now, this mutex is only needed to synchronize accesses to job->busy
 * and job->sleep_timer, such as concurrent calls to job_do_yield and
 * job_enter. */
static QemuMutex job_mutex;

/* Acquire the global job mutex (guards job->busy / job->sleep_timer). */
static void job_lock(void)
{
    qemu_mutex_lock(&job_mutex);
}

/* Release the global job mutex. */
static void job_unlock(void)
{
    qemu_mutex_unlock(&job_mutex);
}

/* Initialize the mutex at program startup, before any job API can run. */
static void __attribute__((__constructor__)) job_init(void)
{
    qemu_mutex_init(&job_mutex);
}
95
7eaa8fb5
KW
96JobTxn *job_txn_new(void)
97{
98 JobTxn *txn = g_new0(JobTxn, 1);
99 QLIST_INIT(&txn->jobs);
100 txn->refcnt = 1;
101 return txn;
102}
103
104static void job_txn_ref(JobTxn *txn)
105{
106 txn->refcnt++;
107}
108
109void job_txn_unref(JobTxn *txn)
110{
111 if (txn && --txn->refcnt == 0) {
112 g_free(txn);
113 }
114}
115
116void job_txn_add_job(JobTxn *txn, Job *job)
117{
118 if (!txn) {
119 return;
120 }
121
122 assert(!job->txn);
123 job->txn = txn;
124
125 QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
126 job_txn_ref(txn);
127}
128
129static void job_txn_del_job(Job *job)
130{
131 if (job->txn) {
132 QLIST_REMOVE(job, txn_list);
133 job_txn_unref(job->txn);
134 job->txn = NULL;
135 }
136}
137
/* Apply @fn to every job in @txn, stopping at the first non-zero return.
 * If @lock is true, each job's AioContext is acquired around its @fn call.
 * Returns 0 on success, or the first failing job's return code.
 * Uses the _SAFE iterator because @fn may remove the job from the list
 * (e.g. job_finalize_single() -> job_txn_del_job()). */
static int job_txn_apply(JobTxn *txn, int fn(Job *), bool lock)
{
    AioContext *ctx;
    Job *job, *next;
    int rc = 0;

    QLIST_FOREACH_SAFE(job, &txn->jobs, txn_list, next) {
        if (lock) {
            ctx = job->aio_context;
            aio_context_acquire(ctx);
        }
        rc = fn(job);
        if (lock) {
            aio_context_release(ctx);
        }
        if (rc) {
            break;
        }
    }
    return rc;
}
159
160
a50c2ab8
KW
161/* TODO Make static once the whole state machine is in job.c */
162void job_state_transition(Job *job, JobStatus s1)
163{
164 JobStatus s0 = job->status;
165 assert(s1 >= 0 && s1 <= JOB_STATUS__MAX);
4ad35181 166 trace_job_state_transition(job, job->ret,
a50c2ab8
KW
167 JobSTT[s0][s1] ? "allowed" : "disallowed",
168 JobStatus_str(s0), JobStatus_str(s1));
169 assert(JobSTT[s0][s1]);
170 job->status = s1;
171}
172
173int job_apply_verb(Job *job, JobVerb verb, Error **errp)
174{
175 JobStatus s0 = job->status;
176 assert(verb >= 0 && verb <= JOB_VERB__MAX);
177 trace_job_apply_verb(job, JobStatus_str(s0), JobVerb_str(verb),
178 JobVerbTable[verb][s0] ? "allowed" : "prohibited");
179 if (JobVerbTable[verb][s0]) {
180 return 0;
181 }
182 error_setg(errp, "Job '%s' in state '%s' cannot accept command verb '%s'",
183 job->id, JobStatus_str(s0), JobVerb_str(verb));
184 return -EPERM;
185}
186
252291ea
KW
187JobType job_type(const Job *job)
188{
189 return job->driver->job_type;
190}
191
192const char *job_type_str(const Job *job)
193{
194 return JobType_str(job_type(job));
195}
196
daa7f2f9
KW
197bool job_is_cancelled(Job *job)
198{
199 return job->cancelled;
200}
201
dbe5e6c1
KW
202bool job_is_completed(Job *job)
203{
204 switch (job->status) {
205 case JOB_STATUS_UNDEFINED:
206 case JOB_STATUS_CREATED:
207 case JOB_STATUS_RUNNING:
208 case JOB_STATUS_PAUSED:
209 case JOB_STATUS_READY:
210 case JOB_STATUS_STANDBY:
211 return false;
212 case JOB_STATUS_WAITING:
213 case JOB_STATUS_PENDING:
214 case JOB_STATUS_ABORTING:
215 case JOB_STATUS_CONCLUDED:
216 case JOB_STATUS_NULL:
217 return true;
218 default:
219 g_assert_not_reached();
220 }
221 return false;
222}
223
3d70ff53 224static bool job_started(Job *job)
da01ff7f
KW
225{
226 return job->co;
227}
228
198c49cc 229static bool job_should_pause(Job *job)
da01ff7f
KW
230{
231 return job->pause_count > 0;
232}
233
e7c1d78b
KW
234Job *job_next(Job *job)
235{
236 if (!job) {
237 return QLIST_FIRST(&jobs);
238 }
239 return QLIST_NEXT(job, job_list);
240}
241
242Job *job_get(const char *id)
243{
244 Job *job;
245
246 QLIST_FOREACH(job, &jobs, job_list) {
247 if (job->id && !strcmp(id, job->id)) {
248 return job;
249 }
250 }
251
252 return NULL;
253}
254
5d43e86e
KW
/* Timer callback armed by job_sleep_ns(): wake the sleeping job coroutine. */
static void job_sleep_timer_cb(void *opaque)
{
    Job *job = opaque;

    job_enter(job);
}
261
7eaa8fb5
KW
/*
 * Create and register a new job.
 *
 * @job_id: user-visible ID; required unless JOB_INTERNAL is set in @flags,
 *          forbidden when it is.
 * @driver: per-job-type vtable; its instance_size determines the allocation.
 * @txn:    transaction to join, or NULL to create a private one-job txn.
 * @ctx:    AioContext the job will run in.
 * @flags:  JOB_* creation flags (internal / manual-finalize / manual-dismiss).
 * @cb/@opaque: completion callback invoked from job_finalize_single().
 *
 * Returns the new job (caller owns one reference) or NULL with @errp set.
 * The job starts paused (pause_count == 1) until job_start() is called.
 */
void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
                 AioContext *ctx, int flags, BlockCompletionFunc *cb,
                 void *opaque, Error **errp)
{
    Job *job;

    if (job_id) {
        if (flags & JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal job");
            return NULL;
        }
        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }
        if (job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    } else if (!(flags & JOB_INTERNAL)) {
        error_setg(errp, "An explicit job ID is required");
        return NULL;
    }

    /* Driver-specific struct embeds Job as its first member. */
    job = g_malloc0(driver->instance_size);
    job->driver        = driver;
    job->id            = g_strdup(job_id);
    job->refcnt        = 1;
    job->aio_context   = ctx;
    job->busy          = false;
    job->paused        = true;
    job->pause_count   = 1;   /* released by job_start() */
    job->auto_finalize = !(flags & JOB_MANUAL_FINALIZE);
    job->auto_dismiss  = !(flags & JOB_MANUAL_DISMISS);
    job->cb            = cb;
    job->opaque        = opaque;

    notifier_list_init(&job->on_finalize_cancelled);
    notifier_list_init(&job->on_finalize_completed);
    notifier_list_init(&job->on_pending);

    job_state_transition(job, JOB_STATUS_CREATED);
    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
                   QEMU_CLOCK_REALTIME, SCALE_NS,
                   job_sleep_timer_cb, job);

    QLIST_INSERT_HEAD(&jobs, job, job_list);

    /* Single jobs are modeled as single-job transactions for sake of
     * consolidating the job management logic */
    if (!txn) {
        txn = job_txn_new();
        job_txn_add_job(txn, job);
        job_txn_unref(txn);   /* the job's own ref keeps the txn alive */
    } else {
        job_txn_add_job(txn, job);
    }

    return job;
}
fd61a701 322
80fa2c75 323void job_ref(Job *job)
fd61a701 324{
80fa2c75
KW
325 ++job->refcnt;
326}
327
328void job_unref(Job *job)
329{
330 if (--job->refcnt == 0) {
331 assert(job->status == JOB_STATUS_NULL);
5d43e86e 332 assert(!timer_pending(&job->sleep_timer));
7eaa8fb5 333 assert(!job->txn);
e7c1d78b 334
80fa2c75
KW
335 if (job->driver->free) {
336 job->driver->free(job);
337 }
338
339 QLIST_REMOVE(job, job_list);
340
341 g_free(job->id);
342 g_free(job);
343 }
fd61a701 344}
1908a559 345
139a9f02
KW
/* Fire the on_finalize_cancelled notifiers (job finalized as cancelled). */
void job_event_cancelled(Job *job)
{
    notifier_list_notify(&job->on_finalize_cancelled, job);
}

/* Fire the on_finalize_completed notifiers (job finalized successfully). */
void job_event_completed(Job *job)
{
    notifier_list_notify(&job->on_finalize_completed, job);
}

/* Fire the on_pending notifiers (job reached PENDING and awaits manual
 * finalization). */
static void job_event_pending(Job *job)
{
    notifier_list_notify(&job->on_pending, job);
}
360
da01ff7f
KW
/* Conditionally re-enter @job's coroutine.
 * Returns without doing anything if the job has not started, has deferred to
 * the main loop, is currently busy, or if @fn is non-NULL and returns false
 * (evaluated under job_mutex). Otherwise cancels any pending sleep timer,
 * marks the job busy and wakes its coroutine. */
void job_enter_cond(Job *job, bool(*fn)(Job *job))
{
    if (!job_started(job)) {
        return;
    }
    if (job->deferred_to_main_loop) {
        return;
    }

    job_lock();
    if (job->busy) {
        job_unlock();
        return;
    }

    if (fn && !fn(job)) {
        job_unlock();
        return;
    }

    assert(!job->deferred_to_main_loop);
    timer_del(&job->sleep_timer);
    /* busy must be set before unlocking so a concurrent job_enter_cond()
     * cannot wake the coroutine a second time. */
    job->busy = true;
    job_unlock();
    aio_co_wake(job->co);
}

/* Unconditional variant of job_enter_cond(). */
void job_enter(Job *job)
{
    job_enter_cond(job, NULL);
}
392
da01ff7f 393/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
3d70ff53
KW
394 * Reentering the job coroutine with job_enter() before the timer has expired
395 * is allowed and cancels the timer.
da01ff7f 396 *
3d70ff53 397 * If @ns is (uint64_t) -1, no timer is scheduled and job_enter() must be
da01ff7f 398 * called explicitly. */
198c49cc 399static void coroutine_fn job_do_yield(Job *job, uint64_t ns)
da01ff7f
KW
400{
401 job_lock();
402 if (ns != -1) {
403 timer_mod(&job->sleep_timer, ns);
404 }
405 job->busy = false;
406 job_unlock();
407 qemu_coroutine_yield();
408
409 /* Set by job_enter_cond() before re-entering the coroutine. */
410 assert(job->busy);
411}
412
/* Pause point: if a pause has been requested (and the job is not cancelled),
 * notify the driver, transition to PAUSED/STANDBY, yield until resumed, then
 * restore the previous status and notify the driver again. Must be called
 * from the job coroutine after job_start(). */
void coroutine_fn job_pause_point(Job *job)
{
    assert(job && job_started(job));

    if (!job_should_pause(job)) {
        return;
    }
    if (job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    /* Re-check: driver->pause may have dropped the pause request or
     * cancelled the job. */
    if (job_should_pause(job) && !job_is_cancelled(job)) {
        JobStatus status = job->status;
        /* READY jobs go to STANDBY, everything else to PAUSED. */
        job_state_transition(job, status == JOB_STATUS_READY
                                  ? JOB_STATUS_STANDBY
                                  : JOB_STATUS_PAUSED);
        job->paused = true;
        job_do_yield(job, -1);
        job->paused = false;
        job_state_transition(job, status);
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}
443
198c49cc
KW
/* Yield the job coroutine until re-entered via job_enter(); honours pending
 * cancellation (returns immediately) and pause requests (pauses instead of
 * plain-yielding). */
void job_yield(Job *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (job_is_cancelled(job)) {
        return;
    }

    if (!job_should_pause(job)) {
        job_do_yield(job, -1);
    }

    job_pause_point(job);
}

/* Like job_yield(), but arm a wakeup timer @ns nanoseconds from now; an
 * explicit job_enter() may still wake the job earlier and cancels the timer. */
void coroutine_fn job_sleep_ns(Job *job, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (job_is_cancelled(job)) {
        return;
    }

    if (!job_should_pause(job)) {
        job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
    }

    job_pause_point(job);
}
475
b69f777d
KW
/* Kick @job to make progress: wake its coroutine (reaching the next pause
 * point if it was idle) and let the driver drain in-flight requests. */
void job_drain(Job *job)
{
    /* If job is !busy this kicks it into the next pause point. */
    job_enter(job);

    if (job->driver->drain) {
        job->driver->drain(job);
    }
}
485
486
da01ff7f
KW
/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn job_co_entry(void *opaque)
{
    Job *job = opaque;

    assert(job && job->driver && job->driver->start);
    job_pause_point(job);
    job->driver->start(job);
}

/* Launch @job: create its coroutine, release the creation-time pause
 * (pause_count was set to 1 in job_create()), move to RUNNING and enter the
 * coroutine in the job's AioContext. */
void job_start(Job *job)
{
    assert(job && !job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    job_state_transition(job, JOB_STATUS_RUNNING);
    aio_co_enter(job->aio_context, job->co);
}
512
b15de828
KW
/* Assumes the block_job_mutex is held */
static bool job_timer_not_pending(Job *job)
{
    return !timer_pending(&job->sleep_timer);
}

/* Request a pause; the job actually pauses at its next job_pause_point(). */
void job_pause(Job *job)
{
    job->pause_count++;
}

/* Drop one pause request; when the count reaches zero, wake the job —
 * but only if no sleep timer is armed (otherwise the timer will do it). */
void job_resume(Job *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }

    /* kick only if no timer is pending */
    job_enter_cond(job, job_timer_not_pending);
}
535
/* QMP-initiated pause: validate the verb, reject double pauses, then request
 * a pause. Sets @errp on failure. */
void job_user_pause(Job *job, Error **errp)
{
    if (job_apply_verb(job, JOB_VERB_PAUSE, errp)) {
        return;
    }
    if (job->user_paused) {
        error_setg(errp, "Job is already paused");
        return;
    }
    job->user_paused = true;
    job_pause(job);
}

/* True if the job is paused at the user's (QMP) request. */
bool job_user_paused(Job *job)
{
    return job->user_paused;
}

/* QMP-initiated resume: undo a user pause, giving the driver a chance to
 * react first via its user_resume hook. Sets @errp on failure. */
void job_user_resume(Job *job, Error **errp)
{
    assert(job);
    if (!job->user_paused || job->pause_count <= 0) {
        error_setg(errp, "Can't resume a job that was not paused");
        return;
    }
    if (job_apply_verb(job, JOB_VERB_RESUME, errp)) {
        return;
    }
    if (job->driver->user_resume) {
        job->driver->user_resume(job);
    }
    job->user_paused = false;
    job_resume(job);
}
570
5f9a6a08 571static void job_do_dismiss(Job *job)
4ad35181
KW
572{
573 assert(job);
574 job->busy = false;
575 job->paused = false;
576 job->deferred_to_main_loop = true;
577
7eaa8fb5 578 job_txn_del_job(job);
4ad35181
KW
579
580 job_state_transition(job, JOB_STATUS_NULL);
581 job_unref(job);
582}
583
5f9a6a08
KW
584void job_dismiss(Job **jobptr, Error **errp)
585{
586 Job *job = *jobptr;
587 /* similarly to _complete, this is QMP-interface only. */
588 assert(job->id);
589 if (job_apply_verb(job, JOB_VERB_DISMISS, errp)) {
590 return;
591 }
592
593 job_do_dismiss(job);
594 *jobptr = NULL;
595}
596
4ad35181
KW
/* Dispose of a job that failed before it was ever started (still CREATED). */
void job_early_fail(Job *job)
{
    assert(job->status == JOB_STATUS_CREATED);
    job_do_dismiss(job);
}

/* Move to CONCLUDED; auto-dismiss immediately unless the user asked to keep
 * the job around (JOB_MANUAL_DISMISS) — jobs that never started are always
 * dismissed. */
static void job_conclude(Job *job)
{
    job_state_transition(job, JOB_STATUS_CONCLUDED);
    if (job->auto_dismiss || !job_started(job)) {
        job_do_dismiss(job);
    }
}
610
3d70ff53 611static void job_update_rc(Job *job)
4ad35181
KW
612{
613 if (!job->ret && job_is_cancelled(job)) {
614 job->ret = -ECANCELED;
615 }
616 if (job->ret) {
617 job_state_transition(job, JOB_STATUS_ABORTING);
618 }
619}
620
621static void job_commit(Job *job)
622{
623 assert(!job->ret);
624 if (job->driver->commit) {
625 job->driver->commit(job);
626 }
627}
628
629static void job_abort(Job *job)
630{
631 assert(job->ret);
632 if (job->driver->abort) {
633 job->driver->abort(job);
634 }
635}
636
637static void job_clean(Job *job)
638{
639 if (job->driver->clean) {
640 job->driver->clean(job);
641 }
642}
643
7eaa8fb5 644static int job_finalize_single(Job *job)
4ad35181
KW
645{
646 assert(job_is_completed(job));
647
648 /* Ensure abort is called for late-transactional failures */
649 job_update_rc(job);
650
651 if (!job->ret) {
652 job_commit(job);
653 } else {
654 job_abort(job);
655 }
656 job_clean(job);
657
658 if (job->cb) {
659 job->cb(job->opaque, job->ret);
660 }
661
662 /* Emit events only if we actually started */
663 if (job_started(job)) {
664 if (job_is_cancelled(job)) {
665 job_event_cancelled(job);
666 } else {
667 job_event_completed(job);
668 }
669 }
670
7eaa8fb5 671 job_txn_del_job(job);
4ad35181
KW
672 job_conclude(job);
673 return 0;
674}
675
3d70ff53 676static void job_cancel_async(Job *job, bool force)
7eaa8fb5
KW
677{
678 if (job->user_paused) {
679 /* Do not call job_enter here, the caller will handle it. */
680 job->user_paused = false;
681 if (job->driver->user_resume) {
682 job->driver->user_resume(job);
683 }
684 assert(job->pause_count > 0);
685 job->pause_count--;
686 }
687 job->cancelled = true;
688 /* To prevent 'force == false' overriding a previous 'force == true' */
689 job->force_cancel |= force;
690}
691
3d70ff53 692static void job_completed_txn_abort(Job *job)
7eaa8fb5
KW
693{
694 AioContext *ctx;
695 JobTxn *txn = job->txn;
696 Job *other_job;
697
698 if (txn->aborting) {
699 /*
700 * We are cancelled by another job, which will handle everything.
701 */
702 return;
703 }
704 txn->aborting = true;
705 job_txn_ref(txn);
706
707 /* We are the first failed job. Cancel other jobs. */
708 QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
709 ctx = other_job->aio_context;
710 aio_context_acquire(ctx);
711 }
712
713 /* Other jobs are effectively cancelled by us, set the status for
714 * them; this job, however, may or may not be cancelled, depending
715 * on the caller, so leave it. */
716 QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
717 if (other_job != job) {
718 job_cancel_async(other_job, false);
719 }
720 }
721 while (!QLIST_EMPTY(&txn->jobs)) {
722 other_job = QLIST_FIRST(&txn->jobs);
723 ctx = other_job->aio_context;
724 if (!job_is_completed(other_job)) {
725 assert(job_is_cancelled(other_job));
726 job_finish_sync(other_job, NULL, NULL);
727 }
728 job_finalize_single(other_job);
729 aio_context_release(ctx);
730 }
731
732 job_txn_unref(txn);
733}
734
/* Run the driver's prepare hook for a so-far-successful job; the hook's
 * return value becomes the job's result. Returns the (possibly updated)
 * job->ret. */
static int job_prepare(Job *job)
{
    if (job->ret == 0 && job->driver->prepare) {
        job->ret = job->driver->prepare(job);
    }
    return job->ret;
}

/* Non-zero when the job requires an explicit job-finalize (manual mode). */
static int job_needs_finalize(Job *job)
{
    return !job->auto_finalize;
}
747
/* Finalize @job's whole transaction: prepare every member (aborting all of
 * them if any prepare fails), then finalize each one. */
static void job_do_finalize(Job *job)
{
    int rc;
    assert(job && job->txn);

    /* prepare the transaction to complete */
    rc = job_txn_apply(job->txn, job_prepare, true);
    if (rc) {
        job_completed_txn_abort(job);
    } else {
        job_txn_apply(job->txn, job_finalize_single, true);
    }
}

/* QMP 'job-finalize': explicitly finalize a PENDING job (manual-finalize
 * mode). Sets @errp if the verb is not allowed in the current state. */
void job_finalize(Job *job, Error **errp)
{
    assert(job && job->id);
    if (job_apply_verb(job, JOB_VERB_FINALIZE, errp)) {
        return;
    }
    job_do_finalize(job);
}
770
/* Move one txn member to PENDING; emit the PENDING event only for jobs that
 * need manual finalization (auto-finalize jobs conclude immediately after).
 * Always returns 0 for use with job_txn_apply(). */
static int job_transition_to_pending(Job *job)
{
    job_state_transition(job, JOB_STATUS_PENDING);
    if (!job->auto_finalize) {
        job_event_pending(job);
    }
    return 0;
}
779
3d70ff53 780static void job_completed_txn_success(Job *job)
7eaa8fb5
KW
781{
782 JobTxn *txn = job->txn;
783 Job *other_job;
784
785 job_state_transition(job, JOB_STATUS_WAITING);
786
787 /*
788 * Successful completion, see if there are other running jobs in this
789 * txn.
790 */
791 QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
792 if (!job_is_completed(other_job)) {
793 return;
794 }
795 assert(other_job->ret == 0);
796 }
797
798 job_txn_apply(txn, job_transition_to_pending, false);
799
800 /* If no jobs need manual finalization, automatically do so */
801 if (job_txn_apply(txn, job_needs_finalize, false) == 0) {
802 job_do_finalize(job);
803 }
804}
805
3d70ff53
KW
806void job_completed(Job *job, int ret)
807{
808 assert(job && job->txn && !job_is_completed(job));
809 job->ret = ret;
810 job_update_rc(job);
811 trace_job_completed(job, ret, job->ret);
812 if (job->ret) {
813 job_completed_txn_abort(job);
814 } else {
815 job_completed_txn_success(job);
816 }
817}
818
/* Cancel @job. A CONCLUDED job is simply dismissed. Otherwise mark it
 * cancelled and push it forward: complete it immediately if it never
 * started, abort the txn if it already deferred to the main loop, or just
 * wake the coroutine so it notices cancellation. */
void job_cancel(Job *job, bool force)
{
    if (job->status == JOB_STATUS_CONCLUDED) {
        job_do_dismiss(job);
        return;
    }
    job_cancel_async(job, force);
    if (!job_started(job)) {
        job_completed(job, -ECANCELED);
    } else if (job->deferred_to_main_loop) {
        job_completed_txn_abort(job);
    } else {
        job_enter(job);
    }
}

/* QMP 'job-cancel': verb-checked wrapper around job_cancel(). */
void job_user_cancel(Job *job, bool force, Error **errp)
{
    if (job_apply_verb(job, JOB_VERB_CANCEL, errp)) {
        return;
    }
    job_cancel(job, force);
}
842
/* A wrapper around job_cancel() taking an Error ** parameter so it may be
 * used with job_finish_sync() without the need for (rather nasty) function
 * pointer casts there. */
static void job_cancel_err(Job *job, Error **errp)
{
    job_cancel(job, false);
}

/* Cancel @job and block until it has fully completed; returns its result. */
int job_cancel_sync(Job *job)
{
    return job_finish_sync(job, &job_cancel_err, NULL);
}

/* Synchronously cancel every job in the global list, taking each job's
 * AioContext around the cancellation. Jobs remove themselves from the list
 * as they conclude, so job_next(NULL) eventually returns NULL. */
void job_cancel_sync_all(void)
{
    Job *job;
    AioContext *aio_context;

    while ((job = job_next(NULL))) {
        aio_context = job->aio_context;
        aio_context_acquire(aio_context);
        job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}
868
/* Ask @job to complete and block until it has; returns its result or an
 * error via @errp. */
int job_complete_sync(Job *job, Error **errp)
{
    return job_finish_sync(job, job_complete, errp);
}

/* QMP 'job-complete': ask a READY job to finish. Fails if the job is
 * pausing, cancelled, or its driver has no complete hook. */
void job_complete(Job *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (job_apply_verb(job, JOB_VERB_COMPLETE, errp)) {
        return;
    }
    if (job->pause_count || job_is_cancelled(job) || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}
889
b15de828 890
1908a559
KW
891typedef struct {
892 Job *job;
893 JobDeferToMainLoopFn *fn;
894 void *opaque;
895} JobDeferToMainLoopData;
896
897static void job_defer_to_main_loop_bh(void *opaque)
898{
899 JobDeferToMainLoopData *data = opaque;
900 Job *job = data->job;
901 AioContext *aio_context = job->aio_context;
902
903 aio_context_acquire(aio_context);
904 data->fn(data->job, data->opaque);
905 aio_context_release(aio_context);
906
907 g_free(data);
908}
909
910void job_defer_to_main_loop(Job *job, JobDeferToMainLoopFn *fn, void *opaque)
911{
912 JobDeferToMainLoopData *data = g_malloc(sizeof(*data));
913 data->job = job;
914 data->fn = fn;
915 data->opaque = opaque;
916 job->deferred_to_main_loop = true;
917
918 aio_bh_schedule_oneshot(qemu_get_aio_context(),
919 job_defer_to_main_loop_bh, data);
920}
6a74c075
KW
921
/* Run @finish on @job (if given), then block until the job has completed.
 * Returns the job's result, -ECANCELED if it was cancelled with ret == 0,
 * or -EBUSY if @finish itself failed (error propagated to @errp).
 * Holds a job reference across the wait so the job cannot be freed under
 * us. */
int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
{
    Error *local_err = NULL;
    int ret;

    job_ref(job);

    if (finish) {
        finish(job, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        job_unref(job);
        return -EBUSY;
    }
    /* job_drain calls job_enter, and it should be enough to induce progress
     * until the job completes or moves to the main thread. */
    while (!job->deferred_to_main_loop && !job_is_completed(job)) {
        job_drain(job);
    }
    /* Once deferred, only main-loop iterations can finish the job. */
    while (!job_is_completed(job)) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job_is_cancelled(job) && job->ret == 0) ? -ECANCELED : job->ret;
    job_unref(job);
    return ret;
}