]>
Commit | Line | Data |
---|---|---|
33e9e9bd KW |
1 | /* |
2 | * Background jobs (long-running operations) | |
3 | * | |
4 | * Copyright (c) 2011 IBM Corp. | |
5 | * Copyright (c) 2012, 2018 Red Hat, Inc. | |
6 | * | |
7 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
8 | * of this software and associated documentation files (the "Software"), to deal | |
9 | * in the Software without restriction, including without limitation the rights | |
10 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
11 | * copies of the Software, and to permit persons to whom the Software is | |
12 | * furnished to do so, subject to the following conditions: | |
13 | * | |
14 | * The above copyright notice and this permission notice shall be included in | |
15 | * all copies or substantial portions of the Software. | |
16 | * | |
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
22 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
23 | * THE SOFTWARE. | |
24 | */ | |
25 | ||
26 | #include "qemu/osdep.h" | |
27 | #include "qemu-common.h" | |
28 | #include "qapi/error.h" | |
29 | #include "qemu/job.h" | |
30 | #include "qemu/id.h" | |
1908a559 | 31 | #include "qemu/main-loop.h" |
a50c2ab8 | 32 | #include "trace-root.h" |
33e9e9bd | 33 | |
e7c1d78b KW |
34 | static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs); |
35 | ||
a50c2ab8 KW |
/* Job State Transition Table: JobSTT[s0][s1] is nonzero iff a transition
 * from status s0 (row) to status s1 (column) is allowed.  The single-letter
 * legend maps to the JOB_STATUS_* values in row order below. */
bool JobSTT[JOB_STATUS__MAX][JOB_STATUS__MAX] = {
                                    /* U, C, R, P, Y, S, W, D, X, E, N */
    /* U: */ [JOB_STATUS_UNDEFINED] = {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
    /* C: */ [JOB_STATUS_CREATED]   = {0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1},
    /* R: */ [JOB_STATUS_RUNNING]   = {0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0},
    /* P: */ [JOB_STATUS_PAUSED]    = {0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0},
    /* Y: */ [JOB_STATUS_READY]     = {0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0},
    /* S: */ [JOB_STATUS_STANDBY]   = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
    /* W: */ [JOB_STATUS_WAITING]   = {0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0},
    /* D: */ [JOB_STATUS_PENDING]   = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
    /* X: */ [JOB_STATUS_ABORTING]  = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0},
    /* E: */ [JOB_STATUS_CONCLUDED] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
    /* N: */ [JOB_STATUS_NULL]      = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
};
51 | ||
/* JobVerbTable[verb][status] is nonzero iff user command @verb may be
 * applied to a job currently in @status.  Column legend matches JobSTT. */
bool JobVerbTable[JOB_VERB__MAX][JOB_STATUS__MAX] = {
                                    /* U, C, R, P, Y, S, W, D, X, E, N */
    [JOB_VERB_CANCEL]             = {0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0},
    [JOB_VERB_PAUSE]              = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_RESUME]             = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_SET_SPEED]          = {0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0},
    [JOB_VERB_COMPLETE]           = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
    [JOB_VERB_FINALIZE]           = {0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
    [JOB_VERB_DISMISS]            = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0},
};
62 | ||
da01ff7f KW |
/* Right now, this mutex is only needed to synchronize accesses to job->busy
 * and job->sleep_timer, such as concurrent calls to job_do_yield and
 * job_enter.  Everything else still relies on the AioContext lock / main
 * loop — NOTE(review): confirm against callers before widening its scope. */
static QemuMutex job_mutex;
67 | ||
68 | static void job_lock(void) | |
69 | { | |
70 | qemu_mutex_lock(&job_mutex); | |
71 | } | |
72 | ||
73 | static void job_unlock(void) | |
74 | { | |
75 | qemu_mutex_unlock(&job_mutex); | |
76 | } | |
77 | ||
/* Runs automatically before main() (GCC/Clang constructor attribute), so
 * job_mutex is initialized before any job function can be called. */
static void __attribute__((__constructor__)) job_init(void)
{
    qemu_mutex_init(&job_mutex);
}
82 | ||
a50c2ab8 KW |
83 | /* TODO Make static once the whole state machine is in job.c */ |
84 | void job_state_transition(Job *job, JobStatus s1) | |
85 | { | |
86 | JobStatus s0 = job->status; | |
87 | assert(s1 >= 0 && s1 <= JOB_STATUS__MAX); | |
88 | trace_job_state_transition(job, /* TODO re-enable: job->ret */ 0, | |
89 | JobSTT[s0][s1] ? "allowed" : "disallowed", | |
90 | JobStatus_str(s0), JobStatus_str(s1)); | |
91 | assert(JobSTT[s0][s1]); | |
92 | job->status = s1; | |
93 | } | |
94 | ||
95 | int job_apply_verb(Job *job, JobVerb verb, Error **errp) | |
96 | { | |
97 | JobStatus s0 = job->status; | |
98 | assert(verb >= 0 && verb <= JOB_VERB__MAX); | |
99 | trace_job_apply_verb(job, JobStatus_str(s0), JobVerb_str(verb), | |
100 | JobVerbTable[verb][s0] ? "allowed" : "prohibited"); | |
101 | if (JobVerbTable[verb][s0]) { | |
102 | return 0; | |
103 | } | |
104 | error_setg(errp, "Job '%s' in state '%s' cannot accept command verb '%s'", | |
105 | job->id, JobStatus_str(s0), JobVerb_str(verb)); | |
106 | return -EPERM; | |
107 | } | |
108 | ||
252291ea KW |
109 | JobType job_type(const Job *job) |
110 | { | |
111 | return job->driver->job_type; | |
112 | } | |
113 | ||
114 | const char *job_type_str(const Job *job) | |
115 | { | |
116 | return JobType_str(job_type(job)); | |
117 | } | |
118 | ||
daa7f2f9 KW |
119 | bool job_is_cancelled(Job *job) |
120 | { | |
121 | return job->cancelled; | |
122 | } | |
123 | ||
da01ff7f KW |
124 | bool job_started(Job *job) |
125 | { | |
126 | return job->co; | |
127 | } | |
128 | ||
129 | bool job_should_pause(Job *job) | |
130 | { | |
131 | return job->pause_count > 0; | |
132 | } | |
133 | ||
e7c1d78b KW |
134 | Job *job_next(Job *job) |
135 | { | |
136 | if (!job) { | |
137 | return QLIST_FIRST(&jobs); | |
138 | } | |
139 | return QLIST_NEXT(job, job_list); | |
140 | } | |
141 | ||
142 | Job *job_get(const char *id) | |
143 | { | |
144 | Job *job; | |
145 | ||
146 | QLIST_FOREACH(job, &jobs, job_list) { | |
147 | if (job->id && !strcmp(id, job->id)) { | |
148 | return job; | |
149 | } | |
150 | } | |
151 | ||
152 | return NULL; | |
153 | } | |
154 | ||
5d43e86e KW |
155 | static void job_sleep_timer_cb(void *opaque) |
156 | { | |
157 | Job *job = opaque; | |
158 | ||
159 | job_enter(job); | |
160 | } | |
161 | ||
08be6fe2 KW |
162 | void *job_create(const char *job_id, const JobDriver *driver, AioContext *ctx, |
163 | Error **errp) | |
33e9e9bd KW |
164 | { |
165 | Job *job; | |
166 | ||
167 | if (job_id) { | |
168 | if (!id_wellformed(job_id)) { | |
169 | error_setg(errp, "Invalid job ID '%s'", job_id); | |
170 | return NULL; | |
171 | } | |
e7c1d78b KW |
172 | if (job_get(job_id)) { |
173 | error_setg(errp, "Job ID '%s' already in use", job_id); | |
174 | return NULL; | |
175 | } | |
33e9e9bd KW |
176 | } |
177 | ||
178 | job = g_malloc0(driver->instance_size); | |
179 | job->driver = driver; | |
180 | job->id = g_strdup(job_id); | |
80fa2c75 | 181 | job->refcnt = 1; |
08be6fe2 | 182 | job->aio_context = ctx; |
da01ff7f KW |
183 | job->busy = false; |
184 | job->paused = true; | |
185 | job->pause_count = 1; | |
33e9e9bd | 186 | |
a50c2ab8 | 187 | job_state_transition(job, JOB_STATUS_CREATED); |
5d43e86e KW |
188 | aio_timer_init(qemu_get_aio_context(), &job->sleep_timer, |
189 | QEMU_CLOCK_REALTIME, SCALE_NS, | |
190 | job_sleep_timer_cb, job); | |
a50c2ab8 | 191 | |
e7c1d78b KW |
192 | QLIST_INSERT_HEAD(&jobs, job, job_list); |
193 | ||
33e9e9bd KW |
194 | return job; |
195 | } | |
fd61a701 | 196 | |
80fa2c75 | 197 | void job_ref(Job *job) |
fd61a701 | 198 | { |
80fa2c75 KW |
199 | ++job->refcnt; |
200 | } | |
201 | ||
202 | void job_unref(Job *job) | |
203 | { | |
204 | if (--job->refcnt == 0) { | |
205 | assert(job->status == JOB_STATUS_NULL); | |
5d43e86e | 206 | assert(!timer_pending(&job->sleep_timer)); |
e7c1d78b | 207 | |
80fa2c75 KW |
208 | if (job->driver->free) { |
209 | job->driver->free(job); | |
210 | } | |
211 | ||
212 | QLIST_REMOVE(job, job_list); | |
213 | ||
214 | g_free(job->id); | |
215 | g_free(job); | |
216 | } | |
fd61a701 | 217 | } |
1908a559 | 218 | |
da01ff7f KW |
/* Conditionally wake up @job's coroutine.  If @fn is non-NULL it is
 * evaluated with job_mutex held, and the job is only reentered when it
 * returns true.  No-op if the job has not started, has deferred to the main
 * loop, or is currently busy. */
void job_enter_cond(Job *job, bool(*fn)(Job *job))
{
    /* No coroutine to wake before job_start(). */
    if (!job_started(job)) {
        return;
    }
    /* Control was handed to the main loop; do not touch the coroutine. */
    if (job->deferred_to_main_loop) {
        return;
    }

    job_lock();
    if (job->busy) {
        /* Coroutine is already running; nothing to do. */
        job_unlock();
        return;
    }

    /* Let the caller's predicate veto the wakeup (still under the lock). */
    if (fn && !fn(job)) {
        job_unlock();
        return;
    }

    assert(!job->deferred_to_main_loop);
    /* Cancel any pending sleep: we are reentering right now. */
    timer_del(&job->sleep_timer);
    /* Must be set before unlocking so job_do_yield()'s assert sees it. */
    job->busy = true;
    job_unlock();
    aio_co_wake(job->co);
}
245 | ||
5d43e86e KW |
246 | void job_enter(Job *job) |
247 | { | |
248 | job_enter_cond(job, NULL); | |
249 | } | |
250 | ||
da01ff7f KW |
/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
 * Reentering the job coroutine with job_enter() before the timer has
 * expired is allowed and cancels the timer.
 *
 * If @ns is (uint64_t) -1, no timer is scheduled and job_enter() must be
 * called explicitly. */
void coroutine_fn job_do_yield(Job *job, uint64_t ns)
{
    job_lock();
    if (ns != -1) {
        timer_mod(&job->sleep_timer, ns);
    }
    /* Cleared under job_mutex so job_enter_cond() knows the coroutine may
     * now be reentered. */
    job->busy = false;
    job_unlock();
    qemu_coroutine_yield();

    /* Set by job_enter_cond() before re-entering the coroutine.  */
    assert(job->busy);
}
270 | ||
/* Pause point: if a pause has been requested and the job is not cancelled,
 * notify the driver, yield until resumed, and restore the previous status.
 * Must be called from the job coroutine after job_start(). */
void coroutine_fn job_pause_point(Job *job)
{
    assert(job && job_started(job));

    /* Fast path: no pause requested, or the job is being cancelled anyway. */
    if (!job_should_pause(job)) {
        return;
    }
    if (job_is_cancelled(job)) {
        return;
    }

    /* Let the driver quiesce itself before the job actually pauses. */
    if (job->driver->pause) {
        job->driver->pause(job);
    }

    /* Re-check: the pause request may have been withdrawn, or the job
     * cancelled, while the driver's pause callback ran. */
    if (job_should_pause(job) && !job_is_cancelled(job)) {
        JobStatus status = job->status;
        /* READY jobs go to STANDBY, running ones to PAUSED. */
        job_state_transition(job, status == JOB_STATUS_READY
                                  ? JOB_STATUS_STANDBY
                                  : JOB_STATUS_PAUSED);
        job->paused = true;
        /* -1: no wakeup timer; only an explicit job_enter() resumes us. */
        job_do_yield(job, -1);
        job->paused = false;
        /* Restore the status we had before pausing. */
        job_state_transition(job, status);
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}
301 | ||
5d43e86e KW |
/* Sleep for @ns nanoseconds in the job coroutine (cancellable, pausable).
 * Returns immediately if the job is already cancelled. */
void coroutine_fn job_sleep_ns(Job *job, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (job_is_cancelled(job)) {
        return;
    }

    /* If a pause is pending, skip the timed yield and fall through to the
     * pause point, which yields without a timer instead. */
    if (!job_should_pause(job)) {
        job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
    }

    job_pause_point(job);
}
317 | ||
da01ff7f KW |
318 | /** |
319 | * All jobs must allow a pause point before entering their job proper. This | |
320 | * ensures that jobs can be paused prior to being started, then resumed later. | |
321 | */ | |
322 | static void coroutine_fn job_co_entry(void *opaque) | |
323 | { | |
324 | Job *job = opaque; | |
325 | ||
326 | assert(job && job->driver && job->driver->start); | |
327 | job_pause_point(job); | |
328 | job->driver->start(job); | |
329 | } | |
330 | ||
331 | ||
/* Create @job's coroutine and start running it.  The job must not have been
 * started yet and must still hold its creation-time pause reference. */
void job_start(Job *job)
{
    assert(job && !job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(job_co_entry, job);
    /* Drop the initial pause reference taken in job_create() and mark the
     * job runnable before the state transition and coroutine entry. */
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    job_state_transition(job, JOB_STATUS_RUNNING);
    aio_co_enter(job->aio_context, job->co);
}
343 | ||
b15de828 KW |
/* Predicate for job_enter_cond(): true if no sleep timer is armed.
 * Assumes job_mutex is held (job_enter_cond() calls it under job_lock();
 * the old comment's "block_job_mutex" name was stale). */
static bool job_timer_not_pending(Job *job)
{
    return !timer_pending(&job->sleep_timer);
}
349 | ||
350 | void job_pause(Job *job) | |
351 | { | |
352 | job->pause_count++; | |
353 | } | |
354 | ||
355 | void job_resume(Job *job) | |
356 | { | |
357 | assert(job->pause_count > 0); | |
358 | job->pause_count--; | |
359 | if (job->pause_count) { | |
360 | return; | |
361 | } | |
362 | ||
363 | /* kick only if no timer is pending */ | |
364 | job_enter_cond(job, job_timer_not_pending); | |
365 | } | |
366 | ||
367 | void job_user_pause(Job *job, Error **errp) | |
368 | { | |
369 | if (job_apply_verb(job, JOB_VERB_PAUSE, errp)) { | |
370 | return; | |
371 | } | |
372 | if (job->user_paused) { | |
373 | error_setg(errp, "Job is already paused"); | |
374 | return; | |
375 | } | |
376 | job->user_paused = true; | |
377 | job_pause(job); | |
378 | } | |
379 | ||
380 | bool job_user_paused(Job *job) | |
381 | { | |
382 | return job->user_paused; | |
383 | } | |
384 | ||
385 | void job_user_resume(Job *job, Error **errp) | |
386 | { | |
387 | assert(job); | |
388 | if (!job->user_paused || job->pause_count <= 0) { | |
389 | error_setg(errp, "Can't resume a job that was not paused"); | |
390 | return; | |
391 | } | |
392 | if (job_apply_verb(job, JOB_VERB_RESUME, errp)) { | |
393 | return; | |
394 | } | |
395 | if (job->driver->user_resume) { | |
396 | job->driver->user_resume(job); | |
397 | } | |
398 | job->user_paused = false; | |
399 | job_resume(job); | |
400 | } | |
401 | ||
402 | ||
1908a559 KW |
/* Bookkeeping for one job_defer_to_main_loop() request; freed by the BH. */
typedef struct {
    Job *job;                  /* job that requested the deferral */
    JobDeferToMainLoopFn *fn;  /* callback to run in the main loop */
    void *opaque;              /* caller's opaque, passed through to @fn */
} JobDeferToMainLoopData;
408 | ||
/* Bottom half run in the main loop: invoke the deferred callback with the
 * job's AioContext acquired, then free the request. */
static void job_defer_to_main_loop_bh(void *opaque)
{
    JobDeferToMainLoopData *data = opaque;
    Job *job = data->job;
    AioContext *aio_context = job->aio_context;

    /* Hold the job's context so the callback can safely touch job state. */
    aio_context_acquire(aio_context);
    data->fn(data->job, data->opaque);
    aio_context_release(aio_context);

    g_free(data);
}
421 | ||
422 | void job_defer_to_main_loop(Job *job, JobDeferToMainLoopFn *fn, void *opaque) | |
423 | { | |
424 | JobDeferToMainLoopData *data = g_malloc(sizeof(*data)); | |
425 | data->job = job; | |
426 | data->fn = fn; | |
427 | data->opaque = opaque; | |
428 | job->deferred_to_main_loop = true; | |
429 | ||
430 | aio_bh_schedule_oneshot(qemu_get_aio_context(), | |
431 | job_defer_to_main_loop_bh, data); | |
432 | } |