/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qjson.h"
#include "qemu/coroutine.h"
#include "qemu/id.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"

static void block_job_event_cancelled(BlockJob *job);
static void block_job_event_completed(BlockJob *job, const char *msg);

/* Transactional group of block jobs */
struct BlockJobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    /* List of jobs */
    QLIST_HEAD(, BlockJob) jobs;

    /* Reference count */
    int refcnt;
};

static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);

static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'",
                           BlockJobType_lookup[job->driver->job_type],
                           job->id);
}

static const BdrvChildRole child_job = {
    .get_parent_desc = child_job_get_parent_desc,
    .stay_at_node    = true,
};

static void block_job_drained_begin(void *opaque)
{
    BlockJob *job = opaque;
    block_job_pause(job);
}

static void block_job_drained_end(void *opaque)
{
    BlockJob *job = opaque;
    block_job_resume(job);
}

static const BlockDevOps block_job_dev_ops = {
    .drained_begin = block_job_drained_begin,
    .drained_end   = block_job_drained_end,
};

BlockJob *block_job_next(BlockJob *job)
{
    if (!job) {
        return QLIST_FIRST(&block_jobs);
    }
    return QLIST_NEXT(job, job_list);
}
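
/*
 * A minimal sketch of how a caller enumerates all jobs: seed the iteration
 * with NULL to get the head of the list (this is the pattern the QMP query
 * code uses):
 *
 *     BlockJob *job;
 *     for (job = block_job_next(NULL); job; job = block_job_next(job)) {
 *         ... inspect job ...
 *     }
 */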

BlockJob *block_job_get(const char *id)
{
    BlockJob *job;

    QLIST_FOREACH(job, &block_jobs, job_list) {
        if (job->id && !strcmp(id, job->id)) {
            return job;
        }
    }

    return NULL;
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;

    if (job->driver->attached_aio_context) {
        job->driver->attached_aio_context(job, new_context);
    }

    block_job_resume(job);
}

static void block_job_drain(BlockJob *job)
{
    /* If the job is not busy, this kicks it into its next pause point. */
    block_job_enter(job);

    blk_drain(job->blk);
    if (job->driver->drain) {
        job->driver->drain(job);
    }
}

static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    block_job_ref(job);

    block_job_pause(job);

    while (!job->paused && !job->completed) {
        block_job_drain(job);
    }

    block_job_unref(job);
}

void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;
    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}

int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
                               job, errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_ref(bs);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}

void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
        if (!*job_id) {
            error_setg(errp, "An explicit job ID is required for this node");
            return NULL;
        }
    }

    if (job_id) {
        if (flags & BLOCK_JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal block job");
            return NULL;
        }

        if (!id_wellformed(job_id)) {
            error_setg(errp, "Invalid job ID '%s'", job_id);
            return NULL;
        }

        if (block_job_get(job_id)) {
            error_setg(errp, "Job ID '%s' already in use", job_id);
            return NULL;
        }
    }

    blk = blk_new(perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = g_malloc0(driver->instance_size);
    job->driver      = driver;
    job->id          = g_strdup(job_id);
    job->blk         = blk;
    job->cb          = cb;
    job->opaque      = opaque;
    job->busy        = false;
    job->paused      = true;
    job->pause_count = 1;
    job->refcnt      = 1;

    error_setg(&job->blocker, "block device is in use by block job: %s",
               BlockJobType_lookup[driver->job_type]);
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    blk_set_dev_ops(blk, &block_job_dev_ops, job);
    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    QLIST_INSERT_HEAD(&block_jobs, job, job_list);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            block_job_unref(job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }
    return job;
}
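
/*
 * Minimal usage sketch (hypothetical driver, for illustration only): a
 * concrete job type wraps block_job_create() in its own create function,
 * then kicks off the coroutine with block_job_start() (defined below):
 *
 *     MyJob *s = block_job_create(job_id, &my_job_driver, bs,
 *                                 BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL,
 *                                 speed, BLOCK_JOB_DEFAULT, cb, opaque, errp);
 *     if (s) {
 *         block_job_start(&s->common);
 *     }
 *
 * "MyJob", "my_job_driver" and the permission choice are placeholders; a
 * real driver requests exactly the permissions its I/O needs.
 */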

bool block_job_is_internal(BlockJob *job)
{
    return (job->id == NULL);
}

static bool block_job_started(BlockJob *job)
{
    return job->co;
}

/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn block_job_co_entry(void *opaque)
{
    BlockJob *job = opaque;

    assert(job && job->driver && job->driver->start);
    block_job_pause_point(job);
    job->driver->start(job);
}

void block_job_start(BlockJob *job)
{
    assert(job && !block_job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(block_job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    qemu_coroutine_enter(job->co);
}

void block_job_ref(BlockJob *job)
{
    ++job->refcnt;
}

void block_job_unref(BlockJob *job)
{
    if (--job->refcnt == 0) {
        BlockDriverState *bs = blk_bs(job->blk);
        bs->job = NULL;
        block_job_remove_all_bdrv(job);
        blk_remove_aio_context_notifier(job->blk,
                                        block_job_attached_aio_context,
                                        block_job_detach_aio_context, job);
        blk_unref(job->blk);
        error_free(job->blocker);
        g_free(job->id);
        QLIST_REMOVE(job, job_list);
        g_free(job);
    }
}
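
/*
 * Reference counting sketch: any code that can run while the job might
 * complete and be freed underneath it (e.g. around aio_poll()) brackets
 * that window with a reference, as block_job_detach_aio_context() above
 * does:
 *
 *     block_job_ref(job);
 *     ... code that may indirectly complete and free the job ...
 *     block_job_unref(job);
 */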

static void block_job_completed_single(BlockJob *job)
{
    if (!job->ret) {
        if (job->driver->commit) {
            job->driver->commit(job);
        }
    } else {
        if (job->driver->abort) {
            job->driver->abort(job);
        }
    }
    if (job->driver->clean) {
        job->driver->clean(job);
    }

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (block_job_started(job)) {
        if (block_job_is_cancelled(job)) {
            block_job_event_cancelled(job);
        } else {
            const char *msg = NULL;
            if (job->ret < 0) {
                msg = strerror(-job->ret);
            }
            block_job_event_completed(job, msg);
        }
    }

    if (job->txn) {
        QLIST_REMOVE(job, txn_list);
        block_job_txn_unref(job->txn);
    }
    block_job_unref(job);
}

static void block_job_completed_txn_abort(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    /* We are the first failed job. Cancel other jobs. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
    }
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job == job || other_job->completed) {
            /* Other jobs are "effectively" cancelled by us, set the status for
             * them; this job, however, may or may not be cancelled, depending
             * on the caller, so leave it. */
            if (other_job != job) {
                other_job->cancelled = true;
            }
            continue;
        }
        block_job_cancel_sync(other_job);
        assert(other_job->completed);
    }
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}

static void block_job_completed_txn_success(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job, *next;
    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!other_job->completed) {
            return;
        }
    }
    /* We are the last completed job, commit the transaction. */
    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
        assert(other_job->ret == 0);
        block_job_completed_single(other_job);
        aio_context_release(ctx);
    }
}

void block_job_completed(BlockJob *job, int ret)
{
    assert(blk_bs(job->blk)->job == job);
    assert(!job->completed);
    job->completed = true;
    job->ret = ret;
    if (!job->txn) {
        block_job_completed_single(job);
    } else if (ret < 0 || block_job_is_cancelled(job)) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_completed_txn_success(job);
    }
}

void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    Error *local_err = NULL;

    if (!job->driver->set_speed) {
        error_setg(errp, QERR_UNSUPPORTED);
        return;
    }
    job->driver->set_speed(job, speed, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    job->speed = speed;
}

void block_job_complete(BlockJob *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->id);
    if (job->pause_count || job->cancelled ||
        !block_job_started(job) || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    job->driver->complete(job, errp);
}

void block_job_pause(BlockJob *job)
{
    job->pause_count++;
}

void block_job_user_pause(BlockJob *job)
{
    job->user_paused = true;
    block_job_pause(job);
}

static bool block_job_should_pause(BlockJob *job)
{
    return job->pause_count > 0;
}

bool block_job_user_paused(BlockJob *job)
{
    return job ? job->user_paused : false;
}

void coroutine_fn block_job_pause_point(BlockJob *job)
{
    assert(job && block_job_started(job));

    if (!block_job_should_pause(job)) {
        return;
    }
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
        job->paused = true;
        job->busy = false;
        qemu_coroutine_yield(); /* wait for block_job_resume() */
        job->busy = true;
        job->paused = false;
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}

void block_job_resume(BlockJob *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }
    block_job_enter(job);
}
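
/*
 * Pausing nests: every block_job_pause() must be balanced by exactly one
 * block_job_resume(), and the job only runs again once the count drops to
 * zero. Sketch of a caller quiescing a job around some operation:
 *
 *     block_job_pause(job);
 *     ... the job will stop at its next pause point ...
 *     block_job_resume(job);
 */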

void block_job_user_resume(BlockJob *job)
{
    if (job && job->user_paused && job->pause_count > 0) {
        job->user_paused = false;
        block_job_resume(job);
    }
}

void block_job_enter(BlockJob *job)
{
    if (job->co && !job->busy) {
        qemu_coroutine_enter(job->co);
    }
}

void block_job_cancel(BlockJob *job)
{
    if (block_job_started(job)) {
        job->cancelled = true;
        block_job_iostatus_reset(job);
        block_job_enter(job);
    } else {
        block_job_completed(job, -ECANCELED);
    }
}

bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}

void block_job_iostatus_reset(BlockJob *job)
{
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
    if (job->driver->iostatus_reset) {
        job->driver->iostatus_reset(job);
    }
}

static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    Error *local_err = NULL;
    int ret;

    assert(blk_bs(job->blk)->job == job);

    block_job_ref(job);

    finish(job, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        block_job_unref(job);
        return -EBUSY;
    }
    /* block_job_drain calls block_job_enter, and it should be enough to
     * induce progress until the job completes or moves to the main thread.
     */
    while (!job->deferred_to_main_loop && !job->completed) {
        block_job_drain(job);
    }
    while (!job->completed) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
    block_job_unref(job);
    return ret;
}

/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
 * used with block_job_finish_sync() without the need for (rather nasty)
 * function pointer casts there. */
static void block_job_cancel_err(BlockJob *job, Error **errp)
{
    block_job_cancel(job);
}

int block_job_cancel_sync(BlockJob *job)
{
    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
}

void block_job_cancel_sync_all(void)
{
    BlockJob *job;
    AioContext *aio_context;

    while ((job = QLIST_FIRST(&block_jobs))) {
        aio_context = blk_get_aio_context(job->blk);
        aio_context_acquire(aio_context);
        block_job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}

int block_job_complete_sync(BlockJob *job, Error **errp)
{
    return block_job_finish_sync(job, &block_job_complete, errp);
}

void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;
    if (!block_job_should_pause(job)) {
        co_aio_sleep_ns(blk_get_aio_context(job->blk), type, ns);
    }
    job->busy = true;

    block_job_pause_point(job);
}
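
/*
 * Typical use (sketch of a hypothetical driver's start coroutine): the main
 * loop sleeps between chunks of work, which both honours the rate limit and
 * gives pause/cancel requests a chance to take effect at the pause point:
 *
 *     for (;;) {
 *         block_job_sleep_ns(job, QEMU_CLOCK_REALTIME, delay_ns);
 *         if (block_job_is_cancelled(job)) {
 *             break;
 *         }
 *         ... copy one chunk, update job->offset, recompute delay_ns ...
 *     }
 *
 * "delay_ns" is a placeholder for whatever the driver's rate limiter yields.
 */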

void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;
    if (!block_job_should_pause(job)) {
        qemu_coroutine_yield();
    }
    job->busy = true;

    block_job_pause_point(job);
}

BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type      = g_strdup(BlockJobType_lookup[job->driver->job_type]);
    info->device    = g_strdup(job->id);
    info->len       = job->len;
    info->busy      = job->busy;
    info->paused    = job->pause_count > 0;
    info->offset    = job->offset;
    info->speed     = job->speed;
    info->io_status = job->iostatus;
    info->ready     = job->ready;
    return info;
}

static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

static void block_job_event_cancelled(BlockJob *job)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        &error_abort);
}

static void block_job_event_completed(BlockJob *job, const char *msg)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_completed(job->driver->job_type,
                                        job->id,
                                        job->len,
                                        job->offset,
                                        job->speed,
                                        !!msg,
                                        msg,
                                        &error_abort);
}

void block_job_event_ready(BlockJob *job)
{
    job->ready = true;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job->driver->job_type,
                                    job->id,
                                    job->len,
                                    job->offset,
                                    job->speed, &error_abort);
}

BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action, &error_abort);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* Make the pause user-visible; it can then be resumed from QMP. */
        block_job_user_pause(job);
        block_job_iostatus_set_err(job, error);
    }
    return action;
}
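
/*
 * Sketch of how a driver's I/O error path typically consumes this
 * (hypothetical code; the "error" argument is a positive errno value):
 *
 *     BlockErrorAction action =
 *         block_job_error_action(&s->common, s->on_error, is_read, -ret);
 *     if (action == BLOCK_ERROR_ACTION_REPORT) {
 *         ... fail the job with ret ...
 *     } else {
 *         ... retry later; BLOCK_ERROR_ACTION_STOP has already paused us ...
 *     }
 */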

typedef struct {
    BlockJob *job;
    AioContext *aio_context;
    BlockJobDeferToMainLoopFn *fn;
    void *opaque;
} BlockJobDeferToMainLoopData;

static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;
    AioContext *aio_context;

    /* Prevent race with block_job_defer_to_main_loop() */
    aio_context_acquire(data->aio_context);

    /* Fetch BDS AioContext again, in case it has changed */
    aio_context = blk_get_aio_context(data->job->blk);
    if (aio_context != data->aio_context) {
        aio_context_acquire(aio_context);
    }

    data->job->deferred_to_main_loop = false;
    data->fn(data->job, data->opaque);

    if (aio_context != data->aio_context) {
        aio_context_release(aio_context);
    }

    aio_context_release(data->aio_context);

    g_free(data);
}

void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->aio_context = blk_get_aio_context(job->blk);
    data->fn = fn;
    data->opaque = opaque;
    job->deferred_to_main_loop = true;

    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            block_job_defer_to_main_loop_bh, data);
}
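
/*
 * Usage sketch: a job's coroutine ends by deferring its completion to the
 * main loop, where it is safe to call block_job_completed(). Hypothetical
 * names, following the pattern of the in-tree jobs; "data" is a
 * heap-allocated struct carrying the job's return code:
 *
 *     static void my_job_complete(BlockJob *job, void *opaque)
 *     {
 *         MyJobData *data = opaque;
 *         block_job_completed(job, data->ret);
 *         g_free(data);
 *     }
 *
 *     ... at the end of the driver's start coroutine ...
 *     block_job_defer_to_main_loop(&s->common, my_job_complete, data);
 */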

BlockJobTxn *block_job_txn_new(void)
{
    BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}

static void block_job_txn_ref(BlockJobTxn *txn)
{
    txn->refcnt++;
}

void block_job_txn_unref(BlockJobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}

void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
{
    if (!txn) {
        return;
    }

    assert(!job->txn);
    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    block_job_txn_ref(txn);
}
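
/*
 * Transaction sketch: a caller groups jobs so that they complete or abort
 * together, then drops its own reference once all jobs are added:
 *
 *     BlockJobTxn *txn = block_job_txn_new();
 *     block_job_txn_add_job(txn, job1);
 *     block_job_txn_add_job(txn, job2);
 *     block_job_txn_unref(txn);
 *
 * Each job holds a reference (taken in block_job_txn_add_job() and released
 * in block_job_completed_single()), so the txn stays alive until the last
 * job in it has completed.
 */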