/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/aio-wait.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/trace.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qerror.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"

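/* Return true if @job is one of the job types built on BlockJob. */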
static bool is_block_job(Job *job)
{
    return job_type(job) == JOB_TYPE_BACKUP ||
           job_type(job) == JOB_TYPE_COMMIT ||
           job_type(job) == JOB_TYPE_MIRROR ||
           job_type(job) == JOB_TYPE_STREAM;
}

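/*
 * Return the next block job after @bjob in the global job list, or the first
 * one if @bjob is NULL. Non-block jobs are skipped. Called with the job mutex
 * held.
 */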
BlockJob *block_job_next_locked(BlockJob *bjob)
{
    Job *job = bjob ? &bjob->job : NULL;
    GLOBAL_STATE_CODE();

    do {
        job = job_next_locked(job);
    } while (job && !is_block_job(job));

    return job ? container_of(job, BlockJob, job) : NULL;
}

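/*
 * Return the block job identified by @id, or NULL if no such block job
 * exists. Called with the job mutex held.
 */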
BlockJob *block_job_get_locked(const char *id)
{
    Job *job = job_get_locked(id);
    GLOBAL_STATE_CODE();

    if (job && is_block_job(job)) {
        return container_of(job, BlockJob, job);
    } else {
        return NULL;
    }
}

BlockJob *block_job_get(const char *id)
{
    JOB_LOCK_GUARD();
    return block_job_get_locked(id);
}

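/*
 * JobDriver .free callback for block jobs: detach all nodes and release the
 * resources owned by the BlockJob part of @job.
 */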
void block_job_free(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    GLOBAL_STATE_CODE();

    block_job_remove_all_bdrv(bjob);
    ratelimit_destroy(&bjob->limit);
    error_free(bjob->blocker);
}

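/*
 * BdrvChildClass callbacks for the edges between a block job and the nodes
 * it uses: the job (stored in c->opaque) acts as the parent of each node.
 */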
static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'", job_type_str(&job->job), job->job.id);
}

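/* While a child node is drained, pause the job so it submits no new I/O. */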
static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    job_pause(&job->job);
}

static bool child_job_drained_poll(BdrvChild *c)
{
    BlockJob *bjob = c->opaque;
    Job *job = &bjob->job;
    const BlockJobDriver *drv = block_job_driver(bjob);

    /* An inactive or completed job doesn't have any pending requests. Jobs
     * with !job->busy are either already paused or have a pause point after
     * being reentered, so no job driver code will run before they pause. */
    WITH_JOB_LOCK_GUARD() {
        if (!job->busy || job_is_completed_locked(job)) {
            return false;
        }
    }

    /* Otherwise, assume that it isn't fully stopped yet, but allow the job to
     * override this assumption. */
    if (drv->drained_poll) {
        return drv->drained_poll(bjob);
    } else {
        return true;
    }
}

static void child_job_drained_end(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    job_resume(&job->job);
}

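/*
 * Moving a block job to a different AioContext is handled as a transaction:
 * the switch is prepared for every node the job uses, and the commit action
 * below only runs once all of them have agreed to move.
 */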
typedef struct BdrvStateChildJobContext {
    AioContext *new_ctx;
    BlockJob *job;
} BdrvStateChildJobContext;

static void child_job_set_aio_ctx_commit(void *opaque)
{
    BdrvStateChildJobContext *s = opaque;
    BlockJob *job = s->job;

    job_set_aio_context(&job->job, s->new_ctx);
}

static TransactionActionDrv change_child_job_context = {
    .commit = child_job_set_aio_ctx_commit,
    .clean = g_free,
};

static bool child_job_change_aio_ctx(BdrvChild *c, AioContext *ctx,
                                     GHashTable *visited, Transaction *tran,
                                     Error **errp)
{
    BlockJob *job = c->opaque;
    BdrvStateChildJobContext *s;
    GSList *l;

    for (l = job->nodes; l; l = l->next) {
        BdrvChild *sibling = l->data;
        if (!bdrv_child_change_aio_context(sibling, ctx, visited,
                                           tran, errp)) {
            return false;
        }
    }

    s = g_new(BdrvStateChildJobContext, 1);
    *s = (BdrvStateChildJobContext) {
        .new_ctx = ctx,
        .job = job,
    };

    tran_add(tran, &change_child_job_context, s);
    return true;
}

static AioContext *child_job_get_parent_aio_context(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    IO_CODE();
    JOB_LOCK_GUARD();

    return job->job.aio_context;
}

static const BdrvChildClass child_job = {
    .get_parent_desc = child_job_get_parent_desc,
    .drained_begin = child_job_drained_begin,
    .drained_poll = child_job_drained_poll,
    .drained_end = child_job_drained_end,
    .change_aio_ctx = child_job_change_aio_ctx,
    .stay_at_node = true,
    .get_parent_aio_context = child_job_get_parent_aio_context,
};

void block_job_remove_all_bdrv(BlockJob *job)
{
    GLOBAL_STATE_CODE();
    /*
     * bdrv_root_unref_child() may reach child_job_change_aio_ctx(),
     * which will also traverse job->nodes, so consume the list one by
     * one to make sure that such a concurrent access does not attempt
     * to process an already freed BdrvChild.
     */
    while (job->nodes) {
        GSList *l = job->nodes;
        BdrvChild *c = l->data;

        job->nodes = l->next;

        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);

        g_slist_free_1(l);
    }
}

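/* Return true if @job holds a BdrvChild reference to @bs. */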
bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs)
{
    GSList *el;
    GLOBAL_STATE_CODE();

    for (el = job->nodes; el; el = el->next) {
        BdrvChild *c = el->data;
        if (c->bs == bs) {
            return true;
        }
    }

    return false;
}

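/*
 * Attach @bs to @job with the requested permissions: take a reference and a
 * new BdrvChild on the node, and block external operations on it through
 * job->blocker. Returns 0 on success, -EPERM if the permissions were refused.
 */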
int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;
    bool need_context_ops;
    GLOBAL_STATE_CODE();

    bdrv_ref(bs);

    need_context_ops = bdrv_get_aio_context(bs) != job->job.aio_context;

    if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) {
        aio_context_release(job->job.aio_context);
    }
    c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job,
                               errp);
    if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) {
        aio_context_acquire(job->job.aio_context);
    }
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}

/* Called with job_mutex lock held. */
static void block_job_on_idle_locked(Notifier *n, void *opaque)
{
    aio_wait_kick();
}

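/* Internal jobs have no ID and are not exposed via QMP queries or events. */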
bool block_job_is_internal(BlockJob *job)
{
    return (job->job.id == NULL);
}

const BlockJobDriver *block_job_driver(BlockJob *job)
{
    return container_of(job->job.driver, BlockJobDriver, job_driver);
}

/* Assumes the job_mutex is held */
static bool job_timer_pending(Job *job)
{
    return timer_pending(&job->sleep_timer);
}

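/*
 * Set the rate limit of @job to @speed bytes per second; 0 means no limit.
 * Called with the job mutex held.
 */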
bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp)
{
    const BlockJobDriver *drv = block_job_driver(job);
    int64_t old_speed = job->speed;

    GLOBAL_STATE_CODE();

    if (job_apply_verb_locked(&job->job, JOB_VERB_SET_SPEED, errp) < 0) {
        return false;
    }
    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "speed",
                   "a non-negative value");
        return false;
    }

    ratelimit_set_speed(&job->limit, speed, BLOCK_JOB_SLICE_TIME);

    job->speed = speed;

    if (drv->set_speed) {
        job_unlock();
        drv->set_speed(job, speed);
        job_lock();
    }

    if (speed && speed <= old_speed) {
        return true;
    }

    /* kick only if a timer is pending */
    job_enter_cond_locked(&job->job, job_timer_pending);

    return true;
}

static bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    JOB_LOCK_GUARD();
    return block_job_set_speed_locked(job, speed, errp);
}

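/* Charge @n processed bytes against the job's rate limit. */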
void block_job_ratelimit_processed_bytes(BlockJob *job, uint64_t n)
{
    IO_CODE();
    ratelimit_calculate_delay(&job->limit, n);
}

void block_job_ratelimit_sleep(BlockJob *job)
{
    uint64_t delay_ns;

    /*
     * Sleep at least once. If the job is reentered early, keep waiting until
     * we've waited for the full time that is necessary to keep the job at the
     * right speed.
     *
     * Make sure to recalculate the delay after each (possibly interrupted)
     * sleep because the speed can change while the job has yielded.
     */
    do {
        delay_ns = ratelimit_calculate_delay(&job->limit, 0);
        job_sleep_ns(&job->job, delay_ns);
    } while (delay_ns && !job_is_cancelled(&job->job));
}

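/*
 * Build a BlockJobInfo describing @job, as reported by query-block-jobs.
 * Internal jobs are not user-visible, so querying them fails. Called with
 * the job mutex held.
 */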
BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;
    uint64_t progress_current, progress_total;

    GLOBAL_STATE_CODE();

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    info = g_new0(BlockJobInfo, 1);
    info->type = g_strdup(job_type_str(&job->job));
    info->device = g_strdup(job->job.id);
    info->busy = job->job.busy;
    info->paused = job->job.pause_count > 0;
    info->offset = progress_current;
    info->len = progress_total;
    info->speed = job->speed;
    info->io_status = job->iostatus;
    info->ready = job_is_ready_locked(&job->job);
    info->status = job->job.status;
    info->auto_finalize = job->job.auto_finalize;
    info->auto_dismiss = job->job.auto_dismiss;
    if (job->job.ret) {
        info->error = job->job.err ?
                      g_strdup(error_get_pretty(job->job.err)) :
                      g_strdup(strerror(-job->job.ret));
    }
    return info;
}

/* Called with job lock held */
static void block_job_iostatus_set_err_locked(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

/* Called with job_mutex lock held. */
static void block_job_event_cancelled_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    uint64_t progress_current, progress_total;

    if (block_job_is_internal(job)) {
        return;
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    qapi_event_send_block_job_cancelled(job_type(&job->job),
                                        job->job.id,
                                        progress_total,
                                        progress_current,
                                        job->speed);
}

/* Called with job_mutex lock held. */
static void block_job_event_completed_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    const char *msg = NULL;
    uint64_t progress_current, progress_total;

    if (block_job_is_internal(job)) {
        return;
    }

    if (job->job.ret < 0) {
        msg = error_get_pretty(job->job.err);
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    qapi_event_send_block_job_completed(job_type(&job->job),
                                        job->job.id,
                                        progress_total,
                                        progress_current,
                                        job->speed,
                                        msg);
}

/* Called with job_mutex lock held. */
static void block_job_event_pending_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_pending(job_type(&job->job),
                                      job->job.id);
}

/* Called with job_mutex lock held. */
static void block_job_event_ready_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    uint64_t progress_current, progress_total;

    if (block_job_is_internal(job)) {
        return;
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    qapi_event_send_block_job_ready(job_type(&job->job),
                                    job->job.id,
                                    progress_total,
                                    progress_current,
                                    job->speed);
}

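/*
 * Create a new block job of @driver's type, operating on @bs with the given
 * permissions. The job is created in the CREATED state; callers typically
 * start it afterwards with job_start(). Returns NULL and sets @errp on
 * failure.
 *
 * A minimal usage sketch (MyJob and my_job_driver are hypothetical names for
 * a driver-specific job struct and its BlockJobDriver):
 *
 *     MyJob *s = block_job_create("job0", &my_job_driver, NULL, bs,
 *                                 BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL,
 *                                 0, JOB_DEFAULT, NULL, NULL, errp);
 *     if (s) {
 *         job_start(&s->job);
 *     }
 */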
void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       JobTxn *txn, BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockJob *job;
    int ret;
    GLOBAL_STATE_CODE();

    if (job_id == NULL && !(flags & JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
    }

    job = job_create(job_id, &driver->job_driver, txn, bdrv_get_aio_context(bs),
                     flags, cb, opaque, errp);
    if (job == NULL) {
        return NULL;
    }

    assert(is_block_job(&job->job));
    assert(job->job.driver->free == &block_job_free);
    assert(job->job.driver->user_resume == &block_job_user_resume);

    ratelimit_init(&job->limit);

    job->finalize_cancelled_notifier.notify = block_job_event_cancelled_locked;
    job->finalize_completed_notifier.notify = block_job_event_completed_locked;
    job->pending_notifier.notify = block_job_event_pending_locked;
    job->ready_notifier.notify = block_job_event_ready_locked;
    job->idle_notifier.notify = block_job_on_idle_locked;

    WITH_JOB_LOCK_GUARD() {
        notifier_list_add(&job->job.on_finalize_cancelled,
                          &job->finalize_cancelled_notifier);
        notifier_list_add(&job->job.on_finalize_completed,
                          &job->finalize_completed_notifier);
        notifier_list_add(&job->job.on_pending, &job->pending_notifier);
        notifier_list_add(&job->job.on_ready, &job->ready_notifier);
        notifier_list_add(&job->job.on_idle, &job->idle_notifier);
    }

    error_setg(&job->blocker, "block device is in use by block job: %s",
               job_type_str(&job->job));

    ret = block_job_add_bdrv(job, "main node", bs, perm, shared_perm, errp);
    if (ret < 0) {
        goto fail;
    }

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    if (!block_job_set_speed(job, speed, errp)) {
        goto fail;
    }

    return job;

fail:
    job_early_fail(&job->job);
    return NULL;
}

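/*
 * Clear the error iostatus of @job. A job with a non-OK iostatus must be
 * user-paused, so this is only meaningful right before the user resumes it.
 * Called with the job mutex held.
 */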
void block_job_iostatus_reset_locked(BlockJob *job)
{
    GLOBAL_STATE_CODE();
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->job.user_paused && job->job.pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

static void block_job_iostatus_reset(BlockJob *job)
{
    JOB_LOCK_GUARD();
    block_job_iostatus_reset_locked(job);
}

void block_job_user_resume(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    GLOBAL_STATE_CODE();
    block_job_iostatus_reset(bjob);
}

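/*
 * Map the @on_err policy and @error value to the action to take, emit a
 * BLOCK_JOB_ERROR event for user-visible jobs, and, if the action is STOP,
 * put the job into a user-visible pause and record the error in its iostatus.
 */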
BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;
    IO_CODE();

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->job.id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        WITH_JOB_LOCK_GUARD() {
            if (!job->job.user_paused) {
                job_pause_locked(&job->job);
                /*
                 * Make the pause user visible, so that it can be
                 * resumed from QMP.
                 */
                job->job.user_paused = true;
            }
            block_job_iostatus_set_err_locked(job, error);
        }
    }
    return action;
}

AioContext *block_job_get_aio_context(BlockJob *job)
{
    GLOBAL_STATE_CODE();
    return job->job.aio_context;
}