blockdev.c
1 /*
2 * QEMU host block devices
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2 or
7 * later. See the COPYING file in the top-level directory.
8 *
9 * This file incorporates work covered by the following copyright and
10 * permission notice:
11 *
12 * Copyright (c) 2003-2008 Fabrice Bellard
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this software and associated documentation files (the "Software"), to deal
16 * in the Software without restriction, including without limitation the rights
17 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
18 * copies of the Software, and to permit persons to whom the Software is
19 * furnished to do so, subject to the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
27 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
29 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
30 * THE SOFTWARE.
31 */
32
33 #include "qemu/osdep.h"
34 #include "sysemu/block-backend.h"
35 #include "sysemu/blockdev.h"
36 #include "hw/block/block.h"
37 #include "block/blockjob.h"
38 #include "block/throttle-groups.h"
39 #include "monitor/monitor.h"
40 #include "qemu/error-report.h"
41 #include "qemu/option.h"
42 #include "qemu/config-file.h"
43 #include "qapi/qmp/types.h"
44 #include "qapi-visit.h"
45 #include "qapi/qmp/qerror.h"
46 #include "qapi/qmp-output-visitor.h"
47 #include "qapi/util.h"
48 #include "sysemu/sysemu.h"
49 #include "block/block_int.h"
50 #include "qmp-commands.h"
51 #include "trace.h"
52 #include "sysemu/arch_init.h"
53 #include "qemu/cutils.h"
54 #include "qemu/help_option.h"
55
56 static QTAILQ_HEAD(, BlockDriverState) monitor_bdrv_states =
57 QTAILQ_HEAD_INITIALIZER(monitor_bdrv_states);
58
59 static const char *const if_name[IF_COUNT] = {
60 [IF_NONE] = "none",
61 [IF_IDE] = "ide",
62 [IF_SCSI] = "scsi",
63 [IF_FLOPPY] = "floppy",
64 [IF_PFLASH] = "pflash",
65 [IF_MTD] = "mtd",
66 [IF_SD] = "sd",
67 [IF_VIRTIO] = "virtio",
68 [IF_XEN] = "xen",
69 };
70
71 static int if_max_devs[IF_COUNT] = {
72 /*
73 * Do not change these numbers! They govern how drive option
74 * index maps to unit and bus. That mapping is ABI.
75 *
76 * All controllers used to implement if=T drives need to support
77 * if_max_devs[T] units, for any T with if_max_devs[T] != 0.
78 * Otherwise, some index values map to "impossible" bus, unit
79 * values.
80 *
81 * For instance, if you change [IF_SCSI] to 255, -drive
82 * if=scsi,index=12 no longer means bus=1,unit=5, but
83 * bus=0,unit=12. With an lsi53c895a controller (7 units max),
84 * the drive can't be set up. Regression.
85 */
86 [IF_IDE] = 2,
87 [IF_SCSI] = 7,
88 };
89
90 /**
91 * Boards may call this to offer board-by-board overrides
92 * of the default, global values.
93 */
94 void override_max_devs(BlockInterfaceType type, int max_devs)
95 {
96 BlockBackend *blk;
97 DriveInfo *dinfo;
98
99 if (max_devs <= 0) {
100 return;
101 }
102
103 for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
104 dinfo = blk_legacy_dinfo(blk);
105 if (dinfo->type == type) {
106 fprintf(stderr, "Cannot override units-per-bus property of"
107 " the %s interface, because a drive of that type has"
108 " already been added.\n", if_name[type]);
109 g_assert_not_reached();
110 }
111 }
112
113 if_max_devs[type] = max_devs;
114 }
115
116 /*
117 * We automatically delete the drive when a device using it gets
118 * unplugged. Questionable feature, but we can't just drop it.
119 * Device models call blockdev_mark_auto_del() to schedule the
120 * automatic deletion, and generic qdev code calls blockdev_auto_del()
121 * when deletion is actually safe.
122 */
123 void blockdev_mark_auto_del(BlockBackend *blk)
124 {
125 DriveInfo *dinfo = blk_legacy_dinfo(blk);
126 BlockDriverState *bs = blk_bs(blk);
127 AioContext *aio_context;
128
129 if (!dinfo) {
130 return;
131 }
132
133 if (bs) {
134 aio_context = bdrv_get_aio_context(bs);
135 aio_context_acquire(aio_context);
136
137 if (bs->job) {
138 block_job_cancel(bs->job);
139 }
140
141 aio_context_release(aio_context);
142 }
143
144 dinfo->auto_del = 1;
145 }
146
147 void blockdev_auto_del(BlockBackend *blk)
148 {
149 DriveInfo *dinfo = blk_legacy_dinfo(blk);
150
151 if (dinfo && dinfo->auto_del) {
152 monitor_remove_blk(blk);
153 blk_unref(blk);
154 }
155 }
156
157 /**
158 * Returns the current mapping of how many units per bus
159 * a particular interface can support.
160 *
161 * A positive integer indicates n units per bus.
162 * 0 implies the mapping has not been established.
163 * -1 indicates an invalid BlockInterfaceType was given.
164 */
165 int drive_get_max_devs(BlockInterfaceType type)
166 {
167 if (type >= IF_IDE && type < IF_COUNT) {
168 return if_max_devs[type];
169 }
170
171 return -1;
172 }
173
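/* Split a -drive "index" into (bus, unit) according to if_max_devs[]:
 * bus = index / max_devs and unit = index % max_devs.  When max_devs is 0
 * (no per-bus limit), everything stays on bus 0 and unit == index. */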
174 static int drive_index_to_bus_id(BlockInterfaceType type, int index)
175 {
176 int max_devs = if_max_devs[type];
177 return max_devs ? index / max_devs : 0;
178 }
179
180 static int drive_index_to_unit_id(BlockInterfaceType type, int index)
181 {
182 int max_devs = if_max_devs[type];
183 return max_devs ? index % max_devs : index;
184 }
185
186 QemuOpts *drive_def(const char *optstr)
187 {
188 return qemu_opts_parse_noisily(qemu_find_opts("drive"), optstr, false);
189 }
190
191 QemuOpts *drive_add(BlockInterfaceType type, int index, const char *file,
192 const char *optstr)
193 {
194 QemuOpts *opts;
195
196 opts = drive_def(optstr);
197 if (!opts) {
198 return NULL;
199 }
200 if (type != IF_DEFAULT) {
201 qemu_opt_set(opts, "if", if_name[type], &error_abort);
202 }
203 if (index >= 0) {
204 qemu_opt_set_number(opts, "index", index, &error_abort);
205 }
206 if (file)
207 qemu_opt_set(opts, "file", file, &error_abort);
208 return opts;
209 }
210
211 DriveInfo *drive_get(BlockInterfaceType type, int bus, int unit)
212 {
213 BlockBackend *blk;
214 DriveInfo *dinfo;
215
216 for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
217 dinfo = blk_legacy_dinfo(blk);
218 if (dinfo && dinfo->type == type
219 && dinfo->bus == bus && dinfo->unit == unit) {
220 return dinfo;
221 }
222 }
223
224 return NULL;
225 }
226
227 bool drive_check_orphaned(void)
228 {
229 BlockBackend *blk;
230 DriveInfo *dinfo;
231 bool rs = false;
232
233 for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
234 dinfo = blk_legacy_dinfo(blk);
235 /* A drive with no attached device (blk_get_attached_dev() is NULL) is orphaned. */
236 /* Unless this is a default drive, this may be an oversight. */
237 if (!blk_get_attached_dev(blk) && !dinfo->is_default &&
238 dinfo->type != IF_NONE) {
239 fprintf(stderr, "Warning: Orphaned drive without device: "
240 "id=%s,file=%s,if=%s,bus=%d,unit=%d\n",
241 blk_name(blk), blk_bs(blk) ? blk_bs(blk)->filename : "",
242 if_name[dinfo->type], dinfo->bus, dinfo->unit);
243 rs = true;
244 }
245 }
246
247 return rs;
248 }
249
250 DriveInfo *drive_get_by_index(BlockInterfaceType type, int index)
251 {
252 return drive_get(type,
253 drive_index_to_bus_id(type, index),
254 drive_index_to_unit_id(type, index));
255 }
256
257 int drive_get_max_bus(BlockInterfaceType type)
258 {
259 int max_bus;
260 BlockBackend *blk;
261 DriveInfo *dinfo;
262
263 max_bus = -1;
264 for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
265 dinfo = blk_legacy_dinfo(blk);
266 if (dinfo && dinfo->type == type && dinfo->bus > max_bus) {
267 max_bus = dinfo->bus;
268 }
269 }
270 return max_bus;
271 }
272
273 /* Get a block device. This should only be used for single-drive devices
274 (e.g. SD/Floppy/MTD). Multi-disk devices (scsi/ide) should use the
275 appropriate bus. */
276 DriveInfo *drive_get_next(BlockInterfaceType type)
277 {
278 static int next_block_unit[IF_COUNT];
279
280 return drive_get(type, 0, next_block_unit[type]++);
281 }
282
283 static void bdrv_format_print(void *opaque, const char *name)
284 {
285 error_printf(" %s", name);
286 }
287
288 typedef struct {
289 QEMUBH *bh;
290 BlockDriverState *bs;
291 } BDRVPutRefBH;
292
293 static int parse_block_error_action(const char *buf, bool is_read, Error **errp)
294 {
295 if (!strcmp(buf, "ignore")) {
296 return BLOCKDEV_ON_ERROR_IGNORE;
297 } else if (!is_read && !strcmp(buf, "enospc")) {
298 return BLOCKDEV_ON_ERROR_ENOSPC;
299 } else if (!strcmp(buf, "stop")) {
300 return BLOCKDEV_ON_ERROR_STOP;
301 } else if (!strcmp(buf, "report")) {
302 return BLOCKDEV_ON_ERROR_REPORT;
303 } else {
304 error_setg(errp, "'%s' invalid %s error action",
305 buf, is_read ? "read" : "write");
306 return -1;
307 }
308 }
309
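/* Parse the stats-intervals list: each element may be given as a string or
 * an integer, must be in the range (0, UINT_MAX], and is registered with
 * block_acct_add_interval().  Returns false and sets @errp on failure. */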
310 static bool parse_stats_intervals(BlockAcctStats *stats, QList *intervals,
311 Error **errp)
312 {
313 const QListEntry *entry;
314 for (entry = qlist_first(intervals); entry; entry = qlist_next(entry)) {
315 switch (qobject_type(entry->value)) {
316
317 case QTYPE_QSTRING: {
318 unsigned long long length;
319 const char *str = qstring_get_str(qobject_to_qstring(entry->value));
320 if (parse_uint_full(str, &length, 10) == 0 &&
321 length > 0 && length <= UINT_MAX) {
322 block_acct_add_interval(stats, (unsigned) length);
323 } else {
324 error_setg(errp, "Invalid interval length: %s", str);
325 return false;
326 }
327 break;
328 }
329
330 case QTYPE_QINT: {
331 int64_t length = qint_get_int(qobject_to_qint(entry->value));
332 if (length > 0 && length <= UINT_MAX) {
333 block_acct_add_interval(stats, (unsigned) length);
334 } else {
335 error_setg(errp, "Invalid interval length: %" PRId64, length);
336 return false;
337 }
338 break;
339 }
340
341 default:
342 error_setg(errp, "The specification of stats-intervals is invalid");
343 return false;
344 }
345 }
346 return true;
347 }
348
349 typedef enum { MEDIA_DISK, MEDIA_CDROM } DriveMediaType;
350
351 /* All parameters but @opts are optional and may be set to NULL. */
352 static void extract_common_blockdev_options(QemuOpts *opts, int *bdrv_flags,
353 const char **throttling_group, ThrottleConfig *throttle_cfg,
354 BlockdevDetectZeroesOptions *detect_zeroes, Error **errp)
355 {
356 const char *discard;
357 Error *local_error = NULL;
358 const char *aio;
359
360 if (bdrv_flags) {
361 if (!qemu_opt_get_bool(opts, "read-only", false)) {
362 *bdrv_flags |= BDRV_O_RDWR;
363 }
364 if (qemu_opt_get_bool(opts, "copy-on-read", false)) {
365 *bdrv_flags |= BDRV_O_COPY_ON_READ;
366 }
367
368 if ((discard = qemu_opt_get(opts, "discard")) != NULL) {
369 if (bdrv_parse_discard_flags(discard, bdrv_flags) != 0) {
370 error_setg(errp, "Invalid discard option");
371 return;
372 }
373 }
374
375 if ((aio = qemu_opt_get(opts, "aio")) != NULL) {
376 if (!strcmp(aio, "native")) {
377 *bdrv_flags |= BDRV_O_NATIVE_AIO;
378 } else if (!strcmp(aio, "threads")) {
379 /* this is the default */
380 } else {
381 error_setg(errp, "invalid aio option");
382 return;
383 }
384 }
385 }
386
387 /* disk I/O throttling */
388 if (throttling_group) {
389 *throttling_group = qemu_opt_get(opts, "throttling.group");
390 }
391
392 if (throttle_cfg) {
393 throttle_config_init(throttle_cfg);
394 throttle_cfg->buckets[THROTTLE_BPS_TOTAL].avg =
395 qemu_opt_get_number(opts, "throttling.bps-total", 0);
396 throttle_cfg->buckets[THROTTLE_BPS_READ].avg =
397 qemu_opt_get_number(opts, "throttling.bps-read", 0);
398 throttle_cfg->buckets[THROTTLE_BPS_WRITE].avg =
399 qemu_opt_get_number(opts, "throttling.bps-write", 0);
400 throttle_cfg->buckets[THROTTLE_OPS_TOTAL].avg =
401 qemu_opt_get_number(opts, "throttling.iops-total", 0);
402 throttle_cfg->buckets[THROTTLE_OPS_READ].avg =
403 qemu_opt_get_number(opts, "throttling.iops-read", 0);
404 throttle_cfg->buckets[THROTTLE_OPS_WRITE].avg =
405 qemu_opt_get_number(opts, "throttling.iops-write", 0);
406
407 throttle_cfg->buckets[THROTTLE_BPS_TOTAL].max =
408 qemu_opt_get_number(opts, "throttling.bps-total-max", 0);
409 throttle_cfg->buckets[THROTTLE_BPS_READ].max =
410 qemu_opt_get_number(opts, "throttling.bps-read-max", 0);
411 throttle_cfg->buckets[THROTTLE_BPS_WRITE].max =
412 qemu_opt_get_number(opts, "throttling.bps-write-max", 0);
413 throttle_cfg->buckets[THROTTLE_OPS_TOTAL].max =
414 qemu_opt_get_number(opts, "throttling.iops-total-max", 0);
415 throttle_cfg->buckets[THROTTLE_OPS_READ].max =
416 qemu_opt_get_number(opts, "throttling.iops-read-max", 0);
417 throttle_cfg->buckets[THROTTLE_OPS_WRITE].max =
418 qemu_opt_get_number(opts, "throttling.iops-write-max", 0);
419
420 throttle_cfg->buckets[THROTTLE_BPS_TOTAL].burst_length =
421 qemu_opt_get_number(opts, "throttling.bps-total-max-length", 1);
422 throttle_cfg->buckets[THROTTLE_BPS_READ].burst_length =
423 qemu_opt_get_number(opts, "throttling.bps-read-max-length", 1);
424 throttle_cfg->buckets[THROTTLE_BPS_WRITE].burst_length =
425 qemu_opt_get_number(opts, "throttling.bps-write-max-length", 1);
426 throttle_cfg->buckets[THROTTLE_OPS_TOTAL].burst_length =
427 qemu_opt_get_number(opts, "throttling.iops-total-max-length", 1);
428 throttle_cfg->buckets[THROTTLE_OPS_READ].burst_length =
429 qemu_opt_get_number(opts, "throttling.iops-read-max-length", 1);
430 throttle_cfg->buckets[THROTTLE_OPS_WRITE].burst_length =
431 qemu_opt_get_number(opts, "throttling.iops-write-max-length", 1);
432
433 throttle_cfg->op_size =
434 qemu_opt_get_number(opts, "throttling.iops-size", 0);
435
436 if (!throttle_is_valid(throttle_cfg, errp)) {
437 return;
438 }
439 }
440
441 if (detect_zeroes) {
442 *detect_zeroes =
443 qapi_enum_parse(BlockdevDetectZeroesOptions_lookup,
444 qemu_opt_get(opts, "detect-zeroes"),
445 BLOCKDEV_DETECT_ZEROES_OPTIONS__MAX,
446 BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF,
447 &local_error);
448 if (local_error) {
449 error_propagate(errp, local_error);
450 return;
451 }
452
453 if (bdrv_flags &&
454 *detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP &&
455 !(*bdrv_flags & BDRV_O_UNMAP))
456 {
457 error_setg(errp, "setting detect-zeroes to unmap is not allowed "
458 "without setting discard operation to unmap");
459 return;
460 }
461 }
462 }
463
464 /* Takes the ownership of bs_opts */
465 static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
466 Error **errp)
467 {
468 const char *buf;
469 int bdrv_flags = 0;
470 int on_read_error, on_write_error;
471 bool account_invalid, account_failed;
472 bool writethrough;
473 BlockBackend *blk;
474 BlockDriverState *bs;
475 ThrottleConfig cfg;
476 int snapshot = 0;
477 Error *error = NULL;
478 QemuOpts *opts;
479 QDict *interval_dict = NULL;
480 QList *interval_list = NULL;
481 const char *id;
482 BlockdevDetectZeroesOptions detect_zeroes =
483 BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF;
484 const char *throttling_group = NULL;
485
486 /* Check common options by copying from bs_opts to opts, all other options
487 * stay in bs_opts for processing by bdrv_open(). */
488 id = qdict_get_try_str(bs_opts, "id");
489 opts = qemu_opts_create(&qemu_common_drive_opts, id, 1, &error);
490 if (error) {
491 error_propagate(errp, error);
492 goto err_no_opts;
493 }
494
495 qemu_opts_absorb_qdict(opts, bs_opts, &error);
496 if (error) {
497 error_propagate(errp, error);
498 goto early_err;
499 }
500
501 if (id) {
502 qdict_del(bs_opts, "id");
503 }
504
505 /* extract parameters */
506 snapshot = qemu_opt_get_bool(opts, "snapshot", 0);
507
508 account_invalid = qemu_opt_get_bool(opts, "stats-account-invalid", true);
509 account_failed = qemu_opt_get_bool(opts, "stats-account-failed", true);
510
511 writethrough = !qemu_opt_get_bool(opts, BDRV_OPT_CACHE_WB, true);
512
513 qdict_extract_subqdict(bs_opts, &interval_dict, "stats-intervals.");
514 qdict_array_split(interval_dict, &interval_list);
515
516 if (qdict_size(interval_dict) != 0) {
517 error_setg(errp, "Invalid option stats-intervals.%s",
518 qdict_first(interval_dict)->key);
519 goto early_err;
520 }
521
522 extract_common_blockdev_options(opts, &bdrv_flags, &throttling_group, &cfg,
523 &detect_zeroes, &error);
524 if (error) {
525 error_propagate(errp, error);
526 goto early_err;
527 }
528
529 if ((buf = qemu_opt_get(opts, "format")) != NULL) {
530 if (is_help_option(buf)) {
531 error_printf("Supported formats:");
532 bdrv_iterate_format(bdrv_format_print, NULL);
533 error_printf("\n");
534 goto early_err;
535 }
536
537 if (qdict_haskey(bs_opts, "driver")) {
538 error_setg(errp, "Cannot specify both 'driver' and 'format'");
539 goto early_err;
540 }
541 qdict_put(bs_opts, "driver", qstring_from_str(buf));
542 }
543
544 on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;
545 if ((buf = qemu_opt_get(opts, "werror")) != NULL) {
546 on_write_error = parse_block_error_action(buf, 0, &error);
547 if (error) {
548 error_propagate(errp, error);
549 goto early_err;
550 }
551 }
552
553 on_read_error = BLOCKDEV_ON_ERROR_REPORT;
554 if ((buf = qemu_opt_get(opts, "rerror")) != NULL) {
555 on_read_error = parse_block_error_action(buf, 1, &error);
556 if (error) {
557 error_propagate(errp, error);
558 goto early_err;
559 }
560 }
561
562 if (snapshot) {
563 bdrv_flags |= BDRV_O_SNAPSHOT;
564 }
565
566 /* init */
567 if ((!file || !*file) && !qdict_size(bs_opts)) {
568 BlockBackendRootState *blk_rs;
569
570 blk = blk_new();
571 blk_rs = blk_get_root_state(blk);
572 blk_rs->open_flags = bdrv_flags;
573 blk_rs->read_only = !(bdrv_flags & BDRV_O_RDWR);
574 blk_rs->detect_zeroes = detect_zeroes;
575
576 QDECREF(bs_opts);
577 } else {
578 if (file && !*file) {
579 file = NULL;
580 }
581
582 /* bdrv_open() defaults to the values in bdrv_flags (for compatibility
583 * with other callers) rather than what we want as the real defaults.
584 * Apply the defaults here instead. */
585 qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_DIRECT, "off");
586 qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_NO_FLUSH, "off");
587 assert((bdrv_flags & BDRV_O_CACHE_MASK) == 0);
588
589 if (runstate_check(RUN_STATE_INMIGRATE)) {
590 bdrv_flags |= BDRV_O_INACTIVE;
591 }
592
593 blk = blk_new_open(file, NULL, bs_opts, bdrv_flags, errp);
594 if (!blk) {
595 goto err_no_bs_opts;
596 }
597 bs = blk_bs(blk);
598
599 bs->detect_zeroes = detect_zeroes;
600
601 if (bdrv_key_required(bs)) {
602 autostart = 0;
603 }
604
605 block_acct_init(blk_get_stats(blk), account_invalid, account_failed);
606
607 if (!parse_stats_intervals(blk_get_stats(blk), interval_list, errp)) {
608 blk_unref(blk);
609 blk = NULL;
610 goto err_no_bs_opts;
611 }
612 }
613
614 /* disk I/O throttling */
615 if (throttle_enabled(&cfg)) {
616 if (!throttling_group) {
617 throttling_group = blk_name(blk);
618 }
619 blk_io_limits_enable(blk, throttling_group);
620 blk_set_io_limits(blk, &cfg);
621 }
622
623 blk_set_enable_write_cache(blk, !writethrough);
624 blk_set_on_error(blk, on_read_error, on_write_error);
625
626 if (!monitor_add_blk(blk, qemu_opts_id(opts), errp)) {
627 blk_unref(blk);
628 blk = NULL;
629 goto err_no_bs_opts;
630 }
631
632 err_no_bs_opts:
633 qemu_opts_del(opts);
634 QDECREF(interval_dict);
635 QDECREF(interval_list);
636 return blk;
637
638 early_err:
639 qemu_opts_del(opts);
640 QDECREF(interval_dict);
641 QDECREF(interval_list);
642 err_no_opts:
643 QDECREF(bs_opts);
644 return NULL;
645 }
646
647 static QemuOptsList qemu_root_bds_opts;
648
649 /* Takes the ownership of bs_opts */
650 static BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp)
651 {
652 BlockDriverState *bs;
653 QemuOpts *opts;
654 Error *local_error = NULL;
655 BlockdevDetectZeroesOptions detect_zeroes;
656 int bdrv_flags = 0;
657
658 opts = qemu_opts_create(&qemu_root_bds_opts, NULL, 1, errp);
659 if (!opts) {
660 goto fail;
661 }
662
663 qemu_opts_absorb_qdict(opts, bs_opts, &local_error);
664 if (local_error) {
665 error_propagate(errp, local_error);
666 goto fail;
667 }
668
669 extract_common_blockdev_options(opts, &bdrv_flags, NULL, NULL,
670 &detect_zeroes, &local_error);
671 if (local_error) {
672 error_propagate(errp, local_error);
673 goto fail;
674 }
675
676 /* bdrv_open() defaults to the values in bdrv_flags (for compatibility
677 * with other callers) rather than what we want as the real defaults.
678 * Apply the defaults here instead. */
679 qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_DIRECT, "off");
680 qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_NO_FLUSH, "off");
681
682 if (runstate_check(RUN_STATE_INMIGRATE)) {
683 bdrv_flags |= BDRV_O_INACTIVE;
684 }
685
686 bs = bdrv_open(NULL, NULL, bs_opts, bdrv_flags, errp);
687 if (!bs) {
688 goto fail_no_bs_opts;
689 }
690
691 bs->detect_zeroes = detect_zeroes;
692
693 fail_no_bs_opts:
694 qemu_opts_del(opts);
695 return bs;
696
697 fail:
698 qemu_opts_del(opts);
699 QDECREF(bs_opts);
700 return NULL;
701 }
702
703 void blockdev_close_all_bdrv_states(void)
704 {
705 BlockDriverState *bs, *next_bs;
706
707 QTAILQ_FOREACH_SAFE(bs, &monitor_bdrv_states, monitor_list, next_bs) {
708 AioContext *ctx = bdrv_get_aio_context(bs);
709
710 aio_context_acquire(ctx);
711 bdrv_unref(bs);
712 aio_context_release(ctx);
713 }
714 }
715
716 /* Iterates over the list of monitor-owned BlockDriverStates */
717 BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs)
718 {
719 return bs ? QTAILQ_NEXT(bs, monitor_list)
720 : QTAILQ_FIRST(&monitor_bdrv_states);
721 }
722
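/* Rename every occurrence of option @from in @opts to @to.  Specifying @from
 * and its alias @to at the same time is rejected with an error. */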
723 static void qemu_opt_rename(QemuOpts *opts, const char *from, const char *to,
724 Error **errp)
725 {
726 const char *value;
727
728 value = qemu_opt_get(opts, from);
729 if (value) {
730 if (qemu_opt_find(opts, to)) {
731 error_setg(errp, "'%s' and its alias '%s' can't be used at the "
732 "same time", to, from);
733 return;
734 }
735 }
736
737 /* rename all items in opts */
738 while ((value = qemu_opt_get(opts, from))) {
739 qemu_opt_set(opts, to, value, &error_abort);
740 qemu_opt_unset(opts, from);
741 }
742 }
743
744 QemuOptsList qemu_legacy_drive_opts = {
745 .name = "drive",
746 .head = QTAILQ_HEAD_INITIALIZER(qemu_legacy_drive_opts.head),
747 .desc = {
748 {
749 .name = "bus",
750 .type = QEMU_OPT_NUMBER,
751 .help = "bus number",
752 },{
753 .name = "unit",
754 .type = QEMU_OPT_NUMBER,
755 .help = "unit number (i.e. lun for scsi)",
756 },{
757 .name = "index",
758 .type = QEMU_OPT_NUMBER,
759 .help = "index number",
760 },{
761 .name = "media",
762 .type = QEMU_OPT_STRING,
763 .help = "media type (disk, cdrom)",
764 },{
765 .name = "if",
766 .type = QEMU_OPT_STRING,
767 .help = "interface (ide, scsi, sd, mtd, floppy, pflash, virtio)",
768 },{
769 .name = "cyls",
770 .type = QEMU_OPT_NUMBER,
771 .help = "number of cylinders (ide disk geometry)",
772 },{
773 .name = "heads",
774 .type = QEMU_OPT_NUMBER,
775 .help = "number of heads (ide disk geometry)",
776 },{
777 .name = "secs",
778 .type = QEMU_OPT_NUMBER,
779 .help = "number of sectors (ide disk geometry)",
780 },{
781 .name = "trans",
782 .type = QEMU_OPT_STRING,
783 .help = "chs translation (auto, lba, none)",
784 },{
785 .name = "boot",
786 .type = QEMU_OPT_BOOL,
787 .help = "(deprecated, ignored)",
788 },{
789 .name = "addr",
790 .type = QEMU_OPT_STRING,
791 .help = "pci address (virtio only)",
792 },{
793 .name = "serial",
794 .type = QEMU_OPT_STRING,
795 .help = "disk serial number",
796 },{
797 .name = "file",
798 .type = QEMU_OPT_STRING,
799 .help = "file name",
800 },
801
802 /* Options that are passed on, but have special semantics with -drive */
803 {
804 .name = "read-only",
805 .type = QEMU_OPT_BOOL,
806 .help = "open drive file as read-only",
807 },{
808 .name = "rerror",
809 .type = QEMU_OPT_STRING,
810 .help = "read error action",
811 },{
812 .name = "werror",
813 .type = QEMU_OPT_STRING,
814 .help = "write error action",
815 },{
816 .name = "copy-on-read",
817 .type = QEMU_OPT_BOOL,
818 .help = "copy read data from backing file into image file",
819 },
820
821 { /* end of list */ }
822 },
823 };
824
825 DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type)
826 {
827 const char *value;
828 BlockBackend *blk;
829 DriveInfo *dinfo = NULL;
830 QDict *bs_opts;
831 QemuOpts *legacy_opts;
832 DriveMediaType media = MEDIA_DISK;
833 BlockInterfaceType type;
834 int cyls, heads, secs, translation;
835 int max_devs, bus_id, unit_id, index;
836 const char *devaddr;
837 const char *werror, *rerror;
838 bool read_only = false;
839 bool copy_on_read;
840 const char *serial;
841 const char *filename;
842 Error *local_err = NULL;
843 int i;
844
845 /* Change legacy command line options into QMP ones */
846 static const struct {
847 const char *from;
848 const char *to;
849 } opt_renames[] = {
850 { "iops", "throttling.iops-total" },
851 { "iops_rd", "throttling.iops-read" },
852 { "iops_wr", "throttling.iops-write" },
853
854 { "bps", "throttling.bps-total" },
855 { "bps_rd", "throttling.bps-read" },
856 { "bps_wr", "throttling.bps-write" },
857
858 { "iops_max", "throttling.iops-total-max" },
859 { "iops_rd_max", "throttling.iops-read-max" },
860 { "iops_wr_max", "throttling.iops-write-max" },
861
862 { "bps_max", "throttling.bps-total-max" },
863 { "bps_rd_max", "throttling.bps-read-max" },
864 { "bps_wr_max", "throttling.bps-write-max" },
865
866 { "iops_size", "throttling.iops-size" },
867
868 { "group", "throttling.group" },
869
870 { "readonly", "read-only" },
871 };
872
873 for (i = 0; i < ARRAY_SIZE(opt_renames); i++) {
874 qemu_opt_rename(all_opts, opt_renames[i].from, opt_renames[i].to,
875 &local_err);
876 if (local_err) {
877 error_report_err(local_err);
878 return NULL;
879 }
880 }
881
882 value = qemu_opt_get(all_opts, "cache");
883 if (value) {
884 int flags = 0;
885 bool writethrough;
886
887 if (bdrv_parse_cache_mode(value, &flags, &writethrough) != 0) {
888 error_report("invalid cache option");
889 return NULL;
890 }
891
892 /* Specific options take precedence */
893 if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_WB)) {
894 qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_WB,
895 !writethrough, &error_abort);
896 }
897 if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_DIRECT)) {
898 qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_DIRECT,
899 !!(flags & BDRV_O_NOCACHE), &error_abort);
900 }
901 if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_NO_FLUSH)) {
902 qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_NO_FLUSH,
903 !!(flags & BDRV_O_NO_FLUSH), &error_abort);
904 }
905 qemu_opt_unset(all_opts, "cache");
906 }
907
908 /* Get a QDict for processing the options */
909 bs_opts = qdict_new();
910 qemu_opts_to_qdict(all_opts, bs_opts);
911
912 legacy_opts = qemu_opts_create(&qemu_legacy_drive_opts, NULL, 0,
913 &error_abort);
914 qemu_opts_absorb_qdict(legacy_opts, bs_opts, &local_err);
915 if (local_err) {
916 error_report_err(local_err);
917 goto fail;
918 }
919
920 /* Deprecated option boot=[on|off] */
921 if (qemu_opt_get(legacy_opts, "boot") != NULL) {
922 fprintf(stderr, "qemu-kvm: boot=on|off is deprecated and will be "
923 "ignored. Future versions will reject this parameter. Please "
924 "update your scripts.\n");
925 }
926
927 /* Media type */
928 value = qemu_opt_get(legacy_opts, "media");
929 if (value) {
930 if (!strcmp(value, "disk")) {
931 media = MEDIA_DISK;
932 } else if (!strcmp(value, "cdrom")) {
933 media = MEDIA_CDROM;
934 read_only = true;
935 } else {
936 error_report("'%s' invalid media", value);
937 goto fail;
938 }
939 }
940
941 /* copy-on-read is disabled with a warning for read-only devices */
942 read_only |= qemu_opt_get_bool(legacy_opts, "read-only", false);
943 copy_on_read = qemu_opt_get_bool(legacy_opts, "copy-on-read", false);
944
945 if (read_only && copy_on_read) {
946 error_report("warning: disabling copy-on-read on read-only drive");
947 copy_on_read = false;
948 }
949
950 qdict_put(bs_opts, "read-only",
951 qstring_from_str(read_only ? "on" : "off"));
952 qdict_put(bs_opts, "copy-on-read",
953 qstring_from_str(copy_on_read ? "on" :"off"));
954
955 /* Controller type */
956 value = qemu_opt_get(legacy_opts, "if");
957 if (value) {
958 for (type = 0;
959 type < IF_COUNT && strcmp(value, if_name[type]);
960 type++) {
961 }
962 if (type == IF_COUNT) {
963 error_report("unsupported bus type '%s'", value);
964 goto fail;
965 }
966 } else {
967 type = block_default_type;
968 }
969
970 /* Geometry */
971 cyls = qemu_opt_get_number(legacy_opts, "cyls", 0);
972 heads = qemu_opt_get_number(legacy_opts, "heads", 0);
973 secs = qemu_opt_get_number(legacy_opts, "secs", 0);
974
975 if (cyls || heads || secs) {
976 if (cyls < 1) {
977 error_report("invalid physical cyls number");
978 goto fail;
979 }
980 if (heads < 1) {
981 error_report("invalid physical heads number");
982 goto fail;
983 }
984 if (secs < 1) {
985 error_report("invalid physical secs number");
986 goto fail;
987 }
988 }
989
990 translation = BIOS_ATA_TRANSLATION_AUTO;
991 value = qemu_opt_get(legacy_opts, "trans");
992 if (value != NULL) {
993 if (!cyls) {
994 error_report("'%s' trans must be used with cyls, heads and secs",
995 value);
996 goto fail;
997 }
998 if (!strcmp(value, "none")) {
999 translation = BIOS_ATA_TRANSLATION_NONE;
1000 } else if (!strcmp(value, "lba")) {
1001 translation = BIOS_ATA_TRANSLATION_LBA;
1002 } else if (!strcmp(value, "large")) {
1003 translation = BIOS_ATA_TRANSLATION_LARGE;
1004 } else if (!strcmp(value, "rechs")) {
1005 translation = BIOS_ATA_TRANSLATION_RECHS;
1006 } else if (!strcmp(value, "auto")) {
1007 translation = BIOS_ATA_TRANSLATION_AUTO;
1008 } else {
1009 error_report("'%s' invalid translation type", value);
1010 goto fail;
1011 }
1012 }
1013
1014 if (media == MEDIA_CDROM) {
1015 if (cyls || secs || heads) {
1016 error_report("CHS can't be set with media=cdrom");
1017 goto fail;
1018 }
1019 }
1020
1021 /* Device address specified by bus/unit or index.
1022 * If none was specified, try to find the first free one. */
1023 bus_id = qemu_opt_get_number(legacy_opts, "bus", 0);
1024 unit_id = qemu_opt_get_number(legacy_opts, "unit", -1);
1025 index = qemu_opt_get_number(legacy_opts, "index", -1);
1026
1027 max_devs = if_max_devs[type];
1028
1029 if (index != -1) {
1030 if (bus_id != 0 || unit_id != -1) {
1031 error_report("index cannot be used with bus and unit");
1032 goto fail;
1033 }
1034 bus_id = drive_index_to_bus_id(type, index);
1035 unit_id = drive_index_to_unit_id(type, index);
1036 }
1037
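    /* No unit was given: scan for the first free (bus, unit) slot, advancing
     * to the next bus whenever the current one is full. */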
1038 if (unit_id == -1) {
1039 unit_id = 0;
1040 while (drive_get(type, bus_id, unit_id) != NULL) {
1041 unit_id++;
1042 if (max_devs && unit_id >= max_devs) {
1043 unit_id -= max_devs;
1044 bus_id++;
1045 }
1046 }
1047 }
1048
1049 if (max_devs && unit_id >= max_devs) {
1050 error_report("unit %d too big (max is %d)", unit_id, max_devs - 1);
1051 goto fail;
1052 }
1053
1054 if (drive_get(type, bus_id, unit_id) != NULL) {
1055 error_report("drive with bus=%d, unit=%d (index=%d) exists",
1056 bus_id, unit_id, index);
1057 goto fail;
1058 }
1059
1060 /* Serial number */
1061 serial = qemu_opt_get(legacy_opts, "serial");
1062
1063 /* no id supplied -> create one */
1064 if (qemu_opts_id(all_opts) == NULL) {
1065 char *new_id;
1066 const char *mediastr = "";
1067 if (type == IF_IDE || type == IF_SCSI) {
1068 mediastr = (media == MEDIA_CDROM) ? "-cd" : "-hd";
1069 }
1070 if (max_devs) {
1071 new_id = g_strdup_printf("%s%i%s%i", if_name[type], bus_id,
1072 mediastr, unit_id);
1073 } else {
1074 new_id = g_strdup_printf("%s%s%i", if_name[type],
1075 mediastr, unit_id);
1076 }
1077 qdict_put(bs_opts, "id", qstring_from_str(new_id));
1078 g_free(new_id);
1079 }
1080
1081 /* Add virtio block device */
1082 devaddr = qemu_opt_get(legacy_opts, "addr");
1083 if (devaddr && type != IF_VIRTIO) {
1084 error_report("addr is not supported by this bus type");
1085 goto fail;
1086 }
1087
1088 if (type == IF_VIRTIO) {
1089 QemuOpts *devopts;
1090 devopts = qemu_opts_create(qemu_find_opts("device"), NULL, 0,
1091 &error_abort);
1092 if (arch_type == QEMU_ARCH_S390X) {
1093 qemu_opt_set(devopts, "driver", "virtio-blk-ccw", &error_abort);
1094 } else {
1095 qemu_opt_set(devopts, "driver", "virtio-blk-pci", &error_abort);
1096 }
1097 qemu_opt_set(devopts, "drive", qdict_get_str(bs_opts, "id"),
1098 &error_abort);
1099 if (devaddr) {
1100 qemu_opt_set(devopts, "addr", devaddr, &error_abort);
1101 }
1102 }
1103
1104 filename = qemu_opt_get(legacy_opts, "file");
1105
1106 /* Check werror/rerror compatibility with if=... */
1107 werror = qemu_opt_get(legacy_opts, "werror");
1108 if (werror != NULL) {
1109 if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO &&
1110 type != IF_NONE) {
1111 error_report("werror is not supported by this bus type");
1112 goto fail;
1113 }
1114 qdict_put(bs_opts, "werror", qstring_from_str(werror));
1115 }
1116
1117 rerror = qemu_opt_get(legacy_opts, "rerror");
1118 if (rerror != NULL) {
1119 if (type != IF_IDE && type != IF_VIRTIO && type != IF_SCSI &&
1120 type != IF_NONE) {
1121 error_report("rerror is not supported by this bus type");
1122 goto fail;
1123 }
1124 qdict_put(bs_opts, "rerror", qstring_from_str(rerror));
1125 }
1126
1127 /* Actual block device init: Functionality shared with blockdev-add */
1128 blk = blockdev_init(filename, bs_opts, &local_err);
1129 bs_opts = NULL;
1130 if (!blk) {
1131 if (local_err) {
1132 error_report_err(local_err);
1133 }
1134 goto fail;
1135 } else {
1136 assert(!local_err);
1137 }
1138
1139 /* Create legacy DriveInfo */
1140 dinfo = g_malloc0(sizeof(*dinfo));
1141 dinfo->opts = all_opts;
1142
1143 dinfo->cyls = cyls;
1144 dinfo->heads = heads;
1145 dinfo->secs = secs;
1146 dinfo->trans = translation;
1147
1148 dinfo->type = type;
1149 dinfo->bus = bus_id;
1150 dinfo->unit = unit_id;
1151 dinfo->devaddr = devaddr;
1152 dinfo->serial = g_strdup(serial);
1153
1154 blk_set_legacy_dinfo(blk, dinfo);
1155
1156 switch(type) {
1157 case IF_IDE:
1158 case IF_SCSI:
1159 case IF_XEN:
1160 case IF_NONE:
1161 dinfo->media_cd = media == MEDIA_CDROM;
1162 break;
1163 default:
1164 break;
1165 }
1166
1167 fail:
1168 qemu_opts_del(legacy_opts);
1169 QDECREF(bs_opts);
1170 return dinfo;
1171 }
1172
1173 void hmp_commit(Monitor *mon, const QDict *qdict)
1174 {
1175 const char *device = qdict_get_str(qdict, "device");
1176 BlockBackend *blk;
1177 int ret;
1178
1179 if (!strcmp(device, "all")) {
1180 ret = blk_commit_all();
1181 } else {
1182 BlockDriverState *bs;
1183 AioContext *aio_context;
1184
1185 blk = blk_by_name(device);
1186 if (!blk) {
1187 monitor_printf(mon, "Device '%s' not found\n", device);
1188 return;
1189 }
1190 if (!blk_is_available(blk)) {
1191 monitor_printf(mon, "Device '%s' has no medium\n", device);
1192 return;
1193 }
1194
1195 bs = blk_bs(blk);
1196 aio_context = bdrv_get_aio_context(bs);
1197 aio_context_acquire(aio_context);
1198
1199 ret = bdrv_commit(bs);
1200
1201 aio_context_release(aio_context);
1202 }
1203 if (ret < 0) {
1204 monitor_printf(mon, "'commit' error for '%s': %s\n", device,
1205 strerror(-ret));
1206 }
1207 }
1208
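/* Run a single transaction action by wrapping it in a one-element list and
 * handing it to qmp_transaction(). */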
1209 static void blockdev_do_action(TransactionAction *action, Error **errp)
1210 {
1211 TransactionActionList list;
1212
1213 list.value = action;
1214 list.next = NULL;
1215 qmp_transaction(&list, false, NULL, errp);
1216 }
1217
1218 void qmp_blockdev_snapshot_sync(bool has_device, const char *device,
1219 bool has_node_name, const char *node_name,
1220 const char *snapshot_file,
1221 bool has_snapshot_node_name,
1222 const char *snapshot_node_name,
1223 bool has_format, const char *format,
1224 bool has_mode, NewImageMode mode, Error **errp)
1225 {
1226 BlockdevSnapshotSync snapshot = {
1227 .has_device = has_device,
1228 .device = (char *) device,
1229 .has_node_name = has_node_name,
1230 .node_name = (char *) node_name,
1231 .snapshot_file = (char *) snapshot_file,
1232 .has_snapshot_node_name = has_snapshot_node_name,
1233 .snapshot_node_name = (char *) snapshot_node_name,
1234 .has_format = has_format,
1235 .format = (char *) format,
1236 .has_mode = has_mode,
1237 .mode = mode,
1238 };
1239 TransactionAction action = {
1240 .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC,
1241 .u.blockdev_snapshot_sync.data = &snapshot,
1242 };
1243 blockdev_do_action(&action, errp);
1244 }
1245
1246 void qmp_blockdev_snapshot(const char *node, const char *overlay,
1247 Error **errp)
1248 {
1249 BlockdevSnapshot snapshot_data = {
1250 .node = (char *) node,
1251 .overlay = (char *) overlay
1252 };
1253 TransactionAction action = {
1254 .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT,
1255 .u.blockdev_snapshot.data = &snapshot_data,
1256 };
1257 blockdev_do_action(&action, errp);
1258 }
1259
1260 void qmp_blockdev_snapshot_internal_sync(const char *device,
1261 const char *name,
1262 Error **errp)
1263 {
1264 BlockdevSnapshotInternal snapshot = {
1265 .device = (char *) device,
1266 .name = (char *) name
1267 };
1268 TransactionAction action = {
1269 .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC,
1270 .u.blockdev_snapshot_internal_sync.data = &snapshot,
1271 };
1272 blockdev_do_action(&action, errp);
1273 }
1274
1275 SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device,
1276 bool has_id,
1277 const char *id,
1278 bool has_name,
1279 const char *name,
1280 Error **errp)
1281 {
1282 BlockDriverState *bs;
1283 BlockBackend *blk;
1284 AioContext *aio_context;
1285 QEMUSnapshotInfo sn;
1286 Error *local_err = NULL;
1287 SnapshotInfo *info = NULL;
1288 int ret;
1289
1290 blk = blk_by_name(device);
1291 if (!blk) {
1292 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
1293 "Device '%s' not found", device);
1294 return NULL;
1295 }
1296
1297 aio_context = blk_get_aio_context(blk);
1298 aio_context_acquire(aio_context);
1299
1300 if (!has_id) {
1301 id = NULL;
1302 }
1303
1304 if (!has_name) {
1305 name = NULL;
1306 }
1307
1308 if (!id && !name) {
1309 error_setg(errp, "Name or id must be provided");
1310 goto out_aio_context;
1311 }
1312
1313 if (!blk_is_available(blk)) {
1314 error_setg(errp, "Device '%s' has no medium", device);
1315 goto out_aio_context;
1316 }
1317 bs = blk_bs(blk);
1318
1319 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE, errp)) {
1320 goto out_aio_context;
1321 }
1322
1323 ret = bdrv_snapshot_find_by_id_and_name(bs, id, name, &sn, &local_err);
1324 if (local_err) {
1325 error_propagate(errp, local_err);
1326 goto out_aio_context;
1327 }
1328 if (!ret) {
1329 error_setg(errp,
1330 "Snapshot with id '%s' and name '%s' does not exist on "
1331 "device '%s'",
1332 STR_OR_NULL(id), STR_OR_NULL(name), device);
1333 goto out_aio_context;
1334 }
1335
1336 bdrv_snapshot_delete(bs, id, name, &local_err);
1337 if (local_err) {
1338 error_propagate(errp, local_err);
1339 goto out_aio_context;
1340 }
1341
1342 aio_context_release(aio_context);
1343
1344 info = g_new0(SnapshotInfo, 1);
1345 info->id = g_strdup(sn.id_str);
1346 info->name = g_strdup(sn.name);
1347 info->date_nsec = sn.date_nsec;
1348 info->date_sec = sn.date_sec;
1349 info->vm_state_size = sn.vm_state_size;
1350 info->vm_clock_nsec = sn.vm_clock_nsec % 1000000000;
1351 info->vm_clock_sec = sn.vm_clock_nsec / 1000000000;
1352
1353 return info;
1354
1355 out_aio_context:
1356 aio_context_release(aio_context);
1357 return NULL;
1358 }
1359
1360 /**
1361 * block_dirty_bitmap_lookup:
1362 * Return a dirty bitmap (if present), after validating
1363 * the node reference and bitmap names.
1364 *
1365 * @node: The name of the BDS node to search for bitmaps
1366 * @name: The name of the bitmap to search for
1367 * @pbs: Output pointer for BDS lookup, if desired. Can be NULL.
1368 * @paio: Output pointer for aio_context acquisition, if desired. Can be NULL.
1369 * @errp: Output pointer for error information. Can be NULL.
1370 *
1371 * @return: A bitmap object on success, or NULL on failure.
1372 */
1373 static BdrvDirtyBitmap *block_dirty_bitmap_lookup(const char *node,
1374 const char *name,
1375 BlockDriverState **pbs,
1376 AioContext **paio,
1377 Error **errp)
1378 {
1379 BlockDriverState *bs;
1380 BdrvDirtyBitmap *bitmap;
1381 AioContext *aio_context;
1382
1383 if (!node) {
1384 error_setg(errp, "Node cannot be NULL");
1385 return NULL;
1386 }
1387 if (!name) {
1388 error_setg(errp, "Bitmap name cannot be NULL");
1389 return NULL;
1390 }
1391 bs = bdrv_lookup_bs(node, node, NULL);
1392 if (!bs) {
1393 error_setg(errp, "Node '%s' not found", node);
1394 return NULL;
1395 }
1396
1397 aio_context = bdrv_get_aio_context(bs);
1398 aio_context_acquire(aio_context);
1399
1400 bitmap = bdrv_find_dirty_bitmap(bs, name);
1401 if (!bitmap) {
1402 error_setg(errp, "Dirty bitmap '%s' not found", name);
1403 goto fail;
1404 }
1405
1406 if (pbs) {
1407 *pbs = bs;
1408 }
1409 if (paio) {
1410 *paio = aio_context;
1411 } else {
1412 aio_context_release(aio_context);
1413 }
1414
1415 return bitmap;
1416
1417 fail:
1418 aio_context_release(aio_context);
1419 return NULL;
1420 }
1421
1422 /* New and old BlockDriverState structs for atomic group operations */
1423
1424 typedef struct BlkActionState BlkActionState;
1425
1426 /**
1427 * BlkActionOps:
1428 * Table of operations that define an Action.
1429 *
1430 * @instance_size: Size of state struct, in bytes.
1431 * @prepare: Prepare the work, must NOT be NULL.
1432 * @commit: Commit the changes, can be NULL.
1433 * @abort: Abort the changes on fail, can be NULL.
1434 * @clean: Clean up resources after all transaction actions have called
1435 * commit() or abort(). Can be NULL.
1436 *
1437 * Only prepare() may fail. In a single transaction, only one of commit() or
1438 * abort() will be called. clean() will always be called if it is present.
1439 */
1440 typedef struct BlkActionOps {
1441 size_t instance_size;
1442 void (*prepare)(BlkActionState *common, Error **errp);
1443 void (*commit)(BlkActionState *common);
1444 void (*abort)(BlkActionState *common);
1445 void (*clean)(BlkActionState *common);
1446 } BlkActionOps;
1447
1448 /**
1449 * BlkActionState:
1450 * Describes one Action's state within a Transaction.
1451 *
1452 * @action: QAPI-defined enum identifying which Action to perform.
1453 * @ops: Table of ActionOps this Action can perform.
1454 * @block_job_txn: Transaction which this action belongs to.
1455 * @entry: List membership for all Actions in this Transaction.
1456 *
1457 * This structure must be arranged as first member in a subclassed type,
1458 * assuming that the compiler will also arrange it to the same offsets as the
1459 * base class.
1460 */
1461 struct BlkActionState {
1462 TransactionAction *action;
1463 const BlkActionOps *ops;
1464 BlockJobTxn *block_job_txn;
1465 TransactionProperties *txn_props;
1466 QSIMPLEQ_ENTRY(BlkActionState) entry;
1467 };
1468
1469 /* internal snapshot private data */
1470 typedef struct InternalSnapshotState {
1471 BlkActionState common;
1472 BlockDriverState *bs;
1473 AioContext *aio_context;
1474 QEMUSnapshotInfo sn;
1475 bool created;
1476 } InternalSnapshotState;
1477
1478
1479 static int action_check_completion_mode(BlkActionState *s, Error **errp)
1480 {
1481 if (s->txn_props->completion_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) {
1482 error_setg(errp,
1483 "Action '%s' does not support Transaction property "
1484 "completion-mode = %s",
1485 TransactionActionKind_lookup[s->action->type],
1486 ActionCompletionMode_lookup[s->txn_props->completion_mode]);
1487 return -1;
1488 }
1489 return 0;
1490 }
1491
1492 static void internal_snapshot_prepare(BlkActionState *common,
1493 Error **errp)
1494 {
1495 Error *local_err = NULL;
1496 const char *device;
1497 const char *name;
1498 BlockBackend *blk;
1499 BlockDriverState *bs;
1500 QEMUSnapshotInfo old_sn, *sn;
1501 bool ret;
1502 qemu_timeval tv;
1503 BlockdevSnapshotInternal *internal;
1504 InternalSnapshotState *state;
1505 int ret1;
1506
1507 g_assert(common->action->type ==
1508 TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC);
1509 internal = common->action->u.blockdev_snapshot_internal_sync.data;
1510 state = DO_UPCAST(InternalSnapshotState, common, common);
1511
1512 /* 1. parse input */
1513 device = internal->device;
1514 name = internal->name;
1515
1516 /* 2. validate the request */
1517 if (action_check_completion_mode(common, errp) < 0) {
1518 return;
1519 }
1520
1521 blk = blk_by_name(device);
1522 if (!blk) {
1523 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
1524 "Device '%s' not found", device);
1525 return;
1526 }
1527
1528 /* AioContext is released in .clean() */
1529 state->aio_context = blk_get_aio_context(blk);
1530 aio_context_acquire(state->aio_context);
1531
1532 if (!blk_is_available(blk)) {
1533 error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
1534 return;
1535 }
1536 bs = blk_bs(blk);
1537
1538 state->bs = bs;
1539 bdrv_drained_begin(bs);
1540
1541 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, errp)) {
1542 return;
1543 }
1544
1545 if (bdrv_is_read_only(bs)) {
1546 error_setg(errp, "Device '%s' is read only", device);
1547 return;
1548 }
1549
1550 if (!bdrv_can_snapshot(bs)) {
1551 error_setg(errp, "Block format '%s' used by device '%s' "
1552 "does not support internal snapshots",
1553 bs->drv->format_name, device);
1554 return;
1555 }
1556
1557 if (!strlen(name)) {
1558 error_setg(errp, "Name is empty");
1559 return;
1560 }
1561
1562 /* check whether a snapshot with that name already exists */
1563 ret = bdrv_snapshot_find_by_id_and_name(bs, NULL, name, &old_sn,
1564 &local_err);
1565 if (local_err) {
1566 error_propagate(errp, local_err);
1567 return;
1568 } else if (ret) {
1569 error_setg(errp,
1570 "Snapshot with name '%s' already exists on device '%s'",
1571 name, device);
1572 return;
1573 }
1574
1575 /* 3. take the snapshot */
1576 sn = &state->sn;
1577 pstrcpy(sn->name, sizeof(sn->name), name);
1578 qemu_gettimeofday(&tv);
1579 sn->date_sec = tv.tv_sec;
1580 sn->date_nsec = tv.tv_usec * 1000;
1581 sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1582
1583 ret1 = bdrv_snapshot_create(bs, sn);
1584 if (ret1 < 0) {
1585 error_setg_errno(errp, -ret1,
1586 "Failed to create snapshot '%s' on device '%s'",
1587 name, device);
1588 return;
1589 }
1590
1591 /* 4. success: mark that the snapshot has been created */
1592 state->created = true;
1593 }
1594
1595 static void internal_snapshot_abort(BlkActionState *common)
1596 {
1597 InternalSnapshotState *state =
1598 DO_UPCAST(InternalSnapshotState, common, common);
1599 BlockDriverState *bs = state->bs;
1600 QEMUSnapshotInfo *sn = &state->sn;
1601 Error *local_error = NULL;
1602
1603 if (!state->created) {
1604 return;
1605 }
1606
1607 if (bdrv_snapshot_delete(bs, sn->id_str, sn->name, &local_error) < 0) {
1608 error_reportf_err(local_error,
1609 "Failed to delete snapshot with id '%s' and "
1610 "name '%s' on device '%s' in abort: ",
1611 sn->id_str, sn->name,
1612 bdrv_get_device_name(bs));
1613 }
1614 }
1615
1616 static void internal_snapshot_clean(BlkActionState *common)
1617 {
1618 InternalSnapshotState *state = DO_UPCAST(InternalSnapshotState,
1619 common, common);
1620
1621 if (state->aio_context) {
1622 if (state->bs) {
1623 bdrv_drained_end(state->bs);
1624 }
1625 aio_context_release(state->aio_context);
1626 }
1627 }
1628
1629 /* external snapshot private data */
1630 typedef struct ExternalSnapshotState {
1631 BlkActionState common;
1632 BlockDriverState *old_bs;
1633 BlockDriverState *new_bs;
1634 AioContext *aio_context;
1635 } ExternalSnapshotState;
1636
1637 static void external_snapshot_prepare(BlkActionState *common,
1638 Error **errp)
1639 {
1640 int flags = 0;
1641 QDict *options = NULL;
1642 Error *local_err = NULL;
1643 /* Device and node name of the image to generate the snapshot from */
1644 const char *device;
1645 const char *node_name;
1646 /* Reference to the new image (for 'blockdev-snapshot') */
1647 const char *snapshot_ref;
1648 /* File name of the new image (for 'blockdev-snapshot-sync') */
1649 const char *new_image_file;
1650 ExternalSnapshotState *state =
1651 DO_UPCAST(ExternalSnapshotState, common, common);
1652 TransactionAction *action = common->action;
1653
1654 /* 'blockdev-snapshot' and 'blockdev-snapshot-sync' have similar
1655 * purpose but a different set of parameters */
1656 switch (action->type) {
1657 case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT:
1658 {
1659 BlockdevSnapshot *s = action->u.blockdev_snapshot.data;
1660 device = s->node;
1661 node_name = s->node;
1662 new_image_file = NULL;
1663 snapshot_ref = s->overlay;
1664 }
1665 break;
1666 case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC:
1667 {
1668 BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync.data;
1669 device = s->has_device ? s->device : NULL;
1670 node_name = s->has_node_name ? s->node_name : NULL;
1671 new_image_file = s->snapshot_file;
1672 snapshot_ref = NULL;
1673 }
1674 break;
1675 default:
1676 g_assert_not_reached();
1677 }
1678
1679 /* start processing */
1680 if (action_check_completion_mode(common, errp) < 0) {
1681 return;
1682 }
1683
1684 state->old_bs = bdrv_lookup_bs(device, node_name, errp);
1685 if (!state->old_bs) {
1686 return;
1687 }
1688
1689 /* Acquire AioContext now so any threads operating on old_bs stop */
1690 state->aio_context = bdrv_get_aio_context(state->old_bs);
1691 aio_context_acquire(state->aio_context);
1692 bdrv_drained_begin(state->old_bs);
1693
1694 if (!bdrv_is_inserted(state->old_bs)) {
1695 error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
1696 return;
1697 }
1698
1699 if (bdrv_op_is_blocked(state->old_bs,
1700 BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, errp)) {
1701 return;
1702 }
1703
1704 if (!bdrv_is_read_only(state->old_bs)) {
1705 if (bdrv_flush(state->old_bs)) {
1706 error_setg(errp, QERR_IO_ERROR);
1707 return;
1708 }
1709 }
1710
1711 if (!bdrv_is_first_non_filter(state->old_bs)) {
1712 error_setg(errp, QERR_FEATURE_DISABLED, "snapshot");
1713 return;
1714 }
1715
1716 if (action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC) {
1717 BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync.data;
1718 const char *format = s->has_format ? s->format : "qcow2";
1719 enum NewImageMode mode;
1720 const char *snapshot_node_name =
1721 s->has_snapshot_node_name ? s->snapshot_node_name : NULL;
1722
1723 if (node_name && !snapshot_node_name) {
1724 error_setg(errp, "New snapshot node name missing");
1725 return;
1726 }
1727
1728 if (snapshot_node_name &&
1729 bdrv_lookup_bs(snapshot_node_name, snapshot_node_name, NULL)) {
1730 error_setg(errp, "New snapshot node name already in use");
1731 return;
1732 }
1733
1734 flags = state->old_bs->open_flags;
1735 flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);
1736
1737 /* create new image w/backing file */
1738 mode = s->has_mode ? s->mode : NEW_IMAGE_MODE_ABSOLUTE_PATHS;
1739 if (mode != NEW_IMAGE_MODE_EXISTING) {
1740 int64_t size = bdrv_getlength(state->old_bs);
1741 if (size < 0) {
1742 error_setg_errno(errp, -size, "bdrv_getlength failed");
1743 return;
1744 }
1745 bdrv_img_create(new_image_file, format,
1746 state->old_bs->filename,
1747 state->old_bs->drv->format_name,
1748 NULL, size, flags, &local_err, false);
1749 if (local_err) {
1750 error_propagate(errp, local_err);
1751 return;
1752 }
1753 }
1754
1755 options = qdict_new();
1756 if (s->has_snapshot_node_name) {
1757 qdict_put(options, "node-name",
1758 qstring_from_str(snapshot_node_name));
1759 }
1760 qdict_put(options, "driver", qstring_from_str(format));
1761
1762 flags |= BDRV_O_NO_BACKING;
1763 }
1764
1765 state->new_bs = bdrv_open(new_image_file, snapshot_ref, options, flags,
1766 errp);
1767 /* We will manually add the backing_hd field to the bs later */
1768 if (!state->new_bs) {
1769 return;
1770 }
1771
1772 if (bdrv_has_blk(state->new_bs)) {
1773 error_setg(errp, "The snapshot is already in use by %s",
1774 bdrv_get_parent_name(state->new_bs));
1775 return;
1776 }
1777
1778 if (bdrv_op_is_blocked(state->new_bs, BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
1779 errp)) {
1780 return;
1781 }
1782
1783 if (state->new_bs->backing != NULL) {
1784 error_setg(errp, "The snapshot already has a backing image");
1785 return;
1786 }
1787
1788 if (!state->new_bs->drv->supports_backing) {
1789 error_setg(errp, "The snapshot does not support backing images");
1790 }
1791 }
1792
1793 static void external_snapshot_commit(BlkActionState *common)
1794 {
1795 ExternalSnapshotState *state =
1796 DO_UPCAST(ExternalSnapshotState, common, common);
1797
1798 bdrv_set_aio_context(state->new_bs, state->aio_context);
1799
1800 /* This removes our old bs and adds the new bs */
1801 bdrv_append(state->new_bs, state->old_bs);
1802 /* We don't need (or want) to use the transactional
1803 * bdrv_reopen_multiple() across all the entries at once, because we
1804 * don't want to abort all of them if one of them fails the reopen */
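    /* The old image has become the backing file of the snapshot; reopen it
     * read-only unless copy-on-read still needs write access to it. */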
1805 if (!state->old_bs->copy_on_read) {
1806 bdrv_reopen(state->old_bs, state->old_bs->open_flags & ~BDRV_O_RDWR,
1807 NULL);
1808 }
1809 }
1810
1811 static void external_snapshot_abort(BlkActionState *common)
1812 {
1813 ExternalSnapshotState *state =
1814 DO_UPCAST(ExternalSnapshotState, common, common);
1815 if (state->new_bs) {
1816 bdrv_unref(state->new_bs);
1817 }
1818 }
1819
1820 static void external_snapshot_clean(BlkActionState *common)
1821 {
1822 ExternalSnapshotState *state =
1823 DO_UPCAST(ExternalSnapshotState, common, common);
1824 if (state->aio_context) {
1825 bdrv_drained_end(state->old_bs);
1826 aio_context_release(state->aio_context);
1827 }
1828 }
1829
1830 typedef struct DriveBackupState {
1831 BlkActionState common;
1832 BlockDriverState *bs;
1833 AioContext *aio_context;
1834 BlockJob *job;
1835 } DriveBackupState;
1836
1837 static void do_drive_backup(const char *device, const char *target,
1838 bool has_format, const char *format,
1839 enum MirrorSyncMode sync,
1840 bool has_mode, enum NewImageMode mode,
1841 bool has_speed, int64_t speed,
1842 bool has_bitmap, const char *bitmap,
1843 bool has_on_source_error,
1844 BlockdevOnError on_source_error,
1845 bool has_on_target_error,
1846 BlockdevOnError on_target_error,
1847 BlockJobTxn *txn, Error **errp);
1848
1849 static void drive_backup_prepare(BlkActionState *common, Error **errp)
1850 {
1851 DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
1852 BlockBackend *blk;
1853 DriveBackup *backup;
1854 Error *local_err = NULL;
1855
1856 assert(common->action->type == TRANSACTION_ACTION_KIND_DRIVE_BACKUP);
1857 backup = common->action->u.drive_backup.data;
1858
1859 blk = blk_by_name(backup->device);
1860 if (!blk) {
1861 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
1862 "Device '%s' not found", backup->device);
1863 return;
1864 }
1865
1866 if (!blk_is_available(blk)) {
1867 error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, backup->device);
1868 return;
1869 }
1870
1871 /* AioContext is released in .clean() */
1872 state->aio_context = blk_get_aio_context(blk);
1873 aio_context_acquire(state->aio_context);
1874 bdrv_drained_begin(blk_bs(blk));
1875 state->bs = blk_bs(blk);
1876
1877 do_drive_backup(backup->device, backup->target,
1878 backup->has_format, backup->format,
1879 backup->sync,
1880 backup->has_mode, backup->mode,
1881 backup->has_speed, backup->speed,
1882 backup->has_bitmap, backup->bitmap,
1883 backup->has_on_source_error, backup->on_source_error,
1884 backup->has_on_target_error, backup->on_target_error,
1885 common->block_job_txn, &local_err);
1886 if (local_err) {
1887 error_propagate(errp, local_err);
1888 return;
1889 }
1890
1891 state->job = state->bs->job;
1892 }
1893
1894 static void drive_backup_abort(BlkActionState *common)
1895 {
1896 DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
1897 BlockDriverState *bs = state->bs;
1898
1899 /* Only cancel if it's the job we started */
1900 if (bs && bs->job && bs->job == state->job) {
1901 block_job_cancel_sync(bs->job);
1902 }
1903 }
1904
1905 static void drive_backup_clean(BlkActionState *common)
1906 {
1907 DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
1908
1909 if (state->aio_context) {
1910 bdrv_drained_end(state->bs);
1911 aio_context_release(state->aio_context);
1912 }
1913 }
1914
1915 typedef struct BlockdevBackupState {
1916 BlkActionState common;
1917 BlockDriverState *bs;
1918 BlockJob *job;
1919 AioContext *aio_context;
1920 } BlockdevBackupState;
1921
1922 static void do_blockdev_backup(const char *device, const char *target,
1923 enum MirrorSyncMode sync,
1924 bool has_speed, int64_t speed,
1925 bool has_on_source_error,
1926 BlockdevOnError on_source_error,
1927 bool has_on_target_error,
1928 BlockdevOnError on_target_error,
1929 BlockJobTxn *txn, Error **errp);
1930
1931 static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
1932 {
1933 BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
1934 BlockdevBackup *backup;
1935 BlockBackend *blk, *target;
1936 Error *local_err = NULL;
1937
1938 assert(common->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP);
1939 backup = common->action->u.blockdev_backup.data;
1940
1941 blk = blk_by_name(backup->device);
1942 if (!blk) {
1943 error_setg(errp, "Device '%s' not found", backup->device);
1944 return;
1945 }
1946
1947 if (!blk_is_available(blk)) {
1948 error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, backup->device);
1949 return;
1950 }
1951
1952 target = blk_by_name(backup->target);
1953 if (!target) {
1954 error_setg(errp, "Device '%s' not found", backup->target);
1955 return;
1956 }
1957
1958 /* AioContext is released in .clean() */
1959 state->aio_context = blk_get_aio_context(blk);
1960 if (state->aio_context != blk_get_aio_context(target)) {
1961 state->aio_context = NULL;
1962 error_setg(errp, "Backup between two IO threads is not implemented");
1963 return;
1964 }
1965 aio_context_acquire(state->aio_context);
1966 state->bs = blk_bs(blk);
1967 bdrv_drained_begin(state->bs);
1968
1969 do_blockdev_backup(backup->device, backup->target,
1970 backup->sync,
1971 backup->has_speed, backup->speed,
1972 backup->has_on_source_error, backup->on_source_error,
1973 backup->has_on_target_error, backup->on_target_error,
1974 common->block_job_txn, &local_err);
1975 if (local_err) {
1976 error_propagate(errp, local_err);
1977 return;
1978 }
1979
1980 state->job = state->bs->job;
1981 }
1982
1983 static void blockdev_backup_abort(BlkActionState *common)
1984 {
1985 BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
1986 BlockDriverState *bs = state->bs;
1987
1988 /* Only cancel if it's the job we started */
1989 if (bs && bs->job && bs->job == state->job) {
1990 block_job_cancel_sync(bs->job);
1991 }
1992 }
1993
1994 static void blockdev_backup_clean(BlkActionState *common)
1995 {
1996 BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
1997
1998 if (state->aio_context) {
1999 bdrv_drained_end(state->bs);
2000 aio_context_release(state->aio_context);
2001 }
2002 }
2003
2004 typedef struct BlockDirtyBitmapState {
2005 BlkActionState common;
2006 BdrvDirtyBitmap *bitmap;
2007 BlockDriverState *bs;
2008 AioContext *aio_context;
2009 HBitmap *backup;
2010 bool prepared;
2011 } BlockDirtyBitmapState;
2012
2013 static void block_dirty_bitmap_add_prepare(BlkActionState *common,
2014 Error **errp)
2015 {
2016 Error *local_err = NULL;
2017 BlockDirtyBitmapAdd *action;
2018 BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2019 common, common);
2020
2021 if (action_check_completion_mode(common, errp) < 0) {
2022 return;
2023 }
2024
2025 action = common->action->u.block_dirty_bitmap_add.data;
2026 /* AIO context taken and released within qmp_block_dirty_bitmap_add */
2027 qmp_block_dirty_bitmap_add(action->node, action->name,
2028 action->has_granularity, action->granularity,
2029 &local_err);
2030
2031 if (!local_err) {
2032 state->prepared = true;
2033 } else {
2034 error_propagate(errp, local_err);
2035 }
2036 }
2037
2038 static void block_dirty_bitmap_add_abort(BlkActionState *common)
2039 {
2040 BlockDirtyBitmapAdd *action;
2041 BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2042 common, common);
2043
2044 action = common->action->u.block_dirty_bitmap_add.data;
2045 /* Should not be able to fail: IF the bitmap was added via .prepare(),
2046 * then the node reference and bitmap name must have been valid.
2047 */
2048 if (state->prepared) {
2049 qmp_block_dirty_bitmap_remove(action->node, action->name, &error_abort);
2050 }
2051 }
2052
2053 static void block_dirty_bitmap_clear_prepare(BlkActionState *common,
2054 Error **errp)
2055 {
2056 BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2057 common, common);
2058 BlockDirtyBitmap *action;
2059
2060 if (action_check_completion_mode(common, errp) < 0) {
2061 return;
2062 }
2063
2064 action = common->action->u.block_dirty_bitmap_clear.data;
2065 state->bitmap = block_dirty_bitmap_lookup(action->node,
2066 action->name,
2067 &state->bs,
2068 &state->aio_context,
2069 errp);
2070 if (!state->bitmap) {
2071 return;
2072 }
2073
2074 if (bdrv_dirty_bitmap_frozen(state->bitmap)) {
2075 error_setg(errp, "Cannot modify a frozen bitmap");
2076 return;
2077 } else if (!bdrv_dirty_bitmap_enabled(state->bitmap)) {
2078 error_setg(errp, "Cannot clear a disabled bitmap");
2079 return;
2080 }
2081
2082 bdrv_clear_dirty_bitmap(state->bitmap, &state->backup);
2083 /* AioContext is released in .clean() */
2084 }
2085
2086 static void block_dirty_bitmap_clear_abort(BlkActionState *common)
2087 {
2088 BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2089 common, common);
2090
2091 /* .prepare() may have failed before the bitmap was actually cleared;
 * only undo the clear if a backup of the previous contents was taken. */
 if (state->backup) {
 bdrv_undo_clear_dirty_bitmap(state->bitmap, state->backup);
 }
2092 }
2093
2094 static void block_dirty_bitmap_clear_commit(BlkActionState *common)
2095 {
2096 BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2097 common, common);
2098
2099 hbitmap_free(state->backup);
2100 }
2101
2102 static void block_dirty_bitmap_clear_clean(BlkActionState *common)
2103 {
2104 BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
2105 common, common);
2106
2107 if (state->aio_context) {
2108 aio_context_release(state->aio_context);
2109 }
2110 }
2111
2112 static void abort_prepare(BlkActionState *common, Error **errp)
2113 {
2114 error_setg(errp, "Transaction aborted using Abort action");
2115 }
2116
2117 static void abort_commit(BlkActionState *common)
2118 {
2119 g_assert_not_reached(); /* this action never succeeds */
2120 }
2121
2122 static const BlkActionOps actions[] = {
2123 [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT] = {
2124 .instance_size = sizeof(ExternalSnapshotState),
2125 .prepare = external_snapshot_prepare,
2126 .commit = external_snapshot_commit,
2127 .abort = external_snapshot_abort,
2128 .clean = external_snapshot_clean,
2129 },
2130 [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC] = {
2131 .instance_size = sizeof(ExternalSnapshotState),
2132 .prepare = external_snapshot_prepare,
2133 .commit = external_snapshot_commit,
2134 .abort = external_snapshot_abort,
2135 .clean = external_snapshot_clean,
2136 },
2137 [TRANSACTION_ACTION_KIND_DRIVE_BACKUP] = {
2138 .instance_size = sizeof(DriveBackupState),
2139 .prepare = drive_backup_prepare,
2140 .abort = drive_backup_abort,
2141 .clean = drive_backup_clean,
2142 },
2143 [TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP] = {
2144 .instance_size = sizeof(BlockdevBackupState),
2145 .prepare = blockdev_backup_prepare,
2146 .abort = blockdev_backup_abort,
2147 .clean = blockdev_backup_clean,
2148 },
2149 [TRANSACTION_ACTION_KIND_ABORT] = {
2150 .instance_size = sizeof(BlkActionState),
2151 .prepare = abort_prepare,
2152 .commit = abort_commit,
2153 },
2154 [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC] = {
2155 .instance_size = sizeof(InternalSnapshotState),
2156 .prepare = internal_snapshot_prepare,
2157 .abort = internal_snapshot_abort,
2158 .clean = internal_snapshot_clean,
2159 },
2160 [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_ADD] = {
2161 .instance_size = sizeof(BlockDirtyBitmapState),
2162 .prepare = block_dirty_bitmap_add_prepare,
2163 .abort = block_dirty_bitmap_add_abort,
2164 },
2165 [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_CLEAR] = {
2166 .instance_size = sizeof(BlockDirtyBitmapState),
2167 .prepare = block_dirty_bitmap_clear_prepare,
2168 .commit = block_dirty_bitmap_clear_commit,
2169 .abort = block_dirty_bitmap_clear_abort,
2170 .clean = block_dirty_bitmap_clear_clean,
2171 }
2172 };
2173
2174 /**
2175 * Allocate a TransactionProperties structure if necessary, and fill
2176 * that structure with desired defaults if they are unset.
2177 */
2178 static TransactionProperties *get_transaction_properties(
2179 TransactionProperties *props)
2180 {
2181 if (!props) {
2182 props = g_new0(TransactionProperties, 1);
2183 }
2184
2185 if (!props->has_completion_mode) {
2186 props->has_completion_mode = true;
2187 props->completion_mode = ACTION_COMPLETION_MODE_INDIVIDUAL;
2188 }
2189
2190 return props;
2191 }
2192
2193 /*
2194 * 'Atomic' group operations. The operations are performed as a set, and if
2195 * any fail then we roll back all operations in the group.
2196 */
2197 void qmp_transaction(TransactionActionList *dev_list,
2198 bool has_props,
2199 struct TransactionProperties *props,
2200 Error **errp)
2201 {
2202 TransactionActionList *dev_entry = dev_list;
2203 BlockJobTxn *block_job_txn = NULL;
2204 BlkActionState *state, *next;
2205 Error *local_err = NULL;
2206
2207 QSIMPLEQ_HEAD(snap_bdrv_states, BlkActionState) snap_bdrv_states;
2208 QSIMPLEQ_INIT(&snap_bdrv_states);
2209
2210 /* Does this transaction get canceled as a group on failure?
2211 * If not, we don't really need to make a BlockJobTxn.
2212 */
2213 props = get_transaction_properties(props);
2214 if (props->completion_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) {
2215 block_job_txn = block_job_txn_new();
2216 }
2217
2218 /* drain all i/o before any operations */
2219 bdrv_drain_all();
2220
2221 /* We don't do anything in this loop that commits us to the operations */
2222 while (NULL != dev_entry) {
2223 TransactionAction *dev_info = NULL;
2224 const BlkActionOps *ops;
2225
2226 dev_info = dev_entry->value;
2227 dev_entry = dev_entry->next;
2228
2229 assert(dev_info->type < ARRAY_SIZE(actions));
2230
2231 ops = &actions[dev_info->type];
2232 assert(ops->instance_size > 0);
2233
2234 state = g_malloc0(ops->instance_size);
2235 state->ops = ops;
2236 state->action = dev_info;
2237 state->block_job_txn = block_job_txn;
2238 state->txn_props = props;
2239 QSIMPLEQ_INSERT_TAIL(&snap_bdrv_states, state, entry);
2240
2241 state->ops->prepare(state, &local_err);
2242 if (local_err) {
2243 error_propagate(errp, local_err);
2244 goto delete_and_fail;
2245 }
2246 }
2247
2248 QSIMPLEQ_FOREACH(state, &snap_bdrv_states, entry) {
2249 if (state->ops->commit) {
2250 state->ops->commit(state);
2251 }
2252 }
2253
2254 /* success */
2255 goto exit;
2256
2257 delete_and_fail:
2258 /* failure, and it is all-or-none; roll back all operations */
2259 QSIMPLEQ_FOREACH(state, &snap_bdrv_states, entry) {
2260 if (state->ops->abort) {
2261 state->ops->abort(state);
2262 }
2263 }
2264 exit:
2265 QSIMPLEQ_FOREACH_SAFE(state, &snap_bdrv_states, entry, next) {
2266 if (state->ops->clean) {
2267 state->ops->clean(state);
2268 }
2269 g_free(state);
2270 }
2271 if (!has_props) {
2272 qapi_free_TransactionProperties(props);
2273 }
2274 block_job_txn_unref(block_job_txn);
2275 }
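/*
 * Illustrative sketch, not part of the original source: a QMP 'transaction'
 * command handled by qmp_transaction() above might look roughly like the
 * following (device names, bitmap name and target path are hypothetical;
 * "grouped" completion requests the all-or-none BlockJobTxn path):
 *
 * { "execute": "transaction",
 *   "arguments": {
 *     "actions": [
 *       { "type": "drive-backup",
 *         "data": { "device": "drive0", "target": "/tmp/drive0.bak",
 *                   "sync": "full" } },
 *       { "type": "block-dirty-bitmap-clear",
 *         "data": { "node": "drive0", "name": "bitmap0" } }
 *     ],
 *     "properties": { "completion-mode": "grouped" }
 *   } }
 */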
2276
2277 static int do_open_tray(const char *device, bool force, Error **errp);
2278
2279 void qmp_eject(const char *device, bool has_force, bool force, Error **errp)
2280 {
2281 Error *local_err = NULL;
2282 int rc;
2283
2284 if (!has_force) {
2285 force = false;
2286 }
2287
2288 rc = do_open_tray(device, force, &local_err);
2289 if (local_err) {
2290 error_propagate(errp, local_err);
2291 return;
2292 }
2293
2294 if (rc == EINPROGRESS) {
2295 error_setg(errp, "Device '%s' is locked and force was not specified, "
2296 "wait for tray to open and try again", device);
2297 return;
2298 }
2299
2300 qmp_x_blockdev_remove_medium(device, errp);
2301 }
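/*
 * Illustrative sketch, not part of the original source: an 'eject' command
 * as handled above; the device name is hypothetical and 'force' is optional.
 *
 * { "execute": "eject", "arguments": { "device": "ide1-cd0", "force": true } }
 */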
2302
2303 void qmp_block_passwd(bool has_device, const char *device,
2304 bool has_node_name, const char *node_name,
2305 const char *password, Error **errp)
2306 {
2307 Error *local_err = NULL;
2308 BlockDriverState *bs;
2309 AioContext *aio_context;
2310
2311 bs = bdrv_lookup_bs(has_device ? device : NULL,
2312 has_node_name ? node_name : NULL,
2313 &local_err);
2314 if (local_err) {
2315 error_propagate(errp, local_err);
2316 return;
2317 }
2318
2319 aio_context = bdrv_get_aio_context(bs);
2320 aio_context_acquire(aio_context);
2321
2322 bdrv_add_key(bs, password, errp);
2323
2324 aio_context_release(aio_context);
2325 }
2326
2327 /**
2328 * returns -errno on fatal error, +errno for non-fatal situations.
2329 * errp will always be set when the return code is negative.
2330 * May return +ENOSYS if the device has no tray,
2331 * or +EINPROGRESS if the tray is locked and the guest has been notified.
2332 */
2333 static int do_open_tray(const char *device, bool force, Error **errp)
2334 {
2335 BlockBackend *blk;
2336 bool locked;
2337
2338 blk = blk_by_name(device);
2339 if (!blk) {
2340 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2341 "Device '%s' not found", device);
2342 return -ENODEV;
2343 }
2344
2345 if (!blk_dev_has_removable_media(blk)) {
2346 error_setg(errp, "Device '%s' is not removable", device);
2347 return -ENOTSUP;
2348 }
2349
2350 if (!blk_dev_has_tray(blk)) {
2351 /* Ignore this command on tray-less devices */
2352 return ENOSYS;
2353 }
2354
2355 if (blk_dev_is_tray_open(blk)) {
2356 return 0;
2357 }
2358
2359 locked = blk_dev_is_medium_locked(blk);
2360 if (locked) {
2361 blk_dev_eject_request(blk, force);
2362 }
2363
2364 if (!locked || force) {
2365 blk_dev_change_media_cb(blk, false);
2366 }
2367
2368 if (locked && !force) {
2369 return EINPROGRESS;
2370 }
2371
2372 return 0;
2373 }
2374
2375 void qmp_blockdev_open_tray(const char *device, bool has_force, bool force,
2376 Error **errp)
2377 {
2378 if (!has_force) {
2379 force = false;
2380 }
2381 do_open_tray(device, force, errp);
2382 }
2383
2384 void qmp_blockdev_close_tray(const char *device, Error **errp)
2385 {
2386 BlockBackend *blk;
2387
2388 blk = blk_by_name(device);
2389 if (!blk) {
2390 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2391 "Device '%s' not found", device);
2392 return;
2393 }
2394
2395 if (!blk_dev_has_removable_media(blk)) {
2396 error_setg(errp, "Device '%s' is not removable", device);
2397 return;
2398 }
2399
2400 if (!blk_dev_has_tray(blk)) {
2401 /* Ignore this command on tray-less devices */
2402 return;
2403 }
2404
2405 if (!blk_dev_is_tray_open(blk)) {
2406 return;
2407 }
2408
2409 blk_dev_change_media_cb(blk, true);
2410 }
2411
2412 void qmp_x_blockdev_remove_medium(const char *device, Error **errp)
2413 {
2414 BlockBackend *blk;
2415 BlockDriverState *bs;
2416 AioContext *aio_context;
2417 bool has_device;
2418
2419 blk = blk_by_name(device);
2420 if (!blk) {
2421 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2422 "Device '%s' not found", device);
2423 return;
2424 }
2425
2426 /* For BBs without a device, we can exchange the BDS tree at will */
2427 has_device = blk_get_attached_dev(blk);
2428
2429 if (has_device && !blk_dev_has_removable_media(blk)) {
2430 error_setg(errp, "Device '%s' is not removable", device);
2431 return;
2432 }
2433
2434 if (has_device && blk_dev_has_tray(blk) && !blk_dev_is_tray_open(blk)) {
2435 error_setg(errp, "Tray of device '%s' is not open", device);
2436 return;
2437 }
2438
2439 bs = blk_bs(blk);
2440 if (!bs) {
2441 return;
2442 }
2443
2444 aio_context = bdrv_get_aio_context(bs);
2445 aio_context_acquire(aio_context);
2446
2447 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_EJECT, errp)) {
2448 goto out;
2449 }
2450
2451 blk_remove_bs(blk);
2452
2453 if (!blk_dev_has_tray(blk)) {
2454 /* For tray-less devices, blockdev-open-tray is a no-op (or may not be
2455 * called at all); therefore, the medium needs to be ejected here.
2456 * Do it after blk_remove_bs() so blk_is_inserted(blk) returns the @load
2457 * value passed here (i.e. false). */
2458 blk_dev_change_media_cb(blk, false);
2459 }
2460
2461 out:
2462 aio_context_release(aio_context);
2463 }
2464
2465 static void qmp_blockdev_insert_anon_medium(const char *device,
2466 BlockDriverState *bs, Error **errp)
2467 {
2468 BlockBackend *blk;
2469 bool has_device;
2470
2471 blk = blk_by_name(device);
2472 if (!blk) {
2473 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2474 "Device '%s' not found", device);
2475 return;
2476 }
2477
2478 /* For BBs without a device, we can exchange the BDS tree at will */
2479 has_device = blk_get_attached_dev(blk);
2480
2481 if (has_device && !blk_dev_has_removable_media(blk)) {
2482 error_setg(errp, "Device '%s' is not removable", device);
2483 return;
2484 }
2485
2486 if (has_device && blk_dev_has_tray(blk) && !blk_dev_is_tray_open(blk)) {
2487 error_setg(errp, "Tray of device '%s' is not open", device);
2488 return;
2489 }
2490
2491 if (blk_bs(blk)) {
2492 error_setg(errp, "There already is a medium in device '%s'", device);
2493 return;
2494 }
2495
2496 blk_insert_bs(blk, bs);
2497
2498 if (!blk_dev_has_tray(blk)) {
2499 /* For tray-less devices, blockdev-close-tray is a no-op (or may not be
2500 * called at all); therefore, the medium needs to be pushed into the
2501 * slot here.
2502 * Do it after blk_insert_bs() so blk_is_inserted(blk) returns the @load
2503 * value passed here (i.e. true). */
2504 blk_dev_change_media_cb(blk, true);
2505 }
2506 }
2507
2508 void qmp_x_blockdev_insert_medium(const char *device, const char *node_name,
2509 Error **errp)
2510 {
2511 BlockDriverState *bs;
2512
2513 bs = bdrv_find_node(node_name);
2514 if (!bs) {
2515 error_setg(errp, "Node '%s' not found", node_name);
2516 return;
2517 }
2518
2519 if (bdrv_has_blk(bs)) {
2520 error_setg(errp, "Node '%s' is already in use by '%s'", node_name,
2521 bdrv_get_parent_name(bs));
2522 return;
2523 }
2524
2525 qmp_blockdev_insert_anon_medium(device, bs, errp);
2526 }
2527
2528 void qmp_blockdev_change_medium(const char *device, const char *filename,
2529 bool has_format, const char *format,
2530 bool has_read_only,
2531 BlockdevChangeReadOnlyMode read_only,
2532 Error **errp)
2533 {
2534 BlockBackend *blk;
2535 BlockDriverState *medium_bs = NULL;
2536 int bdrv_flags;
2537 QDict *options = NULL;
2538 Error *err = NULL;
2539
2540 blk = blk_by_name(device);
2541 if (!blk) {
2542 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2543 "Device '%s' not found", device);
2544 goto fail;
2545 }
2546
2547 if (blk_bs(blk)) {
2548 blk_update_root_state(blk);
2549 }
2550
2551 bdrv_flags = blk_get_open_flags_from_root_state(blk);
2552 bdrv_flags &= ~(BDRV_O_TEMPORARY | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING |
2553 BDRV_O_PROTOCOL);
2554
2555 if (!has_read_only) {
2556 read_only = BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN;
2557 }
2558
2559 switch (read_only) {
2560 case BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN:
2561 break;
2562
2563 case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_ONLY:
2564 bdrv_flags &= ~BDRV_O_RDWR;
2565 break;
2566
2567 case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_WRITE:
2568 bdrv_flags |= BDRV_O_RDWR;
2569 break;
2570
2571 default:
2572 abort();
2573 }
2574
2575 if (has_format) {
2576 options = qdict_new();
2577 qdict_put(options, "driver", qstring_from_str(format));
2578 }
2579
2580 medium_bs = bdrv_open(filename, NULL, options, bdrv_flags, errp);
2581 if (!medium_bs) {
2582 goto fail;
2583 }
2584
2585 bdrv_add_key(medium_bs, NULL, &err);
2586 if (err) {
2587 error_propagate(errp, err);
2588 goto fail;
2589 }
2590
2591 qmp_blockdev_open_tray(device, false, false, &err);
2592 if (err) {
2593 error_propagate(errp, err);
2594 goto fail;
2595 }
2596
2597 qmp_x_blockdev_remove_medium(device, &err);
2598 if (err) {
2599 error_propagate(errp, err);
2600 goto fail;
2601 }
2602
2603 qmp_blockdev_insert_anon_medium(device, medium_bs, &err);
2604 if (err) {
2605 error_propagate(errp, err);
2606 goto fail;
2607 }
2608
2609 blk_apply_root_state(blk, medium_bs);
2610
2611 qmp_blockdev_close_tray(device, errp);
2612
2613 fail:
2614 /* If the medium has been inserted, the device has its own reference, so
2615 * ours must be relinquished; and if it has not been inserted successfully,
2616 * the reference must be relinquished anyway */
2617 bdrv_unref(medium_bs);
2618 }
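/*
 * Illustrative sketch, not part of the original source: a medium change as
 * handled above (open tray, remove medium, insert new medium, close tray);
 * the device name, filename and format are hypothetical.
 *
 * { "execute": "blockdev-change-medium",
 *   "arguments": { "device": "ide1-cd0",
 *                  "filename": "/srv/images/new.iso",
 *                  "format": "raw" } }
 */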
2619
2620 /* throttling disk I/O limits */
2621 void qmp_block_set_io_throttle(const char *device, int64_t bps, int64_t bps_rd,
2622 int64_t bps_wr,
2623 int64_t iops,
2624 int64_t iops_rd,
2625 int64_t iops_wr,
2626 bool has_bps_max,
2627 int64_t bps_max,
2628 bool has_bps_rd_max,
2629 int64_t bps_rd_max,
2630 bool has_bps_wr_max,
2631 int64_t bps_wr_max,
2632 bool has_iops_max,
2633 int64_t iops_max,
2634 bool has_iops_rd_max,
2635 int64_t iops_rd_max,
2636 bool has_iops_wr_max,
2637 int64_t iops_wr_max,
2638 bool has_bps_max_length,
2639 int64_t bps_max_length,
2640 bool has_bps_rd_max_length,
2641 int64_t bps_rd_max_length,
2642 bool has_bps_wr_max_length,
2643 int64_t bps_wr_max_length,
2644 bool has_iops_max_length,
2645 int64_t iops_max_length,
2646 bool has_iops_rd_max_length,
2647 int64_t iops_rd_max_length,
2648 bool has_iops_wr_max_length,
2649 int64_t iops_wr_max_length,
2650 bool has_iops_size,
2651 int64_t iops_size,
2652 bool has_group,
2653 const char *group, Error **errp)
2654 {
2655 ThrottleConfig cfg;
2656 BlockDriverState *bs;
2657 BlockBackend *blk;
2658 AioContext *aio_context;
2659
2660 blk = blk_by_name(device);
2661 if (!blk) {
2662 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
2663 "Device '%s' not found", device);
2664 return;
2665 }
2666
2667 aio_context = blk_get_aio_context(blk);
2668 aio_context_acquire(aio_context);
2669
2670 bs = blk_bs(blk);
2671 if (!bs) {
2672 error_setg(errp, "Device '%s' has no medium", device);
2673 goto out;
2674 }
2675
2676 throttle_config_init(&cfg);
2677 cfg.buckets[THROTTLE_BPS_TOTAL].avg = bps;
2678 cfg.buckets[THROTTLE_BPS_READ].avg = bps_rd;
2679 cfg.buckets[THROTTLE_BPS_WRITE].avg = bps_wr;
2680
2681 cfg.buckets[THROTTLE_OPS_TOTAL].avg = iops;
2682 cfg.buckets[THROTTLE_OPS_READ].avg = iops_rd;
2683 cfg.buckets[THROTTLE_OPS_WRITE].avg = iops_wr;
2684
2685 if (has_bps_max) {
2686 cfg.buckets[THROTTLE_BPS_TOTAL].max = bps_max;
2687 }
2688 if (has_bps_rd_max) {
2689 cfg.buckets[THROTTLE_BPS_READ].max = bps_rd_max;
2690 }
2691 if (has_bps_wr_max) {
2692 cfg.buckets[THROTTLE_BPS_WRITE].max = bps_wr_max;
2693 }
2694 if (has_iops_max) {
2695 cfg.buckets[THROTTLE_OPS_TOTAL].max = iops_max;
2696 }
2697 if (has_iops_rd_max) {
2698 cfg.buckets[THROTTLE_OPS_READ].max = iops_rd_max;
2699 }
2700 if (has_iops_wr_max) {
2701 cfg.buckets[THROTTLE_OPS_WRITE].max = iops_wr_max;
2702 }
2703
2704 if (has_bps_max_length) {
2705 cfg.buckets[THROTTLE_BPS_TOTAL].burst_length = bps_max_length;
2706 }
2707 if (has_bps_rd_max_length) {
2708 cfg.buckets[THROTTLE_BPS_READ].burst_length = bps_rd_max_length;
2709 }
2710 if (has_bps_wr_max_length) {
2711 cfg.buckets[THROTTLE_BPS_WRITE].burst_length = bps_wr_max_length;
2712 }
2713 if (has_iops_max_length) {
2714 cfg.buckets[THROTTLE_OPS_TOTAL].burst_length = iops_max_length;
2715 }
2716 if (has_iops_rd_max_length) {
2717 cfg.buckets[THROTTLE_OPS_READ].burst_length = iops_rd_max_length;
2718 }
2719 if (has_iops_wr_max_length) {
2720 cfg.buckets[THROTTLE_OPS_WRITE].burst_length = iops_wr_max_length;
2721 }
2722
2723 if (has_iops_size) {
2724 cfg.op_size = iops_size;
2725 }
2726
2727 if (!throttle_is_valid(&cfg, errp)) {
2728 goto out;
2729 }
2730
2731 if (throttle_enabled(&cfg)) {
2732 /* Enable I/O limits if they're not enabled yet, otherwise
2733 * just update the throttling group. */
2734 if (!blk_get_public(blk)->throttle_state) {
2735 blk_io_limits_enable(blk, has_group ? group : device);
2736 } else if (has_group) {
2737 blk_io_limits_update_group(blk, group);
2738 }
2739 /* Set the new throttling configuration */
2740 blk_set_io_limits(blk, &cfg);
2741 } else if (blk_get_public(blk)->throttle_state) {
2742 /* If all throttling settings are set to 0, disable I/O limits */
2743 blk_io_limits_disable(blk);
2744 }
2745
2746 out:
2747 aio_context_release(aio_context);
2748 }
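/*
 * Illustrative sketch, not part of the original source: enabling a 10 MB/s
 * total bandwidth limit via 'block_set_io_throttle' (the six base rates are
 * mandatory; the device name and numbers are hypothetical). Setting all
 * values to 0 disables throttling again, as implemented above.
 *
 * { "execute": "block_set_io_throttle",
 *   "arguments": { "device": "virtio0",
 *                  "bps": 10485760, "bps_rd": 0, "bps_wr": 0,
 *                  "iops": 0, "iops_rd": 0, "iops_wr": 0 } }
 */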
2749
2750 void qmp_block_dirty_bitmap_add(const char *node, const char *name,
2751 bool has_granularity, uint32_t granularity,
2752 Error **errp)
2753 {
2754 AioContext *aio_context;
2755 BlockDriverState *bs;
2756
2757 if (!name || name[0] == '\0') {
2758 error_setg(errp, "Bitmap name cannot be empty");
2759 return;
2760 }
2761
2762 bs = bdrv_lookup_bs(node, node, errp);
2763 if (!bs) {
2764 return;
2765 }
2766
2767 aio_context = bdrv_get_aio_context(bs);
2768 aio_context_acquire(aio_context);
2769
2770 if (has_granularity) {
2771 if (granularity < 512 || !is_power_of_2(granularity)) {
2772 error_setg(errp, "Granularity must be power of 2 "
2773 "and at least 512");
2774 goto out;
2775 }
2776 } else {
2777 /* Default to cluster size, if available: */
2778 granularity = bdrv_get_default_bitmap_granularity(bs);
2779 }
2780
2781 bdrv_create_dirty_bitmap(bs, granularity, name, errp);
2782
2783 out:
2784 aio_context_release(aio_context);
2785 }
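/*
 * Illustrative sketch, not part of the original source: adding a dirty
 * bitmap; node and bitmap names are hypothetical. 'granularity' is optional
 * and defaults to the cluster size, as handled above.
 *
 * { "execute": "block-dirty-bitmap-add",
 *   "arguments": { "node": "drive0", "name": "bitmap0",
 *                  "granularity": 65536 } }
 */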
2786
2787 void qmp_block_dirty_bitmap_remove(const char *node, const char *name,
2788 Error **errp)
2789 {
2790 AioContext *aio_context;
2791 BlockDriverState *bs;
2792 BdrvDirtyBitmap *bitmap;
2793
2794 bitmap = block_dirty_bitmap_lookup(node, name, &bs, &aio_context, errp);
2795 if (!bitmap || !bs) {
2796 return;
2797 }
2798
2799 if (bdrv_dirty_bitmap_frozen(bitmap)) {
2800 error_setg(errp,
2801 "Bitmap '%s' is currently frozen and cannot be removed",
2802 name);
2803 goto out;
2804 }
2805 bdrv_dirty_bitmap_make_anon(bitmap);
2806 bdrv_release_dirty_bitmap(bs, bitmap);
2807
2808 out:
2809 aio_context_release(aio_context);
2810 }
2811
2812 /**
2813 * Completely clear a bitmap, e.g. to synchronize it with the target
2814 * immediately after a full backup operation.
2815 */
2816 void qmp_block_dirty_bitmap_clear(const char *node, const char *name,
2817 Error **errp)
2818 {
2819 AioContext *aio_context;
2820 BdrvDirtyBitmap *bitmap;
2821 BlockDriverState *bs;
2822
2823 bitmap = block_dirty_bitmap_lookup(node, name, &bs, &aio_context, errp);
2824 if (!bitmap || !bs) {
2825 return;
2826 }
2827
2828 if (bdrv_dirty_bitmap_frozen(bitmap)) {
2829 error_setg(errp,
2830 "Bitmap '%s' is currently frozen and cannot be modified",
2831 name);
2832 goto out;
2833 } else if (!bdrv_dirty_bitmap_enabled(bitmap)) {
2834 error_setg(errp,
2835 "Bitmap '%s' is currently disabled and cannot be cleared",
2836 name);
2837 goto out;
2838 }
2839
2840 bdrv_clear_dirty_bitmap(bitmap, NULL);
2841
2842 out:
2843 aio_context_release(aio_context);
2844 }
2845
2846 void hmp_drive_del(Monitor *mon, const QDict *qdict)
2847 {
2848 const char *id = qdict_get_str(qdict, "id");
2849 BlockBackend *blk;
2850 BlockDriverState *bs;
2851 AioContext *aio_context;
2852 Error *local_err = NULL;
2853
2854 bs = bdrv_find_node(id);
2855 if (bs) {
2856 qmp_x_blockdev_del(false, NULL, true, id, &local_err);
2857 if (local_err) {
2858 error_report_err(local_err);
2859 }
2860 return;
2861 }
2862
2863 blk = blk_by_name(id);
2864 if (!blk) {
2865 error_report("Device '%s' not found", id);
2866 return;
2867 }
2868
2869 if (!blk_legacy_dinfo(blk)) {
2870 error_report("Deleting device added with blockdev-add"
2871 " is not supported");
2872 return;
2873 }
2874
2875 aio_context = blk_get_aio_context(blk);
2876 aio_context_acquire(aio_context);
2877
2878 bs = blk_bs(blk);
2879 if (bs) {
2880 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, &local_err)) {
2881 error_report_err(local_err);
2882 aio_context_release(aio_context);
2883 return;
2884 }
2885
2886 blk_remove_bs(blk);
2887 }
2888
2889 /* Make the BlockBackend and the attached BlockDriverState anonymous */
2890 monitor_remove_blk(blk);
2891
2892 /* If this BlockBackend has a device attached to it, its refcount will be
2893 * decremented when the device is removed; otherwise we have to do so here.
2894 */
2895 if (blk_get_attached_dev(blk)) {
2896 /* Further I/O must not pause the guest */
2897 blk_set_on_error(blk, BLOCKDEV_ON_ERROR_REPORT,
2898 BLOCKDEV_ON_ERROR_REPORT);
2899 } else {
2900 blk_unref(blk);
2901 }
2902
2903 aio_context_release(aio_context);
2904 }
2905
2906 void qmp_block_resize(bool has_device, const char *device,
2907 bool has_node_name, const char *node_name,
2908 int64_t size, Error **errp)
2909 {
2910 Error *local_err = NULL;
2911 BlockDriverState *bs;
2912 AioContext *aio_context;
2913 int ret;
2914
2915 bs = bdrv_lookup_bs(has_device ? device : NULL,
2916 has_node_name ? node_name : NULL,
2917 &local_err);
2918 if (local_err) {
2919 error_propagate(errp, local_err);
2920 return;
2921 }
2922
2923 aio_context = bdrv_get_aio_context(bs);
2924 aio_context_acquire(aio_context);
2925
2926 if (!bdrv_is_first_non_filter(bs)) {
2927 error_setg(errp, QERR_FEATURE_DISABLED, "resize");
2928 goto out;
2929 }
2930
2931 if (size < 0) {
2932 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "size", "a >0 size");
2933 goto out;
2934 }
2935
2936 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) {
2937 error_setg(errp, QERR_DEVICE_IN_USE, device);
2938 goto out;
2939 }
2940
2941 /* complete all in-flight operations before resizing the device */
2942 bdrv_drain_all();
2943
2944 ret = bdrv_truncate(bs, size);
2945 switch (ret) {
2946 case 0:
2947 break;
2948 case -ENOMEDIUM:
2949 error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
2950 break;
2951 case -ENOTSUP:
2952 error_setg(errp, QERR_UNSUPPORTED);
2953 break;
2954 case -EACCES:
2955 error_setg(errp, "Device '%s' is read only", device);
2956 break;
2957 case -EBUSY:
2958 error_setg(errp, QERR_DEVICE_IN_USE, device);
2959 break;
2960 default:
2961 error_setg_errno(errp, -ret, "Could not resize");
2962 break;
2963 }
2964
2965 out:
2966 aio_context_release(aio_context);
2967 }
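/*
 * Illustrative sketch, not part of the original source: resizing an image to
 * 1 GiB; the command accepts either 'device' or 'node-name', and the device
 * name and size here are hypothetical.
 *
 * { "execute": "block_resize",
 *   "arguments": { "device": "scratch0", "size": 1073741824 } }
 */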
2968
2969 static void block_job_cb(void *opaque, int ret)
2970 {
2971 /* Note that this function may be executed from another AioContext besides
2972 * the QEMU main loop. If you need to access anything that assumes the
2973 * QEMU global mutex, use a BH or introduce a mutex.
2974 */
2975
2976 BlockDriverState *bs = opaque;
2977 const char *msg = NULL;
2978
2979 trace_block_job_cb(bs, bs->job, ret);
2980
2981 assert(bs->job);
2982
2983 if (ret < 0) {
2984 msg = strerror(-ret);
2985 }
2986
2987 if (block_job_is_cancelled(bs->job)) {
2988 block_job_event_cancelled(bs->job);
2989 } else {
2990 block_job_event_completed(bs->job, msg);
2991 }
2992 }
2993
2994 void qmp_block_stream(const char *device,
2995 bool has_base, const char *base,
2996 bool has_backing_file, const char *backing_file,
2997 bool has_speed, int64_t speed,
2998 bool has_on_error, BlockdevOnError on_error,
2999 Error **errp)
3000 {
3001 BlockBackend *blk;
3002 BlockDriverState *bs;
3003 BlockDriverState *base_bs = NULL;
3004 AioContext *aio_context;
3005 Error *local_err = NULL;
3006 const char *base_name = NULL;
3007
3008 if (!has_on_error) {
3009 on_error = BLOCKDEV_ON_ERROR_REPORT;
3010 }
3011
3012 blk = blk_by_name(device);
3013 if (!blk) {
3014 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
3015 "Device '%s' not found", device);
3016 return;
3017 }
3018
3019 aio_context = blk_get_aio_context(blk);
3020 aio_context_acquire(aio_context);
3021
3022 if (!blk_is_available(blk)) {
3023 error_setg(errp, "Device '%s' has no medium", device);
3024 goto out;
3025 }
3026 bs = blk_bs(blk);
3027
3028 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_STREAM, errp)) {
3029 goto out;
3030 }
3031
3032 if (has_base) {
3033 base_bs = bdrv_find_backing_image(bs, base);
3034 if (base_bs == NULL) {
3035 error_setg(errp, QERR_BASE_NOT_FOUND, base);
3036 goto out;
3037 }
3038 assert(bdrv_get_aio_context(base_bs) == aio_context);
3039 base_name = base;
3040 }
3041
3042 /* if we are streaming the entire chain, the result will have no backing
3043 * file, and specifying one is therefore an error */
3044 if (base_bs == NULL && has_backing_file) {
3045 error_setg(errp, "backing file specified, but streaming the "
3046 "entire chain");
3047 goto out;
3048 }
3049
3050 /* backing_file string overrides base bs filename */
3051 base_name = has_backing_file ? backing_file : base_name;
3052
3053 stream_start(bs, base_bs, base_name, has_speed ? speed : 0,
3054 on_error, block_job_cb, bs, &local_err);
3055 if (local_err) {
3056 error_propagate(errp, local_err);
3057 goto out;
3058 }
3059
3060 trace_qmp_block_stream(bs, bs->job);
3061
3062 out:
3063 aio_context_release(aio_context);
3064 }
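/*
 * Illustrative sketch, not part of the original source: streaming data from
 * the backing chain into the active layer, stopping at 'base'; the device
 * name, base filename and speed are hypothetical.
 *
 * { "execute": "block-stream",
 *   "arguments": { "device": "virtio0",
 *                  "base": "/srv/images/base.qcow2",
 *                  "speed": 10485760 } }
 */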
3065
3066 void qmp_block_commit(const char *device,
3067 bool has_base, const char *base,
3068 bool has_top, const char *top,
3069 bool has_backing_file, const char *backing_file,
3070 bool has_speed, int64_t speed,
3071 Error **errp)
3072 {
3073 BlockBackend *blk;
3074 BlockDriverState *bs;
3075 BlockDriverState *base_bs, *top_bs;
3076 AioContext *aio_context;
3077 Error *local_err = NULL;
3078 /* This will be part of the QMP command, if/when the
3079 * BlockdevOnError change for blkmirror makes it in
3080 */
3081 BlockdevOnError on_error = BLOCKDEV_ON_ERROR_REPORT;
3082
3083 if (!has_speed) {
3084 speed = 0;
3085 }
3086
3087 /* Important Note:
3088 * libvirt relies on the DeviceNotFound error class in order to probe for
3089 * live commit feature versions; for this to work, we must make sure to
3090 * perform the device lookup before any generic errors that may occur in a
3091 * scenario in which all optional arguments are omitted. */
3092 blk = blk_by_name(device);
3093 if (!blk) {
3094 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
3095 "Device '%s' not found", device);
3096 return;
3097 }
3098
3099 aio_context = blk_get_aio_context(blk);
3100 aio_context_acquire(aio_context);
3101
3102 if (!blk_is_available(blk)) {
3103 error_setg(errp, "Device '%s' has no medium", device);
3104 goto out;
3105 }
3106 bs = blk_bs(blk);
3107
3108 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, errp)) {
3109 goto out;
3110 }
3111
3112 /* default top_bs is the active layer */
3113 top_bs = bs;
3114
3115 if (has_top && top) {
3116 if (strcmp(bs->filename, top) != 0) {
3117 top_bs = bdrv_find_backing_image(bs, top);
3118 }
3119 }
3120
3121 if (top_bs == NULL) {
3122 error_setg(errp, "Top image file %s not found", top ? top : "NULL");
3123 goto out;
3124 }
3125
3126 assert(bdrv_get_aio_context(top_bs) == aio_context);
3127
3128 if (has_base && base) {
3129 base_bs = bdrv_find_backing_image(top_bs, base);
3130 } else {
3131 base_bs = bdrv_find_base(top_bs);
3132 }
3133
3134 if (base_bs == NULL) {
3135 error_setg(errp, QERR_BASE_NOT_FOUND, base ? base : "NULL");
3136 goto out;
3137 }
3138
3139 assert(bdrv_get_aio_context(base_bs) == aio_context);
3140
3141 if (bdrv_op_is_blocked(base_bs, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) {
3142 goto out;
3143 }
3144
3145 /* Do not allow attempts to commit an image into itself */
3146 if (top_bs == base_bs) {
3147 error_setg(errp, "cannot commit an image into itself");
3148 goto out;
3149 }
3150
3151 if (top_bs == bs) {
3152 if (has_backing_file) {
3153 error_setg(errp, "'backing-file' specified,"
3154 " but 'top' is the active layer");
3155 goto out;
3156 }
3157 commit_active_start(bs, base_bs, speed, on_error, block_job_cb,
3158 bs, &local_err);
3159 } else {
3160 commit_start(bs, base_bs, top_bs, speed, on_error, block_job_cb, bs,
3161 has_backing_file ? backing_file : NULL, &local_err);
3162 }
3163 if (local_err != NULL) {
3164 error_propagate(errp, local_err);
3165 goto out;
3166 }
3167
3168 out:
3169 aio_context_release(aio_context);
3170 }
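/*
 * Illustrative sketch, not part of the original source: committing the
 * active layer down into 'base' (omitting 'top' selects the active layer,
 * as handled above); the device name and base filename are hypothetical.
 *
 * { "execute": "block-commit",
 *   "arguments": { "device": "virtio0",
 *                  "base": "/srv/images/base.qcow2" } }
 */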
3171
3172 static void do_drive_backup(const char *device, const char *target,
3173 bool has_format, const char *format,
3174 enum MirrorSyncMode sync,
3175 bool has_mode, enum NewImageMode mode,
3176 bool has_speed, int64_t speed,
3177 bool has_bitmap, const char *bitmap,
3178 bool has_on_source_error,
3179 BlockdevOnError on_source_error,
3180 bool has_on_target_error,
3181 BlockdevOnError on_target_error,
3182 BlockJobTxn *txn, Error **errp)
3183 {
3184 BlockBackend *blk;
3185 BlockDriverState *bs;
3186 BlockDriverState *target_bs;
3187 BlockDriverState *source = NULL;
3188 BdrvDirtyBitmap *bmap = NULL;
3189 AioContext *aio_context;
3190 QDict *options = NULL;
3191 Error *local_err = NULL;
3192 int flags;
3193 int64_t size;
3194
3195 if (!has_speed) {
3196 speed = 0;
3197 }
3198 if (!has_on_source_error) {
3199 on_source_error = BLOCKDEV_ON_ERROR_REPORT;
3200 }
3201 if (!has_on_target_error) {
3202 on_target_error = BLOCKDEV_ON_ERROR_REPORT;
3203 }
3204 if (!has_mode) {
3205 mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
3206 }
3207
3208 blk = blk_by_name(device);
3209 if (!blk) {
3210 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
3211 "Device '%s' not found", device);
3212 return;
3213 }
3214
3215 aio_context = blk_get_aio_context(blk);
3216 aio_context_acquire(aio_context);
3217
3218 /* Although backup_run has this check too, we need to use bs->drv below, so
3219 * do an early check redundantly. */
3220 if (!blk_is_available(blk)) {
3221 error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
3222 goto out;
3223 }
3224 bs = blk_bs(blk);
3225
3226 if (!has_format) {
3227 format = mode == NEW_IMAGE_MODE_EXISTING ? NULL : bs->drv->format_name;
3228 }
3229
3230 /* Early check to avoid creating target */
3231 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
3232 goto out;
3233 }
3234
3235 flags = bs->open_flags | BDRV_O_RDWR;
3236
3237 /* See if we have a backing HD we can use to create our new image
3238 * on top of. */
3239 if (sync == MIRROR_SYNC_MODE_TOP) {
3240 source = backing_bs(bs);
3241 if (!source) {
3242 sync = MIRROR_SYNC_MODE_FULL;
3243 }
3244 }
3245 if (sync == MIRROR_SYNC_MODE_NONE) {
3246 source = bs;
3247 }
3248
3249 size = bdrv_getlength(bs);
3250 if (size < 0) {
3251 error_setg_errno(errp, -size, "bdrv_getlength failed");
3252 goto out;
3253 }
3254
3255 if (mode != NEW_IMAGE_MODE_EXISTING) {
3256 assert(format);
3257 if (source) {
3258 bdrv_img_create(target, format, source->filename,
3259 source->drv->format_name, NULL,
3260 size, flags, &local_err, false);
3261 } else {
3262 bdrv_img_create(target, format, NULL, NULL, NULL,
3263 size, flags, &local_err, false);
3264 }
3265 }
3266
3267 if (local_err) {
3268 error_propagate(errp, local_err);
3269 goto out;
3270 }
3271
3272 if (format) {
3273 options = qdict_new();
3274 qdict_put(options, "driver", qstring_from_str(format));
3275 }
3276
3277 target_bs = bdrv_open(target, NULL, options, flags, errp);
3278 if (!target_bs) {
3279 goto out;
3280 }
3281
3282 bdrv_set_aio_context(target_bs, aio_context);
3283
3284 if (has_bitmap) {
3285 bmap = bdrv_find_dirty_bitmap(bs, bitmap);
3286 if (!bmap) {
3287 error_setg(errp, "Bitmap '%s' could not be found", bitmap);
3288 bdrv_unref(target_bs);
3289 goto out;
3290 }
3291 }
3292
3293 backup_start(bs, target_bs, speed, sync, bmap,
3294 on_source_error, on_target_error,
3295 block_job_cb, bs, txn, &local_err);
3296 bdrv_unref(target_bs);
3297 if (local_err != NULL) {
3298 error_propagate(errp, local_err);
3299 goto out;
3300 }
3301
3302 out:
3303 aio_context_release(aio_context);
3304 }
3305
3306 void qmp_drive_backup(const char *device, const char *target,
3307 bool has_format, const char *format,
3308 enum MirrorSyncMode sync,
3309 bool has_mode, enum NewImageMode mode,
3310 bool has_speed, int64_t speed,
3311 bool has_bitmap, const char *bitmap,
3312 bool has_on_source_error, BlockdevOnError on_source_error,
3313 bool has_on_target_error, BlockdevOnError on_target_error,
3314 Error **errp)
3315 {
3316 return do_drive_backup(device, target, has_format, format, sync,
3317 has_mode, mode, has_speed, speed,
3318 has_bitmap, bitmap,
3319 has_on_source_error, on_source_error,
3320 has_on_target_error, on_target_error,
3321 NULL, errp);
3322 }
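/*
 * Illustrative sketch, not part of the original source: a full backup into a
 * newly created qcow2 image; the device name and target path are
 * hypothetical.
 *
 * { "execute": "drive-backup",
 *   "arguments": { "device": "virtio0",
 *                  "target": "/srv/backup/virtio0.qcow2",
 *                  "sync": "full", "format": "qcow2" } }
 */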
3323
3324 BlockDeviceInfoList *qmp_query_named_block_nodes(Error **errp)
3325 {
3326 return bdrv_named_nodes_list(errp);
3327 }
3328
3329 void do_blockdev_backup(const char *device, const char *target,
3330 enum MirrorSyncMode sync,
3331 bool has_speed, int64_t speed,
3332 bool has_on_source_error,
3333 BlockdevOnError on_source_error,
3334 bool has_on_target_error,
3335 BlockdevOnError on_target_error,
3336 BlockJobTxn *txn, Error **errp)
3337 {
3338 BlockBackend *blk, *target_blk;
3339 BlockDriverState *bs;
3340 BlockDriverState *target_bs;
3341 Error *local_err = NULL;
3342 AioContext *aio_context;
3343
3344 if (!has_speed) {
3345 speed = 0;
3346 }
3347 if (!has_on_source_error) {
3348 on_source_error = BLOCKDEV_ON_ERROR_REPORT;
3349 }
3350 if (!has_on_target_error) {
3351 on_target_error = BLOCKDEV_ON_ERROR_REPORT;
3352 }
3353
3354 blk = blk_by_name(device);
3355 if (!blk) {
3356 error_setg(errp, "Device '%s' not found", device);
3357 return;
3358 }
3359
3360 aio_context = blk_get_aio_context(blk);
3361 aio_context_acquire(aio_context);
3362
3363 if (!blk_is_available(blk)) {
3364 error_setg(errp, "Device '%s' has no medium", device);
3365 goto out;
3366 }
3367 bs = blk_bs(blk);
3368
3369 target_blk = blk_by_name(target);
3370 if (!target_blk) {
3371 error_setg(errp, "Device '%s' not found", target);
3372 goto out;
3373 }
3374
3375 if (!blk_is_available(target_blk)) {
3376 error_setg(errp, "Device '%s' has no medium", target);
3377 goto out;
3378 }
3379 target_bs = blk_bs(target_blk);
3380
3381 bdrv_set_aio_context(target_bs, aio_context);
3382 backup_start(bs, target_bs, speed, sync, NULL, on_source_error,
3383 on_target_error, block_job_cb, bs, txn, &local_err);
3384 if (local_err != NULL) {
3385 error_propagate(errp, local_err);
3386 }
3387 out:
3388 aio_context_release(aio_context);
3389 }
3390
3391 void qmp_blockdev_backup(const char *device, const char *target,
3392 enum MirrorSyncMode sync,
3393 bool has_speed, int64_t speed,
3394 bool has_on_source_error,
3395 BlockdevOnError on_source_error,
3396 bool has_on_target_error,
3397 BlockdevOnError on_target_error,
3398 Error **errp)
3399 {
3400 do_blockdev_backup(device, target, sync, has_speed, speed,
3401 has_on_source_error, on_source_error,
3402 has_on_target_error, on_target_error,
3403 NULL, errp);
3404 }
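/*
 * Illustrative sketch, not part of the original source: backing up to an
 * already attached target backend (no image is created, unlike drive-backup);
 * both device names are hypothetical.
 *
 * { "execute": "blockdev-backup",
 *   "arguments": { "device": "src0", "target": "tgt0", "sync": "full" } }
 */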
3405
3406 /* Parameter check and block job starting for drive mirroring.
3407 * Caller should hold @device and @target's AioContext (must be the same).
3408 */
3409 static void blockdev_mirror_common(BlockDriverState *bs,
3410 BlockDriverState *target,
3411 bool has_replaces, const char *replaces,
3412 enum MirrorSyncMode sync,
3413 bool has_speed, int64_t speed,
3414 bool has_granularity, uint32_t granularity,
3415 bool has_buf_size, int64_t buf_size,
3416 bool has_on_source_error,
3417 BlockdevOnError on_source_error,
3418 bool has_on_target_error,
3419 BlockdevOnError on_target_error,
3420 bool has_unmap, bool unmap,
3421 Error **errp)
3422 {
3423
3424 if (!has_speed) {
3425 speed = 0;
3426 }
3427 if (!has_on_source_error) {
3428 on_source_error = BLOCKDEV_ON_ERROR_REPORT;
3429 }
3430 if (!has_on_target_error) {
3431 on_target_error = BLOCKDEV_ON_ERROR_REPORT;
3432 }
3433 if (!has_granularity) {
3434 granularity = 0;
3435 }
3436 if (!has_buf_size) {
3437 buf_size = 0;
3438 }
3439 if (!has_unmap) {
3440 unmap = true;
3441 }
3442
3443 if (granularity != 0 && (granularity < 512 || granularity > 1048576 * 64)) {
3444 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity",
3445 "a value in range [512B, 64MB]");
3446 return;
3447 }
3448 if (granularity & (granularity - 1)) {
3449 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity",
3450 "power of 2");
3451 return;
3452 }
3453
3454 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_MIRROR_SOURCE, errp)) {
3455 return;
3456 }
3457 if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_MIRROR_TARGET, errp)) {
3458 return;
3459 }
3460
3461 if (!bs->backing && sync == MIRROR_SYNC_MODE_TOP) {
3462 sync = MIRROR_SYNC_MODE_FULL;
3463 }
3464
3465 /* Pass the node name to replace to mirror_start(); this loose coupling
3466 * allows checking whether the node still exists at mirror completion.
3467 */
3468 mirror_start(bs, target,
3469 has_replaces ? replaces : NULL,
3470 speed, granularity, buf_size, sync,
3471 on_source_error, on_target_error, unmap,
3472 block_job_cb, bs, errp);
3473 }
3474
3475 void qmp_drive_mirror(const char *device, const char *target,
3476 bool has_format, const char *format,
3477 bool has_node_name, const char *node_name,
3478 bool has_replaces, const char *replaces,
3479 enum MirrorSyncMode sync,
3480 bool has_mode, enum NewImageMode mode,
3481 bool has_speed, int64_t speed,
3482 bool has_granularity, uint32_t granularity,
3483 bool has_buf_size, int64_t buf_size,
3484 bool has_on_source_error, BlockdevOnError on_source_error,
3485 bool has_on_target_error, BlockdevOnError on_target_error,
3486 bool has_unmap, bool unmap,
3487 Error **errp)
3488 {
3489 BlockDriverState *bs;
3490 BlockBackend *blk;
3491 BlockDriverState *source, *target_bs;
3492 AioContext *aio_context;
3493 Error *local_err = NULL;
3494 QDict *options = NULL;
3495 int flags;
3496 int64_t size;
3497
3498 blk = blk_by_name(device);
3499 if (!blk) {
3500 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
3501 "Device '%s' not found", device);
3502 return;
3503 }
3504
3505 aio_context = blk_get_aio_context(blk);
3506 aio_context_acquire(aio_context);
3507
3508 if (!blk_is_available(blk)) {
3509 error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
3510 goto out;
3511 }
3512 bs = blk_bs(blk);
3513 if (!has_mode) {
3514 mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
3515 }
3516
3517 if (!has_format) {
3518 format = mode == NEW_IMAGE_MODE_EXISTING ? NULL : bs->drv->format_name;
3519 }
3520
3521 flags = bs->open_flags | BDRV_O_RDWR;
3522 source = backing_bs(bs);
3523 if (!source && sync == MIRROR_SYNC_MODE_TOP) {
3524 sync = MIRROR_SYNC_MODE_FULL;
3525 }
3526 if (sync == MIRROR_SYNC_MODE_NONE) {
3527 source = bs;
3528 }
3529
3530 size = bdrv_getlength(bs);
3531 if (size < 0) {
3532 error_setg_errno(errp, -size, "bdrv_getlength failed");
3533 goto out;
3534 }
3535
3536 if (has_replaces) {
3537 BlockDriverState *to_replace_bs;
3538 AioContext *replace_aio_context;
3539 int64_t replace_size;
3540
3541 if (!has_node_name) {
3542 error_setg(errp, "a node-name must be provided when replacing a"
3543 " named node of the graph");
3544 goto out;
3545 }
3546
3547 to_replace_bs = check_to_replace_node(bs, replaces, &local_err);
3548
3549 if (!to_replace_bs) {
3550 error_propagate(errp, local_err);
3551 goto out;
3552 }
3553
3554 replace_aio_context = bdrv_get_aio_context(to_replace_bs);
3555 aio_context_acquire(replace_aio_context);
3556 replace_size = bdrv_getlength(to_replace_bs);
3557 aio_context_release(replace_aio_context);
3558
3559 if (size != replace_size) {
3560 error_setg(errp, "cannot replace image with a mirror image of "
3561 "different size");
3562 goto out;
3563 }
3564 }
3565
3566 if ((sync == MIRROR_SYNC_MODE_FULL || !source)
3567 && mode != NEW_IMAGE_MODE_EXISTING)
3568 {
3569 /* create new image w/o backing file */
3570 assert(format);
3571 bdrv_img_create(target, format,
3572 NULL, NULL, NULL, size, flags, &local_err, false);
3573 } else {
3574 switch (mode) {
3575 case NEW_IMAGE_MODE_EXISTING:
3576 break;
3577 case NEW_IMAGE_MODE_ABSOLUTE_PATHS:
3578 /* create new image with backing file */
3579 bdrv_img_create(target, format,
3580 source->filename,
3581 source->drv->format_name,
3582 NULL, size, flags, &local_err, false);
3583 break;
3584 default:
3585 abort();
3586 }
3587 }
3588
3589 if (local_err) {
3590 error_propagate(errp, local_err);
3591 goto out;
3592 }
3593
3594 options = qdict_new();
3595 if (has_node_name) {
3596 qdict_put(options, "node-name", qstring_from_str(node_name));
3597 }
3598 if (format) {
3599 qdict_put(options, "driver", qstring_from_str(format));
3600 }
3601
3602 /* Mirroring takes care of copy-on-write using the source's backing
3603 * file.
3604 */
3605 target_bs = bdrv_open(target, NULL, options, flags | BDRV_O_NO_BACKING,
3606 errp);
3607 if (!target_bs) {
3608 goto out;
3609 }
3610
3611 bdrv_set_aio_context(target_bs, aio_context);
3612
3613 blockdev_mirror_common(bs, target_bs,
3614 has_replaces, replaces, sync,
3615 has_speed, speed,
3616 has_granularity, granularity,
3617 has_buf_size, buf_size,
3618 has_on_source_error, on_source_error,
3619 has_on_target_error, on_target_error,
3620 has_unmap, unmap,
3621 &local_err);
3622 bdrv_unref(target_bs);
3623 if (local_err) {
3624 error_propagate(errp, local_err);
3625 }
3626 out:
3627 aio_context_release(aio_context);
3628 }
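/*
 * Illustrative sketch, not part of the original source: mirroring the whole
 * device into a newly created image; the device name and target path are
 * hypothetical.
 *
 * { "execute": "drive-mirror",
 *   "arguments": { "device": "virtio0",
 *                  "target": "/srv/mirror/virtio0.qcow2",
 *                  "sync": "full", "format": "qcow2" } }
 */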
3629
3630 void qmp_blockdev_mirror(const char *device, const char *target,
3631 bool has_replaces, const char *replaces,
3632 MirrorSyncMode sync,
3633 bool has_speed, int64_t speed,
3634 bool has_granularity, uint32_t granularity,
3635 bool has_buf_size, int64_t buf_size,
3636 bool has_on_source_error,
3637 BlockdevOnError on_source_error,
3638 bool has_on_target_error,
3639 BlockdevOnError on_target_error,
3640 Error **errp)
3641 {
3642 BlockDriverState *bs;
3643 BlockBackend *blk;
3644 BlockDriverState *target_bs;
3645 AioContext *aio_context;
3646 Error *local_err = NULL;
3647
3648 blk = blk_by_name(device);
3649 if (!blk) {
3650 error_setg(errp, "Device '%s' not found", device);
3651 return;
3652 }
3653 bs = blk_bs(blk);
3654
3655 if (!bs) {
3656 error_setg(errp, "Device '%s' has no media", device);
3657 return;
3658 }
3659
3660 target_bs = bdrv_lookup_bs(target, target, errp);
3661 if (!target_bs) {
3662 return;
3663 }
3664
3665 aio_context = bdrv_get_aio_context(bs);
3666 aio_context_acquire(aio_context);
3667
3668 bdrv_set_aio_context(target_bs, aio_context);
3669
3670 blockdev_mirror_common(bs, target_bs,
3671 has_replaces, replaces, sync,
3672 has_speed, speed,
3673 has_granularity, granularity,
3674 has_buf_size, buf_size,
3675 has_on_source_error, on_source_error,
3676 has_on_target_error, on_target_error,
3677 true, true,
3678 &local_err);
3679 if (local_err) {
3680 error_propagate(errp, local_err);
3681 }
3682
3683 aio_context_release(aio_context);
3684 }
3685
3686 /* Get the block job for a given device name and acquire its AioContext */
3687 static BlockJob *find_block_job(const char *device, AioContext **aio_context,
3688 Error **errp)
3689 {
3690 BlockBackend *blk;
3691 BlockDriverState *bs;
3692
3693 *aio_context = NULL;
3694
3695 blk = blk_by_name(device);
3696 if (!blk) {
3697 goto notfound;
3698 }
3699
3700 *aio_context = blk_get_aio_context(blk);
3701 aio_context_acquire(*aio_context);
3702
3703 if (!blk_is_available(blk)) {
3704 goto notfound;
3705 }
3706 bs = blk_bs(blk);
3707
3708 if (!bs->job) {
3709 goto notfound;
3710 }
3711
3712 return bs->job;
3713
3714 notfound:
3715 error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE,
3716 "No active block job on device '%s'", device);
3717 if (*aio_context) {
3718 aio_context_release(*aio_context);
3719 *aio_context = NULL;
3720 }
3721 return NULL;
3722 }
3723
3724 void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp)
3725 {
3726 AioContext *aio_context;
3727 BlockJob *job = find_block_job(device, &aio_context, errp);
3728
3729 if (!job) {
3730 return;
3731 }
3732
3733 block_job_set_speed(job, speed, errp);
3734 aio_context_release(aio_context);
3735 }
3736
3737 void qmp_block_job_cancel(const char *device,
3738 bool has_force, bool force, Error **errp)
3739 {
3740 AioContext *aio_context;
3741 BlockJob *job = find_block_job(device, &aio_context, errp);
3742
3743 if (!job) {
3744 return;
3745 }
3746
3747 if (!has_force) {
3748 force = false;
3749 }
3750
3751 if (job->user_paused && !force) {
3752 error_setg(errp, "The block job for device '%s' is currently paused",
3753 device);
3754 goto out;
3755 }
3756
3757 trace_qmp_block_job_cancel(job);
3758 block_job_cancel(job);
3759 out:
3760 aio_context_release(aio_context);
3761 }
3762
3763 void qmp_block_job_pause(const char *device, Error **errp)
3764 {
3765 AioContext *aio_context;
3766 BlockJob *job = find_block_job(device, &aio_context, errp);
3767
3768 if (!job || job->user_paused) {
3769 return;
3770 }
3771
3772 job->user_paused = true;
3773 trace_qmp_block_job_pause(job);
3774 block_job_pause(job);
3775 aio_context_release(aio_context);
3776 }
3777
3778 void qmp_block_job_resume(const char *device, Error **errp)
3779 {
3780 AioContext *aio_context;
3781 BlockJob *job = find_block_job(device, &aio_context, errp);
3782
3783 if (!job || !job->user_paused) {
3784 return;
3785 }
3786
3787 job->user_paused = false;
3788 trace_qmp_block_job_resume(job);
3789 block_job_resume(job);
3790 aio_context_release(aio_context);
3791 }
3792
3793 void qmp_block_job_complete(const char *device, Error **errp)
3794 {
3795 AioContext *aio_context;
3796 BlockJob *job = find_block_job(device, &aio_context, errp);
3797
3798 if (!job) {
3799 return;
3800 }
3801
3802 trace_qmp_block_job_complete(job);
3803 block_job_complete(job, errp);
3804 aio_context_release(aio_context);
3805 }
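/*
 * Illustrative sketch, not part of the original source: the block-job-*
 * commands above are keyed by the device that owns the job; the device name
 * and speed are hypothetical.
 *
 * { "execute": "block-job-set-speed",
 *   "arguments": { "device": "virtio0", "speed": 20971520 } }
 * { "execute": "block-job-cancel",
 *   "arguments": { "device": "virtio0" } }
 */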
3806
3807 void qmp_change_backing_file(const char *device,
3808 const char *image_node_name,
3809 const char *backing_file,
3810 Error **errp)
3811 {
3812 BlockBackend *blk;
3813 BlockDriverState *bs = NULL;
3814 AioContext *aio_context;
3815 BlockDriverState *image_bs = NULL;
3816 Error *local_err = NULL;
3817 bool ro;
3818 int open_flags;
3819 int ret;
3820
3821 blk = blk_by_name(device);
3822 if (!blk) {
3823 error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
3824 "Device '%s' not found", device);
3825 return;
3826 }
3827
3828 aio_context = blk_get_aio_context(blk);
3829 aio_context_acquire(aio_context);
3830
3831 if (!blk_is_available(blk)) {
3832 error_setg(errp, "Device '%s' has no medium", device);
3833 goto out;
3834 }
3835 bs = blk_bs(blk);
3836
3837 image_bs = bdrv_lookup_bs(NULL, image_node_name, &local_err);
3838 if (local_err) {
3839 error_propagate(errp, local_err);
3840 goto out;
3841 }
3842
3843 if (!image_bs) {
3844 error_setg(errp, "image file not found");
3845 goto out;
3846 }
3847
3848 if (bdrv_find_base(image_bs) == image_bs) {
3849 error_setg(errp, "not allowing backing file change on an image "
3850 "without a backing file");
3851 goto out;
3852 }
3853
3854 /* even though we are not necessarily operating on bs, we need it to
3855 * determine if block ops are currently prohibited on the chain */
3856 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_CHANGE, errp)) {
3857 goto out;
3858 }
3859
3860 /* final sanity check */
3861 if (!bdrv_chain_contains(bs, image_bs)) {
3862 error_setg(errp, "'%s' and image file are not in the same chain",
3863 device);
3864 goto out;
3865 }
3866
3867 /* if not r/w, reopen to make r/w */
3868 open_flags = image_bs->open_flags;
3869 ro = bdrv_is_read_only(image_bs);
3870
3871 if (ro) {
3872 bdrv_reopen(image_bs, open_flags | BDRV_O_RDWR, &local_err);
3873 if (local_err) {
3874 error_propagate(errp, local_err);
3875 goto out;
3876 }
3877 }
3878
3879 ret = bdrv_change_backing_file(image_bs, backing_file,
3880 image_bs->drv ? image_bs->drv->format_name : "");
3881
3882 if (ret < 0) {
3883 error_setg_errno(errp, -ret, "Could not change backing file to '%s'",
3884 backing_file);
3885 /* don't exit here, so we can try to restore open flags if
3886 * appropriate */
3887 }
3888
3889 if (ro) {
3890 bdrv_reopen(image_bs, open_flags, &local_err);
3891 if (local_err) {
3892 error_propagate(errp, local_err); /* will preserve prior errp */
3893 }
3894 }
3895
3896 out:
3897 aio_context_release(aio_context);
3898 }
3899
3900 void hmp_drive_add_node(Monitor *mon, const char *optstr)
3901 {
3902 QemuOpts *opts;
3903 QDict *qdict;
3904 Error *local_err = NULL;
3905
3906 opts = qemu_opts_parse_noisily(&qemu_drive_opts, optstr, false);
3907 if (!opts) {
3908 return;
3909 }
3910
3911 qdict = qemu_opts_to_qdict(opts, NULL);
3912
3913 if (!qdict_get_try_str(qdict, "node-name")) {
3914 QDECREF(qdict);
3915 error_report("'node-name' needs to be specified");
3916 goto out;
3917 }
3918
3919 BlockDriverState *bs = bds_tree_init(qdict, &local_err);
3920 if (!bs) {
3921 error_report_err(local_err);
3922 goto out;
3923 }
3924
3925 QTAILQ_INSERT_TAIL(&monitor_bdrv_states, bs, monitor_list);
3926
3927 out:
3928 qemu_opts_del(opts);
3929 }
3930
3931 void qmp_blockdev_add(BlockdevOptions *options, Error **errp)
3932 {
3933 QmpOutputVisitor *ov = qmp_output_visitor_new();
3934 BlockDriverState *bs;
3935 BlockBackend *blk = NULL;
3936 QObject *obj;
3937 QDict *qdict;
3938 Error *local_err = NULL;
3939
3940 /* TODO Sort it out in raw-posix and drive_new(): Reject aio=native with
3941 * cache.direct=false instead of silently switching to aio=threads, except
3942 * when called from drive_new().
3943 *
3944 * For now, simply forbidding the combination for all drivers will do. */
3945 if (options->has_aio && options->aio == BLOCKDEV_AIO_OPTIONS_NATIVE) {
3946 bool direct = options->has_cache &&
3947 options->cache->has_direct &&
3948 options->cache->direct;
3949 if (!direct) {
3950 error_setg(errp, "aio=native requires cache.direct=true");
3951 goto fail;
3952 }
3953 }
3954
3955 visit_type_BlockdevOptions(qmp_output_get_visitor(ov), NULL, &options,
3956 &local_err);
3957 if (local_err) {
3958 error_propagate(errp, local_err);
3959 goto fail;
3960 }
3961
3962 obj = qmp_output_get_qobject(ov);
3963 qdict = qobject_to_qdict(obj);
3964
3965 qdict_flatten(qdict);
3966
3967 if (options->has_id) {
3968 blk = blockdev_init(NULL, qdict, &local_err);
3969 if (local_err) {
3970 error_propagate(errp, local_err);
3971 goto fail;
3972 }
3973
3974 bs = blk_bs(blk);
3975 } else {
3976 if (!qdict_get_try_str(qdict, "node-name")) {
3977 error_setg(errp, "'id' and/or 'node-name' need to be specified for "
3978 "the root node");
3979 goto fail;
3980 }
3981
3982 bs = bds_tree_init(qdict, errp);
3983 if (!bs) {
3984 goto fail;
3985 }
3986
3987 QTAILQ_INSERT_TAIL(&monitor_bdrv_states, bs, monitor_list);
3988 }
3989
3990 if (bs && bdrv_key_required(bs)) {
3991 if (blk) {
3992 monitor_remove_blk(blk);
3993 blk_unref(blk);
3994 } else {
3995 QTAILQ_REMOVE(&monitor_bdrv_states, bs, monitor_list);
3996 bdrv_unref(bs);
3997 }
3998 error_setg(errp, "blockdev-add doesn't support encrypted devices");
3999 goto fail;
4000 }
4001
4002 fail:
4003 qmp_output_visitor_cleanup(ov);
4004 }
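
/*
 * Illustrative QMP example for qmp_blockdev_add().  The node and file names
 * are hypothetical, and the "options" wrapper reflects the non-flattened
 * blockdev-add arguments of this schema version.  Note that, as enforced
 * above, "aio": "native" is rejected unless "cache": { "direct": true } is
 * also given:
 *
 * -> { "execute": "blockdev-add",
 *      "arguments": { "options": { "driver": "qcow2",
 *                                  "node-name": "node0",
 *                                  "file": { "driver": "file",
 *                                            "filename": "/tmp/test.qcow2" } } } }
 * <- { "return": {} }
 */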
4005
4006 void qmp_x_blockdev_del(bool has_id, const char *id,
4007 bool has_node_name, const char *node_name, Error **errp)
4008 {
4009 AioContext *aio_context;
4010 BlockBackend *blk;
4011 BlockDriverState *bs;
4012
4013 if (has_id && has_node_name) {
4014 error_setg(errp, "Only one of 'id' and 'node-name' may be specified");
4015 return;
4016 } else if (!has_id && !has_node_name) {
4017 error_setg(errp, "No block device specified");
4018 return;
4019 }
4020
4021 if (has_id) {
4022 /* blk_by_name() only returns BBs that are owned by the monitor */
4023 blk = blk_by_name(id);
4024 if (!blk) {
4025 error_setg(errp, "Cannot find block backend %s", id);
4026 return;
4027 }
4028 if (blk_legacy_dinfo(blk)) {
4029 error_setg(errp, "Deleting block backend added with drive-add"
4030 " is not supported");
4031 return;
4032 }
4033 if (blk_get_refcnt(blk) > 1) {
4034 error_setg(errp, "Block backend %s is in use", id);
4035 return;
4036 }
4037 bs = blk_bs(blk);
4038 aio_context = blk_get_aio_context(blk);
4039 } else {
4040 blk = NULL;
4041 bs = bdrv_find_node(node_name);
4042 if (!bs) {
4043 error_setg(errp, "Cannot find node %s", node_name);
4044 return;
4045 }
4046 if (bdrv_has_blk(bs)) {
4047 error_setg(errp, "Node %s is in use by %s",
4048 node_name, bdrv_get_parent_name(bs));
4049 return;
4050 }
4051 aio_context = bdrv_get_aio_context(bs);
4052 }
4053
4054 aio_context_acquire(aio_context);
4055
4056 if (bs) {
4057 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, errp)) {
4058 goto out;
4059 }
4060
4061 if (!blk && !bs->monitor_list.tqe_prev) {
4062 error_setg(errp, "Node %s is not owned by the monitor",
4063 bs->node_name);
4064 goto out;
4065 }
4066
4067 if (bs->refcnt > 1) {
4068 error_setg(errp, "Block device %s is in use",
4069 bdrv_get_device_or_node_name(bs));
4070 goto out;
4071 }
4072 }
4073
4074 if (blk) {
4075 monitor_remove_blk(blk);
4076 blk_unref(blk);
4077 } else {
4078 QTAILQ_REMOVE(&monitor_bdrv_states, bs, monitor_list);
4079 bdrv_unref(bs);
4080 }
4081
4082 out:
4083 aio_context_release(aio_context);
4084 }
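
/*
 * Illustrative QMP example for qmp_x_blockdev_del(): exactly one of "id"
 * (a monitor-owned BlockBackend) or "node-name" (a monitor-owned node) must
 * be given, and the target must not be in use.  "node0" is a hypothetical
 * node previously created with blockdev-add:
 *
 * -> { "execute": "x-blockdev-del", "arguments": { "node-name": "node0" } }
 * <- { "return": {} }
 */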
4085
4086 static BdrvChild *bdrv_find_child(BlockDriverState *parent_bs,
4087 const char *child_name)
4088 {
4089 BdrvChild *child;
4090
4091 QLIST_FOREACH(child, &parent_bs->children, next) {
4092 if (strcmp(child->name, child_name) == 0) {
4093 return child;
4094 }
4095 }
4096
4097 return NULL;
4098 }
4099
4100 void qmp_x_blockdev_change(const char *parent, bool has_child,
4101 const char *child, bool has_node,
4102 const char *node, Error **errp)
4103 {
4104 BlockDriverState *parent_bs, *new_bs = NULL;
4105 BdrvChild *p_child;
4106
4107 parent_bs = bdrv_lookup_bs(parent, parent, errp);
4108 if (!parent_bs) {
4109 return;
4110 }
4111
4112 if (has_child == has_node) {
4113 if (has_child) {
4114 error_setg(errp, "The parameters child and node are in conflict");
4115 } else {
4116 error_setg(errp, "Either child or node must be specified");
4117 }
4118 return;
4119 }
4120
4121 if (has_child) {
4122 p_child = bdrv_find_child(parent_bs, child);
4123 if (!p_child) {
4124 error_setg(errp, "Node '%s' does not have child '%s'",
4125 parent, child);
4126 return;
4127 }
4128 bdrv_del_child(parent_bs, p_child, errp);
4129 }
4130
4131 if (has_node) {
4132 new_bs = bdrv_find_node(node);
4133 if (!new_bs) {
4134 error_setg(errp, "Node '%s' not found", node);
4135 return;
4136 }
4137 bdrv_add_child(parent_bs, new_bs, errp);
4138 }
4139 }
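
/*
 * Illustrative QMP example for qmp_x_blockdev_change(): "parent" plus
 * "child" detaches an existing child, while "parent" plus "node" attaches a
 * new one.  The quorum parent and the node name below are hypothetical:
 *
 * -> { "execute": "x-blockdev-change",
 *      "arguments": { "parent": "quorum0", "node": "new0" } }
 * <- { "return": {} }
 */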
4140
4141 BlockJobInfoList *qmp_query_block_jobs(Error **errp)
4142 {
4143 BlockJobInfoList *head = NULL, **p_next = &head;
4144 BlockDriverState *bs;
4145 BdrvNextIterator it;
4146
4147 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
4148 AioContext *aio_context = bdrv_get_aio_context(bs);
4149
4150 aio_context_acquire(aio_context);
4151
4152 if (bs->job) {
4153 BlockJobInfoList *elem = g_new0(BlockJobInfoList, 1);
4154 elem->value = block_job_query(bs->job);
4155 *p_next = elem;
4156 p_next = &elem->next;
4157 }
4158
4159 aio_context_release(aio_context);
4160 }
4161
4162 return head;
4163 }
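
/*
 * Illustrative QMP example for qmp_query_block_jobs(): the command takes no
 * arguments and returns one BlockJobInfo per BDS with an active job.  The
 * reply below is hypothetical; the field values depend on the running job:
 *
 * -> { "execute": "query-block-jobs" }
 * <- { "return": [ { "type": "mirror", "device": "virtio0",
 *                    "len": 1073741824, "offset": 524288000,
 *                    "busy": false, "paused": false, "speed": 0,
 *                    "io-status": "ok", "ready": false } ] }
 */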
4164
4165 QemuOptsList qemu_common_drive_opts = {
4166 .name = "drive",
4167 .head = QTAILQ_HEAD_INITIALIZER(qemu_common_drive_opts.head),
4168 .desc = {
4169 {
4170 .name = "snapshot",
4171 .type = QEMU_OPT_BOOL,
4172 .help = "enable/disable snapshot mode",
4173 },{
4174 .name = "discard",
4175 .type = QEMU_OPT_STRING,
4176 .help = "discard operation (ignore/off, unmap/on)",
4177 },{
4178 .name = "aio",
4179 .type = QEMU_OPT_STRING,
4180 .help = "host AIO implementation (threads, native)",
4181 },{
4182 .name = BDRV_OPT_CACHE_WB,
4183 .type = QEMU_OPT_BOOL,
4184 .help = "Enable writeback mode",
4185 },{
4186 .name = "format",
4187 .type = QEMU_OPT_STRING,
4188 .help = "disk format (raw, qcow2, ...)",
4189 },{
4190 .name = "rerror",
4191 .type = QEMU_OPT_STRING,
4192 .help = "read error action",
4193 },{
4194 .name = "werror",
4195 .type = QEMU_OPT_STRING,
4196 .help = "write error action",
4197 },{
4198 .name = "read-only",
4199 .type = QEMU_OPT_BOOL,
4200 .help = "open drive file as read-only",
4201 },{
4202 .name = "throttling.iops-total",
4203 .type = QEMU_OPT_NUMBER,
4204 .help = "limit total I/O operations per second",
4205 },{
4206 .name = "throttling.iops-read",
4207 .type = QEMU_OPT_NUMBER,
4208 .help = "limit read operations per second",
4209 },{
4210 .name = "throttling.iops-write",
4211 .type = QEMU_OPT_NUMBER,
4212 .help = "limit write operations per second",
4213 },{
4214 .name = "throttling.bps-total",
4215 .type = QEMU_OPT_NUMBER,
4216 .help = "limit total bytes per second",
4217 },{
4218 .name = "throttling.bps-read",
4219 .type = QEMU_OPT_NUMBER,
4220 .help = "limit read bytes per second",
4221 },{
4222 .name = "throttling.bps-write",
4223 .type = QEMU_OPT_NUMBER,
4224 .help = "limit write bytes per second",
4225 },{
4226 .name = "throttling.iops-total-max",
4227 .type = QEMU_OPT_NUMBER,
4228 .help = "I/O operations burst",
4229 },{
4230 .name = "throttling.iops-read-max",
4231 .type = QEMU_OPT_NUMBER,
4232 .help = "I/O operations read burst",
4233 },{
4234 .name = "throttling.iops-write-max",
4235 .type = QEMU_OPT_NUMBER,
4236 .help = "I/O operations write burst",
4237 },{
4238 .name = "throttling.bps-total-max",
4239 .type = QEMU_OPT_NUMBER,
4240 .help = "total bytes burst",
4241 },{
4242 .name = "throttling.bps-read-max",
4243 .type = QEMU_OPT_NUMBER,
4244 .help = "bytes read burst",
4245 },{
4246 .name = "throttling.bps-write-max",
4247 .type = QEMU_OPT_NUMBER,
4248 .help = "bytes write burst",
4249 },{
4250 .name = "throttling.iops-total-max-length",
4251 .type = QEMU_OPT_NUMBER,
4252 .help = "length of the iops-total-max burst period, in seconds",
4253 },{
4254 .name = "throttling.iops-read-max-length",
4255 .type = QEMU_OPT_NUMBER,
4256 .help = "length of the iops-read-max burst period, in seconds",
4257 },{
4258 .name = "throttling.iops-write-max-length",
4259 .type = QEMU_OPT_NUMBER,
4260 .help = "length of the iops-write-max burst period, in seconds",
4261 },{
4262 .name = "throttling.bps-total-max-length",
4263 .type = QEMU_OPT_NUMBER,
4264 .help = "length of the bps-total-max burst period, in seconds",
4265 },{
4266 .name = "throttling.bps-read-max-length",
4267 .type = QEMU_OPT_NUMBER,
4268 .help = "length of the bps-read-max burst period, in seconds",
4269 },{
4270 .name = "throttling.bps-write-max-length",
4271 .type = QEMU_OPT_NUMBER,
4272 .help = "length of the bps-write-max burst period, in seconds",
4273 },{
4274 .name = "throttling.iops-size",
4275 .type = QEMU_OPT_NUMBER,
4276 .help = "when limiting by iops, max size of an I/O in bytes",
4277 },{
4278 .name = "throttling.group",
4279 .type = QEMU_OPT_STRING,
4280 .help = "name of the block throttling group",
4281 },{
4282 .name = "copy-on-read",
4283 .type = QEMU_OPT_BOOL,
4284 .help = "copy read data from backing file into image file",
4285 },{
4286 .name = "detect-zeroes",
4287 .type = QEMU_OPT_STRING,
4288 .help = "try to optimize zero writes (off, on, unmap)",
4289 },{
4290 .name = "stats-account-invalid",
4291 .type = QEMU_OPT_BOOL,
4292 .help = "whether to account for invalid I/O operations "
4293 "in the statistics",
4294 },{
4295 .name = "stats-account-failed",
4296 .type = QEMU_OPT_BOOL,
4297 .help = "whether to account for failed I/O operations "
4298 "in the statistics",
4299 },
4300 { /* end of list */ }
4301 },
4302 };
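
/*
 * Illustrative command line for the throttling.* options above; the file
 * name and the limits are hypothetical.  This caps the drive at 1000 IOPS
 * steady state, with bursts of up to 2000 IOPS allowed for at most 60
 * seconds:
 *
 *   -drive file=/tmp/test.qcow2,if=virtio,throttling.iops-total=1000,throttling.iops-total-max=2000,throttling.iops-total-max-length=60
 */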
4303
4304 static QemuOptsList qemu_root_bds_opts = {
4305 .name = "root-bds",
4306 .head = QTAILQ_HEAD_INITIALIZER(qemu_root_bds_opts.head),
4307 .desc = {
4308 {
4309 .name = "discard",
4310 .type = QEMU_OPT_STRING,
4311 .help = "discard operation (ignore/off, unmap/on)",
4312 },{
4313 .name = "aio",
4314 .type = QEMU_OPT_STRING,
4315 .help = "host AIO implementation (threads, native)",
4316 },{
4317 .name = "read-only",
4318 .type = QEMU_OPT_BOOL,
4319 .help = "open drive file as read-only",
4320 },{
4321 .name = "copy-on-read",
4322 .type = QEMU_OPT_BOOL,
4323 .help = "copy read data from backing file into image file",
4324 },{
4325 .name = "detect-zeroes",
4326 .type = QEMU_OPT_STRING,
4327 .help = "try to optimize zero writes (off, on, unmap)",
4328 },
4329 { /* end of list */ }
4330 },
4331 };
4332
4333 QemuOptsList qemu_drive_opts = {
4334 .name = "drive",
4335 .head = QTAILQ_HEAD_INITIALIZER(qemu_drive_opts.head),
4336 .desc = {
4337 /*
4338 * no elements => accept any params
4339 * validation will happen later
4340 */
4341 { /* end of list */ }
4342 },
4343 };