1/*
2 * QEMU System Emulator block driver
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24#include "config-host.h"
25#include "qemu-common.h"
26#include "trace.h"
27#include "block/block_int.h"
28#include "block/blockjob.h"
29#include "qemu/module.h"
30#include "qapi/qmp/qjson.h"
31#include "sysemu/block-backend.h"
32#include "sysemu/sysemu.h"
33#include "qemu/notify.h"
34#include "block/coroutine.h"
35#include "block/qapi.h"
36#include "qmp-commands.h"
37#include "qemu/timer.h"
38#include "qapi-event.h"
39
40#ifdef CONFIG_BSD
41#include <sys/types.h>
42#include <sys/stat.h>
43#include <sys/ioctl.h>
44#include <sys/queue.h>
45#ifndef __DragonFly__
46#include <sys/disk.h>
47#endif
48#endif
49
50#ifdef _WIN32
51#include <windows.h>
52#endif
53
54struct BdrvDirtyBitmap {
55 HBitmap *bitmap;
56 QLIST_ENTRY(BdrvDirtyBitmap) list;
57};
58
59#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
60
61static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
62 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
63 BlockCompletionFunc *cb, void *opaque);
64static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
65 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
66 BlockCompletionFunc *cb, void *opaque);
67static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
68 int64_t sector_num, int nb_sectors,
69 QEMUIOVector *iov);
70static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
71 int64_t sector_num, int nb_sectors,
72 QEMUIOVector *iov);
73static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
74 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
75 BdrvRequestFlags flags);
76static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
77 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
78 BdrvRequestFlags flags);
79static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
80 int64_t sector_num,
81 QEMUIOVector *qiov,
82 int nb_sectors,
83 BdrvRequestFlags flags,
84 BlockCompletionFunc *cb,
85 void *opaque,
86 bool is_write);
87static void coroutine_fn bdrv_co_do_rw(void *opaque);
88static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
89 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
90
91static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
92 QTAILQ_HEAD_INITIALIZER(bdrv_states);
93
94static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
95 QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);
96
97static QLIST_HEAD(, BlockDriver) bdrv_drivers =
98 QLIST_HEAD_INITIALIZER(bdrv_drivers);
99
100static void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
101 int nr_sectors);
102static void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
103 int nr_sectors);
104/* If non-zero, use only whitelisted block drivers */
105static int use_bdrv_whitelist;
106
107#ifdef _WIN32
108static int is_windows_drive_prefix(const char *filename)
109{
110 return (((filename[0] >= 'a' && filename[0] <= 'z') ||
111 (filename[0] >= 'A' && filename[0] <= 'Z')) &&
112 filename[1] == ':');
113}
114
115int is_windows_drive(const char *filename)
116{
117 if (is_windows_drive_prefix(filename) &&
118 filename[2] == '\0')
119 return 1;
120 if (strstart(filename, "\\\\.\\", NULL) ||
121 strstart(filename, "//./", NULL))
122 return 1;
123 return 0;
124}
125#endif
126
127/* throttling disk I/O limits */
128void bdrv_set_io_limits(BlockDriverState *bs,
129 ThrottleConfig *cfg)
130{
131 int i;
132
133 throttle_config(&bs->throttle_state, cfg);
134
135 for (i = 0; i < 2; i++) {
136 qemu_co_enter_next(&bs->throttled_reqs[i]);
137 }
138}
139
140/* this function drains all the throttled I/Os */
141static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
142{
143 bool drained = false;
144 bool enabled = bs->io_limits_enabled;
145 int i;
146
147 bs->io_limits_enabled = false;
148
149 for (i = 0; i < 2; i++) {
150 while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
151 drained = true;
152 }
153 }
154
155 bs->io_limits_enabled = enabled;
156
157 return drained;
158}
159
160void bdrv_io_limits_disable(BlockDriverState *bs)
161{
162 bs->io_limits_enabled = false;
163
164 bdrv_start_throttled_reqs(bs);
165
166 throttle_destroy(&bs->throttle_state);
167}
168
169static void bdrv_throttle_read_timer_cb(void *opaque)
170{
171 BlockDriverState *bs = opaque;
172 qemu_co_enter_next(&bs->throttled_reqs[0]);
173}
174
175static void bdrv_throttle_write_timer_cb(void *opaque)
176{
177 BlockDriverState *bs = opaque;
178 qemu_co_enter_next(&bs->throttled_reqs[1]);
179}
180
181/* should be called before bdrv_set_io_limits if a limit is set */
182void bdrv_io_limits_enable(BlockDriverState *bs)
183{
184 assert(!bs->io_limits_enabled);
185 throttle_init(&bs->throttle_state,
186 bdrv_get_aio_context(bs),
187 QEMU_CLOCK_VIRTUAL,
188 bdrv_throttle_read_timer_cb,
189 bdrv_throttle_write_timer_cb,
190 bs);
191 bs->io_limits_enabled = true;
192}
193
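/*
 * Example (illustrative sketch): applying a throttle limit follows the
 * ordering documented above -- bdrv_io_limits_enable() first, then
 * bdrv_set_io_limits().  It assumes the ThrottleConfig/LeakyBucket layout
 * from qemu/throttle.h; the numbers are arbitrary.
 *
 *     ThrottleConfig cfg = { 0 };
 *     cfg.buckets[THROTTLE_BPS_TOTAL].avg = 10 * 1024 * 1024;  // 10 MB/s
 *     cfg.buckets[THROTTLE_OPS_TOTAL].avg = 1000;              // 1000 IOPS
 *
 *     if (!bs->io_limits_enabled) {
 *         bdrv_io_limits_enable(bs);
 *     }
 *     bdrv_set_io_limits(bs, &cfg);
 */
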
194/* This function makes an I/O request wait if needed
195 *
196 * @bytes: the size of the I/O in bytes
197 * @is_write: true if the I/O is a write
198 */
199static void bdrv_io_limits_intercept(BlockDriverState *bs,
200 unsigned int bytes,
201 bool is_write)
202{
203    /* does this I/O have to wait? */
204 bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);
205
206    /* if we must wait, or any request of this type is already queued, queue the I/O */
207 if (must_wait ||
208 !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
209 qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
210 }
211
212 /* the IO will be executed, do the accounting */
213 throttle_account(&bs->throttle_state, is_write, bytes);
214
215
216 /* if the next request must wait -> do nothing */
217 if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
218 return;
219 }
220
221 /* else queue next request for execution */
222 qemu_co_queue_next(&bs->throttled_reqs[is_write]);
223}
224
225size_t bdrv_opt_mem_align(BlockDriverState *bs)
226{
227 if (!bs || !bs->drv) {
228 /* 4k should be on the safe side */
229 return 4096;
230 }
231
232 return bs->bl.opt_mem_alignment;
233}
234
235/* check if the path starts with "<protocol>:" */
236int path_has_protocol(const char *path)
237{
238 const char *p;
239
240#ifdef _WIN32
241 if (is_windows_drive(path) ||
242 is_windows_drive_prefix(path)) {
243 return 0;
244 }
245 p = path + strcspn(path, ":/\\");
246#else
247 p = path + strcspn(path, ":/");
248#endif
249
250 return *p == ':';
251}
252
253int path_is_absolute(const char *path)
254{
255#ifdef _WIN32
256 /* specific case for names like: "\\.\d:" */
257 if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
258 return 1;
259 }
260 return (*path == '/' || *path == '\\');
261#else
262 return (*path == '/');
263#endif
264}
265
266/* If filename is absolute, just copy it to dest. Otherwise, build a
267   path to it by treating it as relative to base_path. URLs are
268   supported. */
269void path_combine(char *dest, int dest_size,
270 const char *base_path,
271 const char *filename)
272{
273 const char *p, *p1;
274 int len;
275
276 if (dest_size <= 0)
277 return;
278 if (path_is_absolute(filename)) {
279 pstrcpy(dest, dest_size, filename);
280 } else {
281 p = strchr(base_path, ':');
282 if (p)
283 p++;
284 else
285 p = base_path;
286 p1 = strrchr(base_path, '/');
287#ifdef _WIN32
288 {
289 const char *p2;
290 p2 = strrchr(base_path, '\\');
291 if (!p1 || p2 > p1)
292 p1 = p2;
293 }
294#endif
295 if (p1)
296 p1++;
297 else
298 p1 = base_path;
299 if (p1 > p)
300 p = p1;
301 len = p - base_path;
302 if (len > dest_size - 1)
303 len = dest_size - 1;
304 memcpy(dest, base_path, len);
305 dest[len] = '\0';
306 pstrcat(dest, dest_size, filename);
307 }
308}
309
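/*
 * Example (illustrative sketch) of how path_combine() resolves a backing
 * file name relative to the image that references it; the paths are made
 * up for the example.
 *
 *     char dest[PATH_MAX];
 *
 *     path_combine(dest, sizeof(dest), "/images/base.qcow2", "backing.raw");
 *     // dest is now "/images/backing.raw"
 *
 *     path_combine(dest, sizeof(dest), "/images/base.qcow2", "/abs/other.raw");
 *     // absolute names are copied unchanged: dest is "/abs/other.raw"
 */
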
310void bdrv_get_full_backing_filename_from_filename(const char *backed,
311 const char *backing,
312 char *dest, size_t sz,
313 Error **errp)
314{
315 if (backing[0] == '\0' || path_has_protocol(backing) ||
316 path_is_absolute(backing))
317 {
318 pstrcpy(dest, sz, backing);
319 } else if (backed[0] == '\0' || strstart(backed, "json:", NULL)) {
320 error_setg(errp, "Cannot use relative backing file names for '%s'",
321 backed);
322 } else {
323 path_combine(dest, sz, backed, backing);
324 }
325}
326
327void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz,
328 Error **errp)
329{
330 char *backed = bs->exact_filename[0] ? bs->exact_filename : bs->filename;
331
332 bdrv_get_full_backing_filename_from_filename(backed, bs->backing_file,
333 dest, sz, errp);
334}
335
336void bdrv_register(BlockDriver *bdrv)
337{
338 /* Block drivers without coroutine functions need emulation */
339 if (!bdrv->bdrv_co_readv) {
340 bdrv->bdrv_co_readv = bdrv_co_readv_em;
341 bdrv->bdrv_co_writev = bdrv_co_writev_em;
342
343        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
344 * the block driver lacks aio we need to emulate that too.
345 */
346 if (!bdrv->bdrv_aio_readv) {
347 /* add AIO emulation layer */
348 bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
349 bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
350 }
351 }
352
353 QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
354}
355
356BlockDriverState *bdrv_new_root(void)
357{
358 BlockDriverState *bs = bdrv_new();
359
360 QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
361 return bs;
362}
363
364BlockDriverState *bdrv_new(void)
365{
366 BlockDriverState *bs;
367 int i;
368
369 bs = g_new0(BlockDriverState, 1);
370 QLIST_INIT(&bs->dirty_bitmaps);
371 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
372 QLIST_INIT(&bs->op_blockers[i]);
373 }
374 bdrv_iostatus_disable(bs);
375 notifier_list_init(&bs->close_notifiers);
376 notifier_with_return_list_init(&bs->before_write_notifiers);
377 qemu_co_queue_init(&bs->throttled_reqs[0]);
378 qemu_co_queue_init(&bs->throttled_reqs[1]);
379 bs->refcnt = 1;
380 bs->aio_context = qemu_get_aio_context();
381
382 return bs;
383}
384
385void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
386{
387 notifier_list_add(&bs->close_notifiers, notify);
388}
389
390BlockDriver *bdrv_find_format(const char *format_name)
391{
392 BlockDriver *drv1;
393 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
394 if (!strcmp(drv1->format_name, format_name)) {
395 return drv1;
396 }
397 }
398 return NULL;
399}
400
401static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
402{
403 static const char *whitelist_rw[] = {
404 CONFIG_BDRV_RW_WHITELIST
405 };
406 static const char *whitelist_ro[] = {
407 CONFIG_BDRV_RO_WHITELIST
408 };
409 const char **p;
410
411 if (!whitelist_rw[0] && !whitelist_ro[0]) {
412 return 1; /* no whitelist, anything goes */
413 }
414
415 for (p = whitelist_rw; *p; p++) {
416 if (!strcmp(drv->format_name, *p)) {
417 return 1;
418 }
419 }
420 if (read_only) {
421 for (p = whitelist_ro; *p; p++) {
422 if (!strcmp(drv->format_name, *p)) {
423 return 1;
424 }
425 }
426 }
427 return 0;
428}
429
430BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
431 bool read_only)
432{
433 BlockDriver *drv = bdrv_find_format(format_name);
434 return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
435}
436
437typedef struct CreateCo {
438 BlockDriver *drv;
439 char *filename;
440 QemuOpts *opts;
441 int ret;
442 Error *err;
443} CreateCo;
444
445static void coroutine_fn bdrv_create_co_entry(void *opaque)
446{
447 Error *local_err = NULL;
448 int ret;
449
450 CreateCo *cco = opaque;
451 assert(cco->drv);
452
453 ret = cco->drv->bdrv_create(cco->filename, cco->opts, &local_err);
454 if (local_err) {
455 error_propagate(&cco->err, local_err);
456 }
457 cco->ret = ret;
458}
459
460int bdrv_create(BlockDriver *drv, const char* filename,
461 QemuOpts *opts, Error **errp)
462{
463 int ret;
464
465 Coroutine *co;
466 CreateCo cco = {
467 .drv = drv,
468 .filename = g_strdup(filename),
469 .opts = opts,
470 .ret = NOT_DONE,
471 .err = NULL,
472 };
473
474 if (!drv->bdrv_create) {
475 error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
476 ret = -ENOTSUP;
477 goto out;
478 }
479
480 if (qemu_in_coroutine()) {
481 /* Fast-path if already in coroutine context */
482 bdrv_create_co_entry(&cco);
483 } else {
484 co = qemu_coroutine_create(bdrv_create_co_entry);
485 qemu_coroutine_enter(co, &cco);
486 while (cco.ret == NOT_DONE) {
487 aio_poll(qemu_get_aio_context(), true);
488 }
489 }
490
491 ret = cco.ret;
492 if (ret < 0) {
493 if (cco.err) {
494 error_propagate(errp, cco.err);
495 } else {
496 error_setg_errno(errp, -ret, "Could not create image");
497 }
498 }
499
500out:
501 g_free(cco.filename);
502 return ret;
503}
504
505int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp)
506{
507 BlockDriver *drv;
508 Error *local_err = NULL;
509 int ret;
510
511 drv = bdrv_find_protocol(filename, true, errp);
512 if (drv == NULL) {
513 return -ENOENT;
514 }
515
516 ret = bdrv_create(drv, filename, opts, &local_err);
517 if (local_err) {
518 error_propagate(errp, local_err);
519 }
520 return ret;
521}
522
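/*
 * Example (illustrative sketch) of creating an image through the protocol
 * layer with bdrv_create_file().  It assumes the 'file' protocol driver
 * (bdrv_file) exposes a create_opts list accepting BLOCK_OPT_SIZE, in the
 * same way bdrv_qcow2.create_opts is used further below; the path is an
 * arbitrary example.
 *
 *     Error *err = NULL;
 *     QemuOpts *opts = qemu_opts_create(bdrv_file.create_opts, NULL, 0,
 *                                       &error_abort);
 *     qemu_opt_set_number(opts, BLOCK_OPT_SIZE, 64 * 1024 * 1024,
 *                         &error_abort);
 *     if (bdrv_create_file("/tmp/example.img", opts, &err) < 0) {
 *         error_report("%s", error_get_pretty(err));
 *         error_free(err);
 *     }
 *     qemu_opts_del(opts);
 */
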
523void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
524{
525 BlockDriver *drv = bs->drv;
526 Error *local_err = NULL;
527
528 memset(&bs->bl, 0, sizeof(bs->bl));
529
530 if (!drv) {
531 return;
532 }
533
534 /* Take some limits from the children as a default */
535 if (bs->file) {
536 bdrv_refresh_limits(bs->file, &local_err);
537 if (local_err) {
538 error_propagate(errp, local_err);
539 return;
540 }
541 bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
542 bs->bl.max_transfer_length = bs->file->bl.max_transfer_length;
543 bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
544 } else {
545 bs->bl.opt_mem_alignment = 512;
546 }
547
548 if (bs->backing_hd) {
549 bdrv_refresh_limits(bs->backing_hd, &local_err);
550 if (local_err) {
551 error_propagate(errp, local_err);
552 return;
553 }
554 bs->bl.opt_transfer_length =
555 MAX(bs->bl.opt_transfer_length,
556 bs->backing_hd->bl.opt_transfer_length);
557 bs->bl.max_transfer_length =
558 MIN_NON_ZERO(bs->bl.max_transfer_length,
559 bs->backing_hd->bl.max_transfer_length);
560 bs->bl.opt_mem_alignment =
561 MAX(bs->bl.opt_mem_alignment,
562 bs->backing_hd->bl.opt_mem_alignment);
563 }
564
565 /* Then let the driver override it */
566 if (drv->bdrv_refresh_limits) {
567 drv->bdrv_refresh_limits(bs, errp);
568 }
569}
570
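/*
 * Worked example (illustrative): if bs->file reports opt_transfer_length 8
 * and the backing file reports 16, the merged value above is MAX(8, 16) = 16.
 * max_transfer_length instead uses MIN_NON_ZERO, so an unset (0) value in
 * one child does not clamp the limit taken from the other.
 */
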
571/**
572 * Try to get @bs's logical and physical block size.
573 * On success, store them in @bsz struct and return 0.
574 * On failure return -errno.
575 * @bs must not be empty.
576 */
577int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
578{
579 BlockDriver *drv = bs->drv;
580
581 if (drv && drv->bdrv_probe_blocksizes) {
582 return drv->bdrv_probe_blocksizes(bs, bsz);
583 }
584
585 return -ENOTSUP;
586}
587
588/**
589 * Try to get @bs's geometry (cyls, heads, sectors).
590 * On success, store them in @geo struct and return 0.
591 * On failure return -errno.
592 * @bs must not be empty.
593 */
594int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
595{
596 BlockDriver *drv = bs->drv;
597
598 if (drv && drv->bdrv_probe_geometry) {
599 return drv->bdrv_probe_geometry(bs, geo);
600 }
601
602 return -ENOTSUP;
603}
604
605/*
606 * Create a uniquely-named empty temporary file.
607 * Return 0 upon success, otherwise a negative errno value.
608 */
609int get_tmp_filename(char *filename, int size)
610{
611#ifdef _WIN32
612 char temp_dir[MAX_PATH];
613 /* GetTempFileName requires that its output buffer (4th param)
614 have length MAX_PATH or greater. */
615 assert(size >= MAX_PATH);
616 return (GetTempPath(MAX_PATH, temp_dir)
617 && GetTempFileName(temp_dir, "qem", 0, filename)
618 ? 0 : -GetLastError());
619#else
620 int fd;
621 const char *tmpdir;
622 tmpdir = getenv("TMPDIR");
623 if (!tmpdir) {
624 tmpdir = "/var/tmp";
625 }
626 if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
627 return -EOVERFLOW;
628 }
629 fd = mkstemp(filename);
630 if (fd < 0) {
631 return -errno;
632 }
633 if (close(fd) != 0) {
634 unlink(filename);
635 return -errno;
636 }
637 return 0;
638#endif
639}
640
641/*
642 * Detect host devices. By convention, /dev/cdrom[N] is always
643 * recognized as a host CDROM.
644 */
645static BlockDriver *find_hdev_driver(const char *filename)
646{
647 int score_max = 0, score;
648 BlockDriver *drv = NULL, *d;
649
650 QLIST_FOREACH(d, &bdrv_drivers, list) {
651 if (d->bdrv_probe_device) {
652 score = d->bdrv_probe_device(filename);
653 if (score > score_max) {
654 score_max = score;
655 drv = d;
656 }
657 }
658 }
659
660 return drv;
661}
662
663BlockDriver *bdrv_find_protocol(const char *filename,
664 bool allow_protocol_prefix,
665 Error **errp)
666{
667 BlockDriver *drv1;
668 char protocol[128];
669 int len;
670 const char *p;
671
672 /* TODO Drivers without bdrv_file_open must be specified explicitly */
673
674 /*
675 * XXX(hch): we really should not let host device detection
676 * override an explicit protocol specification, but moving this
677 * later breaks access to device names with colons in them.
678 * Thanks to the brain-dead persistent naming schemes on udev-
679 * based Linux systems those actually are quite common.
680 */
681 drv1 = find_hdev_driver(filename);
682 if (drv1) {
683 return drv1;
684 }
685
686 if (!path_has_protocol(filename) || !allow_protocol_prefix) {
687 return &bdrv_file;
688 }
689
690 p = strchr(filename, ':');
691 assert(p != NULL);
692 len = p - filename;
693 if (len > sizeof(protocol) - 1)
694 len = sizeof(protocol) - 1;
695 memcpy(protocol, filename, len);
696 protocol[len] = '\0';
697 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
698 if (drv1->protocol_name &&
699 !strcmp(drv1->protocol_name, protocol)) {
700 return drv1;
701 }
702 }
703
704 error_setg(errp, "Unknown protocol '%s'", protocol);
705 return NULL;
706}
707
708/*
709 * Guess image format by probing its contents.
710 * This is not a good idea when your image is raw (CVE-2008-2004), but
711 * we do it anyway for backward compatibility.
712 *
713 * @buf contains the image's first @buf_size bytes.
714 * @buf_size is the buffer size in bytes (generally BLOCK_PROBE_BUF_SIZE,
715 * but can be smaller if the image file is smaller)
716 * @filename is its filename.
717 *
718 * For all block drivers, call the bdrv_probe() method to get its
719 * probing score.
720 * Return the first block driver with the highest probing score.
721 */
722BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
723 const char *filename)
724{
725 int score_max = 0, score;
726 BlockDriver *drv = NULL, *d;
727
728 QLIST_FOREACH(d, &bdrv_drivers, list) {
729 if (d->bdrv_probe) {
730 score = d->bdrv_probe(buf, buf_size, filename);
731 if (score > score_max) {
732 score_max = score;
733 drv = d;
734 }
735 }
736 }
737
738 return drv;
739}
740
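/*
 * Example (illustrative sketch): a typical bdrv_probe() implementation
 * returns a high score when a format-specific magic matches and 0
 * otherwise, so bdrv_probe_all() prefers it over the raw fallback.  The
 * "EXMP" magic and the function name are invented for the example.
 *
 *     static int example_probe(const uint8_t *buf, int buf_size,
 *                              const char *filename)
 *     {
 *         if (buf_size >= 4 && !memcmp(buf, "EXMP", 4)) {
 *             return 100;   // strong match on the header magic
 *         }
 *         return 0;         // let other drivers (or raw) win
 *     }
 */
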
741static int find_image_format(BlockDriverState *bs, const char *filename,
742 BlockDriver **pdrv, Error **errp)
743{
744 BlockDriver *drv;
745 uint8_t buf[BLOCK_PROBE_BUF_SIZE];
746 int ret = 0;
747
748    /* Return the raw BlockDriver * for scsi-generic devices or empty drives */
749 if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
750 *pdrv = &bdrv_raw;
751 return ret;
752 }
753
754 ret = bdrv_pread(bs, 0, buf, sizeof(buf));
755 if (ret < 0) {
756 error_setg_errno(errp, -ret, "Could not read image for determining its "
757 "format");
758 *pdrv = NULL;
759 return ret;
760 }
761
762 drv = bdrv_probe_all(buf, ret, filename);
763 if (!drv) {
764 error_setg(errp, "Could not determine image format: No compatible "
765 "driver found");
766 ret = -ENOENT;
767 }
768 *pdrv = drv;
769 return ret;
770}
771
772/**
773 * Set the current 'total_sectors' value
774 * Return 0 on success, -errno on error.
775 */
776static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
777{
778 BlockDriver *drv = bs->drv;
779
780 /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
781 if (bs->sg)
782 return 0;
783
784 /* query actual device if possible, otherwise just trust the hint */
785 if (drv->bdrv_getlength) {
786 int64_t length = drv->bdrv_getlength(bs);
787 if (length < 0) {
788 return length;
789 }
790 hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
791 }
792
793 bs->total_sectors = hint;
794 return 0;
795}
796
797/**
798 * Set open flags for a given discard mode
799 *
800 * Return 0 on success, -1 if the discard mode was invalid.
801 */
802int bdrv_parse_discard_flags(const char *mode, int *flags)
803{
804 *flags &= ~BDRV_O_UNMAP;
805
806 if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
807 /* do nothing */
808 } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
809 *flags |= BDRV_O_UNMAP;
810 } else {
811 return -1;
812 }
813
814 return 0;
815}
816
817/**
818 * Set open flags for a given cache mode
819 *
820 * Return 0 on success, -1 if the cache mode was invalid.
821 */
822int bdrv_parse_cache_flags(const char *mode, int *flags)
823{
824 *flags &= ~BDRV_O_CACHE_MASK;
825
826 if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
827 *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
828 } else if (!strcmp(mode, "directsync")) {
829 *flags |= BDRV_O_NOCACHE;
830 } else if (!strcmp(mode, "writeback")) {
831 *flags |= BDRV_O_CACHE_WB;
832 } else if (!strcmp(mode, "unsafe")) {
833 *flags |= BDRV_O_CACHE_WB;
834 *flags |= BDRV_O_NO_FLUSH;
835 } else if (!strcmp(mode, "writethrough")) {
836 /* this is the default */
837 } else {
838 return -1;
839 }
840
841 return 0;
842}
843
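/*
 * Example (illustrative sketch): folding a cache= option string into the
 * open flags before bdrv_open().
 *
 *     int flags = BDRV_O_RDWR;
 *
 *     if (bdrv_parse_cache_flags("none", &flags) != 0) {
 *         // invalid cache mode, report an error
 *     }
 *     // "none" selects O_DIRECT-style access while keeping a writeback
 *     // guest view: flags now includes BDRV_O_NOCACHE | BDRV_O_CACHE_WB
 */
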
844/**
845 * The copy-on-read flag is actually a reference count so multiple users may
846 * use the feature without worrying about clobbering its previous state.
847 * Copy-on-read stays enabled until all users have called to disable it.
848 */
849void bdrv_enable_copy_on_read(BlockDriverState *bs)
850{
851 bs->copy_on_read++;
852}
853
854void bdrv_disable_copy_on_read(BlockDriverState *bs)
855{
856 assert(bs->copy_on_read > 0);
857 bs->copy_on_read--;
858}
859
860/*
861 * Returns the flags that a temporary snapshot should get, based on the
862 * originally requested flags (the originally requested image will have flags
863 * like a backing file)
864 */
865static int bdrv_temp_snapshot_flags(int flags)
866{
867 return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY;
868}
869
870/*
871 * Returns the flags that bs->file should get, based on the given flags for
872 * the parent BDS
873 */
874static int bdrv_inherited_flags(int flags)
875{
876 /* Enable protocol handling, disable format probing for bs->file */
877 flags |= BDRV_O_PROTOCOL;
878
879 /* Our block drivers take care to send flushes and respect unmap policy,
880 * so we can enable both unconditionally on lower layers. */
881 flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;
882
883 /* Clear flags that only apply to the top layer */
884 flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);
885
886 return flags;
887}
888
889/*
890 * Returns the flags that bs->backing_hd should get, based on the given flags
891 * for the parent BDS
892 */
893static int bdrv_backing_flags(int flags)
894{
895    /* backing files are always opened read-only */
896 flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ);
897
898 /* snapshot=on is handled on the top layer */
899 flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY);
900
901 return flags;
902}
903
904static int bdrv_open_flags(BlockDriverState *bs, int flags)
905{
906 int open_flags = flags | BDRV_O_CACHE_WB;
907
908 /*
909 * Clear flags that are internal to the block layer before opening the
910 * image.
911 */
912 open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_PROTOCOL);
913
914 /*
915 * Snapshots should be writable.
916 */
917 if (flags & BDRV_O_TEMPORARY) {
918 open_flags |= BDRV_O_RDWR;
919 }
920
921 return open_flags;
922}
923
924static void bdrv_assign_node_name(BlockDriverState *bs,
925 const char *node_name,
926 Error **errp)
927{
928 if (!node_name) {
929 return;
930 }
931
932 /* Check for empty string or invalid characters */
933 if (!id_wellformed(node_name)) {
934 error_setg(errp, "Invalid node name");
935 return;
936 }
937
938    /* takes care of avoiding namespace collisions */
939 if (blk_by_name(node_name)) {
940 error_setg(errp, "node-name=%s is conflicting with a device id",
941 node_name);
942 return;
943 }
944
945    /* takes care of avoiding duplicate node names */
946 if (bdrv_find_node(node_name)) {
947 error_setg(errp, "Duplicate node name");
948 return;
949 }
950
951 /* copy node name into the bs and insert it into the graph list */
952 pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
953 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
954}
955
956/*
957 * Common part for opening disk images and files
958 *
959 * Removes all processed options from *options.
960 */
961static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
962 QDict *options, int flags, BlockDriver *drv, Error **errp)
963{
964 int ret, open_flags;
965 const char *filename;
966 const char *node_name = NULL;
967 Error *local_err = NULL;
968
969 assert(drv != NULL);
970 assert(bs->file == NULL);
971 assert(options != NULL && bs->options != options);
972
973 if (file != NULL) {
974 filename = file->filename;
975 } else {
976 filename = qdict_get_try_str(options, "filename");
977 }
978
979 if (drv->bdrv_needs_filename && !filename) {
980 error_setg(errp, "The '%s' block driver requires a file name",
981 drv->format_name);
982 return -EINVAL;
983 }
984
985 trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);
986
987 node_name = qdict_get_try_str(options, "node-name");
988 bdrv_assign_node_name(bs, node_name, &local_err);
989 if (local_err) {
990 error_propagate(errp, local_err);
991 return -EINVAL;
992 }
993 qdict_del(options, "node-name");
994
995    /* bdrv_open() was called directly with a protocol driver as drv. That layer
996     * is already opened, so assign it to bs (while file becomes a closed
997     * BlockDriverState) and return immediately. */
998 if (file != NULL && drv->bdrv_file_open) {
999 bdrv_swap(file, bs);
1000 return 0;
1001 }
1002
1003 bs->open_flags = flags;
1004 bs->guest_block_size = 512;
1005 bs->request_alignment = 512;
1006 bs->zero_beyond_eof = true;
1007 open_flags = bdrv_open_flags(bs, flags);
1008 bs->read_only = !(open_flags & BDRV_O_RDWR);
1009
1010 if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
1011 error_setg(errp,
1012 !bs->read_only && bdrv_is_whitelisted(drv, true)
1013 ? "Driver '%s' can only be used for read-only devices"
1014 : "Driver '%s' is not whitelisted",
1015 drv->format_name);
1016 return -ENOTSUP;
1017 }
1018
1019 assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
1020 if (flags & BDRV_O_COPY_ON_READ) {
1021 if (!bs->read_only) {
1022 bdrv_enable_copy_on_read(bs);
1023 } else {
1024 error_setg(errp, "Can't use copy-on-read on read-only device");
1025 return -EINVAL;
1026 }
1027 }
1028
1029 if (filename != NULL) {
1030 pstrcpy(bs->filename, sizeof(bs->filename), filename);
1031 } else {
1032 bs->filename[0] = '\0';
1033 }
1034 pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), bs->filename);
1035
1036 bs->drv = drv;
1037 bs->opaque = g_malloc0(drv->instance_size);
1038
1039 bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);
1040
1041 /* Open the image, either directly or using a protocol */
1042 if (drv->bdrv_file_open) {
1043 assert(file == NULL);
1044 assert(!drv->bdrv_needs_filename || filename != NULL);
1045 ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
1046 } else {
1047 if (file == NULL) {
1048 error_setg(errp, "Can't use '%s' as a block driver for the "
1049 "protocol level", drv->format_name);
1050 ret = -EINVAL;
1051 goto free_and_fail;
1052 }
1053 bs->file = file;
1054 ret = drv->bdrv_open(bs, options, open_flags, &local_err);
1055 }
1056
1057 if (ret < 0) {
1058 if (local_err) {
1059 error_propagate(errp, local_err);
1060 } else if (bs->filename[0]) {
1061 error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
1062 } else {
1063 error_setg_errno(errp, -ret, "Could not open image");
1064 }
1065 goto free_and_fail;
1066 }
1067
1068 if (bs->encrypted) {
1069 error_report("Encrypted images are deprecated");
1070 error_printf("Support for them will be removed in a future release.\n"
1071 "You can use 'qemu-img convert' to convert your image"
1072 " to an unencrypted one.\n");
1073 }
1074
1075 ret = refresh_total_sectors(bs, bs->total_sectors);
1076 if (ret < 0) {
1077 error_setg_errno(errp, -ret, "Could not refresh total sector count");
1078 goto free_and_fail;
1079 }
1080
1081 bdrv_refresh_limits(bs, &local_err);
1082 if (local_err) {
1083 error_propagate(errp, local_err);
1084 ret = -EINVAL;
1085 goto free_and_fail;
1086 }
1087
1088 assert(bdrv_opt_mem_align(bs) != 0);
1089 assert((bs->request_alignment != 0) || bs->sg);
1090 return 0;
1091
1092free_and_fail:
1093 bs->file = NULL;
1094 g_free(bs->opaque);
1095 bs->opaque = NULL;
1096 bs->drv = NULL;
1097 return ret;
1098}
1099
1100static QDict *parse_json_filename(const char *filename, Error **errp)
1101{
1102 QObject *options_obj;
1103 QDict *options;
1104 int ret;
1105
1106 ret = strstart(filename, "json:", &filename);
1107 assert(ret);
1108
1109 options_obj = qobject_from_json(filename);
1110 if (!options_obj) {
1111 error_setg(errp, "Could not parse the JSON options");
1112 return NULL;
1113 }
1114
1115 if (qobject_type(options_obj) != QTYPE_QDICT) {
1116 qobject_decref(options_obj);
1117 error_setg(errp, "Invalid JSON object given");
1118 return NULL;
1119 }
1120
1121 options = qobject_to_qdict(options_obj);
1122 qdict_flatten(options);
1123
1124 return options;
1125}
1126
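/*
 * Example (illustrative sketch): the json: pseudo-protocol parsed above
 * allows a whole options dictionary to be passed where only a filename
 * string is accepted, e.g.
 *
 *     json:{"driver": "qcow2",
 *           "file": {"driver": "file", "filename": "/tmp/test.qcow2"}}
 *
 * qdict_flatten() turns this into the equivalent of
 *     driver=qcow2, file.driver=file, file.filename=/tmp/test.qcow2
 * which bdrv_fill_options() below then merges with any directly given
 * options (the directly given ones win).
 */
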
1127/*
1128 * Fills in default options for opening images and converts the legacy
1129 * filename/flags pair to option QDict entries.
1130 */
1131static int bdrv_fill_options(QDict **options, const char **pfilename, int flags,
1132 BlockDriver *drv, Error **errp)
1133{
1134 const char *filename = *pfilename;
1135 const char *drvname;
1136 bool protocol = flags & BDRV_O_PROTOCOL;
1137 bool parse_filename = false;
1138 Error *local_err = NULL;
1139
1140 /* Parse json: pseudo-protocol */
1141 if (filename && g_str_has_prefix(filename, "json:")) {
1142 QDict *json_options = parse_json_filename(filename, &local_err);
1143 if (local_err) {
1144 error_propagate(errp, local_err);
1145 return -EINVAL;
1146 }
1147
1148 /* Options given in the filename have lower priority than options
1149 * specified directly */
1150 qdict_join(*options, json_options, false);
1151 QDECREF(json_options);
1152 *pfilename = filename = NULL;
1153 }
1154
1155 /* Fetch the file name from the options QDict if necessary */
1156 if (protocol && filename) {
1157 if (!qdict_haskey(*options, "filename")) {
1158 qdict_put(*options, "filename", qstring_from_str(filename));
1159 parse_filename = true;
1160 } else {
1161 error_setg(errp, "Can't specify 'file' and 'filename' options at "
1162 "the same time");
1163 return -EINVAL;
1164 }
1165 }
1166
1167 /* Find the right block driver */
1168 filename = qdict_get_try_str(*options, "filename");
1169 drvname = qdict_get_try_str(*options, "driver");
1170
1171 if (drv) {
1172 if (drvname) {
1173 error_setg(errp, "Driver specified twice");
1174 return -EINVAL;
1175 }
1176 drvname = drv->format_name;
1177 qdict_put(*options, "driver", qstring_from_str(drvname));
1178 } else {
1179 if (!drvname && protocol) {
1180 if (filename) {
1181 drv = bdrv_find_protocol(filename, parse_filename, errp);
1182 if (!drv) {
1183 return -EINVAL;
1184 }
1185
1186 drvname = drv->format_name;
1187 qdict_put(*options, "driver", qstring_from_str(drvname));
1188 } else {
1189 error_setg(errp, "Must specify either driver or file");
1190 return -EINVAL;
1191 }
1192 } else if (drvname) {
1193 drv = bdrv_find_format(drvname);
1194 if (!drv) {
1195 error_setg(errp, "Unknown driver '%s'", drvname);
1196 return -ENOENT;
1197 }
1198 }
1199 }
1200
1201 assert(drv || !protocol);
1202
1203 /* Driver-specific filename parsing */
1204 if (drv && drv->bdrv_parse_filename && parse_filename) {
1205 drv->bdrv_parse_filename(filename, *options, &local_err);
1206 if (local_err) {
1207 error_propagate(errp, local_err);
1208 return -EINVAL;
1209 }
1210
1211 if (!drv->bdrv_needs_filename) {
1212 qdict_del(*options, "filename");
1213 }
1214 }
1215
1216 return 0;
1217}
1218
1219void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd)
1220{
1221
1222 if (bs->backing_hd) {
1223 assert(bs->backing_blocker);
1224 bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker);
1225 } else if (backing_hd) {
1226 error_setg(&bs->backing_blocker,
1227 "device is used as backing hd of '%s'",
1228 bdrv_get_device_name(bs));
1229 }
1230
1231 bs->backing_hd = backing_hd;
1232 if (!backing_hd) {
1233 error_free(bs->backing_blocker);
1234 bs->backing_blocker = NULL;
1235 goto out;
1236 }
1237 bs->open_flags &= ~BDRV_O_NO_BACKING;
1238 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_hd->filename);
1239 pstrcpy(bs->backing_format, sizeof(bs->backing_format),
1240 backing_hd->drv ? backing_hd->drv->format_name : "");
1241
1242 bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
1243 /* Otherwise we won't be able to commit due to check in bdrv_commit */
1244 bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT_TARGET,
1245 bs->backing_blocker);
1246out:
1247 bdrv_refresh_limits(bs, NULL);
1248}
1249
1250/*
1251 * Opens the backing file for a BlockDriverState if not yet open
1252 *
1253 * options is a QDict of options to pass to the block drivers, or NULL for an
1254 * empty set of options. The reference to the QDict is transferred to this
1255 * function (even on failure), so if the caller intends to reuse the dictionary,
1256 * it needs to use QINCREF() before calling bdrv_open_backing_file().
1257 */
1258int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
1259{
1260 char *backing_filename = g_malloc0(PATH_MAX);
1261 int ret = 0;
1262 BlockDriverState *backing_hd;
1263 Error *local_err = NULL;
1264
1265 if (bs->backing_hd != NULL) {
1266 QDECREF(options);
1267 goto free_exit;
1268 }
1269
1270 /* NULL means an empty set of options */
1271 if (options == NULL) {
1272 options = qdict_new();
1273 }
1274
1275 bs->open_flags &= ~BDRV_O_NO_BACKING;
1276 if (qdict_haskey(options, "file.filename")) {
1277 backing_filename[0] = '\0';
1278 } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
1279 QDECREF(options);
1280 goto free_exit;
1281 } else {
1282 bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX,
1283 &local_err);
1284 if (local_err) {
1285 ret = -EINVAL;
1286 error_propagate(errp, local_err);
1287 QDECREF(options);
1288 goto free_exit;
1289 }
1290 }
1291
1292 if (!bs->drv || !bs->drv->supports_backing) {
1293 ret = -EINVAL;
1294 error_setg(errp, "Driver doesn't support backing files");
1295 QDECREF(options);
1296 goto free_exit;
1297 }
1298
1299 backing_hd = bdrv_new();
1300
1301 if (bs->backing_format[0] != '\0' && !qdict_haskey(options, "driver")) {
1302 qdict_put(options, "driver", qstring_from_str(bs->backing_format));
1303 }
1304
1305 assert(bs->backing_hd == NULL);
1306 ret = bdrv_open(&backing_hd,
1307 *backing_filename ? backing_filename : NULL, NULL, options,
1308 bdrv_backing_flags(bs->open_flags), NULL, &local_err);
1309 if (ret < 0) {
1310 bdrv_unref(backing_hd);
1311 backing_hd = NULL;
1312 bs->open_flags |= BDRV_O_NO_BACKING;
1313 error_setg(errp, "Could not open backing file: %s",
1314 error_get_pretty(local_err));
1315 error_free(local_err);
1316 goto free_exit;
1317 }
1318 bdrv_set_backing_hd(bs, backing_hd);
1319
1320free_exit:
1321 g_free(backing_filename);
1322 return ret;
1323}
1324
1325/*
1326 * Opens a disk image whose options are given as BlockdevRef in another block
1327 * device's options.
1328 *
1329 * If allow_none is true, no image will be opened if filename is NULL and no
1330 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
1331 *
1332 * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
1333 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
1334 * itself, all options starting with "${bdref_key}." are considered part of the
1335 * BlockdevRef.
1336 *
1337 * The BlockdevRef will be removed from the options QDict.
1338 *
1339 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
1340 */
1341int bdrv_open_image(BlockDriverState **pbs, const char *filename,
1342 QDict *options, const char *bdref_key, int flags,
1343 bool allow_none, Error **errp)
1344{
1345 QDict *image_options;
1346 int ret;
1347 char *bdref_key_dot;
1348 const char *reference;
1349
1350 assert(pbs);
1351 assert(*pbs == NULL);
1352
1353 bdref_key_dot = g_strdup_printf("%s.", bdref_key);
1354 qdict_extract_subqdict(options, &image_options, bdref_key_dot);
1355 g_free(bdref_key_dot);
1356
1357 reference = qdict_get_try_str(options, bdref_key);
1358 if (!filename && !reference && !qdict_size(image_options)) {
1359 if (allow_none) {
1360 ret = 0;
1361 } else {
1362 error_setg(errp, "A block device must be specified for \"%s\"",
1363 bdref_key);
1364 ret = -EINVAL;
1365 }
1366 QDECREF(image_options);
1367 goto done;
1368 }
1369
1370 ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);
1371
1372done:
1373 qdict_del(options, bdref_key);
1374 return ret;
1375}
1376
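/*
 * Example (illustrative sketch): opening a child node via a BlockdevRef.
 * With bdref_key == "file", all "file.*" options are split off into the
 * child's QDict, so an options dictionary such as
 *
 *     driver=qcow2, file.driver=file, file.filename=/tmp/test.qcow2
 *
 * leads to a call equivalent to
 *
 *     BlockDriverState *file = NULL;
 *     ret = bdrv_open_image(&file, NULL, options, "file",
 *                           bdrv_inherited_flags(flags), true, &local_err);
 *
 * which is exactly how bdrv_open() opens bs->file further below.
 */
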
1377int bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)
1378{
1379 /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
1380 char *tmp_filename = g_malloc0(PATH_MAX + 1);
1381 int64_t total_size;
1382 QemuOpts *opts = NULL;
1383 QDict *snapshot_options;
1384 BlockDriverState *bs_snapshot;
1385 Error *local_err;
1386 int ret;
1387
1388 /* if snapshot, we create a temporary backing file and open it
1389 instead of opening 'filename' directly */
1390
1391 /* Get the required size from the image */
1392 total_size = bdrv_getlength(bs);
1393 if (total_size < 0) {
1394 ret = total_size;
1395 error_setg_errno(errp, -total_size, "Could not get image size");
1396 goto out;
1397 }
1398
1399 /* Create the temporary image */
1400 ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
1401 if (ret < 0) {
1402 error_setg_errno(errp, -ret, "Could not get temporary filename");
1403 goto out;
1404 }
1405
1406 opts = qemu_opts_create(bdrv_qcow2.create_opts, NULL, 0,
1407 &error_abort);
1408 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_size, &error_abort);
1409 ret = bdrv_create(&bdrv_qcow2, tmp_filename, opts, &local_err);
1410 qemu_opts_del(opts);
1411 if (ret < 0) {
1412 error_setg_errno(errp, -ret, "Could not create temporary overlay "
1413 "'%s': %s", tmp_filename,
1414 error_get_pretty(local_err));
1415 error_free(local_err);
1416 goto out;
1417 }
1418
1419 /* Prepare a new options QDict for the temporary file */
1420 snapshot_options = qdict_new();
1421 qdict_put(snapshot_options, "file.driver",
1422 qstring_from_str("file"));
1423 qdict_put(snapshot_options, "file.filename",
1424 qstring_from_str(tmp_filename));
1425
1426 bs_snapshot = bdrv_new();
1427
1428 ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
1429 flags, &bdrv_qcow2, &local_err);
1430 if (ret < 0) {
1431 error_propagate(errp, local_err);
1432 goto out;
1433 }
1434
1435 bdrv_append(bs_snapshot, bs);
1436
1437out:
1438 g_free(tmp_filename);
1439 return ret;
1440}
1441
1442/*
1443 * Opens a disk image (raw, qcow2, vmdk, ...)
1444 *
1445 * options is a QDict of options to pass to the block drivers, or NULL for an
1446 * empty set of options. The reference to the QDict belongs to the block layer
1447 * after the call (even on failure), so if the caller intends to reuse the
1448 * dictionary, it needs to use QINCREF() before calling bdrv_open.
1449 *
1450 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there.
1451 * If it is not NULL, the referenced BDS will be reused.
1452 *
1453 * The reference parameter may be used to specify an existing block device which
1454 * should be opened. If specified, neither options nor a filename may be given,
1455 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
1456 */
1457int bdrv_open(BlockDriverState **pbs, const char *filename,
1458 const char *reference, QDict *options, int flags,
1459 BlockDriver *drv, Error **errp)
1460{
1461 int ret;
1462 BlockDriverState *file = NULL, *bs;
1463 const char *drvname;
1464 Error *local_err = NULL;
1465 int snapshot_flags = 0;
1466
1467 assert(pbs);
1468
1469 if (reference) {
1470 bool options_non_empty = options ? qdict_size(options) : false;
1471 QDECREF(options);
1472
1473 if (*pbs) {
1474 error_setg(errp, "Cannot reuse an existing BDS when referencing "
1475 "another block device");
1476 return -EINVAL;
1477 }
1478
1479 if (filename || options_non_empty) {
1480 error_setg(errp, "Cannot reference an existing block device with "
1481 "additional options or a new filename");
1482 return -EINVAL;
1483 }
1484
1485 bs = bdrv_lookup_bs(reference, reference, errp);
1486 if (!bs) {
1487 return -ENODEV;
1488 }
1489 bdrv_ref(bs);
1490 *pbs = bs;
1491 return 0;
1492 }
1493
1494 if (*pbs) {
1495 bs = *pbs;
1496 } else {
1497 bs = bdrv_new();
1498 }
1499
1500 /* NULL means an empty set of options */
1501 if (options == NULL) {
1502 options = qdict_new();
1503 }
1504
1505 ret = bdrv_fill_options(&options, &filename, flags, drv, &local_err);
1506 if (local_err) {
1507 goto fail;
1508 }
1509
1510 /* Find the right image format driver */
1511 drv = NULL;
1512 drvname = qdict_get_try_str(options, "driver");
1513 if (drvname) {
1514 drv = bdrv_find_format(drvname);
1515 qdict_del(options, "driver");
1516 if (!drv) {
1517 error_setg(errp, "Unknown driver: '%s'", drvname);
1518 ret = -EINVAL;
1519 goto fail;
1520 }
1521 }
1522
1523 assert(drvname || !(flags & BDRV_O_PROTOCOL));
1524 if (drv && !drv->bdrv_file_open) {
1525 /* If the user explicitly wants a format driver here, we'll need to add
1526 * another layer for the protocol in bs->file */
1527 flags &= ~BDRV_O_PROTOCOL;
1528 }
1529
1530 bs->options = options;
1531 options = qdict_clone_shallow(options);
1532
1533 /* Open image file without format layer */
1534 if ((flags & BDRV_O_PROTOCOL) == 0) {
1535 if (flags & BDRV_O_RDWR) {
1536 flags |= BDRV_O_ALLOW_RDWR;
1537 }
1538 if (flags & BDRV_O_SNAPSHOT) {
1539 snapshot_flags = bdrv_temp_snapshot_flags(flags);
1540 flags = bdrv_backing_flags(flags);
1541 }
1542
1543 assert(file == NULL);
1544 ret = bdrv_open_image(&file, filename, options, "file",
1545 bdrv_inherited_flags(flags),
1546 true, &local_err);
1547 if (ret < 0) {
1548 goto fail;
1549 }
1550 }
1551
1552 /* Image format probing */
1553 bs->probed = !drv;
1554 if (!drv && file) {
1555 ret = find_image_format(file, filename, &drv, &local_err);
1556 if (ret < 0) {
1557 goto fail;
1558 }
1559 } else if (!drv) {
1560 error_setg(errp, "Must specify either driver or file");
1561 ret = -EINVAL;
1562 goto fail;
1563 }
1564
1565 /* Open the image */
1566 ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
1567 if (ret < 0) {
1568 goto fail;
1569 }
1570
1571 if (file && (bs->file != file)) {
1572 bdrv_unref(file);
1573 file = NULL;
1574 }
1575
1576 /* If there is a backing file, use it */
1577 if ((flags & BDRV_O_NO_BACKING) == 0) {
1578 QDict *backing_options;
1579
1580 qdict_extract_subqdict(options, &backing_options, "backing.");
1581 ret = bdrv_open_backing_file(bs, backing_options, &local_err);
1582 if (ret < 0) {
1583 goto close_and_fail;
1584 }
1585 }
1586
1587 bdrv_refresh_filename(bs);
1588
1589 /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
1590 * temporary snapshot afterwards. */
1591 if (snapshot_flags) {
1592 ret = bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err);
1593 if (local_err) {
1594 goto close_and_fail;
1595 }
1596 }
1597
1598 /* Check if any unknown options were used */
1599 if (options && (qdict_size(options) != 0)) {
1600 const QDictEntry *entry = qdict_first(options);
1601 if (flags & BDRV_O_PROTOCOL) {
1602 error_setg(errp, "Block protocol '%s' doesn't support the option "
1603 "'%s'", drv->format_name, entry->key);
1604 } else {
1605 error_setg(errp, "Block format '%s' used by device '%s' doesn't "
1606 "support the option '%s'", drv->format_name,
1607 bdrv_get_device_name(bs), entry->key);
1608 }
1609
1610 ret = -EINVAL;
1611 goto close_and_fail;
1612 }
1613
1614 if (!bdrv_key_required(bs)) {
1615 if (bs->blk) {
1616 blk_dev_change_media_cb(bs->blk, true);
1617 }
1618 } else if (!runstate_check(RUN_STATE_PRELAUNCH)
1619 && !runstate_check(RUN_STATE_INMIGRATE)
1620 && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
1621 error_setg(errp,
1622 "Guest must be stopped for opening of encrypted image");
1623 ret = -EBUSY;
1624 goto close_and_fail;
1625 }
1626
1627 QDECREF(options);
1628 *pbs = bs;
1629 return 0;
1630
1631fail:
1632 if (file != NULL) {
1633 bdrv_unref(file);
1634 }
1635 QDECREF(bs->options);
1636 QDECREF(options);
1637 bs->options = NULL;
1638 if (!*pbs) {
1639 /* If *pbs is NULL, a new BDS has been created in this function and
1640 needs to be freed now. Otherwise, it does not need to be closed,
1641 since it has not really been opened yet. */
1642 bdrv_unref(bs);
1643 }
1644 if (local_err) {
1645 error_propagate(errp, local_err);
1646 }
1647 return ret;
1648
1649close_and_fail:
1650 /* See fail path, but now the BDS has to be always closed */
1651 if (*pbs) {
1652 bdrv_close(bs);
1653 } else {
1654 bdrv_unref(bs);
1655 }
1656 QDECREF(options);
1657 if (local_err) {
1658 error_propagate(errp, local_err);
1659 }
1660 return ret;
1661}
1662
1663typedef struct BlockReopenQueueEntry {
1664 bool prepared;
1665 BDRVReopenState state;
1666 QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
1667} BlockReopenQueueEntry;
1668
1669/*
1670 * Adds a BlockDriverState to a simple queue for an atomic, transactional
1671 * reopen of multiple devices.
1672 *
1673 * bs_queue can either be an existing BlockReopenQueue, on which QSIMPLEQ_INIT
1674 * has already been performed, or it may be NULL, in which case a new
1675 * BlockReopenQueue will be created and initialized. It should then be
1676 * passed back in for subsequent calls that are intended to be of the same
1677 * atomic 'set'.
1678 *
1679 * bs is the BlockDriverState to add to the reopen queue.
1680 *
1681 * flags contains the open flags for the associated bs
1682 *
1683 * returns a pointer to bs_queue, which is either the newly allocated
1684 * bs_queue, or the existing bs_queue being used.
1685 *
1686 */
1687BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
1688 BlockDriverState *bs, int flags)
1689{
1690 assert(bs != NULL);
1691
1692 BlockReopenQueueEntry *bs_entry;
1693 if (bs_queue == NULL) {
1694 bs_queue = g_new0(BlockReopenQueue, 1);
1695 QSIMPLEQ_INIT(bs_queue);
1696 }
1697
1698 /* bdrv_open() masks this flag out */
1699 flags &= ~BDRV_O_PROTOCOL;
1700
1701 if (bs->file) {
1702 bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags));
1703 }
1704
1705 bs_entry = g_new0(BlockReopenQueueEntry, 1);
1706 QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);
1707
1708 bs_entry->state.bs = bs;
1709 bs_entry->state.flags = flags;
1710
1711 return bs_queue;
1712}
1713
1714/*
1715 * Reopen multiple BlockDriverStates atomically & transactionally.
1716 *
1717 * The queue passed in (bs_queue) must have been built up previously
1718 * via bdrv_reopen_queue().
1719 *
1720 * Reopens all BDS specified in the queue, with the appropriate
1721 * flags. All devices are prepared for reopen, and failure of any
1722 * device will cause all device changes to be abandoned, and intermediate
1723 * data cleaned up.
1724 *
1725 * If all devices prepare successfully, then the changes are committed
1726 * to all devices.
1727 *
1728 */
1729int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
1730{
1731 int ret = -1;
1732 BlockReopenQueueEntry *bs_entry, *next;
1733 Error *local_err = NULL;
1734
1735 assert(bs_queue != NULL);
1736
1737 bdrv_drain_all();
1738
1739 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1740 if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
1741 error_propagate(errp, local_err);
1742 goto cleanup;
1743 }
1744 bs_entry->prepared = true;
1745 }
1746
1747 /* If we reach this point, we have success and just need to apply the
1748 * changes
1749 */
1750 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1751 bdrv_reopen_commit(&bs_entry->state);
1752 }
1753
1754 ret = 0;
1755
1756cleanup:
1757 QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
1758 if (ret && bs_entry->prepared) {
1759 bdrv_reopen_abort(&bs_entry->state);
1760 }
1761 g_free(bs_entry);
1762 }
1763 g_free(bs_queue);
1764 return ret;
1765}
1766
1767
1768/* Reopen a single BlockDriverState with the specified flags. */
1769int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
1770{
1771 int ret = -1;
1772 Error *local_err = NULL;
1773 BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);
1774
1775 ret = bdrv_reopen_multiple(queue, &local_err);
1776 if (local_err != NULL) {
1777 error_propagate(errp, local_err);
1778 }
1779 return ret;
1780}
1781
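/*
 * Example (illustrative sketch): reopening several devices as one
 * transaction by building a queue first, as described above.  bs_a and
 * bs_b stand for two already-open BlockDriverStates.
 *
 *     Error *local_err = NULL;
 *     BlockReopenQueue *queue = NULL;
 *
 *     queue = bdrv_reopen_queue(queue, bs_a, bs_a->open_flags & ~BDRV_O_RDWR);
 *     queue = bdrv_reopen_queue(queue, bs_b, bs_b->open_flags & ~BDRV_O_RDWR);
 *
 *     // prepares every entry, then either commits all or aborts all;
 *     // the queue itself is freed by bdrv_reopen_multiple()
 *     if (bdrv_reopen_multiple(queue, &local_err) < 0) {
 *         error_report("%s", error_get_pretty(local_err));
 *         error_free(local_err);
 *     }
 */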
1782
1783/*
1784 * Prepares a BlockDriverState for reopen. All changes are staged in the
1785 * 'opaque' field of the BDRVReopenState, which is used and allocated by
1786 * the block driver's .bdrv_reopen_prepare() callback.
1787 *
1788 * bs is the BlockDriverState to reopen
1789 * flags are the new open flags
1790 * queue is the reopen queue
1791 *
1792 * Returns 0 on success, non-zero on error. On error errp will be set
1793 * as well.
1794 *
1795 * On failure, bdrv_reopen_abort() will be called to clean up any data.
1796 * It is the responsibility of the caller to then call the abort() or
1797 * commit() for any other BDS that have been left in a prepare() state
1798 *
1799 */
1800int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
1801 Error **errp)
1802{
1803 int ret = -1;
1804 Error *local_err = NULL;
1805 BlockDriver *drv;
1806
1807 assert(reopen_state != NULL);
1808 assert(reopen_state->bs->drv != NULL);
1809 drv = reopen_state->bs->drv;
1810
1811 /* if we are to stay read-only, do not allow permission change
1812 * to r/w */
1813 if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
1814 reopen_state->flags & BDRV_O_RDWR) {
1815 error_set(errp, QERR_DEVICE_IS_READ_ONLY,
1816 bdrv_get_device_name(reopen_state->bs));
1817 goto error;
1818 }
1819
1820
1821 ret = bdrv_flush(reopen_state->bs);
1822 if (ret) {
1823 error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
1824 strerror(-ret));
1825 goto error;
1826 }
1827
1828 if (drv->bdrv_reopen_prepare) {
1829 ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
1830 if (ret) {
1831 if (local_err != NULL) {
1832 error_propagate(errp, local_err);
1833 } else {
1834 error_setg(errp, "failed while preparing to reopen image '%s'",
1835 reopen_state->bs->filename);
1836 }
1837 goto error;
1838 }
1839 } else {
1840 /* It is currently mandatory to have a bdrv_reopen_prepare()
1841 * handler for each supported drv. */
1842 error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
1843 drv->format_name, bdrv_get_device_name(reopen_state->bs),
1844 "reopening of file");
1845 ret = -1;
1846 goto error;
1847 }
1848
1849 ret = 0;
1850
1851error:
1852 return ret;
1853}
1854
1855/*
1856 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
1857 * makes them final by swapping the staging BlockDriverState contents into
1858 * the active BlockDriverState contents.
1859 */
1860void bdrv_reopen_commit(BDRVReopenState *reopen_state)
1861{
1862 BlockDriver *drv;
1863
1864 assert(reopen_state != NULL);
1865 drv = reopen_state->bs->drv;
1866 assert(drv != NULL);
1867
1868 /* If there are any driver level actions to take */
1869 if (drv->bdrv_reopen_commit) {
1870 drv->bdrv_reopen_commit(reopen_state);
1871 }
1872
1873 /* set BDS specific flags now */
1874 reopen_state->bs->open_flags = reopen_state->flags;
1875 reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
1876 BDRV_O_CACHE_WB);
1877 reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
1878
1879 bdrv_refresh_limits(reopen_state->bs, NULL);
1880}
1881
1882/*
1883 * Abort the reopen, and delete and free the staged changes in
1884 * reopen_state
1885 */
1886void bdrv_reopen_abort(BDRVReopenState *reopen_state)
1887{
1888 BlockDriver *drv;
1889
1890 assert(reopen_state != NULL);
1891 drv = reopen_state->bs->drv;
1892 assert(drv != NULL);
1893
1894 if (drv->bdrv_reopen_abort) {
1895 drv->bdrv_reopen_abort(reopen_state);
1896 }
1897}
1898
1899
1900void bdrv_close(BlockDriverState *bs)
1901{
1902 BdrvAioNotifier *ban, *ban_next;
1903
1904 if (bs->job) {
1905 block_job_cancel_sync(bs->job);
1906 }
1907 bdrv_drain_all(); /* complete I/O */
1908 bdrv_flush(bs);
1909 bdrv_drain_all(); /* in case flush left pending I/O */
1910 notifier_list_notify(&bs->close_notifiers, bs);
1911
1912 if (bs->drv) {
1913 if (bs->backing_hd) {
1914 BlockDriverState *backing_hd = bs->backing_hd;
1915 bdrv_set_backing_hd(bs, NULL);
1916 bdrv_unref(backing_hd);
1917 }
1918 bs->drv->bdrv_close(bs);
1919 g_free(bs->opaque);
1920 bs->opaque = NULL;
1921 bs->drv = NULL;
1922 bs->copy_on_read = 0;
1923 bs->backing_file[0] = '\0';
1924 bs->backing_format[0] = '\0';
1925 bs->total_sectors = 0;
1926 bs->encrypted = 0;
1927 bs->valid_key = 0;
1928 bs->sg = 0;
1929 bs->zero_beyond_eof = false;
1930 QDECREF(bs->options);
1931 bs->options = NULL;
1932 QDECREF(bs->full_open_options);
1933 bs->full_open_options = NULL;
1934
1935 if (bs->file != NULL) {
1936 bdrv_unref(bs->file);
1937 bs->file = NULL;
1938 }
1939 }
1940
1941 if (bs->blk) {
1942 blk_dev_change_media_cb(bs->blk, false);
1943 }
1944
1945    /* throttling disk I/O limits */
1946 if (bs->io_limits_enabled) {
1947 bdrv_io_limits_disable(bs);
1948 }
1949
1950 QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) {
1951 g_free(ban);
1952 }
1953 QLIST_INIT(&bs->aio_notifiers);
1954}
1955
1956void bdrv_close_all(void)
1957{
1958 BlockDriverState *bs;
1959
1960 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
1961 AioContext *aio_context = bdrv_get_aio_context(bs);
1962
1963 aio_context_acquire(aio_context);
1964 bdrv_close(bs);
1965 aio_context_release(aio_context);
1966 }
1967}
1968
1969/* Check if any requests are in-flight (including throttled requests) */
1970static bool bdrv_requests_pending(BlockDriverState *bs)
1971{
1972 if (!QLIST_EMPTY(&bs->tracked_requests)) {
1973 return true;
1974 }
1975 if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
1976 return true;
1977 }
1978 if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
1979 return true;
1980 }
1981 if (bs->file && bdrv_requests_pending(bs->file)) {
1982 return true;
1983 }
1984 if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
1985 return true;
1986 }
1987 return false;
1988}
1989
1990static bool bdrv_drain_one(BlockDriverState *bs)
1991{
1992 bool bs_busy;
1993
1994 bdrv_flush_io_queue(bs);
1995 bdrv_start_throttled_reqs(bs);
1996 bs_busy = bdrv_requests_pending(bs);
1997 bs_busy |= aio_poll(bdrv_get_aio_context(bs), bs_busy);
1998 return bs_busy;
1999}
2000
2001/*
2002 * Wait for pending requests to complete on a single BlockDriverState subtree
2003 *
2004 * See the warning in bdrv_drain_all(). This function can only be called if
2005 * you are sure nothing can generate I/O because you have op blockers
2006 * installed.
2007 *
2008 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
2009 * AioContext.
2010 */
2011void bdrv_drain(BlockDriverState *bs)
2012{
2013 while (bdrv_drain_one(bs)) {
2014 /* Keep iterating */
2015 }
2016}
2017
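/*
 * Example (illustrative sketch): as the comment above notes, bdrv_drain()
 * expects the caller to hold the BlockDriverState's AioContext, unlike
 * bdrv_drain_all() which acquires each context itself:
 *
 *     AioContext *ctx = bdrv_get_aio_context(bs);
 *
 *     aio_context_acquire(ctx);
 *     bdrv_drain(bs);
 *     aio_context_release(ctx);
 */
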
2018/*
2019 * Wait for pending requests to complete across all BlockDriverStates
2020 *
2021 * This function does not flush data to disk, use bdrv_flush_all() for that
2022 * after calling this function.
2023 *
2024 * Note that completion of an asynchronous I/O operation can trigger any
2025 * number of other I/O operations on other devices---for example a coroutine
2026 * can be arbitrarily complex and a constant flow of I/O can come until the
2027 * coroutine is complete. Because of this, it is not possible to have a
2028 * function to drain a single device's I/O queue.
2029 */
2030void bdrv_drain_all(void)
2031{
2032 /* Always run first iteration so any pending completion BHs run */
2033 bool busy = true;
2034 BlockDriverState *bs;
2035
2036 while (busy) {
2037 busy = false;
2038
2039 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
2040 AioContext *aio_context = bdrv_get_aio_context(bs);
2041
2042 aio_context_acquire(aio_context);
2043 busy |= bdrv_drain_one(bs);
2044 aio_context_release(aio_context);
2045 }
2046 }
2047}
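
/*
 * Illustrative example (not part of the original block.c): as noted above,
 * bdrv_drain_all() does not flush data, so a caller that wants everything
 * stable on disk pairs it with bdrv_flush_all():
 *
 *     bdrv_drain_all();               // wait for all in-flight requests
 *     int ret = bdrv_flush_all();     // then flush every device
 *     if (ret < 0) {
 *         // at least one flush failed; ret is the first error
 *     }
 */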
2048
2049/* Make a BlockDriverState anonymous by removing it from the bdrv_states and
2050 * graph_bdrv_states lists.
2051 * Also, NUL-terminate node_name so a second removal becomes a no-op. */
2052void bdrv_make_anon(BlockDriverState *bs)
2053{
2054 /*
2055 * Take care to remove bs from bdrv_states only when it's actually
2056 * in it. Note that bs->device_list.tqe_prev is initially null,
2057 * and gets set to non-null by QTAILQ_INSERT_TAIL(). Establish
2058 * the useful invariant "bs in bdrv_states iff bs->tqe_prev" by
2059 * resetting it to null on remove.
2060 */
2061 if (bs->device_list.tqe_prev) {
2062 QTAILQ_REMOVE(&bdrv_states, bs, device_list);
2063 bs->device_list.tqe_prev = NULL;
2064 }
2065 if (bs->node_name[0] != '\0') {
2066 QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
2067 }
2068 bs->node_name[0] = '\0';
2069}
2070
2071static void bdrv_rebind(BlockDriverState *bs)
2072{
2073 if (bs->drv && bs->drv->bdrv_rebind) {
2074 bs->drv->bdrv_rebind(bs);
2075 }
2076}
2077
2078static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
2079 BlockDriverState *bs_src)
2080{
2081 /* move some fields that need to stay attached to the device */
2082
2083 /* dev info */
2084 bs_dest->guest_block_size = bs_src->guest_block_size;
2085 bs_dest->copy_on_read = bs_src->copy_on_read;
2086
2087 bs_dest->enable_write_cache = bs_src->enable_write_cache;
2088
2089 /* i/o throttled req */
2090 memcpy(&bs_dest->throttle_state,
2091 &bs_src->throttle_state,
2092 sizeof(ThrottleState));
2093 bs_dest->throttled_reqs[0] = bs_src->throttled_reqs[0];
2094 bs_dest->throttled_reqs[1] = bs_src->throttled_reqs[1];
2095 bs_dest->io_limits_enabled = bs_src->io_limits_enabled;
2096
2097 /* r/w error */
2098 bs_dest->on_read_error = bs_src->on_read_error;
2099 bs_dest->on_write_error = bs_src->on_write_error;
2100
2101 /* i/o status */
2102 bs_dest->iostatus_enabled = bs_src->iostatus_enabled;
2103 bs_dest->iostatus = bs_src->iostatus;
2104
2105 /* dirty bitmap */
2106 bs_dest->dirty_bitmaps = bs_src->dirty_bitmaps;
2107
2108 /* reference count */
2109 bs_dest->refcnt = bs_src->refcnt;
2110
2111 /* job */
2112 bs_dest->job = bs_src->job;
2113
2114 /* keep the same entry in bdrv_states */
2115 bs_dest->device_list = bs_src->device_list;
2116 bs_dest->blk = bs_src->blk;
2117
2118 memcpy(bs_dest->op_blockers, bs_src->op_blockers,
2119 sizeof(bs_dest->op_blockers));
2120}
2121
2122/*
2123 * Swap bs contents for two image chains while they are live,
2124 * while keeping required fields on the BlockDriverState that is
2125 * actually attached to a device.
2126 *
2127 * This will modify the BlockDriverState fields, and swap contents
2128 * between bs_new and bs_old. Both bs_new and bs_old are modified.
2129 *
2130 * bs_new must not be attached to a BlockBackend.
2131 *
2132 * This function does not create any image files.
2133 */
2134void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
2135{
2136 BlockDriverState tmp;
2137
2138 /* The code needs to swap the node_name, but simply swapping node_list won't
2139 * work; so first remove the nodes from the graph list, do the swap, then
2140 * insert them back if needed.
2141 */
2142 if (bs_new->node_name[0] != '\0') {
2143 QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list);
2144 }
2145 if (bs_old->node_name[0] != '\0') {
2146 QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list);
2147 }
2148
2149 /* bs_new must be unattached and shouldn't have anything fancy enabled */
2150 assert(!bs_new->blk);
2151 assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
2152 assert(bs_new->job == NULL);
2153 assert(bs_new->io_limits_enabled == false);
2154 assert(!throttle_have_timer(&bs_new->throttle_state));
2155
2156 tmp = *bs_new;
2157 *bs_new = *bs_old;
2158 *bs_old = tmp;
2159
2160 /* there are some fields that should not be swapped, move them back */
2161 bdrv_move_feature_fields(&tmp, bs_old);
2162 bdrv_move_feature_fields(bs_old, bs_new);
2163 bdrv_move_feature_fields(bs_new, &tmp);
2164
2165 /* bs_new must remain unattached */
2166 assert(!bs_new->blk);
2167
2168 /* Check a few fields that should remain attached to the device */
2169 assert(bs_new->job == NULL);
2170 assert(bs_new->io_limits_enabled == false);
2171 assert(!throttle_have_timer(&bs_new->throttle_state));
2172
2173 /* insert the nodes back into the graph node list if needed */
2174 if (bs_new->node_name[0] != '\0') {
2175 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list);
2176 }
2177 if (bs_old->node_name[0] != '\0') {
2178 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list);
2179 }
2180
2181 bdrv_rebind(bs_new);
2182 bdrv_rebind(bs_old);
2183}
2184
2185/*
2186 * Add new bs contents at the top of an image chain while the chain is
2187 * live, while keeping required fields on the top layer.
2188 *
2189 * This will modify the BlockDriverState fields, and swap contents
2190 * between bs_new and bs_top. Both bs_new and bs_top are modified.
2191 *
2192 * bs_new must not be attached to a BlockBackend.
2193 *
2194 * This function does not create any image files.
2195 */
2196void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
2197{
2198 bdrv_swap(bs_new, bs_top);
2199
2200 /* After the swap, bs_new holds what used to be bs_top's contents, so
2201 * make it the new backing file of bs_top. */
2202 bdrv_set_backing_hd(bs_top, bs_new);
2203}
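
/*
 * Illustrative example (not part of the original block.c): inserting a
 * freshly opened, unattached overlay on top of the active image.  The
 * variable names are hypothetical.
 *
 *     // bs_overlay must not be attached to a BlockBackend (see above)
 *     bdrv_append(bs_overlay, bs_active);
 *     // bs_active now presents the overlay's contents, backed by its
 *     // previous contents (which ended up in bs_overlay)
 */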
2204
2205static void bdrv_delete(BlockDriverState *bs)
2206{
2207 assert(!bs->job);
2208 assert(bdrv_op_blocker_is_empty(bs));
2209 assert(!bs->refcnt);
2210 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
2211
2212 bdrv_close(bs);
2213
2214 /* remove from list, if necessary */
2215 bdrv_make_anon(bs);
2216
2217 g_free(bs);
2218}
2219
2220/*
2221 * Run consistency checks on an image
2222 *
2223 * Returns 0 if the check could be completed (it doesn't mean that the image is
2224 * free of errors) or -errno when an internal error occurred. The results of the
2225 * check are stored in res.
2226 */
2227int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
2228{
2229 if (bs->drv == NULL) {
2230 return -ENOMEDIUM;
2231 }
2232 if (bs->drv->bdrv_check == NULL) {
2233 return -ENOTSUP;
2234 }
2235
2236 memset(res, 0, sizeof(*res));
2237 return bs->drv->bdrv_check(bs, res, fix);
2238}
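
/*
 * Illustrative example (not part of the original block.c): running a
 * report-only check and distinguishing "the check ran" from "the image is
 * clean".  The result field names follow block.h and should be treated as
 * an assumption here.
 *
 *     BdrvCheckResult res;
 *     int ret = bdrv_check(bs, &res, 0);    // 0: report only, fix nothing
 *     if (ret < 0) {
 *         return ret;                       // the check itself failed
 *     }
 *     // ret == 0 only means the check completed; inspect the results
 *     return (res.corruptions || res.leaks) ? -EIO : 0;
 */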
2239
2240#define COMMIT_BUF_SECTORS 2048
2241
2242/* commit COW file into the raw image */
2243int bdrv_commit(BlockDriverState *bs)
2244{
2245 BlockDriver *drv = bs->drv;
2246 int64_t sector, total_sectors, length, backing_length;
2247 int n, ro, open_flags;
2248 int ret = 0;
2249 uint8_t *buf = NULL;
2250
2251 if (!drv)
2252 return -ENOMEDIUM;
2253
2254 if (!bs->backing_hd) {
2255 return -ENOTSUP;
2256 }
2257
2258 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, NULL) ||
2259 bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT_TARGET, NULL)) {
2260 return -EBUSY;
2261 }
2262
2263 ro = bs->backing_hd->read_only;
2264 open_flags = bs->backing_hd->open_flags;
2265
2266 if (ro) {
2267 if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
2268 return -EACCES;
2269 }
2270 }
2271
2272 length = bdrv_getlength(bs);
2273 if (length < 0) {
2274 ret = length;
2275 goto ro_cleanup;
2276 }
2277
2278 backing_length = bdrv_getlength(bs->backing_hd);
2279 if (backing_length < 0) {
2280 ret = backing_length;
2281 goto ro_cleanup;
2282 }
2283
2284 /* If our top snapshot is larger than the backing file image,
2285 * grow the backing file image if possible. If not possible,
2286 * we must return an error */
2287 if (length > backing_length) {
2288 ret = bdrv_truncate(bs->backing_hd, length);
2289 if (ret < 0) {
2290 goto ro_cleanup;
2291 }
2292 }
2293
2294 total_sectors = length >> BDRV_SECTOR_BITS;
2295
2296 /* qemu_try_blockalign() for bs will choose an alignment that works for
2297 * bs->backing_hd as well, so no need to compare the alignment manually. */
2298 buf = qemu_try_blockalign(bs, COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
2299 if (buf == NULL) {
2300 ret = -ENOMEM;
2301 goto ro_cleanup;
2302 }
2303
2304 for (sector = 0; sector < total_sectors; sector += n) {
2305 ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
2306 if (ret < 0) {
2307 goto ro_cleanup;
2308 }
2309 if (ret) {
2310 ret = bdrv_read(bs, sector, buf, n);
2311 if (ret < 0) {
2312 goto ro_cleanup;
2313 }
2314
2315 ret = bdrv_write(bs->backing_hd, sector, buf, n);
2316 if (ret < 0) {
2317 goto ro_cleanup;
2318 }
2319 }
2320 }
2321
2322 if (drv->bdrv_make_empty) {
2323 ret = drv->bdrv_make_empty(bs);
2324 if (ret < 0) {
2325 goto ro_cleanup;
2326 }
2327 bdrv_flush(bs);
2328 }
2329
2330 /*
2331 * Make sure all data we wrote to the backing device is actually
2332 * stable on disk.
2333 */
2334 if (bs->backing_hd) {
2335 bdrv_flush(bs->backing_hd);
2336 }
2337
2338 ret = 0;
2339ro_cleanup:
2340 qemu_vfree(buf);
2341
2342 if (ro) {
2343 /* ignoring error return here */
2344 bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
2345 }
2346
2347 return ret;
2348}
2349
2350int bdrv_commit_all(void)
2351{
2352 BlockDriverState *bs;
2353
2354 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
2355 AioContext *aio_context = bdrv_get_aio_context(bs);
2356
2357 aio_context_acquire(aio_context);
2358 if (bs->drv && bs->backing_hd) {
2359 int ret = bdrv_commit(bs);
2360 if (ret < 0) {
2361 aio_context_release(aio_context);
2362 return ret;
2363 }
2364 }
2365 aio_context_release(aio_context);
2366 }
2367 return 0;
2368}
2369
2370/**
2371 * Remove an active request from the tracked requests list
2372 *
2373 * This function should be called when a tracked request is completing.
2374 */
2375static void tracked_request_end(BdrvTrackedRequest *req)
2376{
2377 if (req->serialising) {
2378 req->bs->serialising_in_flight--;
2379 }
2380
2381 QLIST_REMOVE(req, list);
2382 qemu_co_queue_restart_all(&req->wait_queue);
2383}
2384
2385/**
2386 * Add an active request to the tracked requests list
2387 */
2388static void tracked_request_begin(BdrvTrackedRequest *req,
2389 BlockDriverState *bs,
2390 int64_t offset,
2391 unsigned int bytes, bool is_write)
2392{
2393 *req = (BdrvTrackedRequest){
2394 .bs = bs,
2395 .offset = offset,
2396 .bytes = bytes,
2397 .is_write = is_write,
2398 .co = qemu_coroutine_self(),
2399 .serialising = false,
2400 .overlap_offset = offset,
2401 .overlap_bytes = bytes,
2402 };
2403
2404 qemu_co_queue_init(&req->wait_queue);
2405
2406 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
2407}
2408
2409static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
2410{
2411 int64_t overlap_offset = req->offset & ~(align - 1);
2412 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
2413 - overlap_offset;
2414
2415 if (!req->serialising) {
2416 req->bs->serialising_in_flight++;
2417 req->serialising = true;
2418 }
2419
2420 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
2421 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
2422}
2423
2424/**
2425 * Round a region to cluster boundaries
2426 */
2427void bdrv_round_to_clusters(BlockDriverState *bs,
2428 int64_t sector_num, int nb_sectors,
2429 int64_t *cluster_sector_num,
2430 int *cluster_nb_sectors)
2431{
2432 BlockDriverInfo bdi;
2433
2434 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
2435 *cluster_sector_num = sector_num;
2436 *cluster_nb_sectors = nb_sectors;
2437 } else {
2438 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
2439 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
2440 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
2441 nb_sectors, c);
2442 }
2443}
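
/*
 * Worked example (added for illustration, not in the original source): with
 * a 64 KiB cluster size, c = 65536 / 512 = 128 sectors.  A request at
 * sector_num = 130 with nb_sectors = 4 is widened to
 * *cluster_sector_num = 128 and *cluster_nb_sectors = 128, i.e. the single
 * cluster that fully contains the request.
 */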
2444
2445static int bdrv_get_cluster_size(BlockDriverState *bs)
2446{
2447 BlockDriverInfo bdi;
2448 int ret;
2449
2450 ret = bdrv_get_info(bs, &bdi);
2451 if (ret < 0 || bdi.cluster_size == 0) {
2452 return bs->request_alignment;
2453 } else {
2454 return bdi.cluster_size;
2455 }
2456}
2457
2458static bool tracked_request_overlaps(BdrvTrackedRequest *req,
2459 int64_t offset, unsigned int bytes)
2460{
2461 /* aaaa bbbb */
2462 if (offset >= req->overlap_offset + req->overlap_bytes) {
2463 return false;
2464 }
2465 /* bbbb aaaa */
2466 if (req->overlap_offset >= offset + bytes) {
2467 return false;
2468 }
2469 return true;
2470}
2471
2472static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
2473{
2474 BlockDriverState *bs = self->bs;
2475 BdrvTrackedRequest *req;
2476 bool retry;
2477 bool waited = false;
2478
2479 if (!bs->serialising_in_flight) {
2480 return false;
2481 }
2482
2483 do {
2484 retry = false;
2485 QLIST_FOREACH(req, &bs->tracked_requests, list) {
2486 if (req == self || (!req->serialising && !self->serialising)) {
2487 continue;
2488 }
2489 if (tracked_request_overlaps(req, self->overlap_offset,
2490 self->overlap_bytes))
2491 {
2492 /* Hitting this means there was a reentrant request, for
2493 * example, a block driver issuing nested requests. This must
2494 * never happen since it means deadlock.
2495 */
2496 assert(qemu_coroutine_self() != req->co);
2497
2498 /* If the request is already (indirectly) waiting for us, or
2499 * will wait for us as soon as it wakes up, then just go on
2500 * (instead of producing a deadlock in the former case). */
2501 if (!req->waiting_for) {
2502 self->waiting_for = req;
2503 qemu_co_queue_wait(&req->wait_queue);
2504 self->waiting_for = NULL;
2505 retry = true;
2506 waited = true;
2507 break;
2508 }
2509 }
2510 }
2511 } while (retry);
2512
2513 return waited;
2514}
2515
2516/*
2517 * Return values:
2518 * 0 - success
2519 * -EINVAL - backing format specified, but no file
2520 * -ENOSPC - can't update the backing file because no space is left in the
2521 * image file header
2522 * -ENOTSUP - format driver doesn't support changing the backing file
2523 */
2524int bdrv_change_backing_file(BlockDriverState *bs,
2525 const char *backing_file, const char *backing_fmt)
2526{
2527 BlockDriver *drv = bs->drv;
2528 int ret;
2529
2530 /* Backing file format doesn't make sense without a backing file */
2531 if (backing_fmt && !backing_file) {
2532 return -EINVAL;
2533 }
2534
2535 if (drv->bdrv_change_backing_file != NULL) {
2536 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
2537 } else {
2538 ret = -ENOTSUP;
2539 }
2540
2541 if (ret == 0) {
2542 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
2543 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
2544 }
2545 return ret;
2546}
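
/*
 * Illustrative example (not part of the original block.c): rewriting an
 * image's backing file reference and mapping the documented error codes.
 * The file and format strings are only examples.
 *
 *     int ret = bdrv_change_backing_file(bs, "new-base.qcow2", "qcow2");
 *     if (ret == -ENOTSUP) {
 *         // format driver cannot rewrite its backing file reference
 *     } else if (ret == -ENOSPC) {
 *         // no room left in the image header for the new string
 *     }
 */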
2547
2548/*
2549 * Finds the image layer in the chain that has 'bs' as its backing file.
2550 *
2551 * active is the current topmost image.
2552 *
2553 * Returns NULL if bs is not found in active's image chain,
2554 * or if active == bs.
2555 *
2556 * Returns the bottommost base image if bs == NULL.
2557 */
2558BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
2559 BlockDriverState *bs)
2560{
2561 while (active && bs != active->backing_hd) {
2562 active = active->backing_hd;
2563 }
2564
2565 return active;
2566}
2567
2568/* Given a BDS, searches for the base layer. */
2569BlockDriverState *bdrv_find_base(BlockDriverState *bs)
2570{
2571 return bdrv_find_overlay(bs, NULL);
2572}
2573
2574typedef struct BlkIntermediateStates {
2575 BlockDriverState *bs;
2576 QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
2577} BlkIntermediateStates;
2578
2579
2580/*
2581 * Drops images above 'base' up to and including 'top', and sets the image
2582 * above 'top' to have base as its backing file.
2583 *
2584 * Requires that the overlay of 'top' is opened r/w, so that its backing file
2585 * information can be properly updated.
2586 *
2587 * E.g., this will convert the following chain:
2588 * bottom <- base <- intermediate <- top <- active
2589 *
2590 * to
2591 *
2592 * bottom <- base <- active
2593 *
2594 * It is allowed for bottom==base, in which case it converts:
2595 *
2596 * base <- intermediate <- top <- active
2597 *
2598 * to
2599 *
2600 * base <- active
2601 *
2602 * If backing_file_str is non-NULL, it will be used when modifying top's
2603 * overlay image metadata.
2604 *
2605 * Error conditions:
2606 * if active == top, that is considered an error
2607 *
2608 */
2609int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
2610 BlockDriverState *base, const char *backing_file_str)
2611{
2612 BlockDriverState *intermediate;
2613 BlockDriverState *base_bs = NULL;
2614 BlockDriverState *new_top_bs = NULL;
2615 BlkIntermediateStates *intermediate_state, *next;
2616 int ret = -EIO;
2617
2618 QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
2619 QSIMPLEQ_INIT(&states_to_delete);
2620
2621 if (!top->drv || !base->drv) {
2622 goto exit;
2623 }
2624
2625 new_top_bs = bdrv_find_overlay(active, top);
2626
2627 if (new_top_bs == NULL) {
2628 /* we could not find the image above 'top', this is an error */
2629 goto exit;
2630 }
2631
2632 /* special case of new_top_bs->backing_hd already pointing to base - nothing
2633 * to do, no intermediate images */
2634 if (new_top_bs->backing_hd == base) {
2635 ret = 0;
2636 goto exit;
2637 }
2638
2639 intermediate = top;
2640
2641 /* now we will go down through the list, and add each BDS we find
2642 * into our deletion queue, until we hit the 'base'
2643 */
2644 while (intermediate) {
2645 intermediate_state = g_new0(BlkIntermediateStates, 1);
2646 intermediate_state->bs = intermediate;
2647 QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);
2648
2649 if (intermediate->backing_hd == base) {
2650 base_bs = intermediate->backing_hd;
2651 break;
2652 }
2653 intermediate = intermediate->backing_hd;
2654 }
2655 if (base_bs == NULL) {
2656 /* Something went wrong: we did not end at the base. Safely
2657 * unravel everything and exit with an error. */
2658 goto exit;
2659 }
2660
2661 /* success - we can delete the intermediate states, and link top->base */
2662 backing_file_str = backing_file_str ? backing_file_str : base_bs->filename;
2663 ret = bdrv_change_backing_file(new_top_bs, backing_file_str,
2664 base_bs->drv ? base_bs->drv->format_name : "");
2665 if (ret) {
2666 goto exit;
2667 }
2668 bdrv_set_backing_hd(new_top_bs, base_bs);
2669
2670 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2671 /* so that bdrv_close() does not recursively close the chain */
2672 bdrv_set_backing_hd(intermediate_state->bs, NULL);
2673 bdrv_unref(intermediate_state->bs);
2674 }
2675 ret = 0;
2676
2677exit:
2678 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2679 g_free(intermediate_state);
2680 }
2681 return ret;
2682}
2683
2684
2685static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
2686 size_t size)
2687{
2688 if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
2689 return -EIO;
2690 }
2691
2692 if (!bdrv_is_inserted(bs)) {
2693 return -ENOMEDIUM;
2694 }
2695
2696 if (offset < 0) {
2697 return -EIO;
2698 }
2699
2700 return 0;
2701}
2702
2703static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
2704 int nb_sectors)
2705{
2706 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
2707 return -EIO;
2708 }
2709
2710 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
2711 nb_sectors * BDRV_SECTOR_SIZE);
2712}
2713
2714typedef struct RwCo {
2715 BlockDriverState *bs;
2716 int64_t offset;
2717 QEMUIOVector *qiov;
2718 bool is_write;
2719 int ret;
2720 BdrvRequestFlags flags;
2721} RwCo;
2722
2723static void coroutine_fn bdrv_rw_co_entry(void *opaque)
2724{
2725 RwCo *rwco = opaque;
2726
2727 if (!rwco->is_write) {
2728 rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
2729 rwco->qiov->size, rwco->qiov,
2730 rwco->flags);
2731 } else {
2732 rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
2733 rwco->qiov->size, rwco->qiov,
2734 rwco->flags);
2735 }
2736}
2737
2738/*
2739 * Process a vectored synchronous request using coroutines
2740 */
2741static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
2742 QEMUIOVector *qiov, bool is_write,
2743 BdrvRequestFlags flags)
2744{
2745 Coroutine *co;
2746 RwCo rwco = {
2747 .bs = bs,
2748 .offset = offset,
2749 .qiov = qiov,
2750 .is_write = is_write,
2751 .ret = NOT_DONE,
2752 .flags = flags,
2753 };
2754
2755 /**
2756 * In a synchronous call context, while the vcpu is blocked, the throttling
2757 * timers will not fire; so I/O throttling has to be disabled here if it
2758 * has been enabled.
2759 */
2760 if (bs->io_limits_enabled) {
2761 fprintf(stderr, "Disabling I/O throttling on '%s' due "
2762 "to synchronous I/O.\n", bdrv_get_device_name(bs));
2763 bdrv_io_limits_disable(bs);
2764 }
2765
2766 if (qemu_in_coroutine()) {
2767 /* Fast-path if already in coroutine context */
2768 bdrv_rw_co_entry(&rwco);
2769 } else {
2770 AioContext *aio_context = bdrv_get_aio_context(bs);
2771
2772 co = qemu_coroutine_create(bdrv_rw_co_entry);
2773 qemu_coroutine_enter(co, &rwco);
2774 while (rwco.ret == NOT_DONE) {
2775 aio_poll(aio_context, true);
2776 }
2777 }
2778 return rwco.ret;
2779}
2780
2781/*
2782 * Process a synchronous request using coroutines
2783 */
2784static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
2785 int nb_sectors, bool is_write, BdrvRequestFlags flags)
2786{
2787 QEMUIOVector qiov;
2788 struct iovec iov = {
2789 .iov_base = (void *)buf,
2790 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
2791 };
2792
2793 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
2794 return -EINVAL;
2795 }
2796
2797 qemu_iovec_init_external(&qiov, &iov, 1);
2798 return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
2799 &qiov, is_write, flags);
2800}
2801
2802/* return < 0 if error. See bdrv_write() for the return codes */
2803int bdrv_read(BlockDriverState *bs, int64_t sector_num,
2804 uint8_t *buf, int nb_sectors)
2805{
2806 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
2807}
2808
2809/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
2810int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
2811 uint8_t *buf, int nb_sectors)
2812{
2813 bool enabled;
2814 int ret;
2815
2816 enabled = bs->io_limits_enabled;
2817 bs->io_limits_enabled = false;
2818 ret = bdrv_read(bs, sector_num, buf, nb_sectors);
2819 bs->io_limits_enabled = enabled;
2820 return ret;
2821}
2822
2823/* Return < 0 if error. Important errors are:
2824 -EIO generic I/O error (may happen for all errors)
2825 -ENOMEDIUM No media inserted.
2826 -EINVAL Invalid sector number or nb_sectors
2827 -EACCES Trying to write a read-only device
2828*/
2829int bdrv_write(BlockDriverState *bs, int64_t sector_num,
2830 const uint8_t *buf, int nb_sectors)
2831{
2832 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
2833}
2834
2835int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
2836 int nb_sectors, BdrvRequestFlags flags)
2837{
2838 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
2839 BDRV_REQ_ZERO_WRITE | flags);
2840}
2841
2842/*
2843 * Completely zero out a block device with the help of bdrv_write_zeroes.
2844 * The operation is sped up by checking the block status and writing zeroes
2845 * only to sectors that do not already read back as zeroes. Optional
2846 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
2847 *
2848 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
2849 */
2850int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
2851{
2852 int64_t target_sectors, ret, nb_sectors, sector_num = 0;
2853 int n;
2854
2855 target_sectors = bdrv_nb_sectors(bs);
2856 if (target_sectors < 0) {
2857 return target_sectors;
2858 }
2859
2860 for (;;) {
2861 nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
2862 if (nb_sectors <= 0) {
2863 return 0;
2864 }
2865 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
2866 if (ret < 0) {
2867 error_report("error getting block status at sector %" PRId64 ": %s",
2868 sector_num, strerror(-ret));
2869 return ret;
2870 }
2871 if (ret & BDRV_BLOCK_ZERO) {
2872 sector_num += n;
2873 continue;
2874 }
2875 ret = bdrv_write_zeroes(bs, sector_num, n, flags);
2876 if (ret < 0) {
2877 error_report("error writing zeroes at sector %" PRId64 ": %s",
2878 sector_num, strerror(-ret));
2879 return ret;
2880 }
2881 sector_num += n;
2882 }
2883}
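
/*
 * Illustrative example (not part of the original block.c): zeroing a whole
 * device while allowing the driver to unmap instead of writing, via the
 * optional flag mentioned above.
 *
 *     int ret = bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         // see bdrv_write() for the error codes
 *     }
 */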
2884
2885int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
2886{
2887 QEMUIOVector qiov;
2888 struct iovec iov = {
2889 .iov_base = (void *)buf,
2890 .iov_len = bytes,
2891 };
2892 int ret;
2893
2894 if (bytes < 0) {
2895 return -EINVAL;
2896 }
2897
2898 qemu_iovec_init_external(&qiov, &iov, 1);
2899 ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
2900 if (ret < 0) {
2901 return ret;
2902 }
2903
2904 return bytes;
2905}
2906
2907int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
2908{
2909 int ret;
2910
2911 ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
2912 if (ret < 0) {
2913 return ret;
2914 }
2915
2916 return qiov->size;
2917}
2918
2919int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
2920 const void *buf, int bytes)
2921{
2922 QEMUIOVector qiov;
2923 struct iovec iov = {
2924 .iov_base = (void *) buf,
2925 .iov_len = bytes,
2926 };
2927
2928 if (bytes < 0) {
2929 return -EINVAL;
2930 }
2931
2932 qemu_iovec_init_external(&qiov, &iov, 1);
2933 return bdrv_pwritev(bs, offset, &qiov);
2934}
2935
2936/*
2937 * Writes to the file and ensures that no writes are reordered across this
2938 * request (acts as a barrier)
2939 *
2940 * Returns 0 on success, -errno in error cases.
2941 */
2942int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
2943 const void *buf, int count)
2944{
2945 int ret;
2946
2947 ret = bdrv_pwrite(bs, offset, buf, count);
2948 if (ret < 0) {
2949 return ret;
2950 }
2951
2952 /* No flush needed for cache modes that already do it */
2953 if (bs->enable_write_cache) {
2954 bdrv_flush(bs);
2955 }
2956
2957 return 0;
2958}
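
/*
 * Illustrative example (not part of the original block.c): updating a small
 * on-disk header with barrier semantics, so later metadata writes cannot be
 * reordered ahead of it.  The header struct is hypothetical.
 *
 *     ret = bdrv_pwrite_sync(bs, 0, &header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;     // write or flush failed
 *     }
 */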
2959
2960static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
2961 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
2962{
2963 /* Perform I/O through a temporary buffer so that users who scribble over
2964 * their read buffer while the operation is in progress do not end up
2965 * modifying the image file. This is critical for zero-copy guest I/O
2966 * where anything might happen inside guest memory.
2967 */
2968 void *bounce_buffer;
2969
2970 BlockDriver *drv = bs->drv;
2971 struct iovec iov;
2972 QEMUIOVector bounce_qiov;
2973 int64_t cluster_sector_num;
2974 int cluster_nb_sectors;
2975 size_t skip_bytes;
2976 int ret;
2977
2978 /* Cover the entire cluster so no additional backing file I/O is required
2979 * when allocating the cluster in the image file.
2980 */
2981 bdrv_round_to_clusters(bs, sector_num, nb_sectors,
2982 &cluster_sector_num, &cluster_nb_sectors);
2983
2984 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
2985 cluster_sector_num, cluster_nb_sectors);
2986
2987 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
2988 iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
2989 if (bounce_buffer == NULL) {
2990 ret = -ENOMEM;
2991 goto err;
2992 }
2993
2994 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
2995
2996 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
2997 &bounce_qiov);
2998 if (ret < 0) {
2999 goto err;
3000 }
3001
3002 if (drv->bdrv_co_write_zeroes &&
3003 buffer_is_zero(bounce_buffer, iov.iov_len)) {
3004 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
3005 cluster_nb_sectors, 0);
3006 } else {
3007 /* This does not change the data on the disk, so it is not necessary
3008 * to flush even in cache=writethrough mode.
3009 */
3010 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
3011 &bounce_qiov);
3012 }
3013
3014 if (ret < 0) {
3015 /* It might be okay to ignore write errors for guest requests. If this
3016 * is a deliberate copy-on-read then we don't want to ignore the error.
3017 * Simply report it in all cases.
3018 */
3019 goto err;
3020 }
3021
3022 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
3023 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
3024 nb_sectors * BDRV_SECTOR_SIZE);
3025
3026err:
3027 qemu_vfree(bounce_buffer);
3028 return ret;
3029}
3030
3031/*
3032 * Forwards an already correctly aligned request to the BlockDriver. This
3033 * handles copy on read and zeroing after EOF; any other features must be
3034 * implemented by the caller.
3035 */
3036static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
3037 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3038 int64_t align, QEMUIOVector *qiov, int flags)
3039{
3040 BlockDriver *drv = bs->drv;
3041 int ret;
3042
3043 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3044 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3045
3046 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3047 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3048 assert(!qiov || bytes == qiov->size);
3049
3050 /* Handle Copy on Read and associated serialisation */
3051 if (flags & BDRV_REQ_COPY_ON_READ) {
3052 /* If we touch the same cluster it counts as an overlap. This
3053 * guarantees that allocating writes will be serialized and not race
3054 * with each other for the same cluster. For example, in copy-on-read
3055 * it ensures that the CoR read and write operations are atomic and
3056 * guest writes cannot interleave between them. */
3057 mark_request_serialising(req, bdrv_get_cluster_size(bs));
3058 }
3059
3060 wait_serialising_requests(req);
3061
3062 if (flags & BDRV_REQ_COPY_ON_READ) {
3063 int pnum;
3064
3065 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
3066 if (ret < 0) {
3067 goto out;
3068 }
3069
3070 if (!ret || pnum != nb_sectors) {
3071 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
3072 goto out;
3073 }
3074 }
3075
3076 /* Forward the request to the BlockDriver */
3077 if (!bs->zero_beyond_eof) {
3078 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
3079 } else {
3080 /* Read zeros after EOF */
3081 int64_t total_sectors, max_nb_sectors;
3082
3083 total_sectors = bdrv_nb_sectors(bs);
3084 if (total_sectors < 0) {
3085 ret = total_sectors;
3086 goto out;
3087 }
3088
3089 max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
3090 align >> BDRV_SECTOR_BITS);
3091 if (nb_sectors < max_nb_sectors) {
3092 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
3093 } else if (max_nb_sectors > 0) {
3094 QEMUIOVector local_qiov;
3095
3096 qemu_iovec_init(&local_qiov, qiov->niov);
3097 qemu_iovec_concat(&local_qiov, qiov, 0,
3098 max_nb_sectors * BDRV_SECTOR_SIZE);
3099
3100 ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
3101 &local_qiov);
3102
3103 qemu_iovec_destroy(&local_qiov);
3104 } else {
3105 ret = 0;
3106 }
3107
3108 /* Reading beyond end of file is supposed to produce zeroes */
3109 if (ret == 0 && total_sectors < sector_num + nb_sectors) {
3110 uint64_t offset = MAX(0, total_sectors - sector_num);
3111 uint64_t bytes = (sector_num + nb_sectors - offset) *
3112 BDRV_SECTOR_SIZE;
3113 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
3114 }
3115 }
3116
3117out:
3118 return ret;
3119}
3120
3121static inline uint64_t bdrv_get_align(BlockDriverState *bs)
3122{
3123 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3124 return MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3125}
3126
3127static inline bool bdrv_req_is_aligned(BlockDriverState *bs,
3128 int64_t offset, size_t bytes)
3129{
3130 int64_t align = bdrv_get_align(bs);
3131 return !(offset & (align - 1) || (bytes & (align - 1)));
3132}
3133
3134/*
3135 * Handle a read request in coroutine context
3136 */
3137static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
3138 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3139 BdrvRequestFlags flags)
3140{
3141 BlockDriver *drv = bs->drv;
3142 BdrvTrackedRequest req;
3143
3144 uint64_t align = bdrv_get_align(bs);
3145 uint8_t *head_buf = NULL;
3146 uint8_t *tail_buf = NULL;
3147 QEMUIOVector local_qiov;
3148 bool use_local_qiov = false;
3149 int ret;
3150
3151 if (!drv) {
3152 return -ENOMEDIUM;
3153 }
3154
3155 ret = bdrv_check_byte_request(bs, offset, bytes);
3156 if (ret < 0) {
3157 return ret;
3158 }
3159
3160 if (bs->copy_on_read) {
3161 flags |= BDRV_REQ_COPY_ON_READ;
3162 }
3163
3164 /* throttling disk I/O */
3165 if (bs->io_limits_enabled) {
3166 bdrv_io_limits_intercept(bs, bytes, false);
3167 }
3168
3169 /* Align read if necessary by padding qiov */
3170 if (offset & (align - 1)) {
3171 head_buf = qemu_blockalign(bs, align);
3172 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3173 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3174 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3175 use_local_qiov = true;
3176
3177 bytes += offset & (align - 1);
3178 offset = offset & ~(align - 1);
3179 }
3180
3181 if ((offset + bytes) & (align - 1)) {
3182 if (!use_local_qiov) {
3183 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3184 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3185 use_local_qiov = true;
3186 }
3187 tail_buf = qemu_blockalign(bs, align);
3188 qemu_iovec_add(&local_qiov, tail_buf,
3189 align - ((offset + bytes) & (align - 1)));
3190
3191 bytes = ROUND_UP(bytes, align);
3192 }
3193
3194 tracked_request_begin(&req, bs, offset, bytes, false);
3195 ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
3196 use_local_qiov ? &local_qiov : qiov,
3197 flags);
3198 tracked_request_end(&req);
3199
3200 if (use_local_qiov) {
3201 qemu_iovec_destroy(&local_qiov);
3202 qemu_vfree(head_buf);
3203 qemu_vfree(tail_buf);
3204 }
3205
3206 return ret;
3207}
3208
3209static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
3210 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3211 BdrvRequestFlags flags)
3212{
3213 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
3214 return -EINVAL;
3215 }
3216
3217 return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
3218 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3219}
3220
3221int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
3222 int nb_sectors, QEMUIOVector *qiov)
3223{
3224 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
3225
3226 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
3227}
3228
3229int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
3230 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
3231{
3232 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
3233
3234 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
3235 BDRV_REQ_COPY_ON_READ);
3236}
3237
3238#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768
3239
3240static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
3241 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
3242{
3243 BlockDriver *drv = bs->drv;
3244 QEMUIOVector qiov;
3245 struct iovec iov = {0};
3246 int ret = 0;
3247
3248 int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
3249 BDRV_REQUEST_MAX_SECTORS);
3250
3251 while (nb_sectors > 0 && !ret) {
3252 int num = nb_sectors;
3253
3254 /* Align request. Block drivers can expect the "bulk" of the request
3255 * to be aligned.
3256 */
3257 if (bs->bl.write_zeroes_alignment
3258 && num > bs->bl.write_zeroes_alignment) {
3259 if (sector_num % bs->bl.write_zeroes_alignment != 0) {
3260 /* Make a small request up to the first aligned sector. */
3261 num = bs->bl.write_zeroes_alignment;
3262 num -= sector_num % bs->bl.write_zeroes_alignment;
3263 } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
3264 /* Shorten the request to the last aligned sector. num cannot
3265 * underflow because num > bs->bl.write_zeroes_alignment.
3266 */
3267 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
3268 }
3269 }
3270
3271 /* limit request size */
3272 if (num > max_write_zeroes) {
3273 num = max_write_zeroes;
3274 }
3275
3276 ret = -ENOTSUP;
3277 /* First try the efficient write zeroes operation */
3278 if (drv->bdrv_co_write_zeroes) {
3279 ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
3280 }
3281
3282 if (ret == -ENOTSUP) {
3283 /* Fall back to bounce buffer if write zeroes is unsupported */
3284 int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
3285 MAX_WRITE_ZEROES_BOUNCE_BUFFER);
3286 num = MIN(num, max_xfer_len);
3287 iov.iov_len = num * BDRV_SECTOR_SIZE;
3288 if (iov.iov_base == NULL) {
3289 iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
3290 if (iov.iov_base == NULL) {
3291 ret = -ENOMEM;
3292 goto fail;
3293 }
3294 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
3295 }
3296 qemu_iovec_init_external(&qiov, &iov, 1);
3297
3298 ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
3299
3300 /* Keep the bounce buffer around if it is big enough for
3301 * all future requests.
3302 */
3303 if (num < max_xfer_len) {
3304 qemu_vfree(iov.iov_base);
3305 iov.iov_base = NULL;
3306 }
3307 }
3308
3309 sector_num += num;
3310 nb_sectors -= num;
3311 }
3312
3313fail:
3314 qemu_vfree(iov.iov_base);
3315 return ret;
3316}
3317
3318/*
3319 * Forwards an already correctly aligned write request to the BlockDriver.
3320 */
3321static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
3322 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3323 QEMUIOVector *qiov, int flags)
3324{
3325 BlockDriver *drv = bs->drv;
3326 bool waited;
3327 int ret;
3328
3329 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3330 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3331
3332 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3333 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3334 assert(!qiov || bytes == qiov->size);
3335
3336 waited = wait_serialising_requests(req);
3337 assert(!waited || !req->serialising);
3338 assert(req->overlap_offset <= offset);
3339 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
3340
3341 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
3342
3343 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
3344 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
3345 qemu_iovec_is_zero(qiov)) {
3346 flags |= BDRV_REQ_ZERO_WRITE;
3347 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
3348 flags |= BDRV_REQ_MAY_UNMAP;
3349 }
3350 }
3351
3352 if (ret < 0) {
3353 /* Do nothing, write notifier decided to fail this request */
3354 } else if (flags & BDRV_REQ_ZERO_WRITE) {
3355 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
3356 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
3357 } else {
3358 BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
3359 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
3360 }
3361 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);
3362
3363 if (ret == 0 && !bs->enable_write_cache) {
3364 ret = bdrv_co_flush(bs);
3365 }
3366
3367 bdrv_set_dirty(bs, sector_num, nb_sectors);
3368
3369 block_acct_highest_sector(&bs->stats, sector_num, nb_sectors);
3370
3371 if (ret >= 0) {
3372 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
3373 }
3374
3375 return ret;
3376}
3377
3378/*
3379 * Handle a write request in coroutine context
3380 */
3381static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
3382 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3383 BdrvRequestFlags flags)
3384{
3385 BdrvTrackedRequest req;
3386 uint64_t align = bdrv_get_align(bs);
3387 uint8_t *head_buf = NULL;
3388 uint8_t *tail_buf = NULL;
3389 QEMUIOVector local_qiov;
3390 bool use_local_qiov = false;
3391 int ret;
3392
3393 if (!bs->drv) {
3394 return -ENOMEDIUM;
3395 }
3396 if (bs->read_only) {
3397 return -EACCES;
3398 }
3399
3400 ret = bdrv_check_byte_request(bs, offset, bytes);
3401 if (ret < 0) {
3402 return ret;
3403 }
3404
3405 /* throttling disk I/O */
3406 if (bs->io_limits_enabled) {
3407 bdrv_io_limits_intercept(bs, bytes, true);
3408 }
3409
3410 /*
3411 * Align write if necessary by performing a read-modify-write cycle.
3412 * Pad qiov with the read parts and be sure to have a tracked request not
3413 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
3414 */
3415 tracked_request_begin(&req, bs, offset, bytes, true);
3416
3417 if (offset & (align - 1)) {
3418 QEMUIOVector head_qiov;
3419 struct iovec head_iov;
3420
3421 mark_request_serialising(&req, align);
3422 wait_serialising_requests(&req);
3423
3424 head_buf = qemu_blockalign(bs, align);
3425 head_iov = (struct iovec) {
3426 .iov_base = head_buf,
3427 .iov_len = align,
3428 };
3429 qemu_iovec_init_external(&head_qiov, &head_iov, 1);
3430
3431 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
3432 ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
3433 align, &head_qiov, 0);
3434 if (ret < 0) {
3435 goto fail;
3436 }
3437 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
3438
3439 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3440 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3441 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3442 use_local_qiov = true;
3443
3444 bytes += offset & (align - 1);
3445 offset = offset & ~(align - 1);
3446 }
3447
3448 if ((offset + bytes) & (align - 1)) {
3449 QEMUIOVector tail_qiov;
3450 struct iovec tail_iov;
3451 size_t tail_bytes;
3452 bool waited;
3453
3454 mark_request_serialising(&req, align);
3455 waited = wait_serialising_requests(&req);
3456 assert(!waited || !use_local_qiov);
3457
3458 tail_buf = qemu_blockalign(bs, align);
3459 tail_iov = (struct iovec) {
3460 .iov_base = tail_buf,
3461 .iov_len = align,
3462 };
3463 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
3464
3465 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
3466 ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
3467 align, &tail_qiov, 0);
3468 if (ret < 0) {
3469 goto fail;
3470 }
3471 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
3472
3473 if (!use_local_qiov) {
3474 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3475 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3476 use_local_qiov = true;
3477 }
3478
3479 tail_bytes = (offset + bytes) & (align - 1);
3480 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
3481
3482 bytes = ROUND_UP(bytes, align);
3483 }
3484
3485 if (use_local_qiov) {
3486 /* Local buffer may have non-zero data. */
3487 flags &= ~BDRV_REQ_ZERO_WRITE;
3488 }
3489 ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
3490 use_local_qiov ? &local_qiov : qiov,
3491 flags);
3492
3493fail:
3494 tracked_request_end(&req);
3495
3496 if (use_local_qiov) {
3497 qemu_iovec_destroy(&local_qiov);
3498 }
3499 qemu_vfree(head_buf);
3500 qemu_vfree(tail_buf);
3501
3502 return ret;
3503}
3504
3505static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
3506 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3507 BdrvRequestFlags flags)
3508{
3509 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
3510 return -EINVAL;
3511 }
3512
3513 return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
3514 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3515}
3516
3517int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
3518 int nb_sectors, QEMUIOVector *qiov)
3519{
3520 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
3521
3522 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
3523}
3524
3525int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
3526 int64_t sector_num, int nb_sectors,
3527 BdrvRequestFlags flags)
3528{
3529 int ret;
3530
3531 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
3532
3533 if (!(bs->open_flags & BDRV_O_UNMAP)) {
3534 flags &= ~BDRV_REQ_MAY_UNMAP;
3535 }
3536 if (bdrv_req_is_aligned(bs, sector_num << BDRV_SECTOR_BITS,
3537 nb_sectors << BDRV_SECTOR_BITS)) {
3538 ret = bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
3539 BDRV_REQ_ZERO_WRITE | flags);
3540 } else {
3541 uint8_t *buf;
3542 QEMUIOVector local_qiov;
3543 size_t bytes = nb_sectors << BDRV_SECTOR_BITS;
3544
3545 buf = qemu_memalign(bdrv_opt_mem_align(bs), bytes);
3546 memset(buf, 0, bytes);
3547 qemu_iovec_init(&local_qiov, 1);
3548 qemu_iovec_add(&local_qiov, buf, bytes);
3549
3550 ret = bdrv_co_do_writev(bs, sector_num, nb_sectors, &local_qiov,
3551 BDRV_REQ_ZERO_WRITE | flags);
3552 qemu_vfree(buf);
3553 }
3554 return ret;
3555}
3556
3557/**
3558 * Truncate file to 'offset' bytes (needed only for file protocols)
3559 */
3560int bdrv_truncate(BlockDriverState *bs, int64_t offset)
3561{
3562 BlockDriver *drv = bs->drv;
3563 int ret;
3564 if (!drv)
3565 return -ENOMEDIUM;
3566 if (!drv->bdrv_truncate)
3567 return -ENOTSUP;
3568 if (bs->read_only)
3569 return -EACCES;
3570
3571 ret = drv->bdrv_truncate(bs, offset);
3572 if (ret == 0) {
3573 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3574 if (bs->blk) {
3575 blk_dev_resize_cb(bs->blk);
3576 }
3577 }
3578 return ret;
3579}
3580
3581/**
3582 * Length of an allocated file in bytes. Sparse files are counted by actual
3583 * allocated space. Return < 0 on error or if unknown.
3584 */
3585int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
3586{
3587 BlockDriver *drv = bs->drv;
3588 if (!drv) {
3589 return -ENOMEDIUM;
3590 }
3591 if (drv->bdrv_get_allocated_file_size) {
3592 return drv->bdrv_get_allocated_file_size(bs);
3593 }
3594 if (bs->file) {
3595 return bdrv_get_allocated_file_size(bs->file);
3596 }
3597 return -ENOTSUP;
3598}
3599
3600/**
3601 * Return number of sectors on success, -errno on error.
3602 */
3603int64_t bdrv_nb_sectors(BlockDriverState *bs)
3604{
3605 BlockDriver *drv = bs->drv;
3606
3607 if (!drv)
3608 return -ENOMEDIUM;
3609
3610 if (drv->has_variable_length) {
3611 int ret = refresh_total_sectors(bs, bs->total_sectors);
3612 if (ret < 0) {
3613 return ret;
3614 }
3615 }
3616 return bs->total_sectors;
3617}
3618
3619/**
3620 * Return length in bytes on success, -errno on error.
3621 * The length is always a multiple of BDRV_SECTOR_SIZE.
3622 */
3623int64_t bdrv_getlength(BlockDriverState *bs)
3624{
3625 int64_t ret = bdrv_nb_sectors(bs);
3626
3627 return ret < 0 ? ret : ret * BDRV_SECTOR_SIZE;
3628}
3629
3630/* Return 0 as the number of sectors if no device is present or on error */
3631void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
3632{
3633 int64_t nb_sectors = bdrv_nb_sectors(bs);
3634
3635 *nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors;
3636}
3637
3638void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
3639 BlockdevOnError on_write_error)
3640{
3641 bs->on_read_error = on_read_error;
3642 bs->on_write_error = on_write_error;
3643}
3644
3645BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
3646{
3647 return is_read ? bs->on_read_error : bs->on_write_error;
3648}
3649
3650BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
3651{
3652 BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;
3653
3654 switch (on_err) {
3655 case BLOCKDEV_ON_ERROR_ENOSPC:
3656 return (error == ENOSPC) ?
3657 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
3658 case BLOCKDEV_ON_ERROR_STOP:
3659 return BLOCK_ERROR_ACTION_STOP;
3660 case BLOCKDEV_ON_ERROR_REPORT:
3661 return BLOCK_ERROR_ACTION_REPORT;
3662 case BLOCKDEV_ON_ERROR_IGNORE:
3663 return BLOCK_ERROR_ACTION_IGNORE;
3664 default:
3665 abort();
3666 }
3667}
3668
3669static void send_qmp_error_event(BlockDriverState *bs,
3670 BlockErrorAction action,
3671 bool is_read, int error)
3672{
3673 IoOperationType optype;
3674
3675 optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
3676 qapi_event_send_block_io_error(bdrv_get_device_name(bs), optype, action,
3677 bdrv_iostatus_is_enabled(bs),
3678 error == ENOSPC, strerror(error),
3679 &error_abort);
3680}
3681
3682/* This is done by device models because, while the block layer knows
3683 * about the error, it does not know whether an operation comes from
3684 * the device or the block layer (from a job, for example).
3685 */
3686void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
3687 bool is_read, int error)
3688{
3689 assert(error >= 0);
3690
3691 if (action == BLOCK_ERROR_ACTION_STOP) {
3692 /* First set the iostatus, so that "info block" returns an iostatus
3693 * that matches the events raised so far (an additional error iostatus
3694 * is fine, but not a lost one).
3695 */
3696 bdrv_iostatus_set_err(bs, error);
3697
3698 /* Then raise the request to stop the VM and the event.
3699 * qemu_system_vmstop_request_prepare has two effects. First,
3700 * it ensures that the STOP event always comes after the
3701 * BLOCK_IO_ERROR event. Second, it ensures that even if management
3702 * can observe the STOP event and do a "cont" before the STOP
3703 * event is issued, the VM will not stop. In this case, vm_start()
3704 * also ensures that the STOP/RESUME pair of events is emitted.
3705 */
3706 qemu_system_vmstop_request_prepare();
3707 send_qmp_error_event(bs, action, is_read, error);
3708 qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
3709 } else {
3710 send_qmp_error_event(bs, action, is_read, error);
3711 }
3712}
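
/*
 * Illustrative example (not part of the original block.c): the pattern a
 * device model might use with the two helpers above when a request fails
 * with a negative errno value 'ret'.
 *
 *     BlockErrorAction action = bdrv_get_error_action(bs, is_read, -ret);
 *
 *     bdrv_error_action(bs, action, is_read, -ret);  // emits event, may stop
 *     if (action == BLOCK_ERROR_ACTION_IGNORE) {
 *         // complete the request as if it had succeeded
 *     }
 */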
3713
3714int bdrv_is_read_only(BlockDriverState *bs)
3715{
3716 return bs->read_only;
3717}
3718
3719int bdrv_is_sg(BlockDriverState *bs)
3720{
3721 return bs->sg;
3722}
3723
3724int bdrv_enable_write_cache(BlockDriverState *bs)
3725{
3726 return bs->enable_write_cache;
3727}
3728
3729void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
3730{
3731 bs->enable_write_cache = wce;
3732
3733 /* so a reopen() will preserve wce */
3734 if (wce) {
3735 bs->open_flags |= BDRV_O_CACHE_WB;
3736 } else {
3737 bs->open_flags &= ~BDRV_O_CACHE_WB;
3738 }
3739}
3740
3741int bdrv_is_encrypted(BlockDriverState *bs)
3742{
3743 if (bs->backing_hd && bs->backing_hd->encrypted)
3744 return 1;
3745 return bs->encrypted;
3746}
3747
3748int bdrv_key_required(BlockDriverState *bs)
3749{
3750 BlockDriverState *backing_hd = bs->backing_hd;
3751
3752 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
3753 return 1;
3754 return (bs->encrypted && !bs->valid_key);
3755}
3756
3757int bdrv_set_key(BlockDriverState *bs, const char *key)
3758{
3759 int ret;
3760 if (bs->backing_hd && bs->backing_hd->encrypted) {
3761 ret = bdrv_set_key(bs->backing_hd, key);
3762 if (ret < 0)
3763 return ret;
3764 if (!bs->encrypted)
3765 return 0;
3766 }
3767 if (!bs->encrypted) {
3768 return -EINVAL;
3769 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
3770 return -ENOMEDIUM;
3771 }
3772 ret = bs->drv->bdrv_set_key(bs, key);
3773 if (ret < 0) {
3774 bs->valid_key = 0;
3775 } else if (!bs->valid_key) {
3776 bs->valid_key = 1;
3777 if (bs->blk) {
3778 /* call the change callback now, we skipped it on open */
3779 blk_dev_change_media_cb(bs->blk, true);
3780 }
3781 }
3782 return ret;
3783}
3784
3785/*
3786 * Provide an encryption key for @bs.
3787 * If @key is non-null:
3788 * If @bs is not encrypted, fail.
3789 * Else if the key is invalid, fail.
3790 * Else set @bs's key to @key, replacing the existing key, if any.
3791 * If @key is null:
3792 * If @bs is encrypted and still lacks a key, fail.
3793 * Else do nothing.
3794 * On failure, store an error object through @errp if non-null.
3795 */
3796void bdrv_add_key(BlockDriverState *bs, const char *key, Error **errp)
3797{
3798 if (key) {
3799 if (!bdrv_is_encrypted(bs)) {
3800 error_setg(errp, "Device '%s' is not encrypted",
3801 bdrv_get_device_name(bs));
3802 } else if (bdrv_set_key(bs, key) < 0) {
3803 error_set(errp, QERR_INVALID_PASSWORD);
3804 }
3805 } else {
3806 if (bdrv_key_required(bs)) {
3807 error_set(errp, ERROR_CLASS_DEVICE_ENCRYPTED,
3808 "'%s' (%s) is encrypted",
3809 bdrv_get_device_name(bs),
3810 bdrv_get_encrypted_filename(bs));
3811 }
3812 }
3813}
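
/*
 * Illustrative example (not part of the original block.c): supplying a key
 * according to the contract above and turning the Error object into a plain
 * return code.
 *
 *     Error *local_err = NULL;
 *
 *     bdrv_add_key(bs, key, &local_err);
 *     if (local_err) {
 *         error_free(local_err);   // a real caller would report it instead
 *         return -EINVAL;
 *     }
 */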
3814
3815const char *bdrv_get_format_name(BlockDriverState *bs)
3816{
3817 return bs->drv ? bs->drv->format_name : NULL;
3818}
3819
3820static int qsort_strcmp(const void *a, const void *b)
3821{
3822 return strcmp(a, b);
3823}
3824
3825void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
3826 void *opaque)
3827{
3828 BlockDriver *drv;
3829 int count = 0;
3830 int i;
3831 const char **formats = NULL;
3832
3833 QLIST_FOREACH(drv, &bdrv_drivers, list) {
3834 if (drv->format_name) {
3835 bool found = false;
3836 int i = count;
3837 while (formats && i && !found) {
3838 found = !strcmp(formats[--i], drv->format_name);
3839 }
3840
3841 if (!found) {
3842 formats = g_renew(const char *, formats, count + 1);
3843 formats[count++] = drv->format_name;
3844 }
3845 }
3846 }
3847
3848 qsort(formats, count, sizeof(formats[0]), qsort_strcmp);
3849
3850 for (i = 0; i < count; i++) {
3851 it(opaque, formats[i]);
3852 }
3853
3854 g_free(formats);
3855}
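
/*
 * Illustrative example (not part of the original block.c): a callback that
 * prints every registered format name, as a "Supported formats:" listing
 * might.
 *
 *     static void print_format(void *opaque, const char *name)
 *     {
 *         fprintf(opaque, " %s", name);
 *     }
 *
 *     bdrv_iterate_format(print_format, stderr);
 */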
3856
3857/* Find a node by name in the BDS graph */
3858BlockDriverState *bdrv_find_node(const char *node_name)
3859{
3860 BlockDriverState *bs;
3861
3862 assert(node_name);
3863
3864 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3865 if (!strcmp(node_name, bs->node_name)) {
3866 return bs;
3867 }
3868 }
3869 return NULL;
3870}
3871
3872/* Put this QMP function here so it can access the static graph_bdrv_states. */
3873BlockDeviceInfoList *bdrv_named_nodes_list(void)
3874{
3875 BlockDeviceInfoList *list, *entry;
3876 BlockDriverState *bs;
3877
3878 list = NULL;
3879 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3880 entry = g_malloc0(sizeof(*entry));
3881 entry->value = bdrv_block_device_info(bs);
3882 entry->next = list;
3883 list = entry;
3884 }
3885
3886 return list;
3887}
3888
3889BlockDriverState *bdrv_lookup_bs(const char *device,
3890 const char *node_name,
3891 Error **errp)
3892{
3893 BlockBackend *blk;
3894 BlockDriverState *bs;
3895
3896 if (device) {
3897 blk = blk_by_name(device);
3898
3899 if (blk) {
3900 return blk_bs(blk);
3901 }
3902 }
3903
3904 if (node_name) {
3905 bs = bdrv_find_node(node_name);
3906
3907 if (bs) {
3908 return bs;
3909 }
3910 }
3911
3912 error_setg(errp, "Cannot find device=%s nor node_name=%s",
3913 device ? device : "",
3914 node_name ? node_name : "");
3915 return NULL;
3916}
3917
3918/* If 'base' is in the same chain as 'top', return true. Otherwise,
3919 * return false. If either argument is NULL, return false. */
3920bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base)
3921{
3922 while (top && top != base) {
3923 top = top->backing_hd;
3924 }
3925
3926 return top != NULL;
3927}
3928
3929BlockDriverState *bdrv_next_node(BlockDriverState *bs)
3930{
3931 if (!bs) {
3932 return QTAILQ_FIRST(&graph_bdrv_states);
3933 }
3934 return QTAILQ_NEXT(bs, node_list);
3935}
3936
3937BlockDriverState *bdrv_next(BlockDriverState *bs)
3938{
3939 if (!bs) {
3940 return QTAILQ_FIRST(&bdrv_states);
3941 }
3942 return QTAILQ_NEXT(bs, device_list);
3943}
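
/*
 * Illustrative example (not part of the original block.c): walking all
 * device-attached BDSes, starting from NULL.
 *
 *     BlockDriverState *bs = NULL;
 *
 *     while ((bs = bdrv_next(bs)) != NULL) {
 *         fprintf(stderr, "%s\n", bdrv_get_device_name(bs));
 *     }
 */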
3944
3945const char *bdrv_get_node_name(const BlockDriverState *bs)
3946{
3947 return bs->node_name;
3948}
3949
3950/* TODO check what callers really want: bs->node_name or blk_name() */
3951const char *bdrv_get_device_name(const BlockDriverState *bs)
3952{
3953 return bs->blk ? blk_name(bs->blk) : "";
3954}
3955
3956int bdrv_get_flags(BlockDriverState *bs)
3957{
3958 return bs->open_flags;
3959}
3960
3961int bdrv_flush_all(void)
3962{
3963 BlockDriverState *bs;
3964 int result = 0;
3965
3966 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3967 AioContext *aio_context = bdrv_get_aio_context(bs);
3968 int ret;
3969
3970 aio_context_acquire(aio_context);
3971 ret = bdrv_flush(bs);
3972 if (ret < 0 && !result) {
3973 result = ret;
3974 }
3975 aio_context_release(aio_context);
3976 }
3977
3978 return result;
3979}
3980
3981int bdrv_has_zero_init_1(BlockDriverState *bs)
3982{
3983 return 1;
3984}
3985
3986int bdrv_has_zero_init(BlockDriverState *bs)
3987{
3988 assert(bs->drv);
3989
3990 /* If BS is a copy on write image, it is initialized to
3991 the contents of the base image, which may not be zeroes. */
3992 if (bs->backing_hd) {
3993 return 0;
3994 }
3995 if (bs->drv->bdrv_has_zero_init) {
3996 return bs->drv->bdrv_has_zero_init(bs);
3997 }
3998
3999 /* safe default */
4000 return 0;
4001}
4002
4003bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
4004{
4005 BlockDriverInfo bdi;
4006
4007 if (bs->backing_hd) {
4008 return false;
4009 }
4010
4011 if (bdrv_get_info(bs, &bdi) == 0) {
4012 return bdi.unallocated_blocks_are_zero;
4013 }
4014
4015 return false;
4016}
4017
4018bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
4019{
4020 BlockDriverInfo bdi;
4021
4022 if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
4023 return false;
4024 }
4025
4026 if (bdrv_get_info(bs, &bdi) == 0) {
4027 return bdi.can_write_zeroes_with_unmap;
4028 }
4029
4030 return false;
4031}
4032
4033typedef struct BdrvCoGetBlockStatusData {
4034 BlockDriverState *bs;
4035 BlockDriverState *base;
4036 int64_t sector_num;
4037 int nb_sectors;
4038 int *pnum;
4039 int64_t ret;
4040 bool done;
4041} BdrvCoGetBlockStatusData;
4042
4043/*
4044 * Returns the allocation status of the specified sectors.
4045 * Drivers not implementing the functionality are assumed to not support
4046 * backing files, hence all their sectors are reported as allocated.
4047 *
4048 * If 'sector_num' is beyond the end of the disk image the return value is 0
4049 * and 'pnum' is set to 0.
4050 *
4051 * 'pnum' is set to the number of sectors (including and immediately following
4052 * the specified sector) that are known to be in the same
4053 * allocated/unallocated state.
4054 *
4055 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
4056 * beyond the end of the disk image it will be clamped.
4057 */
4058static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
4059 int64_t sector_num,
4060 int nb_sectors, int *pnum)
4061{
4062 int64_t total_sectors;
4063 int64_t n;
4064 int64_t ret, ret2;
4065
4066 total_sectors = bdrv_nb_sectors(bs);
4067 if (total_sectors < 0) {
4068 return total_sectors;
4069 }
4070
4071 if (sector_num >= total_sectors) {
4072 *pnum = 0;
4073 return 0;
4074 }
4075
4076 n = total_sectors - sector_num;
4077 if (n < nb_sectors) {
4078 nb_sectors = n;
4079 }
4080
4081 if (!bs->drv->bdrv_co_get_block_status) {
4082 *pnum = nb_sectors;
4083 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
4084 if (bs->drv->protocol_name) {
4085 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
4086 }
4087 return ret;
4088 }
4089
4090 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
4091 if (ret < 0) {
4092 *pnum = 0;
4093 return ret;
4094 }
4095
4096 if (ret & BDRV_BLOCK_RAW) {
4097 assert(ret & BDRV_BLOCK_OFFSET_VALID);
4098 return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
4099 *pnum, pnum);
4100 }
4101
4102 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
4103 ret |= BDRV_BLOCK_ALLOCATED;
4104 }
4105
4106 if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
4107 if (bdrv_unallocated_blocks_are_zero(bs)) {
4108 ret |= BDRV_BLOCK_ZERO;
4109 } else if (bs->backing_hd) {
4110 BlockDriverState *bs2 = bs->backing_hd;
4111 int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
4112 if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
4113 ret |= BDRV_BLOCK_ZERO;
4114 }
4115 }
4116 }
4117
4118 if (bs->file &&
4119 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
4120 (ret & BDRV_BLOCK_OFFSET_VALID)) {
4121 int file_pnum;
4122
4123 ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
4124 *pnum, &file_pnum);
4125 if (ret2 >= 0) {
4126 /* Ignore errors. This is just providing extra information, it
4127 * is useful but not necessary.
4128 */
4129 if (!file_pnum) {
4130 /* !file_pnum indicates an offset at or beyond the EOF; it is
4131 * perfectly valid for the format block driver to point to such
4132 * offsets, so catch it and mark everything as zero */
4133 ret |= BDRV_BLOCK_ZERO;
4134 } else {
4135 /* Limit request to the range reported by the protocol driver */
4136 *pnum = file_pnum;
4137 ret |= (ret2 & BDRV_BLOCK_ZERO);
4138 }
4139 }
4140 }
4141
4142 return ret;
4143}
4144
4145/* Coroutine wrapper for bdrv_get_block_status() */
4146static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
4147{
4148 BdrvCoGetBlockStatusData *data = opaque;
4149 BlockDriverState *bs = data->bs;
4150
4151 data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
4152 data->pnum);
4153 data->done = true;
4154}
4155
4156/*
4157 * Synchronous wrapper around bdrv_co_get_block_status().
4158 *
4159 * See bdrv_co_get_block_status() for details.
4160 */
4161int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
4162 int nb_sectors, int *pnum)
4163{
4164 Coroutine *co;
4165 BdrvCoGetBlockStatusData data = {
4166 .bs = bs,
4167 .sector_num = sector_num,
4168 .nb_sectors = nb_sectors,
4169 .pnum = pnum,
4170 .done = false,
4171 };
4172
4173 if (qemu_in_coroutine()) {
4174 /* Fast-path if already in coroutine context */
4175 bdrv_get_block_status_co_entry(&data);
4176 } else {
4177 AioContext *aio_context = bdrv_get_aio_context(bs);
4178
4179 co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
4180 qemu_coroutine_enter(co, &data);
4181 while (!data.done) {
4182 aio_poll(aio_context, true);
4183 }
4184 }
4185 return data.ret;
4186}
4187
4188int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
4189 int nb_sectors, int *pnum)
4190{
4191 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
4192 if (ret < 0) {
4193 return ret;
4194 }
4195 return !!(ret & BDRV_BLOCK_ALLOCATED);
4196}
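
/*
 * Illustrative sketch of walking an image with bdrv_get_block_status(): each
 * query returns the state of up to nb_sectors sectors, and *pnum tells how far
 * the loop may advance. The 2048-sector chunk size is an arbitrary example.
 *
 *     int64_t sector = 0, total = bdrv_nb_sectors(bs);
 *     while (sector < total) {
 *         int num;
 *         int64_t status = bdrv_get_block_status(bs, sector, 2048, &num);
 *         if (status < 0 || num == 0) {
 *             break;
 *         }
 *         if (status & BDRV_BLOCK_ALLOCATED) {
 *             ... sectors [sector, sector + num) carry data in this layer ...
 *         }
 *         sector += num;
 *     }
 */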
4197
4198/*
4199 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
4200 *
4201 * Return true if the given sector is allocated in any image between
4202 * BASE and TOP (inclusive). BASE can be NULL to check if the given
4203 * sector is allocated in any image of the chain. Return false otherwise.
4204 *
4205 * 'pnum' is set to the number of sectors (including and immediately following
4206 * the specified sector) that are known to be in the same
4207 * allocated/unallocated state.
4208 *
4209 */
4210int bdrv_is_allocated_above(BlockDriverState *top,
4211 BlockDriverState *base,
4212 int64_t sector_num,
4213 int nb_sectors, int *pnum)
4214{
4215 BlockDriverState *intermediate;
4216 int ret, n = nb_sectors;
4217
4218 intermediate = top;
4219 while (intermediate && intermediate != base) {
4220 int pnum_inter;
4221 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
4222 &pnum_inter);
4223 if (ret < 0) {
4224 return ret;
4225 } else if (ret) {
4226 *pnum = pnum_inter;
4227 return 1;
4228 }
4229
4230 /*
4231 * [sector_num, nb_sectors] is unallocated on top but intermediate
4232 * might have
4233 *
4234 * [sector_num+x, nb_sectors] allocated.
4235 */
4236 if (n > pnum_inter &&
4237 (intermediate == top ||
4238 sector_num + pnum_inter < intermediate->total_sectors)) {
4239 n = pnum_inter;
4240 }
4241
4242 intermediate = intermediate->backing_hd;
4243 }
4244
4245 *pnum = n;
4246 return 0;
4247}
4248
4249const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
4250{
4251 if (bs->backing_hd && bs->backing_hd->encrypted)
4252 return bs->backing_file;
4253 else if (bs->encrypted)
4254 return bs->filename;
4255 else
4256 return NULL;
4257}
4258
4259void bdrv_get_backing_filename(BlockDriverState *bs,
4260 char *filename, int filename_size)
4261{
4262 pstrcpy(filename, filename_size, bs->backing_file);
4263}
4264
4265int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
4266 const uint8_t *buf, int nb_sectors)
4267{
4268 BlockDriver *drv = bs->drv;
4269 int ret;
4270
4271 if (!drv) {
4272 return -ENOMEDIUM;
4273 }
4274 if (!drv->bdrv_write_compressed) {
4275 return -ENOTSUP;
4276 }
4277 ret = bdrv_check_request(bs, sector_num, nb_sectors);
4278 if (ret < 0) {
4279 return ret;
4280 }
4281
4282 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
4283
4284 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
4285}
4286
4287int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
4288{
4289 BlockDriver *drv = bs->drv;
4290 if (!drv)
4291 return -ENOMEDIUM;
4292 if (!drv->bdrv_get_info)
4293 return -ENOTSUP;
4294 memset(bdi, 0, sizeof(*bdi));
4295 return drv->bdrv_get_info(bs, bdi);
4296}
4297
4298ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
4299{
4300 BlockDriver *drv = bs->drv;
4301 if (drv && drv->bdrv_get_specific_info) {
4302 return drv->bdrv_get_specific_info(bs);
4303 }
4304 return NULL;
4305}
4306
4307int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
4308 int64_t pos, int size)
4309{
4310 QEMUIOVector qiov;
4311 struct iovec iov = {
4312 .iov_base = (void *) buf,
4313 .iov_len = size,
4314 };
4315
4316 qemu_iovec_init_external(&qiov, &iov, 1);
4317 return bdrv_writev_vmstate(bs, &qiov, pos);
4318}
4319
4320int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
4321{
4322 BlockDriver *drv = bs->drv;
4323
4324 if (!drv) {
4325 return -ENOMEDIUM;
4326 } else if (drv->bdrv_save_vmstate) {
4327 return drv->bdrv_save_vmstate(bs, qiov, pos);
4328 } else if (bs->file) {
4329 return bdrv_writev_vmstate(bs->file, qiov, pos);
4330 }
4331
4332 return -ENOTSUP;
4333}
4334
4335int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
4336 int64_t pos, int size)
4337{
4338 BlockDriver *drv = bs->drv;
4339 if (!drv)
4340 return -ENOMEDIUM;
4341 if (drv->bdrv_load_vmstate)
4342 return drv->bdrv_load_vmstate(bs, buf, pos, size);
4343 if (bs->file)
4344 return bdrv_load_vmstate(bs->file, buf, pos, size);
4345 return -ENOTSUP;
4346}
4347
4348void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
4349{
4350 if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
4351 return;
4352 }
4353
4354 bs->drv->bdrv_debug_event(bs, event);
4355}
4356
4357int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
4358 const char *tag)
4359{
4360 while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
4361 bs = bs->file;
4362 }
4363
4364 if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
4365 return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
4366 }
4367
4368 return -ENOTSUP;
4369}
4370
4371int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
4372{
4373 while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
4374 bs = bs->file;
4375 }
4376
4377 if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
4378 return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
4379 }
4380
4381 return -ENOTSUP;
4382}
4383
4384int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
4385{
4386 while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) {
4387 bs = bs->file;
4388 }
4389
4390 if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
4391 return bs->drv->bdrv_debug_resume(bs, tag);
4392 }
4393
4394 return -ENOTSUP;
4395}
4396
4397bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
4398{
4399 while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
4400 bs = bs->file;
4401 }
4402
4403 if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
4404 return bs->drv->bdrv_debug_is_suspended(bs, tag);
4405 }
4406
4407 return false;
4408}
4409
4410int bdrv_is_snapshot(BlockDriverState *bs)
4411{
4412 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
4413}
4414
4415/* backing_file can either be relative, or absolute, or a protocol. If it is
4416 * relative, it must be relative to the chain. So, passing in bs->filename
4417 * from a BDS as backing_file should not be done, as that may be relative to
4418 * the CWD rather than the chain. */
4419BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
4420 const char *backing_file)
4421{
4422 char *filename_full = NULL;
4423 char *backing_file_full = NULL;
4424 char *filename_tmp = NULL;
4425 int is_protocol = 0;
4426 BlockDriverState *curr_bs = NULL;
4427 BlockDriverState *retval = NULL;
4428
4429 if (!bs || !bs->drv || !backing_file) {
4430 return NULL;
4431 }
4432
4433 filename_full = g_malloc(PATH_MAX);
4434 backing_file_full = g_malloc(PATH_MAX);
4435 filename_tmp = g_malloc(PATH_MAX);
4436
4437 is_protocol = path_has_protocol(backing_file);
4438
4439 for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {
4440
4441 /* If either of the filename paths is actually a protocol, then
4442 * compare unmodified paths; otherwise make paths relative */
4443 if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
4444 if (strcmp(backing_file, curr_bs->backing_file) == 0) {
4445 retval = curr_bs->backing_hd;
4446 break;
4447 }
4448 } else {
4449 /* If not an absolute filename path, make it relative to the current
4450 * image's filename path */
4451 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4452 backing_file);
4453
4454 /* We are going to compare absolute pathnames */
4455 if (!realpath(filename_tmp, filename_full)) {
4456 continue;
4457 }
4458
4459 /* We need to make sure the backing filename we are comparing against
4460 * is relative to the current image filename (or absolute) */
4461 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4462 curr_bs->backing_file);
4463
4464 if (!realpath(filename_tmp, backing_file_full)) {
4465 continue;
4466 }
4467
4468 if (strcmp(backing_file_full, filename_full) == 0) {
4469 retval = curr_bs->backing_hd;
4470 break;
4471 }
4472 }
4473 }
4474
4475 g_free(filename_full);
4476 g_free(backing_file_full);
4477 g_free(filename_tmp);
4478 return retval;
4479}
4480
4481int bdrv_get_backing_file_depth(BlockDriverState *bs)
4482{
4483 if (!bs->drv) {
4484 return 0;
4485 }
4486
4487 if (!bs->backing_hd) {
4488 return 0;
4489 }
4490
4491 return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
4492}
4493
4494/**************************************************************/
4495/* async I/Os */
4496
4497BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
4498 QEMUIOVector *qiov, int nb_sectors,
4499 BlockCompletionFunc *cb, void *opaque)
4500{
4501 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
4502
4503 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
4504 cb, opaque, false);
4505}
4506
4507BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
4508 QEMUIOVector *qiov, int nb_sectors,
4509 BlockCompletionFunc *cb, void *opaque)
4510{
4511 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
4512
4513 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
4514 cb, opaque, true);
4515}
4516
4517BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
4518 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
4519 BlockCompletionFunc *cb, void *opaque)
4520{
4521 trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
4522
4523 return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
4524 BDRV_REQ_ZERO_WRITE | flags,
4525 cb, opaque, true);
4526}
4527
4528
4529typedef struct MultiwriteCB {
4530 int error;
4531 int num_requests;
4532 int num_callbacks;
4533 struct {
4534 BlockCompletionFunc *cb;
4535 void *opaque;
4536 QEMUIOVector *free_qiov;
4537 } callbacks[];
4538} MultiwriteCB;
4539
4540static void multiwrite_user_cb(MultiwriteCB *mcb)
4541{
4542 int i;
4543
4544 for (i = 0; i < mcb->num_callbacks; i++) {
4545 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
4546 if (mcb->callbacks[i].free_qiov) {
4547 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
4548 }
4549 g_free(mcb->callbacks[i].free_qiov);
4550 }
4551}
4552
4553static void multiwrite_cb(void *opaque, int ret)
4554{
4555 MultiwriteCB *mcb = opaque;
4556
4557 trace_multiwrite_cb(mcb, ret);
4558
4559 if (ret < 0 && !mcb->error) {
4560 mcb->error = ret;
4561 }
4562
4563 mcb->num_requests--;
4564 if (mcb->num_requests == 0) {
4565 multiwrite_user_cb(mcb);
4566 g_free(mcb);
4567 }
4568}
4569
4570static int multiwrite_req_compare(const void *a, const void *b)
4571{
4572 const BlockRequest *req1 = a, *req2 = b;
4573
4574 /*
4575 * Note that we can't simply subtract req2->sector from req1->sector
4576 * here as that could overflow the return value.
4577 */
4578 if (req1->sector > req2->sector) {
4579 return 1;
4580 } else if (req1->sector < req2->sector) {
4581 return -1;
4582 } else {
4583 return 0;
4584 }
4585}
4586
4587/*
4588 * Takes a bunch of requests and tries to merge them. Returns the number of
4589 * requests that remain after merging.
4590 */
4591static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
4592 int num_reqs, MultiwriteCB *mcb)
4593{
4594 int i, outidx;
4595
4596 // Sort requests by start sector
4597 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
4598
4599 // Check if adjacent requests touch the same clusters. If so, combine them,
4600 // filling up gaps with zero sectors.
4601 outidx = 0;
4602 for (i = 1; i < num_reqs; i++) {
4603 int merge = 0;
4604 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
4605
4606 // Handle exactly sequential writes and overlapping writes.
4607 if (reqs[i].sector <= oldreq_last) {
4608 merge = 1;
4609 }
4610
4611 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
4612 merge = 0;
4613 }
4614
4615 if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
4616 reqs[i].nb_sectors > bs->bl.max_transfer_length) {
4617 merge = 0;
4618 }
4619
4620 if (merge) {
4621 size_t size;
4622 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
4623 qemu_iovec_init(qiov,
4624 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
4625
4626 // Add the first request to the merged one. If the requests are
4627 // overlapping, drop the last sectors of the first request.
4628 size = (reqs[i].sector - reqs[outidx].sector) << 9;
4629 qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
4630
4631 // We shouldn't need to add any zeros between the two requests
4632 assert (reqs[i].sector <= oldreq_last);
4633
4634 // Add the second request
4635 qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
4636
4637 // Add tail of first request, if necessary
4638 if (qiov->size < reqs[outidx].qiov->size) {
4639 qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
4640 reqs[outidx].qiov->size - qiov->size);
4641 }
4642
4643 reqs[outidx].nb_sectors = qiov->size >> 9;
4644 reqs[outidx].qiov = qiov;
4645
4646 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
4647 } else {
4648 outidx++;
4649 reqs[outidx].sector = reqs[i].sector;
4650 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
4651 reqs[outidx].qiov = reqs[i].qiov;
4652 }
4653 }
4654
4655 block_acct_merge_done(&bs->stats, BLOCK_ACCT_WRITE, num_reqs - outidx - 1);
4656
4657 return outidx + 1;
4658}
4659
4660/*
4661 * Submit multiple AIO write requests at once.
4662 *
4663 * On success, the function returns 0 and all requests in the reqs array have
4664 * been submitted. On error, this function returns -1, and any of the
4665 * requests may or may not have been submitted yet. In particular, this means
4666 * that the callback will be called for some requests but not for others. The
4667 * caller must check the error field of the BlockRequest to wait for the right
4668 * callbacks (if error != 0, no callback will be called).
4669 *
4670 * The implementation may modify the contents of the reqs array, e.g. to merge
4671 * requests. However, the fields opaque and error are left unmodified as they
4672 * are used to signal failure for a single request to the caller.
4673 */
4674int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
4675{
4676 MultiwriteCB *mcb;
4677 int i;
4678
4679 /* don't submit writes if we don't have a medium */
4680 if (bs->drv == NULL) {
4681 for (i = 0; i < num_reqs; i++) {
4682 reqs[i].error = -ENOMEDIUM;
4683 }
4684 return -1;
4685 }
4686
4687 if (num_reqs == 0) {
4688 return 0;
4689 }
4690
4691 // Create MultiwriteCB structure
4692 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
4693 mcb->num_requests = 0;
4694 mcb->num_callbacks = num_reqs;
4695
4696 for (i = 0; i < num_reqs; i++) {
4697 mcb->callbacks[i].cb = reqs[i].cb;
4698 mcb->callbacks[i].opaque = reqs[i].opaque;
4699 }
4700
4701 // Check for mergable requests
4702 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
4703
4704 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
4705
4706 /* Run the aio requests. */
4707 mcb->num_requests = num_reqs;
4708 for (i = 0; i < num_reqs; i++) {
4709 bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
4710 reqs[i].nb_sectors, reqs[i].flags,
4711 multiwrite_cb, mcb,
4712 true);
4713 }
4714
4715 return 0;
4716}
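
/*
 * Illustrative sketch of submitting a batch through bdrv_aio_multiwrite(); the
 * completion callback, buffers and qiovs (qiov0, qiov1) are hypothetical, and
 * each request owns its own QEMUIOVector.
 *
 *     static void my_write_cb(void *opaque, int ret)
 *     {
 *         ... ret is 0 on success or a negative errno ...
 *     }
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0, .nb_sectors = 8, .qiov = &qiov0,
 *           .cb = my_write_cb, .opaque = NULL },
 *         { .sector = 8, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = my_write_cb, .opaque = NULL },
 *     };
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         ... check reqs[i].error for requests that were never submitted ...
 *     }
 */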
4717
4718void bdrv_aio_cancel(BlockAIOCB *acb)
4719{
4720 qemu_aio_ref(acb);
4721 bdrv_aio_cancel_async(acb);
4722 while (acb->refcnt > 1) {
4723 if (acb->aiocb_info->get_aio_context) {
4724 aio_poll(acb->aiocb_info->get_aio_context(acb), true);
4725 } else if (acb->bs) {
4726 aio_poll(bdrv_get_aio_context(acb->bs), true);
4727 } else {
4728 abort();
4729 }
4730 }
4731 qemu_aio_unref(acb);
4732}
4733
4734/* Async version of aio cancel. The caller is not blocked if the acb implements
4735 * cancel_async; otherwise we do nothing and let the request complete normally.
4736 * In either case the completion callback must be called. */
4737void bdrv_aio_cancel_async(BlockAIOCB *acb)
4738{
4739 if (acb->aiocb_info->cancel_async) {
4740 acb->aiocb_info->cancel_async(acb);
4741 }
4742}
4743
4744/**************************************************************/
4745/* async block device emulation */
4746
4747typedef struct BlockAIOCBSync {
4748 BlockAIOCB common;
4749 QEMUBH *bh;
4750 int ret;
4751 /* vector translation state */
4752 QEMUIOVector *qiov;
4753 uint8_t *bounce;
4754 int is_write;
4755} BlockAIOCBSync;
4756
4757static const AIOCBInfo bdrv_em_aiocb_info = {
4758 .aiocb_size = sizeof(BlockAIOCBSync),
4759};
4760
4761static void bdrv_aio_bh_cb(void *opaque)
4762{
4763 BlockAIOCBSync *acb = opaque;
4764
4765 if (!acb->is_write && acb->ret >= 0) {
4766 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
4767 }
4768 qemu_vfree(acb->bounce);
4769 acb->common.cb(acb->common.opaque, acb->ret);
4770 qemu_bh_delete(acb->bh);
4771 acb->bh = NULL;
4772 qemu_aio_unref(acb);
4773}
4774
4775static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
4776 int64_t sector_num,
4777 QEMUIOVector *qiov,
4778 int nb_sectors,
4779 BlockCompletionFunc *cb,
4780 void *opaque,
4781 int is_write)
4782
4783{
4784 BlockAIOCBSync *acb;
4785
4786 acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
4787 acb->is_write = is_write;
4788 acb->qiov = qiov;
4789 acb->bounce = qemu_try_blockalign(bs, qiov->size);
4790 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);
4791
4792 if (acb->bounce == NULL) {
4793 acb->ret = -ENOMEM;
4794 } else if (is_write) {
4795 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
4796 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
4797 } else {
4798 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
4799 }
4800
4801 qemu_bh_schedule(acb->bh);
4802
4803 return &acb->common;
4804}
4805
4806static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
4807 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4808 BlockCompletionFunc *cb, void *opaque)
4809{
4810 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
4811}
4812
4813static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
4814 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4815 BlockCompletionFunc *cb, void *opaque)
4816{
4817 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
4818}
4819
4820
4821typedef struct BlockAIOCBCoroutine {
4822 BlockAIOCB common;
4823 BlockRequest req;
4824 bool is_write;
4825 bool *done;
4826 QEMUBH* bh;
4827} BlockAIOCBCoroutine;
4828
4829static const AIOCBInfo bdrv_em_co_aiocb_info = {
4830 .aiocb_size = sizeof(BlockAIOCBCoroutine),
4831};
4832
4833static void bdrv_co_em_bh(void *opaque)
4834{
4835 BlockAIOCBCoroutine *acb = opaque;
4836
4837 acb->common.cb(acb->common.opaque, acb->req.error);
4838
4839 qemu_bh_delete(acb->bh);
4840 qemu_aio_unref(acb);
4841}
4842
4843/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
4844static void coroutine_fn bdrv_co_do_rw(void *opaque)
4845{
4846 BlockAIOCBCoroutine *acb = opaque;
4847 BlockDriverState *bs = acb->common.bs;
4848
4849 if (!acb->is_write) {
4850 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
4851 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
4852 } else {
4853 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
4854 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
4855 }
4856
4857 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4858 qemu_bh_schedule(acb->bh);
4859}
4860
4861static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
4862 int64_t sector_num,
4863 QEMUIOVector *qiov,
4864 int nb_sectors,
4865 BdrvRequestFlags flags,
4866 BlockCompletionFunc *cb,
4867 void *opaque,
4868 bool is_write)
4869{
4870 Coroutine *co;
4871 BlockAIOCBCoroutine *acb;
4872
4873 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4874 acb->req.sector = sector_num;
4875 acb->req.nb_sectors = nb_sectors;
4876 acb->req.qiov = qiov;
4877 acb->req.flags = flags;
4878 acb->is_write = is_write;
4879
4880 co = qemu_coroutine_create(bdrv_co_do_rw);
4881 qemu_coroutine_enter(co, acb);
4882
4883 return &acb->common;
4884}
4885
4886static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
4887{
4888 BlockAIOCBCoroutine *acb = opaque;
4889 BlockDriverState *bs = acb->common.bs;
4890
4891 acb->req.error = bdrv_co_flush(bs);
4892 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4893 qemu_bh_schedule(acb->bh);
4894}
4895
4896BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
4897 BlockCompletionFunc *cb, void *opaque)
4898{
4899 trace_bdrv_aio_flush(bs, opaque);
4900
4901 Coroutine *co;
4902 BlockAIOCBCoroutine *acb;
4903
4904 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4905
4906 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
4907 qemu_coroutine_enter(co, acb);
4908
4909 return &acb->common;
4910}
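
/*
 * Illustrative sketch of issuing an asynchronous flush; flush_done matches the
 * BlockCompletionFunc signature and is a hypothetical caller-provided function.
 *
 *     static void flush_done(void *opaque, int ret)
 *     {
 *         ... ret is 0 on success or a negative errno ...
 *     }
 *
 *     BlockAIOCB *acb = bdrv_aio_flush(bs, flush_done, NULL);
 *     ... bdrv_aio_cancel(acb) would block until the request completes ...
 */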
4911
4912static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
4913{
4914 BlockAIOCBCoroutine *acb = opaque;
4915 BlockDriverState *bs = acb->common.bs;
4916
4917 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
4918 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4919 qemu_bh_schedule(acb->bh);
4920}
4921
4922BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
4923 int64_t sector_num, int nb_sectors,
4924 BlockCompletionFunc *cb, void *opaque)
4925{
4926 Coroutine *co;
4927 BlockAIOCBCoroutine *acb;
4928
4929 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
4930
4931 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4932 acb->req.sector = sector_num;
4933 acb->req.nb_sectors = nb_sectors;
4934 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
4935 qemu_coroutine_enter(co, acb);
4936
4937 return &acb->common;
4938}
4939
4940void bdrv_init(void)
4941{
4942 module_call_init(MODULE_INIT_BLOCK);
4943}
4944
4945void bdrv_init_with_whitelist(void)
4946{
4947 use_bdrv_whitelist = 1;
4948 bdrv_init();
4949}
4950
4951void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
4952 BlockCompletionFunc *cb, void *opaque)
4953{
4954 BlockAIOCB *acb;
4955
4956 acb = g_slice_alloc(aiocb_info->aiocb_size);
4957 acb->aiocb_info = aiocb_info;
4958 acb->bs = bs;
4959 acb->cb = cb;
4960 acb->opaque = opaque;
4961 acb->refcnt = 1;
4962 return acb;
4963}
4964
4965void qemu_aio_ref(void *p)
4966{
4967 BlockAIOCB *acb = p;
4968 acb->refcnt++;
4969}
4970
4971void qemu_aio_unref(void *p)
4972{
4973 BlockAIOCB *acb = p;
4974 assert(acb->refcnt > 0);
4975 if (--acb->refcnt == 0) {
4976 g_slice_free1(acb->aiocb_info->aiocb_size, acb);
4977 }
4978}
4979
4980/**************************************************************/
4981/* Coroutine block device emulation */
4982
4983typedef struct CoroutineIOCompletion {
4984 Coroutine *coroutine;
4985 int ret;
4986} CoroutineIOCompletion;
4987
4988static void bdrv_co_io_em_complete(void *opaque, int ret)
4989{
4990 CoroutineIOCompletion *co = opaque;
4991
4992 co->ret = ret;
4993 qemu_coroutine_enter(co->coroutine, NULL);
4994}
4995
4996static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
4997 int nb_sectors, QEMUIOVector *iov,
4998 bool is_write)
4999{
5000 CoroutineIOCompletion co = {
5001 .coroutine = qemu_coroutine_self(),
5002 };
5003 BlockAIOCB *acb;
5004
5005 if (is_write) {
5006 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
5007 bdrv_co_io_em_complete, &co);
5008 } else {
5009 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
5010 bdrv_co_io_em_complete, &co);
5011 }
5012
5013 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
5014 if (!acb) {
5015 return -EIO;
5016 }
5017 qemu_coroutine_yield();
5018
5019 return co.ret;
5020}
5021
5022static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
5023 int64_t sector_num, int nb_sectors,
5024 QEMUIOVector *iov)
5025{
5026 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
5027}
5028
5029static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
5030 int64_t sector_num, int nb_sectors,
5031 QEMUIOVector *iov)
5032{
5033 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
5034}
5035
5036static void coroutine_fn bdrv_flush_co_entry(void *opaque)
5037{
5038 RwCo *rwco = opaque;
5039
5040 rwco->ret = bdrv_co_flush(rwco->bs);
5041}
5042
5043int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
5044{
5045 int ret;
5046
5047 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
5048 return 0;
5049 }
5050
5051 /* Write back cached data to the OS even with cache=unsafe */
5052 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
5053 if (bs->drv->bdrv_co_flush_to_os) {
5054 ret = bs->drv->bdrv_co_flush_to_os(bs);
5055 if (ret < 0) {
5056 return ret;
5057 }
5058 }
5059
5060 /* But don't actually force it to the disk with cache=unsafe */
5061 if (bs->open_flags & BDRV_O_NO_FLUSH) {
5062 goto flush_parent;
5063 }
5064
5065 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
5066 if (bs->drv->bdrv_co_flush_to_disk) {
5067 ret = bs->drv->bdrv_co_flush_to_disk(bs);
5068 } else if (bs->drv->bdrv_aio_flush) {
5069 BlockAIOCB *acb;
5070 CoroutineIOCompletion co = {
5071 .coroutine = qemu_coroutine_self(),
5072 };
5073
5074 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
5075 if (acb == NULL) {
5076 ret = -EIO;
5077 } else {
5078 qemu_coroutine_yield();
5079 ret = co.ret;
5080 }
5081 } else {
5082 /*
5083 * Some block drivers always operate in either writethrough or unsafe
5084 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
5085 * know how the server works (because the behaviour is hardcoded or
5086 * depends on server-side configuration), so we can't ensure that
5087 * everything is safe on disk. Returning an error doesn't work because
5088 * that would break guests even if the server operates in writethrough
5089 * mode.
5090 *
5091 * Let's hope the user knows what they're doing.
5092 */
5093 ret = 0;
5094 }
5095 if (ret < 0) {
5096 return ret;
5097 }
5098
5099 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
5100 * set in the case of cache=unsafe, so there are no useless flushes.
5101 */
5102flush_parent:
5103 return bdrv_co_flush(bs->file);
5104}
5105
5106void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
5107{
5108 Error *local_err = NULL;
5109 int ret;
5110
5111 if (!bs->drv) {
5112 return;
5113 }
5114
5115 if (!(bs->open_flags & BDRV_O_INCOMING)) {
5116 return;
5117 }
5118 bs->open_flags &= ~BDRV_O_INCOMING;
5119
5120 if (bs->drv->bdrv_invalidate_cache) {
5121 bs->drv->bdrv_invalidate_cache(bs, &local_err);
5122 } else if (bs->file) {
5123 bdrv_invalidate_cache(bs->file, &local_err);
5124 }
5125 if (local_err) {
5126 error_propagate(errp, local_err);
5127 return;
5128 }
5129
5130 ret = refresh_total_sectors(bs, bs->total_sectors);
5131 if (ret < 0) {
5132 error_setg_errno(errp, -ret, "Could not refresh total sector count");
5133 return;
5134 }
5135}
5136
5137void bdrv_invalidate_cache_all(Error **errp)
5138{
5139 BlockDriverState *bs;
5140 Error *local_err = NULL;
5141
5142 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
5143 AioContext *aio_context = bdrv_get_aio_context(bs);
5144
5145 aio_context_acquire(aio_context);
5146 bdrv_invalidate_cache(bs, &local_err);
5147 aio_context_release(aio_context);
5148 if (local_err) {
5149 error_propagate(errp, local_err);
5150 return;
5151 }
5152 }
5153}
5154
5155int bdrv_flush(BlockDriverState *bs)
5156{
5157 Coroutine *co;
5158 RwCo rwco = {
5159 .bs = bs,
5160 .ret = NOT_DONE,
5161 };
5162
5163 if (qemu_in_coroutine()) {
5164 /* Fast-path if already in coroutine context */
5165 bdrv_flush_co_entry(&rwco);
5166 } else {
5167 AioContext *aio_context = bdrv_get_aio_context(bs);
5168
5169 co = qemu_coroutine_create(bdrv_flush_co_entry);
5170 qemu_coroutine_enter(co, &rwco);
5171 while (rwco.ret == NOT_DONE) {
5172 aio_poll(aio_context, true);
5173 }
5174 }
5175
5176 return rwco.ret;
5177}
5178
5179typedef struct DiscardCo {
5180 BlockDriverState *bs;
5181 int64_t sector_num;
5182 int nb_sectors;
5183 int ret;
5184} DiscardCo;
5185static void coroutine_fn bdrv_discard_co_entry(void *opaque)
5186{
5187 DiscardCo *rwco = opaque;
5188
5189 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
5190}
5191
5192int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
5193 int nb_sectors)
5194{
5195 int max_discard, ret;
5196
5197 if (!bs->drv) {
5198 return -ENOMEDIUM;
5199 }
5200
5201 ret = bdrv_check_request(bs, sector_num, nb_sectors);
5202 if (ret < 0) {
5203 return ret;
5204 } else if (bs->read_only) {
5205 return -EROFS;
5206 }
5207
5208 bdrv_reset_dirty(bs, sector_num, nb_sectors);
5209
5210 /* Do nothing if disabled. */
5211 if (!(bs->open_flags & BDRV_O_UNMAP)) {
5212 return 0;
5213 }
5214
5215 if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
5216 return 0;
5217 }
5218
5219 max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
5220 while (nb_sectors > 0) {
5221 int ret;
5222 int num = nb_sectors;
5223
5224 /* align request */
5225 if (bs->bl.discard_alignment &&
5226 num >= bs->bl.discard_alignment &&
5227 sector_num % bs->bl.discard_alignment) {
5228 if (num > bs->bl.discard_alignment) {
5229 num = bs->bl.discard_alignment;
5230 }
5231 num -= sector_num % bs->bl.discard_alignment;
5232 }
5233
5234 /* limit request size */
5235 if (num > max_discard) {
5236 num = max_discard;
5237 }
5238
5239 if (bs->drv->bdrv_co_discard) {
5240 ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
5241 } else {
5242 BlockAIOCB *acb;
5243 CoroutineIOCompletion co = {
5244 .coroutine = qemu_coroutine_self(),
5245 };
5246
5247 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
5248 bdrv_co_io_em_complete, &co);
5249 if (acb == NULL) {
5250 return -EIO;
5251 } else {
5252 qemu_coroutine_yield();
5253 ret = co.ret;
5254 }
5255 }
5256 if (ret && ret != -ENOTSUP) {
5257 return ret;
5258 }
5259
5260 sector_num += num;
5261 nb_sectors -= num;
5262 }
5263 return 0;
5264}
5265
5266int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
5267{
5268 Coroutine *co;
5269 DiscardCo rwco = {
5270 .bs = bs,
5271 .sector_num = sector_num,
5272 .nb_sectors = nb_sectors,
5273 .ret = NOT_DONE,
5274 };
5275
5276 if (qemu_in_coroutine()) {
5277 /* Fast-path if already in coroutine context */
5278 bdrv_discard_co_entry(&rwco);
5279 } else {
5280 AioContext *aio_context = bdrv_get_aio_context(bs);
5281
5282 co = qemu_coroutine_create(bdrv_discard_co_entry);
5283 qemu_coroutine_enter(co, &rwco);
5284 while (rwco.ret == NOT_DONE) {
5285 aio_poll(aio_context, true);
5286 }
5287 }
5288
5289 return rwco.ret;
5290}
5291
5292/**************************************************************/
5293/* removable device support */
5294
5295/**
5296 * Return TRUE if the media is present
5297 */
5298int bdrv_is_inserted(BlockDriverState *bs)
5299{
5300 BlockDriver *drv = bs->drv;
5301
5302 if (!drv)
5303 return 0;
5304 if (!drv->bdrv_is_inserted)
5305 return 1;
5306 return drv->bdrv_is_inserted(bs);
5307}
5308
5309/**
5310 * Return whether the media changed since the last call to this
5311 * function, or -ENOTSUP if we don't know. Most drivers don't know.
5312 */
5313int bdrv_media_changed(BlockDriverState *bs)
5314{
5315 BlockDriver *drv = bs->drv;
5316
5317 if (drv && drv->bdrv_media_changed) {
5318 return drv->bdrv_media_changed(bs);
5319 }
5320 return -ENOTSUP;
5321}
5322
5323/**
5324 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
5325 */
5326void bdrv_eject(BlockDriverState *bs, bool eject_flag)
5327{
5328 BlockDriver *drv = bs->drv;
5329 const char *device_name;
5330
5331 if (drv && drv->bdrv_eject) {
5332 drv->bdrv_eject(bs, eject_flag);
5333 }
5334
5335 device_name = bdrv_get_device_name(bs);
5336 if (device_name[0] != '\0') {
5337 qapi_event_send_device_tray_moved(device_name,
5338 eject_flag, &error_abort);
5339 }
5340}
5341
5342/**
5343 * Lock or unlock the media (if it is locked, the user won't be able
5344 * to eject it manually).
5345 */
5346void bdrv_lock_medium(BlockDriverState *bs, bool locked)
5347{
5348 BlockDriver *drv = bs->drv;
5349
5350 trace_bdrv_lock_medium(bs, locked);
5351
5352 if (drv && drv->bdrv_lock_medium) {
5353 drv->bdrv_lock_medium(bs, locked);
5354 }
5355}
5356
5357/* needed for generic scsi interface */
5358
5359int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
5360{
5361 BlockDriver *drv = bs->drv;
5362
5363 if (drv && drv->bdrv_ioctl)
5364 return drv->bdrv_ioctl(bs, req, buf);
5365 return -ENOTSUP;
5366}
5367
5368BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
5369 unsigned long int req, void *buf,
5370 BlockCompletionFunc *cb, void *opaque)
5371{
5372 BlockDriver *drv = bs->drv;
5373
5374 if (drv && drv->bdrv_aio_ioctl)
5375 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
5376 return NULL;
5377}
5378
5379void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
5380{
5381 bs->guest_block_size = align;
5382}
5383
5384void *qemu_blockalign(BlockDriverState *bs, size_t size)
5385{
5386 return qemu_memalign(bdrv_opt_mem_align(bs), size);
5387}
5388
5389void *qemu_blockalign0(BlockDriverState *bs, size_t size)
5390{
5391 return memset(qemu_blockalign(bs, size), 0, size);
5392}
5393
5394void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
5395{
5396 size_t align = bdrv_opt_mem_align(bs);
5397
5398 /* Ensure that NULL is never returned on success */
5399 assert(align > 0);
5400 if (size == 0) {
5401 size = align;
5402 }
5403
5404 return qemu_try_memalign(align, size);
5405}
5406
5407void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
5408{
5409 void *mem = qemu_try_blockalign(bs, size);
5410
5411 if (mem) {
5412 memset(mem, 0, size);
5413 }
5414
5415 return mem;
5416}
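
/*
 * Illustrative sketch of allocating a zeroed bounce buffer that satisfies the
 * BDS memory alignment and releasing it with qemu_vfree(); len is a
 * hypothetical request size.
 *
 *     void *buf = qemu_try_blockalign0(bs, len);
 *     if (buf == NULL) {
 *         return -ENOMEM;
 *     }
 *     ... use buf for I/O ...
 *     qemu_vfree(buf);
 */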
5417
5418/*
5419 * Check if all memory in this vector satisfies the memory alignment
5419 * required by the BDS.
5420 */
5421bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
5422{
5423 int i;
5424 size_t alignment = bdrv_opt_mem_align(bs);
5425
5426 for (i = 0; i < qiov->niov; i++) {
5427 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
5428 return false;
5429 }
5430 if (qiov->iov[i].iov_len % alignment) {
5431 return false;
5432 }
5433 }
5434
5435 return true;
5436}
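
/*
 * Illustrative sketch of checking whether an externally supplied buffer can be
 * used directly for I/O on this BDS; buf and len are hypothetical.
 *
 *     struct iovec iov = { .iov_base = buf, .iov_len = len };
 *     QEMUIOVector qiov;
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     if (!bdrv_qiov_is_aligned(bs, &qiov)) {
 *         ... fall back to a bounce buffer allocated with qemu_blockalign() ...
 *     }
 */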
5437
5438BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity,
5439 Error **errp)
5440{
5441 int64_t bitmap_size;
5442 BdrvDirtyBitmap *bitmap;
5443
5444 assert((granularity & (granularity - 1)) == 0);
5445
5446 granularity >>= BDRV_SECTOR_BITS;
5447 assert(granularity);
5448 bitmap_size = bdrv_nb_sectors(bs);
5449 if (bitmap_size < 0) {
5450 error_setg_errno(errp, -bitmap_size, "could not get length of device");
5451 errno = -bitmap_size;
5452 return NULL;
5453 }
5454 bitmap = g_new0(BdrvDirtyBitmap, 1);
5455 bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
5456 QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
5457 return bitmap;
5458}
5459
5460void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5461{
5462 BdrvDirtyBitmap *bm, *next;
5463 QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
5464 if (bm == bitmap) {
5465 QLIST_REMOVE(bitmap, list);
5466 hbitmap_free(bitmap->bitmap);
5467 g_free(bitmap);
5468 return;
5469 }
5470 }
5471}
5472
5473BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
5474{
5475 BdrvDirtyBitmap *bm;
5476 BlockDirtyInfoList *list = NULL;
5477 BlockDirtyInfoList **plist = &list;
5478
5479 QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
5480 BlockDirtyInfo *info = g_new0(BlockDirtyInfo, 1);
5481 BlockDirtyInfoList *entry = g_new0(BlockDirtyInfoList, 1);
5482 info->count = bdrv_get_dirty_count(bs, bm);
5483 info->granularity =
5484 ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
5485 entry->value = info;
5486 *plist = entry;
5487 plist = &entry->next;
5488 }
5489
5490 return list;
5491}
5492
5493int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
5494{
5495 if (bitmap) {
5496 return hbitmap_get(bitmap->bitmap, sector);
5497 } else {
5498 return 0;
5499 }
5500}
5501
5502void bdrv_dirty_iter_init(BlockDriverState *bs,
5503 BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
5504{
5505 hbitmap_iter_init(hbi, bitmap->bitmap, 0);
5506}
5507
5508void bdrv_set_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
5509 int64_t cur_sector, int nr_sectors)
5510{
5511 hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
5512}
5513
5514void bdrv_reset_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
5515 int64_t cur_sector, int nr_sectors)
5516{
5517 hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
5518}
5519
5520static void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
5521 int nr_sectors)
5522{
5523 BdrvDirtyBitmap *bitmap;
5524 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5525 hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
5526 }
5527}
5528
5529static void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
5530 int nr_sectors)
5531{
5532 BdrvDirtyBitmap *bitmap;
5533 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5534 hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
5535 }
5536}
5537
5538int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5539{
5540 return hbitmap_count(bitmap->bitmap);
5541}
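
/*
 * Illustrative sketch of a dirty bitmap lifecycle: create a bitmap with a
 * power-of-two granularity in bytes, mark some sectors dirty, query the count,
 * then release it. The 64 KB granularity and sector range are only examples.
 *
 *     Error *local_err = NULL;
 *     BdrvDirtyBitmap *bitmap = bdrv_create_dirty_bitmap(bs, 65536, &local_err);
 *     if (!bitmap) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *     bdrv_set_dirty_bitmap(bs, bitmap, 0, 128);
 *     int64_t dirty = bdrv_get_dirty_count(bs, bitmap);
 *     bdrv_release_dirty_bitmap(bs, bitmap);
 */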
5542
5543/* Get a reference to bs */
5544void bdrv_ref(BlockDriverState *bs)
5545{
5546 bs->refcnt++;
5547}
5548
5549/* Release a previously grabbed reference to bs.
5550 * If, after releasing, the reference count is zero, the BlockDriverState is
5551 * deleted. */
5552void bdrv_unref(BlockDriverState *bs)
5553{
5554 if (!bs) {
5555 return;
5556 }
5557 assert(bs->refcnt > 0);
5558 if (--bs->refcnt == 0) {
5559 bdrv_delete(bs);
5560 }
5561}
5562
5563struct BdrvOpBlocker {
5564 Error *reason;
5565 QLIST_ENTRY(BdrvOpBlocker) list;
5566};
5567
5568bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp)
5569{
5570 BdrvOpBlocker *blocker;
5571 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5572 if (!QLIST_EMPTY(&bs->op_blockers[op])) {
5573 blocker = QLIST_FIRST(&bs->op_blockers[op]);
5574 if (errp) {
5575 error_setg(errp, "Device '%s' is busy: %s",
5576 bdrv_get_device_name(bs),
5577 error_get_pretty(blocker->reason));
5578 }
5579 return true;
5580 }
5581 return false;
5582}
5583
5584void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason)
5585{
5586 BdrvOpBlocker *blocker;
5587 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5588
5589 blocker = g_new0(BdrvOpBlocker, 1);
5590 blocker->reason = reason;
5591 QLIST_INSERT_HEAD(&bs->op_blockers[op], blocker, list);
5592}
5593
5594void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason)
5595{
5596 BdrvOpBlocker *blocker, *next;
5597 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5598 QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) {
5599 if (blocker->reason == reason) {
5600 QLIST_REMOVE(blocker, list);
5601 g_free(blocker);
5602 }
5603 }
5604}
5605
5606void bdrv_op_block_all(BlockDriverState *bs, Error *reason)
5607{
5608 int i;
5609 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5610 bdrv_op_block(bs, i, reason);
5611 }
5612}
5613
5614void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason)
5615{
5616 int i;
5617 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5618 bdrv_op_unblock(bs, i, reason);
5619 }
5620}
5621
5622bool bdrv_op_blocker_is_empty(BlockDriverState *bs)
5623{
5624 int i;
5625
5626 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5627 if (!QLIST_EMPTY(&bs->op_blockers[i])) {
5628 return false;
5629 }
5630 }
5631 return true;
5632}
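
/*
 * Illustrative sketch of the op blocker API: block an operation with an Error
 * describing why, test it, then unblock with the same reason pointer.
 * BLOCK_OP_TYPE_REPLACE is used purely as an example operation.
 *
 *     Error *reason = NULL;
 *     error_setg(&reason, "node is in use by an example job");
 *     bdrv_op_block(bs, BLOCK_OP_TYPE_REPLACE, reason);
 *
 *     Error *local_err = NULL;
 *     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_REPLACE, &local_err)) {
 *         error_report_err(local_err);
 *     }
 *
 *     bdrv_op_unblock(bs, BLOCK_OP_TYPE_REPLACE, reason);
 *     error_free(reason);
 */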
5633
5634void bdrv_iostatus_enable(BlockDriverState *bs)
5635{
5636 bs->iostatus_enabled = true;
5637 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
5638}
5639
5640/* The I/O status is only enabled if the drive explicitly
5641 * enables it _and_ the VM is configured to stop on errors */
5642bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
5643{
5644 return (bs->iostatus_enabled &&
5645 (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
5646 bs->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
5647 bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
5648}
5649
5650void bdrv_iostatus_disable(BlockDriverState *bs)
5651{
5652 bs->iostatus_enabled = false;
5653}
5654
5655void bdrv_iostatus_reset(BlockDriverState *bs)
5656{
5657 if (bdrv_iostatus_is_enabled(bs)) {
5658 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
5659 if (bs->job) {
5660 block_job_iostatus_reset(bs->job);
5661 }
5662 }
5663}
5664
5665void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
5666{
5667 assert(bdrv_iostatus_is_enabled(bs));
5668 if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
5669 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
5670 BLOCK_DEVICE_IO_STATUS_FAILED;
5671 }
5672}
5673
5674void bdrv_img_create(const char *filename, const char *fmt,
5675 const char *base_filename, const char *base_fmt,
5676 char *options, uint64_t img_size, int flags,
5677 Error **errp, bool quiet)
5678{
5679 QemuOptsList *create_opts = NULL;
5680 QemuOpts *opts = NULL;
5681 const char *backing_fmt, *backing_file;
5682 int64_t size;
5683 BlockDriver *drv, *proto_drv;
5684 BlockDriver *backing_drv = NULL;
5685 Error *local_err = NULL;
5686 int ret = 0;
5687
5688 /* Find driver and parse its options */
5689 drv = bdrv_find_format(fmt);
5690 if (!drv) {
5691 error_setg(errp, "Unknown file format '%s'", fmt);
5692 return;
5693 }
5694
5695 proto_drv = bdrv_find_protocol(filename, true, errp);
5696 if (!proto_drv) {
5697 return;
5698 }
5699
5700 if (!drv->create_opts) {
5701 error_setg(errp, "Format driver '%s' does not support image creation",
5702 drv->format_name);
5703 return;
5704 }
5705
5706 if (!proto_drv->create_opts) {
5707 error_setg(errp, "Protocol driver '%s' does not support image creation",
5708 proto_drv->format_name);
5709 return;
5710 }
5711
5712 create_opts = qemu_opts_append(create_opts, drv->create_opts);
5713 create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);
5714
5715 /* Create parameter list with default values */
5716 opts = qemu_opts_create(create_opts, NULL, 0, &error_abort);
5717 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, img_size, &error_abort);
5718
5719 /* Parse -o options */
5720 if (options) {
5721 qemu_opts_do_parse(opts, options, NULL, &local_err);
5722 if (local_err) {
5723 error_report_err(local_err);
5724 local_err = NULL;
5725 error_setg(errp, "Invalid options for file format '%s'", fmt);
5726 goto out;
5727 }
5728 }
5729
5730 if (base_filename) {
5731 qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, base_filename, &local_err);
5732 if (local_err) {
5733 error_setg(errp, "Backing file not supported for file format '%s'",
5734 fmt);
5735 goto out;
5736 }
5737 }
5738
5739 if (base_fmt) {
5740 qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt, &local_err);
5741 if (local_err) {
5742 error_setg(errp, "Backing file format not supported for file "
5743 "format '%s'", fmt);
5744 goto out;
5745 }
5746 }
5747
5748 backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
5749 if (backing_file) {
5750 if (!strcmp(filename, backing_file)) {
5751 error_setg(errp, "Error: Trying to create an image with the "
5752 "same filename as the backing file");
5753 goto out;
5754 }
5755 }
5756
5757 backing_fmt = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
5758 if (backing_fmt) {
5759 backing_drv = bdrv_find_format(backing_fmt);
5760 if (!backing_drv) {
5761 error_setg(errp, "Unknown backing file format '%s'",
5762 backing_fmt);
5763 goto out;
5764 }
5765 }
5766
5767 // The size for the image must always be specified, with one exception:
5768 // If we are using a backing file, we can obtain the size from there
5769 size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
5770 if (size == -1) {
5771 if (backing_file) {
5772 BlockDriverState *bs;
5773 char *full_backing = g_new0(char, PATH_MAX);
5774 int64_t size;
5775 int back_flags;
5776
5777 bdrv_get_full_backing_filename_from_filename(filename, backing_file,
5778 full_backing, PATH_MAX,
5779 &local_err);
5780 if (local_err) {
5781 g_free(full_backing);
5782 goto out;
5783 }
5784
5785 /* backing files always opened read-only */
5786 back_flags =
5787 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
5788
5789 bs = NULL;
5790 ret = bdrv_open(&bs, full_backing, NULL, NULL, back_flags,
5791 backing_drv, &local_err);
5792 g_free(full_backing);
5793 if (ret < 0) {
5794 goto out;
5795 }
5796 size = bdrv_getlength(bs);
5797 if (size < 0) {
5798 error_setg_errno(errp, -size, "Could not get size of '%s'",
5799 backing_file);
5800 bdrv_unref(bs);
5801 goto out;
5802 }
5803
5804 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size, &error_abort);
5805
5806 bdrv_unref(bs);
5807 } else {
5808 error_setg(errp, "Image creation needs a size parameter");
5809 goto out;
5810 }
5811 }
5812
5813 if (!quiet) {
5814 printf("Formatting '%s', fmt=%s", filename, fmt);
5815 qemu_opts_print(opts, " ");
5816 puts("");
5817 }
5818
5819 ret = bdrv_create(drv, filename, opts, &local_err);
5820
5821 if (ret == -EFBIG) {
5822 /* This is generally a better message than whatever the driver would
5823 * deliver (especially because of the cluster_size_hint), since that
5824 * is most probably not much different from "image too large". */
5825 const char *cluster_size_hint = "";
5826 if (qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 0)) {
5827 cluster_size_hint = " (try using a larger cluster size)";
5828 }
5829 error_setg(errp, "The image size is too large for file format '%s'"
5830 "%s", fmt, cluster_size_hint);
5831 error_free(local_err);
5832 local_err = NULL;
5833 }
5834
5835out:
5836 qemu_opts_del(opts);
5837 qemu_opts_free(create_opts);
5838 if (local_err) {
5839 error_propagate(errp, local_err);
5840 }
5841}
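
/*
 * Illustrative sketch of creating a 1 GiB qcow2 image with bdrv_img_create();
 * the filename is hypothetical, no backing file or extra options are given,
 * and quiet mode suppresses the "Formatting ..." message.
 *
 *     Error *local_err = NULL;
 *     bdrv_img_create("/tmp/test.qcow2", "qcow2", NULL, NULL, NULL,
 *                     1024 * 1024 * 1024, 0, &local_err, true);
 *     if (local_err) {
 *         error_report_err(local_err);
 *     }
 */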
5842
5843AioContext *bdrv_get_aio_context(BlockDriverState *bs)
5844{
5845 return bs->aio_context;
5846}
5847
5848void bdrv_detach_aio_context(BlockDriverState *bs)
5849{
5850 BdrvAioNotifier *baf;
5851
5852 if (!bs->drv) {
5853 return;
5854 }
5855
5856 QLIST_FOREACH(baf, &bs->aio_notifiers, list) {
5857 baf->detach_aio_context(baf->opaque);
5858 }
5859
5860 if (bs->io_limits_enabled) {
5861 throttle_detach_aio_context(&bs->throttle_state);
5862 }
5863 if (bs->drv->bdrv_detach_aio_context) {
5864 bs->drv->bdrv_detach_aio_context(bs);
5865 }
5866 if (bs->file) {
5867 bdrv_detach_aio_context(bs->file);
5868 }
5869 if (bs->backing_hd) {
5870 bdrv_detach_aio_context(bs->backing_hd);
5871 }
5872
5873 bs->aio_context = NULL;
5874}
5875
5876void bdrv_attach_aio_context(BlockDriverState *bs,
5877 AioContext *new_context)
5878{
5879 BdrvAioNotifier *ban;
5880
5881 if (!bs->drv) {
5882 return;
5883 }
5884
5885 bs->aio_context = new_context;
5886
5887 if (bs->backing_hd) {
5888 bdrv_attach_aio_context(bs->backing_hd, new_context);
5889 }
5890 if (bs->file) {
5891 bdrv_attach_aio_context(bs->file, new_context);
5892 }
5893 if (bs->drv->bdrv_attach_aio_context) {
5894 bs->drv->bdrv_attach_aio_context(bs, new_context);
5895 }
5896 if (bs->io_limits_enabled) {
5897 throttle_attach_aio_context(&bs->throttle_state, new_context);
5898 }
5899
5900 QLIST_FOREACH(ban, &bs->aio_notifiers, list) {
5901 ban->attached_aio_context(new_context, ban->opaque);
5902 }
5903}
5904
5905void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
5906{
5907 bdrv_drain_all(); /* ensure there are no in-flight requests */
5908
5909 bdrv_detach_aio_context(bs);
5910
5911 /* This function executes in the old AioContext so acquire the new one in
5912 * case it runs in a different thread.
5913 */
5914 aio_context_acquire(new_context);
5915 bdrv_attach_aio_context(bs, new_context);
5916 aio_context_release(new_context);
5917}
5918
5919void bdrv_add_aio_context_notifier(BlockDriverState *bs,
5920 void (*attached_aio_context)(AioContext *new_context, void *opaque),
5921 void (*detach_aio_context)(void *opaque), void *opaque)
5922{
5923 BdrvAioNotifier *ban = g_new(BdrvAioNotifier, 1);
5924 *ban = (BdrvAioNotifier){
5925 .attached_aio_context = attached_aio_context,
5926 .detach_aio_context = detach_aio_context,
5927 .opaque = opaque
5928 };
5929
5930 QLIST_INSERT_HEAD(&bs->aio_notifiers, ban, list);
5931}
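
/*
 * Illustrative sketch of tracking AioContext changes for a BDS; the callbacks
 * and the my_state opaque pointer are hypothetical caller-side code.
 *
 *     static void my_attached(AioContext *ctx, void *opaque) { ... }
 *     static void my_detach(void *opaque) { ... }
 *
 *     bdrv_add_aio_context_notifier(bs, my_attached, my_detach, my_state);
 *     ...
 *     bdrv_remove_aio_context_notifier(bs, my_attached, my_detach, my_state);
 */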
5932
5933void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
5934 void (*attached_aio_context)(AioContext *,
5935 void *),
5936 void (*detach_aio_context)(void *),
5937 void *opaque)
5938{
5939 BdrvAioNotifier *ban, *ban_next;
5940
5941 QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) {
5942 if (ban->attached_aio_context == attached_aio_context &&
5943 ban->detach_aio_context == detach_aio_context &&
5944 ban->opaque == opaque)
5945 {
5946 QLIST_REMOVE(ban, list);
5947 g_free(ban);
5948
5949 return;
5950 }
5951 }
5952
5953 abort();
5954}
5955
5956void bdrv_add_before_write_notifier(BlockDriverState *bs,
5957 NotifierWithReturn *notifier)
5958{
5959 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
5960}
5961
5962int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts,
5963 BlockDriverAmendStatusCB *status_cb)
5964{
5965 if (!bs->drv->bdrv_amend_options) {
5966 return -ENOTSUP;
5967 }
5968 return bs->drv->bdrv_amend_options(bs, opts, status_cb);
5969}
5970
5971/* This function is called by the bdrv_recurse_is_first_non_filter method of
5972 * block filters and by bdrv_is_first_non_filter.
5973 * It is used to test whether the given bs is the candidate or to recurse
5974 * further into the node graph.
5975 */
5976bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
5977 BlockDriverState *candidate)
5978{
5979 /* return false if basic checks fail */
5980 if (!bs || !bs->drv) {
5981 return false;
5982 }
5983
5984 /* the code reached a non-filter block driver -> check if the bs is
5985 * the same as the candidate. This is the recursion termination condition.
5986 */
5987 if (!bs->drv->is_filter) {
5988 return bs == candidate;
5989 }
5990 /* Down this path the driver is a block filter driver */
5991
5992 /* If the block filter recursion method is defined use it to recurse down
5993 * the node graph.
5994 */
5995 if (bs->drv->bdrv_recurse_is_first_non_filter) {
5996 return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
5997 }
5998
5999 /* the driver is a block filter but does not allow recursion -> return false
6000 */
6001 return false;
6002}
6003
6004/* This function checks if the candidate is the first non-filter bs down its
6005 * bs chain. Since we don't have pointers to parents, it explores all bs chains
6006 * from the top. Some filters can choose not to pass down the recursion.
6007 */
6008bool bdrv_is_first_non_filter(BlockDriverState *candidate)
6009{
6010 BlockDriverState *bs;
6011
6012 /* walk down the bs forest recursively */
6013 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
6014 bool perm;
6015
6016 /* try to recurse in this top level bs */
6017 perm = bdrv_recurse_is_first_non_filter(bs, candidate);
6018
6019 /* candidate is the first non filter */
6020 if (perm) {
6021 return true;
6022 }
6023 }
6024
6025 return false;
6026}
6027
6028BlockDriverState *check_to_replace_node(const char *node_name, Error **errp)
6029{
6030 BlockDriverState *to_replace_bs = bdrv_find_node(node_name);
6031 AioContext *aio_context;
6032
6033 if (!to_replace_bs) {
6034 error_setg(errp, "Node name '%s' not found", node_name);
6035 return NULL;
6036 }
6037
6038 aio_context = bdrv_get_aio_context(to_replace_bs);
6039 aio_context_acquire(aio_context);
6040
6041 if (bdrv_op_is_blocked(to_replace_bs, BLOCK_OP_TYPE_REPLACE, errp)) {
6042 to_replace_bs = NULL;
6043 goto out;
6044 }
6045
6046 /* We don't want an arbitrary node of the BDS chain to be replaced, only the
6047 * topmost non-filter, in order to prevent data corruption.
6048 * Another benefit is that this test excludes backing files, which are
6049 * blocked by the backing blockers.
6050 */
6051 if (!bdrv_is_first_non_filter(to_replace_bs)) {
6052 error_setg(errp, "Only top most non filter can be replaced");
6053 to_replace_bs = NULL;
6054 goto out;
6055 }
6056
6057out:
6058 aio_context_release(aio_context);
6059 return to_replace_bs;
6060}
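
/* A minimal sketch of a caller, similar to what qmp_drive_mirror() does for
 * its "replaces" argument: resolve the node name and refuse to continue when
 * the node may not be replaced. my_resolve_replaces() is a hypothetical name.
 */
#if 0
static BlockDriverState *my_resolve_replaces(const char *node_name)
{
    Error *local_err = NULL;
    BlockDriverState *to_replace = check_to_replace_node(node_name, &local_err);

    if (!to_replace) {
        /* local_err explains why (unknown node, op blocker, not the topmost
         * non-filter); a real caller would propagate it to its own errp */
        error_free(local_err);
    }
    return to_replace;
}
#endif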
6061
6062void bdrv_io_plug(BlockDriverState *bs)
6063{
6064 BlockDriver *drv = bs->drv;
6065 if (drv && drv->bdrv_io_plug) {
6066 drv->bdrv_io_plug(bs);
6067 } else if (bs->file) {
6068 bdrv_io_plug(bs->file);
6069 }
6070}
6071
6072void bdrv_io_unplug(BlockDriverState *bs)
6073{
6074 BlockDriver *drv = bs->drv;
6075 if (drv && drv->bdrv_io_unplug) {
6076 drv->bdrv_io_unplug(bs);
6077 } else if (bs->file) {
6078 bdrv_io_unplug(bs->file);
6079 }
6080}
6081
6082void bdrv_flush_io_queue(BlockDriverState *bs)
6083{
6084 BlockDriver *drv = bs->drv;
6085 if (drv && drv->bdrv_flush_io_queue) {
6086 drv->bdrv_flush_io_queue(bs);
6087 } else if (bs->file) {
6088 bdrv_flush_io_queue(bs->file);
6089 }
6090}
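
/* A minimal sketch of the intended plug/unplug pattern: a device model (e.g.
 * virtio-blk) plugs the queue, submits a batch of requests and then unplugs,
 * so that drivers with batch submission support can issue everything to the
 * host in one go. "MyRequest", my_submit_batch() and my_write_cb() are
 * hypothetical names used only for illustration.
 */
#if 0
static void my_submit_batch(BlockDriverState *bs, MyRequest *reqs, int n)
{
    int i;

    bdrv_io_plug(bs);
    for (i = 0; i < n; i++) {
        bdrv_aio_writev(bs, reqs[i].sector_num, &reqs[i].qiov,
                        reqs[i].nb_sectors, my_write_cb, &reqs[i]);
    }
    bdrv_io_unplug(bs); /* requests queued above may be issued now */
}
#endif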
6091
6092static bool append_open_options(QDict *d, BlockDriverState *bs)
6093{
6094 const QDictEntry *entry;
6095 bool found_any = false;
6096
6097 for (entry = qdict_first(bs->options); entry;
6098 entry = qdict_next(bs->options, entry))
6099 {
6100 /* Only take options for this level and exclude all non-driver-specific
6101 * options */
6102 if (!strchr(qdict_entry_key(entry), '.') &&
6103 strcmp(qdict_entry_key(entry), "node-name"))
6104 {
6105 qobject_incref(qdict_entry_value(entry));
6106 qdict_put_obj(d, qdict_entry_key(entry), qdict_entry_value(entry));
6107 found_any = true;
6108 }
6109 }
6110
6111 return found_any;
6112}
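
/* For example, for a qcow2 node opened with options such as
 * "lazy-refcounts=on,node-name=fmt,file.filename=disk.qcow2" (option names
 * here are only an illustration), the helper above copies only
 * "lazy-refcounts" into @d: keys containing a '.' belong to a child node and
 * "node-name" is not driver-specific, so both are skipped.
 */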
6113
6114/* Updates the following BDS fields:
6115 * - exact_filename: A filename which may be used for opening a block device
6116 * which (mostly) equals the given BDS (even without any
6117 * other options; so reading and writing must return the same
6118 * results, but caching etc. may be different)
6119 * - full_open_options: Options which, when given when opening a block device
6120 * (without a filename), result in a BDS (mostly)
6121 * equalling the given one
6122 * - filename: If exact_filename is set, it is copied here. Otherwise,
6123 * full_open_options is converted to a JSON object, prefixed with
6124 * "json:" (for use through the JSON pseudo protocol) and put here.
6125 */
6126void bdrv_refresh_filename(BlockDriverState *bs)
6127{
6128 BlockDriver *drv = bs->drv;
6129 QDict *opts;
6130
6131 if (!drv) {
6132 return;
6133 }
6134
6135 /* This BDS's file name will most probably depend on its file's name, so
6136 * refresh that first */
6137 if (bs->file) {
6138 bdrv_refresh_filename(bs->file);
6139 }
6140
6141 if (drv->bdrv_refresh_filename) {
6142 /* Obsolete information is of no use here, so drop the old file name
6143 * information before refreshing it */
6144 bs->exact_filename[0] = '\0';
6145 if (bs->full_open_options) {
6146 QDECREF(bs->full_open_options);
6147 bs->full_open_options = NULL;
6148 }
6149
6150 drv->bdrv_refresh_filename(bs);
6151 } else if (bs->file) {
6152 /* Try to reconstruct valid information from the underlying file */
6153 bool has_open_options;
6154
6155 bs->exact_filename[0] = '\0';
6156 if (bs->full_open_options) {
6157 QDECREF(bs->full_open_options);
6158 bs->full_open_options = NULL;
6159 }
6160
6161 opts = qdict_new();
6162 has_open_options = append_open_options(opts, bs);
6163
6164 /* If no specific options have been given for this BDS, the filename of
6165 * the underlying file should suffice for this one as well */
6166 if (bs->file->exact_filename[0] && !has_open_options) {
6167 strcpy(bs->exact_filename, bs->file->exact_filename);
6168 }
6169 /* Reconstructing the full options QDict is simple for most format block
6170 * drivers, as long as the full options are known for the underlying
6171 * file BDS. The full options QDict of that file BDS should somehow
6172 * contain a representation of the filename, therefore the following
6173 * suffices without querying the (exact_)filename of this BDS. */
6174 if (bs->file->full_open_options) {
6175 qdict_put_obj(opts, "driver",
6176 QOBJECT(qstring_from_str(drv->format_name)));
6177 QINCREF(bs->file->full_open_options);
6178 qdict_put_obj(opts, "file", QOBJECT(bs->file->full_open_options));
6179
6180 bs->full_open_options = opts;
6181 } else {
6182 QDECREF(opts);
6183 }
6184 } else if (!bs->full_open_options && qdict_size(bs->options)) {
6185 /* There is no underlying file BDS (at least referenced by BDS.file),
6186 * so the full options QDict should be equal to the options given
6187 * specifically for this block device when it was opened (plus the
6188 * driver specification).
6189 * Because those options don't change, there is no need to update
6190 * full_open_options when it's already set. */
6191
6192 opts = qdict_new();
6193 append_open_options(opts, bs);
6194 qdict_put_obj(opts, "driver",
6195 QOBJECT(qstring_from_str(drv->format_name)));
6196
6197 if (bs->exact_filename[0]) {
6198 /* This may not work for all block protocol drivers (some may
6199 * require this filename to be parsed), but we have to find some
6200 * default solution here, so just include it. If some block driver
6201 * does not support pure options without any filename at all or
6202 * needs some special format of the options QDict, it needs to
6203 * implement the driver-specific bdrv_refresh_filename() function.
6204 */
6205 qdict_put_obj(opts, "filename",
6206 QOBJECT(qstring_from_str(bs->exact_filename)));
6207 }
6208
6209 bs->full_open_options = opts;
6210 }
6211
6212 if (bs->exact_filename[0]) {
6213 pstrcpy(bs->filename, sizeof(bs->filename), bs->exact_filename);
6214 } else if (bs->full_open_options) {
6215 QString *json = qobject_to_json(QOBJECT(bs->full_open_options));
6216 snprintf(bs->filename, sizeof(bs->filename), "json:%s",
6217 qstring_get_str(json));
6218 QDECREF(json);
6219 }
6220}
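
/* As an illustration of the result: a plain local qcow2 image keeps its
 * exact_filename (e.g. "disk.qcow2") in bs->filename, while a node whose
 * filename cannot be expressed as a plain string falls back to the "json:"
 * pseudo-protocol form built from full_open_options, roughly
 *
 *   json:{"driver": "qcow2", "file": {"driver": "file",
 *                                     "filename": "disk.qcow2"}}
 *
 * (the concrete file names above are only an example).
 */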
6221
6222/* The purpose of this accessor function is to allow device models to access
6223 * the BlockAcctStats structure embedded inside a BlockDriverState without
6224 * being aware of the BlockDriverState structure's layout.
6225 * It will go away once the BlockAcctStats structure is moved inside the
6226 * device models.
6227 */
6228BlockAcctStats *bdrv_get_stats(BlockDriverState *bs)
6229{
6230 return &bs->stats;
6231}
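
/* A minimal sketch of how a device model uses the returned stats together
 * with the accounting helpers from "block/accounting.h": start accounting
 * when a request is submitted and finish it on completion. "MyReq" and the
 * my_* names are hypothetical; the cookie lives in the per-request state.
 */
#if 0
static void my_start_read(MyReq *req, int64_t bytes)
{
    block_acct_start(bdrv_get_stats(req->bs), &req->acct, bytes,
                     BLOCK_ACCT_READ);
    /* ... submit the actual read ... */
}

static void my_read_complete(MyReq *req)
{
    block_acct_done(bdrv_get_stats(req->bs), &req->acct);
}
#endif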