1 /*
2 * QEMU System Emulator block driver
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24 #include "config-host.h"
25 #include "qemu-common.h"
26 #include "trace.h"
27 #include "monitor/monitor.h"
28 #include "block/block_int.h"
29 #include "block/blockjob.h"
30 #include "qemu/module.h"
31 #include "qapi/qmp/qjson.h"
32 #include "sysemu/sysemu.h"
33 #include "qemu/notify.h"
34 #include "block/coroutine.h"
35 #include "block/qapi.h"
36 #include "qmp-commands.h"
37 #include "qemu/timer.h"
38
39 #ifdef CONFIG_BSD
40 #include <sys/types.h>
41 #include <sys/stat.h>
42 #include <sys/ioctl.h>
43 #include <sys/queue.h>
44 #ifndef __DragonFly__
45 #include <sys/disk.h>
46 #endif
47 #endif
48
49 #ifdef _WIN32
50 #include <windows.h>
51 #endif
52
53 struct BdrvDirtyBitmap {
54 HBitmap *bitmap;
55 QLIST_ENTRY(BdrvDirtyBitmap) list;
56 };
57
58 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
59
60 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
61 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
62 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
63 BlockDriverCompletionFunc *cb, void *opaque);
64 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
65 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
66 BlockDriverCompletionFunc *cb, void *opaque);
67 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
68 int64_t sector_num, int nb_sectors,
69 QEMUIOVector *iov);
70 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
71 int64_t sector_num, int nb_sectors,
72 QEMUIOVector *iov);
73 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
74 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
75 BdrvRequestFlags flags);
76 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
77 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
78 BdrvRequestFlags flags);
79 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
80 int64_t sector_num,
81 QEMUIOVector *qiov,
82 int nb_sectors,
83 BdrvRequestFlags flags,
84 BlockDriverCompletionFunc *cb,
85 void *opaque,
86 bool is_write);
87 static void coroutine_fn bdrv_co_do_rw(void *opaque);
88 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
89 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
90
91 static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
92 QTAILQ_HEAD_INITIALIZER(bdrv_states);
93
94 static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
95 QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);
96
97 static QLIST_HEAD(, BlockDriver) bdrv_drivers =
98 QLIST_HEAD_INITIALIZER(bdrv_drivers);
99
100 /* If non-zero, use only whitelisted block drivers */
101 static int use_bdrv_whitelist;
102
103 #ifdef _WIN32
104 static int is_windows_drive_prefix(const char *filename)
105 {
106 return (((filename[0] >= 'a' && filename[0] <= 'z') ||
107 (filename[0] >= 'A' && filename[0] <= 'Z')) &&
108 filename[1] == ':');
109 }
110
111 int is_windows_drive(const char *filename)
112 {
113 if (is_windows_drive_prefix(filename) &&
114 filename[2] == '\0')
115 return 1;
116 if (strstart(filename, "\\\\.\\", NULL) ||
117 strstart(filename, "//./", NULL))
118 return 1;
119 return 0;
120 }
121 #endif
122
123 /* throttling disk I/O limits */
124 void bdrv_set_io_limits(BlockDriverState *bs,
125 ThrottleConfig *cfg)
126 {
127 int i;
128
129 throttle_config(&bs->throttle_state, cfg);
130
131 for (i = 0; i < 2; i++) {
132 qemu_co_enter_next(&bs->throttled_reqs[i]);
133 }
134 }
135
136 /* this function drains all the throttled I/Os */
137 static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
138 {
139 bool drained = false;
140 bool enabled = bs->io_limits_enabled;
141 int i;
142
143 bs->io_limits_enabled = false;
144
145 for (i = 0; i < 2; i++) {
146 while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
147 drained = true;
148 }
149 }
150
151 bs->io_limits_enabled = enabled;
152
153 return drained;
154 }
155
156 void bdrv_io_limits_disable(BlockDriverState *bs)
157 {
158 bs->io_limits_enabled = false;
159
160 bdrv_start_throttled_reqs(bs);
161
162 throttle_destroy(&bs->throttle_state);
163 }
164
165 static void bdrv_throttle_read_timer_cb(void *opaque)
166 {
167 BlockDriverState *bs = opaque;
168 qemu_co_enter_next(&bs->throttled_reqs[0]);
169 }
170
171 static void bdrv_throttle_write_timer_cb(void *opaque)
172 {
173 BlockDriverState *bs = opaque;
174 qemu_co_enter_next(&bs->throttled_reqs[1]);
175 }
176
177 /* should be called before bdrv_set_io_limits if a limit is set */
178 void bdrv_io_limits_enable(BlockDriverState *bs)
179 {
180 assert(!bs->io_limits_enabled);
181 throttle_init(&bs->throttle_state,
182 QEMU_CLOCK_VIRTUAL,
183 bdrv_throttle_read_timer_cb,
184 bdrv_throttle_write_timer_cb,
185 bs);
186 bs->io_limits_enabled = true;
187 }
188
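/* Illustrative sketch (not part of this file): how a caller might enable
 * throttling and then install limits, assuming the ThrottleConfig/LeakyBucket
 * layout from util/throttle.c:
 *
 *     ThrottleConfig cfg;
 *
 *     memset(&cfg, 0, sizeof(cfg));
 *     cfg.buckets[THROTTLE_BPS_TOTAL].avg = 10 * 1024 * 1024; // 10 MB/s
 *     cfg.buckets[THROTTLE_OPS_TOTAL].avg = 100;              // 100 IOPS
 *
 *     bdrv_io_limits_enable(bs);    // must be called first, see above
 *     bdrv_set_io_limits(bs, &cfg);
 */
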
189 /* This function makes an I/O request wait if needed
190 *
191 * @bytes: the number of bytes of the I/O
192 * @is_write: whether the I/O is a write
193 */
194 static void bdrv_io_limits_intercept(BlockDriverState *bs,
195 unsigned int bytes,
196 bool is_write)
197 {
198 /* does this I/O have to wait? */
199 bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);
200
201 /* if it must wait, or any request of this type is already queued, queue this I/O */
202 if (must_wait ||
203 !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
204 qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
205 }
206
207 /* the IO will be executed, do the accounting */
208 throttle_account(&bs->throttle_state, is_write, bytes);
209
210
211 /* if the next request must wait -> do nothing */
212 if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
213 return;
214 }
215
216 /* else queue next request for execution */
217 qemu_co_queue_next(&bs->throttled_reqs[is_write]);
218 }
219
220 size_t bdrv_opt_mem_align(BlockDriverState *bs)
221 {
222 if (!bs || !bs->drv) {
223 /* 4k should be on the safe side */
224 return 4096;
225 }
226
227 return bs->bl.opt_mem_alignment;
228 }
229
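/* Callers that allocate their own I/O buffers would typically honor this
 * alignment, e.g. (sketch):
 *
 *     void *buf = qemu_memalign(bdrv_opt_mem_align(bs), size);
 *     ...
 *     qemu_vfree(buf);
 */
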
230 /* check if the path starts with "<protocol>:" */
231 static int path_has_protocol(const char *path)
232 {
233 const char *p;
234
235 #ifdef _WIN32
236 if (is_windows_drive(path) ||
237 is_windows_drive_prefix(path)) {
238 return 0;
239 }
240 p = path + strcspn(path, ":/\\");
241 #else
242 p = path + strcspn(path, ":/");
243 #endif
244
245 return *p == ':';
246 }
247
248 int path_is_absolute(const char *path)
249 {
250 #ifdef _WIN32
251 /* specific case for names like: "\\.\d:" */
252 if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
253 return 1;
254 }
255 return (*path == '/' || *path == '\\');
256 #else
257 return (*path == '/');
258 #endif
259 }
260
261 /* if filename is absolute, just copy it to dest. Otherwise, build a
262    path to it by treating it as relative to base_path. URLs are
263    supported. */
264 void path_combine(char *dest, int dest_size,
265 const char *base_path,
266 const char *filename)
267 {
268 const char *p, *p1;
269 int len;
270
271 if (dest_size <= 0)
272 return;
273 if (path_is_absolute(filename)) {
274 pstrcpy(dest, dest_size, filename);
275 } else {
276 p = strchr(base_path, ':');
277 if (p)
278 p++;
279 else
280 p = base_path;
281 p1 = strrchr(base_path, '/');
282 #ifdef _WIN32
283 {
284 const char *p2;
285 p2 = strrchr(base_path, '\\');
286 if (!p1 || p2 > p1)
287 p1 = p2;
288 }
289 #endif
290 if (p1)
291 p1++;
292 else
293 p1 = base_path;
294 if (p1 > p)
295 p = p1;
296 len = p - base_path;
297 if (len > dest_size - 1)
298 len = dest_size - 1;
299 memcpy(dest, base_path, len);
300 dest[len] = '\0';
301 pstrcat(dest, dest_size, filename);
302 }
303 }
304
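/* For example (illustrative):
 *
 *     char buf[PATH_MAX];
 *
 *     path_combine(buf, sizeof(buf), "/img/base.qcow2", "backing.qcow2");
 *     -> buf contains "/img/backing.qcow2"
 *
 *     path_combine(buf, sizeof(buf), "/img/base.qcow2", "/abs/other.qcow2");
 *     -> buf contains "/abs/other.qcow2" (absolute names are copied as is)
 */
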
305 void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
306 {
307 if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
308 pstrcpy(dest, sz, bs->backing_file);
309 } else {
310 path_combine(dest, sz, bs->filename, bs->backing_file);
311 }
312 }
313
314 void bdrv_register(BlockDriver *bdrv)
315 {
316 /* Block drivers without coroutine functions need emulation */
317 if (!bdrv->bdrv_co_readv) {
318 bdrv->bdrv_co_readv = bdrv_co_readv_em;
319 bdrv->bdrv_co_writev = bdrv_co_writev_em;
320
321 /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
322 * the block driver lacks aio we need to emulate that too.
323 */
324 if (!bdrv->bdrv_aio_readv) {
325 /* add AIO emulation layer */
326 bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
327 bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
328 }
329 }
330
331 QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
332 }
333
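/* A block driver typically registers itself from a module constructor; a
 * minimal sketch (the "mydrv" driver, its state struct and callbacks are
 * hypothetical):
 *
 *     static BlockDriver bdrv_mydrv = {
 *         .format_name   = "mydrv",
 *         .instance_size = sizeof(BDRVMyDrvState),
 *         .bdrv_open     = mydrv_open,
 *         .bdrv_co_readv = mydrv_co_readv,
 *     };
 *
 *     static void bdrv_mydrv_init(void)
 *     {
 *         bdrv_register(&bdrv_mydrv);
 *     }
 *     block_init(bdrv_mydrv_init);
 */
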
334 /* create a new block device (by default it is empty) */
335 BlockDriverState *bdrv_new(const char *device_name)
336 {
337 BlockDriverState *bs;
338
339 bs = g_malloc0(sizeof(BlockDriverState));
340 QLIST_INIT(&bs->dirty_bitmaps);
341 pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
342 if (device_name[0] != '\0') {
343 QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
344 }
345 bdrv_iostatus_disable(bs);
346 notifier_list_init(&bs->close_notifiers);
347 notifier_with_return_list_init(&bs->before_write_notifiers);
348 qemu_co_queue_init(&bs->throttled_reqs[0]);
349 qemu_co_queue_init(&bs->throttled_reqs[1]);
350 bs->refcnt = 1;
351
352 return bs;
353 }
354
355 void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
356 {
357 notifier_list_add(&bs->close_notifiers, notify);
358 }
359
360 BlockDriver *bdrv_find_format(const char *format_name)
361 {
362 BlockDriver *drv1;
363 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
364 if (!strcmp(drv1->format_name, format_name)) {
365 return drv1;
366 }
367 }
368 return NULL;
369 }
370
371 static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
372 {
373 static const char *whitelist_rw[] = {
374 CONFIG_BDRV_RW_WHITELIST
375 };
376 static const char *whitelist_ro[] = {
377 CONFIG_BDRV_RO_WHITELIST
378 };
379 const char **p;
380
381 if (!whitelist_rw[0] && !whitelist_ro[0]) {
382 return 1; /* no whitelist, anything goes */
383 }
384
385 for (p = whitelist_rw; *p; p++) {
386 if (!strcmp(drv->format_name, *p)) {
387 return 1;
388 }
389 }
390 if (read_only) {
391 for (p = whitelist_ro; *p; p++) {
392 if (!strcmp(drv->format_name, *p)) {
393 return 1;
394 }
395 }
396 }
397 return 0;
398 }
399
400 BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
401 bool read_only)
402 {
403 BlockDriver *drv = bdrv_find_format(format_name);
404 return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
405 }
406
407 typedef struct CreateCo {
408 BlockDriver *drv;
409 char *filename;
410 QEMUOptionParameter *options;
411 int ret;
412 Error *err;
413 } CreateCo;
414
415 static void coroutine_fn bdrv_create_co_entry(void *opaque)
416 {
417 Error *local_err = NULL;
418 int ret;
419
420 CreateCo *cco = opaque;
421 assert(cco->drv);
422
423 ret = cco->drv->bdrv_create(cco->filename, cco->options, &local_err);
424 if (local_err) {
425 error_propagate(&cco->err, local_err);
426 }
427 cco->ret = ret;
428 }
429
430 int bdrv_create(BlockDriver *drv, const char* filename,
431 QEMUOptionParameter *options, Error **errp)
432 {
433 int ret;
434
435 Coroutine *co;
436 CreateCo cco = {
437 .drv = drv,
438 .filename = g_strdup(filename),
439 .options = options,
440 .ret = NOT_DONE,
441 .err = NULL,
442 };
443
444 if (!drv->bdrv_create) {
445 error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
446 ret = -ENOTSUP;
447 goto out;
448 }
449
450 if (qemu_in_coroutine()) {
451 /* Fast-path if already in coroutine context */
452 bdrv_create_co_entry(&cco);
453 } else {
454 co = qemu_coroutine_create(bdrv_create_co_entry);
455 qemu_coroutine_enter(co, &cco);
456 while (cco.ret == NOT_DONE) {
457 qemu_aio_wait();
458 }
459 }
460
461 ret = cco.ret;
462 if (ret < 0) {
463 if (cco.err) {
464 error_propagate(errp, cco.err);
465 } else {
466 error_setg_errno(errp, -ret, "Could not create image");
467 }
468 }
469
470 out:
471 g_free(cco.filename);
472 return ret;
473 }
474
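/* Sketch of driving bdrv_create() directly, using the option helpers that
 * also appear further down in this file (error handling abbreviated):
 *
 *     BlockDriver *drv = bdrv_find_format("qcow2");
 *     QEMUOptionParameter *opts;
 *     Error *local_err = NULL;
 *
 *     opts = parse_option_parameters("", drv->create_options, NULL);
 *     set_option_parameter_int(opts, BLOCK_OPT_SIZE, 64 * 1024 * 1024);
 *     bdrv_create(drv, "/tmp/test.qcow2", opts, &local_err);
 *     free_option_parameters(opts);
 */
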
475 int bdrv_create_file(const char* filename, QEMUOptionParameter *options,
476 Error **errp)
477 {
478 BlockDriver *drv;
479 Error *local_err = NULL;
480 int ret;
481
482 drv = bdrv_find_protocol(filename, true);
483 if (drv == NULL) {
484 error_setg(errp, "Could not find protocol for file '%s'", filename);
485 return -ENOENT;
486 }
487
488 ret = bdrv_create(drv, filename, options, &local_err);
489 if (local_err) {
490 error_propagate(errp, local_err);
491 }
492 return ret;
493 }
494
495 int bdrv_refresh_limits(BlockDriverState *bs)
496 {
497 BlockDriver *drv = bs->drv;
498
499 memset(&bs->bl, 0, sizeof(bs->bl));
500
501 if (!drv) {
502 return 0;
503 }
504
505 /* Take some limits from the children as a default */
506 if (bs->file) {
507 bdrv_refresh_limits(bs->file);
508 bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
509 bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
510 } else {
511 bs->bl.opt_mem_alignment = 512;
512 }
513
514 if (bs->backing_hd) {
515 bdrv_refresh_limits(bs->backing_hd);
516 bs->bl.opt_transfer_length =
517 MAX(bs->bl.opt_transfer_length,
518 bs->backing_hd->bl.opt_transfer_length);
519 bs->bl.opt_mem_alignment =
520 MAX(bs->bl.opt_mem_alignment,
521 bs->backing_hd->bl.opt_mem_alignment);
522 }
523
524 /* Then let the driver override it */
525 if (drv->bdrv_refresh_limits) {
526 return drv->bdrv_refresh_limits(bs);
527 }
528
529 return 0;
530 }
531
532 /*
533 * Create a uniquely-named empty temporary file.
534 * Return 0 upon success, otherwise a negative errno value.
535 */
536 int get_tmp_filename(char *filename, int size)
537 {
538 #ifdef _WIN32
539 char temp_dir[MAX_PATH];
540 /* GetTempFileName requires that its output buffer (4th param)
541 have length MAX_PATH or greater. */
542 assert(size >= MAX_PATH);
543 return (GetTempPath(MAX_PATH, temp_dir)
544 && GetTempFileName(temp_dir, "qem", 0, filename)
545 ? 0 : -GetLastError());
546 #else
547 int fd;
548 const char *tmpdir;
549 tmpdir = getenv("TMPDIR");
550 if (!tmpdir)
551 tmpdir = "/tmp";
552 if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
553 return -EOVERFLOW;
554 }
555 fd = mkstemp(filename);
556 if (fd < 0) {
557 return -errno;
558 }
559 if (close(fd) != 0) {
560 unlink(filename);
561 return -errno;
562 }
563 return 0;
564 #endif
565 }
566
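/* Typical use (sketch, mirroring the snapshot code later in this file):
 *
 *     char tmp_filename[PATH_MAX + 1];
 *     int ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
 *     if (ret < 0) {
 *         error_setg_errno(errp, -ret, "Could not get temporary filename");
 *     }
 */
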
567 /*
568 * Detect host devices. By convention, /dev/cdrom[N] is always
569 * recognized as a host CDROM.
570 */
571 static BlockDriver *find_hdev_driver(const char *filename)
572 {
573 int score_max = 0, score;
574 BlockDriver *drv = NULL, *d;
575
576 QLIST_FOREACH(d, &bdrv_drivers, list) {
577 if (d->bdrv_probe_device) {
578 score = d->bdrv_probe_device(filename);
579 if (score > score_max) {
580 score_max = score;
581 drv = d;
582 }
583 }
584 }
585
586 return drv;
587 }
588
589 BlockDriver *bdrv_find_protocol(const char *filename,
590 bool allow_protocol_prefix)
591 {
592 BlockDriver *drv1;
593 char protocol[128];
594 int len;
595 const char *p;
596
597 /* TODO Drivers without bdrv_file_open must be specified explicitly */
598
599 /*
600 * XXX(hch): we really should not let host device detection
601 * override an explicit protocol specification, but moving this
602 * later breaks access to device names with colons in them.
603 * Thanks to the brain-dead persistent naming schemes on udev-
604 * based Linux systems those actually are quite common.
605 */
606 drv1 = find_hdev_driver(filename);
607 if (drv1) {
608 return drv1;
609 }
610
611 if (!path_has_protocol(filename) || !allow_protocol_prefix) {
612 return bdrv_find_format("file");
613 }
614
615 p = strchr(filename, ':');
616 assert(p != NULL);
617 len = p - filename;
618 if (len > sizeof(protocol) - 1)
619 len = sizeof(protocol) - 1;
620 memcpy(protocol, filename, len);
621 protocol[len] = '\0';
622 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
623 if (drv1->protocol_name &&
624 !strcmp(drv1->protocol_name, protocol)) {
625 return drv1;
626 }
627 }
628 return NULL;
629 }
630
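/* For example (illustrative):
 *
 *     bdrv_find_protocol("nbd:localhost:10809", true);
 *     -> the "nbd" driver, if built in (the text before the first ':'
 *        selects the protocol)
 *
 *     bdrv_find_protocol("/tmp/disk.img", true);
 *     -> the "file" driver (no protocol prefix)
 */
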
631 static int find_image_format(BlockDriverState *bs, const char *filename,
632 BlockDriver **pdrv, Error **errp)
633 {
634 int score, score_max;
635 BlockDriver *drv1, *drv;
636 uint8_t buf[2048];
637 int ret = 0;
638
639 /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
640 if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
641 drv = bdrv_find_format("raw");
642 if (!drv) {
643 error_setg(errp, "Could not find raw image format");
644 ret = -ENOENT;
645 }
646 *pdrv = drv;
647 return ret;
648 }
649
650 ret = bdrv_pread(bs, 0, buf, sizeof(buf));
651 if (ret < 0) {
652 error_setg_errno(errp, -ret, "Could not read image for determining its "
653 "format");
654 *pdrv = NULL;
655 return ret;
656 }
657
658 score_max = 0;
659 drv = NULL;
660 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
661 if (drv1->bdrv_probe) {
662 score = drv1->bdrv_probe(buf, ret, filename);
663 if (score > score_max) {
664 score_max = score;
665 drv = drv1;
666 }
667 }
668 }
669 if (!drv) {
670 error_setg(errp, "Could not determine image format: No compatible "
671 "driver found");
672 ret = -ENOENT;
673 }
674 *pdrv = drv;
675 return ret;
676 }
677
678 /**
679 * Set the current 'total_sectors' value
680 */
681 static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
682 {
683 BlockDriver *drv = bs->drv;
684
685 /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
686 if (bs->sg)
687 return 0;
688
689 /* query actual device if possible, otherwise just trust the hint */
690 if (drv->bdrv_getlength) {
691 int64_t length = drv->bdrv_getlength(bs);
692 if (length < 0) {
693 return length;
694 }
695 hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
696 }
697
698 bs->total_sectors = hint;
699 return 0;
700 }
701
702 /**
703 * Set open flags for a given discard mode
704 *
705 * Return 0 on success, -1 if the discard mode was invalid.
706 */
707 int bdrv_parse_discard_flags(const char *mode, int *flags)
708 {
709 *flags &= ~BDRV_O_UNMAP;
710
711 if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
712 /* do nothing */
713 } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
714 *flags |= BDRV_O_UNMAP;
715 } else {
716 return -1;
717 }
718
719 return 0;
720 }
721
722 /**
723 * Set open flags for a given cache mode
724 *
725 * Return 0 on success, -1 if the cache mode was invalid.
726 */
727 int bdrv_parse_cache_flags(const char *mode, int *flags)
728 {
729 *flags &= ~BDRV_O_CACHE_MASK;
730
731 if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
732 *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
733 } else if (!strcmp(mode, "directsync")) {
734 *flags |= BDRV_O_NOCACHE;
735 } else if (!strcmp(mode, "writeback")) {
736 *flags |= BDRV_O_CACHE_WB;
737 } else if (!strcmp(mode, "unsafe")) {
738 *flags |= BDRV_O_CACHE_WB;
739 *flags |= BDRV_O_NO_FLUSH;
740 } else if (!strcmp(mode, "writethrough")) {
741 /* this is the default */
742 } else {
743 return -1;
744 }
745
746 return 0;
747 }
748
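/* Sketch of translating a user-supplied cache mode into open flags before
 * bdrv_open():
 *
 *     int flags = 0;
 *
 *     if (bdrv_parse_cache_flags("none", &flags) < 0) {
 *         return -EINVAL;
 *     }
 *     -> flags now contains BDRV_O_NOCACHE | BDRV_O_CACHE_WB
 */
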
749 /**
750 * The copy-on-read flag is actually a reference count so multiple users may
751 * use the feature without worrying about clobbering its previous state.
752 * Copy-on-read stays enabled until all users have called to disable it.
753 */
754 void bdrv_enable_copy_on_read(BlockDriverState *bs)
755 {
756 bs->copy_on_read++;
757 }
758
759 void bdrv_disable_copy_on_read(BlockDriverState *bs)
760 {
761 assert(bs->copy_on_read > 0);
762 bs->copy_on_read--;
763 }
764
765 static int bdrv_open_flags(BlockDriverState *bs, int flags)
766 {
767 int open_flags = flags | BDRV_O_CACHE_WB;
768
769 /*
770 * Clear flags that are internal to the block layer before opening the
771 * image.
772 */
773 open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
774
775 /*
776 * Snapshots should be writable.
777 */
778 if (bs->is_temporary) {
779 open_flags |= BDRV_O_RDWR;
780 }
781
782 return open_flags;
783 }
784
785 static int bdrv_assign_node_name(BlockDriverState *bs,
786 const char *node_name,
787 Error **errp)
788 {
789 if (!node_name) {
790 return 0;
791 }
792
793 /* empty string node name is invalid */
794 if (node_name[0] == '\0') {
795 error_setg(errp, "Empty node name");
796 return -EINVAL;
797 }
798
799 /* takes care of avoiding namespace collisions */
800 if (bdrv_find(node_name)) {
801 error_setg(errp, "node-name=%s is conflicting with a device id",
802 node_name);
803 return -EINVAL;
804 }
805
806 /* takes care of avoiding duplicate node names */
807 if (bdrv_find_node(node_name)) {
808 error_setg(errp, "Duplicate node name");
809 return -EINVAL;
810 }
811
812 /* copy node name into the bs and insert it into the graph list */
813 pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
814 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
815
816 return 0;
817 }
818
819 /*
820 * Common part for opening disk images and files
821 *
822 * Removes all processed options from *options.
823 */
824 static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
825 QDict *options, int flags, BlockDriver *drv, Error **errp)
826 {
827 int ret, open_flags;
828 const char *filename;
829 const char *node_name = NULL;
830 Error *local_err = NULL;
831
832 assert(drv != NULL);
833 assert(bs->file == NULL);
834 assert(options != NULL && bs->options != options);
835
836 if (file != NULL) {
837 filename = file->filename;
838 } else {
839 filename = qdict_get_try_str(options, "filename");
840 }
841
842 if (drv->bdrv_needs_filename && !filename) {
843 error_setg(errp, "The '%s' block driver requires a file name",
844 drv->format_name);
845 return -EINVAL;
846 }
847
848 trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);
849
850 node_name = qdict_get_try_str(options, "node-name");
851 ret = bdrv_assign_node_name(bs, node_name, errp);
852 if (ret < 0) {
853 return ret;
854 }
855 qdict_del(options, "node-name");
856
857 /* bdrv_open() was called directly with a protocol driver as drv. That layer
858 * is already opened, so assign it to bs (while file becomes a closed
859 * BlockDriverState) and return immediately. */
860 if (file != NULL && drv->bdrv_file_open) {
861 bdrv_swap(file, bs);
862 return 0;
863 }
864
865 bs->open_flags = flags;
866 bs->guest_block_size = 512;
867 bs->request_alignment = 512;
868 bs->zero_beyond_eof = true;
869 open_flags = bdrv_open_flags(bs, flags);
870 bs->read_only = !(open_flags & BDRV_O_RDWR);
871
872 if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
873 error_setg(errp,
874 !bs->read_only && bdrv_is_whitelisted(drv, true)
875 ? "Driver '%s' can only be used for read-only devices"
876 : "Driver '%s' is not whitelisted",
877 drv->format_name);
878 return -ENOTSUP;
879 }
880
881 assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
882 if (flags & BDRV_O_COPY_ON_READ) {
883 if (!bs->read_only) {
884 bdrv_enable_copy_on_read(bs);
885 } else {
886 error_setg(errp, "Can't use copy-on-read on read-only device");
887 return -EINVAL;
888 }
889 }
890
891 if (filename != NULL) {
892 pstrcpy(bs->filename, sizeof(bs->filename), filename);
893 } else {
894 bs->filename[0] = '\0';
895 }
896
897 bs->drv = drv;
898 bs->opaque = g_malloc0(drv->instance_size);
899
900 bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);
901
902 /* Open the image, either directly or using a protocol */
903 if (drv->bdrv_file_open) {
904 assert(file == NULL);
905 assert(!drv->bdrv_needs_filename || filename != NULL);
906 ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
907 } else {
908 if (file == NULL) {
909 error_setg(errp, "Can't use '%s' as a block driver for the "
910 "protocol level", drv->format_name);
911 ret = -EINVAL;
912 goto free_and_fail;
913 }
914 bs->file = file;
915 ret = drv->bdrv_open(bs, options, open_flags, &local_err);
916 }
917
918 if (ret < 0) {
919 if (local_err) {
920 error_propagate(errp, local_err);
921 } else if (bs->filename[0]) {
922 error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
923 } else {
924 error_setg_errno(errp, -ret, "Could not open image");
925 }
926 goto free_and_fail;
927 }
928
929 ret = refresh_total_sectors(bs, bs->total_sectors);
930 if (ret < 0) {
931 error_setg_errno(errp, -ret, "Could not refresh total sector count");
932 goto free_and_fail;
933 }
934
935 bdrv_refresh_limits(bs);
936 assert(bdrv_opt_mem_align(bs) != 0);
937 assert(bs->request_alignment != 0);
938
939 #ifndef _WIN32
940 if (bs->is_temporary) {
941 assert(bs->filename[0] != '\0');
942 unlink(bs->filename);
943 }
944 #endif
945 return 0;
946
947 free_and_fail:
948 bs->file = NULL;
949 g_free(bs->opaque);
950 bs->opaque = NULL;
951 bs->drv = NULL;
952 return ret;
953 }
954
955 /*
956 * Opens a file using a protocol (file, host_device, nbd, ...)
957 *
958 * options is an indirect pointer to a QDict of options to pass to the block
959 * drivers, or pointer to NULL for an empty set of options. If this function
960 * takes ownership of the QDict reference, it will set *options to NULL;
961 * otherwise, it will contain unused/unrecognized options after this function
962 * returns. Then, the caller is responsible for freeing it. If it intends to
963 * reuse the QDict, QINCREF() should be called beforehand.
964 */
965 static int bdrv_file_open(BlockDriverState *bs, const char *filename,
966 QDict **options, int flags, Error **errp)
967 {
968 BlockDriver *drv;
969 const char *drvname;
970 bool allow_protocol_prefix = false;
971 Error *local_err = NULL;
972 int ret;
973
974 /* Fetch the file name from the options QDict if necessary */
975 if (!filename) {
976 filename = qdict_get_try_str(*options, "filename");
977 } else if (filename && !qdict_haskey(*options, "filename")) {
978 qdict_put(*options, "filename", qstring_from_str(filename));
979 allow_protocol_prefix = true;
980 } else {
981 error_setg(errp, "Can't specify 'file' and 'filename' options at the "
982 "same time");
983 ret = -EINVAL;
984 goto fail;
985 }
986
987 /* Find the right block driver */
988 drvname = qdict_get_try_str(*options, "driver");
989 if (drvname) {
990 drv = bdrv_find_format(drvname);
991 if (!drv) {
992 error_setg(errp, "Unknown driver '%s'", drvname);
993 }
994 qdict_del(*options, "driver");
995 } else if (filename) {
996 drv = bdrv_find_protocol(filename, allow_protocol_prefix);
997 if (!drv) {
998 error_setg(errp, "Unknown protocol");
999 }
1000 } else {
1001 error_setg(errp, "Must specify either driver or file");
1002 drv = NULL;
1003 }
1004
1005 if (!drv) {
1006 /* errp has been set already */
1007 ret = -ENOENT;
1008 goto fail;
1009 }
1010
1011 /* Parse the filename and open it */
1012 if (drv->bdrv_parse_filename && filename) {
1013 drv->bdrv_parse_filename(filename, *options, &local_err);
1014 if (local_err) {
1015 error_propagate(errp, local_err);
1016 ret = -EINVAL;
1017 goto fail;
1018 }
1019 qdict_del(*options, "filename");
1020 }
1021
1022 if (!drv->bdrv_file_open) {
1023 ret = bdrv_open(&bs, filename, NULL, *options, flags, drv, &local_err);
1024 *options = NULL;
1025 } else {
1026 ret = bdrv_open_common(bs, NULL, *options, flags, drv, &local_err);
1027 }
1028 if (ret < 0) {
1029 error_propagate(errp, local_err);
1030 goto fail;
1031 }
1032
1033 bs->growable = 1;
1034 return 0;
1035
1036 fail:
1037 return ret;
1038 }
1039
1040 /*
1041 * Opens the backing file for a BlockDriverState if not yet open
1042 *
1043 * options is a QDict of options to pass to the block drivers, or NULL for an
1044 * empty set of options. The reference to the QDict is transferred to this
1045 * function (even on failure), so if the caller intends to reuse the dictionary,
1046 * it needs to use QINCREF() before calling bdrv_open_backing_file().
1047 */
1048 int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
1049 {
1050 char backing_filename[PATH_MAX];
1051 int back_flags, ret;
1052 BlockDriver *back_drv = NULL;
1053 Error *local_err = NULL;
1054
1055 if (bs->backing_hd != NULL) {
1056 QDECREF(options);
1057 return 0;
1058 }
1059
1060 /* NULL means an empty set of options */
1061 if (options == NULL) {
1062 options = qdict_new();
1063 }
1064
1065 bs->open_flags &= ~BDRV_O_NO_BACKING;
1066 if (qdict_haskey(options, "file.filename")) {
1067 backing_filename[0] = '\0';
1068 } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
1069 QDECREF(options);
1070 return 0;
1071 } else {
1072 bdrv_get_full_backing_filename(bs, backing_filename,
1073 sizeof(backing_filename));
1074 }
1075
1076 if (bs->backing_format[0] != '\0') {
1077 back_drv = bdrv_find_format(bs->backing_format);
1078 }
1079
1080 /* backing files are always opened read-only */
1081 back_flags = bs->open_flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT |
1082 BDRV_O_COPY_ON_READ);
1083
1084 assert(bs->backing_hd == NULL);
1085 ret = bdrv_open(&bs->backing_hd,
1086 *backing_filename ? backing_filename : NULL, NULL, options,
1087 back_flags, back_drv, &local_err);
1088 if (ret < 0) {
1089 bs->backing_hd = NULL;
1090 bs->open_flags |= BDRV_O_NO_BACKING;
1091 error_setg(errp, "Could not open backing file: %s",
1092 error_get_pretty(local_err));
1093 error_free(local_err);
1094 return ret;
1095 }
1096
1097 if (bs->backing_hd->file) {
1098 pstrcpy(bs->backing_file, sizeof(bs->backing_file),
1099 bs->backing_hd->file->filename);
1100 }
1101
1102 /* Recalculate the BlockLimits with the backing file */
1103 bdrv_refresh_limits(bs);
1104
1105 return 0;
1106 }
1107
1108 /*
1109 * Opens a disk image whose options are given as BlockdevRef in another block
1110 * device's options.
1111 *
1112 * If force_raw is true, bdrv_file_open() will be used, thereby preventing any
1113 * image format auto-detection. If it is false and a filename is given,
1114 * bdrv_open() will be used for auto-detection.
1115 *
1116 * If allow_none is true, no image will be opened if filename is NULL and no
1117 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
1118 *
1119 * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
1120 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
1121 * itself, all options starting with "${bdref_key}." are considered part of the
1122 * BlockdevRef.
1123 *
1124 * The BlockdevRef will be removed from the options QDict.
1125 *
1126 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
1127 */
1128 int bdrv_open_image(BlockDriverState **pbs, const char *filename,
1129 QDict *options, const char *bdref_key, int flags,
1130 bool force_raw, bool allow_none, Error **errp)
1131 {
1132 QDict *image_options;
1133 int ret;
1134 char *bdref_key_dot;
1135 const char *reference;
1136
1137 assert(pbs);
1138 assert(*pbs == NULL);
1139
1140 bdref_key_dot = g_strdup_printf("%s.", bdref_key);
1141 qdict_extract_subqdict(options, &image_options, bdref_key_dot);
1142 g_free(bdref_key_dot);
1143
1144 reference = qdict_get_try_str(options, bdref_key);
1145 if (!filename && !reference && !qdict_size(image_options)) {
1146 if (allow_none) {
1147 ret = 0;
1148 } else {
1149 error_setg(errp, "A block device must be specified for \"%s\"",
1150 bdref_key);
1151 ret = -EINVAL;
1152 }
1153 goto done;
1154 }
1155
1156 if (filename && !force_raw) {
1157 /* If a filename is given and the block driver should be detected
1158 automatically (instead of using none), use bdrv_open() in order to do
1159 that auto-detection. */
1160 if (reference) {
1161 error_setg(errp, "Cannot reference an existing block device while "
1162 "giving a filename");
1163 ret = -EINVAL;
1164 goto done;
1165 }
1166
1167 ret = bdrv_open(pbs, filename, NULL, image_options, flags, NULL, errp);
1168 } else {
1169 ret = bdrv_open(pbs, filename, reference, image_options,
1170 flags | BDRV_O_PROTOCOL, NULL, errp);
1171 }
1172
1173 done:
1174 qdict_del(options, bdref_key);
1175 return ret;
1176 }
1177
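/* Illustrative example: with bdref_key = "file", an options QDict containing
 *
 *     { "file.driver": "file", "file.filename": "/tmp/test.img" }
 *
 * produces an image_options QDict of
 *
 *     { "driver": "file", "filename": "/tmp/test.img" }
 *
 * while an options QDict containing { "file": "drive0" } instead references
 * the existing block device named "drive0".
 */
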
1178 /*
1179 * Opens a disk image (raw, qcow2, vmdk, ...)
1180 *
1181 * options is a QDict of options to pass to the block drivers, or NULL for an
1182 * empty set of options. The reference to the QDict belongs to the block layer
1183 * after the call (even on failure), so if the caller intends to reuse the
1184 * dictionary, it needs to use QINCREF() before calling bdrv_open.
1185 *
1186 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there.
1187 * If it is not NULL, the referenced BDS will be reused.
1188 *
1189 * The reference parameter may be used to specify an existing block device which
1190 * should be opened. If specified, neither options nor a filename may be given,
1191 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
1192 */
1193 int bdrv_open(BlockDriverState **pbs, const char *filename,
1194 const char *reference, QDict *options, int flags,
1195 BlockDriver *drv, Error **errp)
1196 {
1197 int ret;
1198 /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
1199 char tmp_filename[PATH_MAX + 1];
1200 BlockDriverState *file = NULL, *bs;
1201 const char *drvname;
1202 Error *local_err = NULL;
1203
1204 assert(pbs);
1205
1206 if (reference) {
1207 bool options_non_empty = options ? qdict_size(options) : false;
1208 QDECREF(options);
1209
1210 if (*pbs) {
1211 error_setg(errp, "Cannot reuse an existing BDS when referencing "
1212 "another block device");
1213 return -EINVAL;
1214 }
1215
1216 if (filename || options_non_empty) {
1217 error_setg(errp, "Cannot reference an existing block device with "
1218 "additional options or a new filename");
1219 return -EINVAL;
1220 }
1221
1222 bs = bdrv_lookup_bs(reference, reference, errp);
1223 if (!bs) {
1224 return -ENODEV;
1225 }
1226 bdrv_ref(bs);
1227 *pbs = bs;
1228 return 0;
1229 }
1230
1231 if (*pbs) {
1232 bs = *pbs;
1233 } else {
1234 bs = bdrv_new("");
1235 }
1236
1237 /* NULL means an empty set of options */
1238 if (options == NULL) {
1239 options = qdict_new();
1240 }
1241
1242 bs->options = options;
1243 options = qdict_clone_shallow(options);
1244
1245 if (flags & BDRV_O_PROTOCOL) {
1246 assert(!drv);
1247 ret = bdrv_file_open(bs, filename, &options, flags & ~BDRV_O_PROTOCOL,
1248 &local_err);
1249 if (!ret) {
1250 goto done;
1251 } else if (bs->drv) {
1252 goto close_and_fail;
1253 } else {
1254 goto fail;
1255 }
1256 }
1257
1258 /* For snapshot=on, create a temporary qcow2 overlay */
1259 if (flags & BDRV_O_SNAPSHOT) {
1260 BlockDriverState *bs1;
1261 int64_t total_size;
1262 BlockDriver *bdrv_qcow2;
1263 QEMUOptionParameter *create_options;
1264 QDict *snapshot_options;
1265
1266 /* if snapshot, we create a temporary backing file and open it
1267 instead of opening 'filename' directly */
1268
1269 /* Get the required size from the image */
1270 QINCREF(options);
1271 bs1 = NULL;
1272 ret = bdrv_open(&bs1, filename, NULL, options, BDRV_O_NO_BACKING,
1273 drv, &local_err);
1274 if (ret < 0) {
1275 goto fail;
1276 }
1277 total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;
1278
1279 bdrv_unref(bs1);
1280
1281 /* Create the temporary image */
1282 ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
1283 if (ret < 0) {
1284 error_setg_errno(errp, -ret, "Could not get temporary filename");
1285 goto fail;
1286 }
1287
1288 bdrv_qcow2 = bdrv_find_format("qcow2");
1289 create_options = parse_option_parameters("", bdrv_qcow2->create_options,
1290 NULL);
1291
1292 set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size);
1293
1294 ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options, &local_err);
1295 free_option_parameters(create_options);
1296 if (ret < 0) {
1297 error_setg_errno(errp, -ret, "Could not create temporary overlay "
1298 "'%s': %s", tmp_filename,
1299 error_get_pretty(local_err));
1300 error_free(local_err);
1301 local_err = NULL;
1302 goto fail;
1303 }
1304
1305 /* Prepare a new options QDict for the temporary file, where user
1306 * options refer to the backing file */
1307 if (filename) {
1308 qdict_put(options, "file.filename", qstring_from_str(filename));
1309 }
1310 if (drv) {
1311 qdict_put(options, "driver", qstring_from_str(drv->format_name));
1312 }
1313
1314 snapshot_options = qdict_new();
1315 qdict_put(snapshot_options, "backing", options);
1316 qdict_flatten(snapshot_options);
1317
1318 bs->options = snapshot_options;
1319 options = qdict_clone_shallow(bs->options);
1320
1321 filename = tmp_filename;
1322 drv = bdrv_qcow2;
1323 bs->is_temporary = 1;
1324 }
1325
1326 /* Open image file without format layer */
1327 if (flags & BDRV_O_RDWR) {
1328 flags |= BDRV_O_ALLOW_RDWR;
1329 }
1330
1331 assert(file == NULL);
1332 ret = bdrv_open_image(&file, filename, options, "file",
1333 bdrv_open_flags(bs, flags | BDRV_O_UNMAP), true, true,
1334 &local_err);
1335 if (ret < 0) {
1336 goto fail;
1337 }
1338
1339 /* Find the right image format driver */
1340 drvname = qdict_get_try_str(options, "driver");
1341 if (drvname) {
1342 drv = bdrv_find_format(drvname);
1343 qdict_del(options, "driver");
1344 if (!drv) {
1345 error_setg(errp, "Invalid driver: '%s'", drvname);
1346 ret = -EINVAL;
1347 goto unlink_and_fail;
1348 }
1349 }
1350
1351 if (!drv) {
1352 if (file) {
1353 ret = find_image_format(file, filename, &drv, &local_err);
1354 } else {
1355 error_setg(errp, "Must specify either driver or file");
1356 ret = -EINVAL;
1357 goto unlink_and_fail;
1358 }
1359 }
1360
1361 if (!drv) {
1362 goto unlink_and_fail;
1363 }
1364
1365 /* Open the image */
1366 ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
1367 if (ret < 0) {
1368 goto unlink_and_fail;
1369 }
1370
1371 if (file && (bs->file != file)) {
1372 bdrv_unref(file);
1373 file = NULL;
1374 }
1375
1376 /* If there is a backing file, use it */
1377 if ((flags & BDRV_O_NO_BACKING) == 0) {
1378 QDict *backing_options;
1379
1380 qdict_extract_subqdict(options, &backing_options, "backing.");
1381 ret = bdrv_open_backing_file(bs, backing_options, &local_err);
1382 if (ret < 0) {
1383 goto close_and_fail;
1384 }
1385 }
1386
1387 done:
1388 /* Check if any unknown options were used */
1389 if (options && (qdict_size(options) != 0)) {
1390 const QDictEntry *entry = qdict_first(options);
1391 if (flags & BDRV_O_PROTOCOL) {
1392 error_setg(errp, "Block protocol '%s' doesn't support the option "
1393 "'%s'", drv->format_name, entry->key);
1394 } else {
1395 error_setg(errp, "Block format '%s' used by device '%s' doesn't "
1396 "support the option '%s'", drv->format_name,
1397 bs->device_name, entry->key);
1398 }
1399
1400 ret = -EINVAL;
1401 goto close_and_fail;
1402 }
1403 QDECREF(options);
1404
1405 if (!bdrv_key_required(bs)) {
1406 bdrv_dev_change_media_cb(bs, true);
1407 }
1408
1409 *pbs = bs;
1410 return 0;
1411
1412 unlink_and_fail:
1413 if (file != NULL) {
1414 bdrv_unref(file);
1415 }
1416 if (bs->is_temporary) {
1417 unlink(filename);
1418 }
1419 fail:
1420 QDECREF(bs->options);
1421 QDECREF(options);
1422 bs->options = NULL;
1423 if (!*pbs) {
1424 /* If *pbs is NULL, a new BDS has been created in this function and
1425 needs to be freed now. Otherwise, it does not need to be closed,
1426 since it has not really been opened yet. */
1427 bdrv_unref(bs);
1428 }
1429 if (local_err) {
1430 error_propagate(errp, local_err);
1431 }
1432 return ret;
1433
1434 close_and_fail:
1435 /* See fail path, but now the BDS has to be always closed */
1436 if (*pbs) {
1437 bdrv_close(bs);
1438 } else {
1439 bdrv_unref(bs);
1440 }
1441 QDECREF(options);
1442 if (local_err) {
1443 error_propagate(errp, local_err);
1444 }
1445 return ret;
1446 }
1447
1448 typedef struct BlockReopenQueueEntry {
1449 bool prepared;
1450 BDRVReopenState state;
1451 QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
1452 } BlockReopenQueueEntry;
1453
1454 /*
1455 * Adds a BlockDriverState to a simple queue for an atomic, transactional
1456 * reopen of multiple devices.
1457 *
1458 * bs_queue can either be an existing BlockReopenQueue that has had QSIMPLEQ_INIT
1459 * already performed, or it may be NULL, in which case a new BlockReopenQueue
1460 * will be created and initialized. This newly created BlockReopenQueue should
1461 * be passed back in for subsequent calls that are intended to be part of the
1462 * same atomic 'set'.
1463 *
1464 * bs is the BlockDriverState to add to the reopen queue.
1465 *
1466 * flags contains the open flags for the associated bs
1467 *
1468 * returns a pointer to bs_queue, which is either the newly allocated
1469 * bs_queue, or the existing bs_queue being used.
1470 *
1471 */
1472 BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
1473 BlockDriverState *bs, int flags)
1474 {
1475 assert(bs != NULL);
1476
1477 BlockReopenQueueEntry *bs_entry;
1478 if (bs_queue == NULL) {
1479 bs_queue = g_new0(BlockReopenQueue, 1);
1480 QSIMPLEQ_INIT(bs_queue);
1481 }
1482
1483 if (bs->file) {
1484 bdrv_reopen_queue(bs_queue, bs->file, flags);
1485 }
1486
1487 bs_entry = g_new0(BlockReopenQueueEntry, 1);
1488 QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);
1489
1490 bs_entry->state.bs = bs;
1491 bs_entry->state.flags = flags;
1492
1493 return bs_queue;
1494 }
1495
1496 /*
1497 * Reopen multiple BlockDriverStates atomically & transactionally.
1498 *
1499 * The queue passed in (bs_queue) must have been built up previously
1500 * via bdrv_reopen_queue().
1501 *
1502 * Reopens all BDS specified in the queue, with the appropriate
1503 * flags. All devices are prepared for reopen, and failure of any
1504 * device will cause all device changes to be abandoned, and intermediate
1505 * data cleaned up.
1506 *
1507 * If all devices prepare successfully, then the changes are committed
1508 * to all devices.
1509 *
1510 */
1511 int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
1512 {
1513 int ret = -1;
1514 BlockReopenQueueEntry *bs_entry, *next;
1515 Error *local_err = NULL;
1516
1517 assert(bs_queue != NULL);
1518
1519 bdrv_drain_all();
1520
1521 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1522 if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
1523 error_propagate(errp, local_err);
1524 goto cleanup;
1525 }
1526 bs_entry->prepared = true;
1527 }
1528
1529 /* If we reach this point, we have success and just need to apply the
1530 * changes
1531 */
1532 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1533 bdrv_reopen_commit(&bs_entry->state);
1534 }
1535
1536 ret = 0;
1537
1538 cleanup:
1539 QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
1540 if (ret && bs_entry->prepared) {
1541 bdrv_reopen_abort(&bs_entry->state);
1542 }
1543 g_free(bs_entry);
1544 }
1545 g_free(bs_queue);
1546 return ret;
1547 }
1548
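/* Sketch of an atomic reopen of two devices (either both succeed or both are
 * rolled back); note that bdrv_reopen_multiple() frees the queue itself:
 *
 *     BlockReopenQueue *queue = NULL;
 *     Error *local_err = NULL;
 *
 *     queue = bdrv_reopen_queue(queue, bs_a, bs_a->open_flags & ~BDRV_O_RDWR);
 *     queue = bdrv_reopen_queue(queue, bs_b, bs_b->open_flags & ~BDRV_O_RDWR);
 *     if (bdrv_reopen_multiple(queue, &local_err) < 0) {
 *         // no device was changed; report local_err
 *     }
 */
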
1549
1550 /* Reopen a single BlockDriverState with the specified flags. */
1551 int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
1552 {
1553 int ret = -1;
1554 Error *local_err = NULL;
1555 BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);
1556
1557 ret = bdrv_reopen_multiple(queue, &local_err);
1558 if (local_err != NULL) {
1559 error_propagate(errp, local_err);
1560 }
1561 return ret;
1562 }
1563
1564
1565 /*
1566 * Prepares a BlockDriverState for reopen. All changes are staged in the
1567 * 'opaque' field of the BDRVReopenState, which is used and allocated by
1568 * the block driver layer's .bdrv_reopen_prepare() callback
1569 *
1570 * bs is the BlockDriverState to reopen
1571 * flags are the new open flags
1572 * queue is the reopen queue
1573 *
1574 * Returns 0 on success, non-zero on error. On error errp will be set
1575 * as well.
1576 *
1577 * On failure, bdrv_reopen_abort() will be called to clean up any data.
1578 * It is the responsibility of the caller to then call the abort() or
1579 * commit() for any other BDS that have been left in a prepare() state
1580 *
1581 */
1582 int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
1583 Error **errp)
1584 {
1585 int ret = -1;
1586 Error *local_err = NULL;
1587 BlockDriver *drv;
1588
1589 assert(reopen_state != NULL);
1590 assert(reopen_state->bs->drv != NULL);
1591 drv = reopen_state->bs->drv;
1592
1593 /* if we are to stay read-only, do not allow permission change
1594 * to r/w */
1595 if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
1596 reopen_state->flags & BDRV_O_RDWR) {
1597 error_set(errp, QERR_DEVICE_IS_READ_ONLY,
1598 reopen_state->bs->device_name);
1599 goto error;
1600 }
1601
1602
1603 ret = bdrv_flush(reopen_state->bs);
1604 if (ret) {
1605 error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
1606 strerror(-ret));
1607 goto error;
1608 }
1609
1610 if (drv->bdrv_reopen_prepare) {
1611 ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
1612 if (ret) {
1613 if (local_err != NULL) {
1614 error_propagate(errp, local_err);
1615 } else {
1616 error_setg(errp, "failed while preparing to reopen image '%s'",
1617 reopen_state->bs->filename);
1618 }
1619 goto error;
1620 }
1621 } else {
1622 /* It is currently mandatory to have a bdrv_reopen_prepare()
1623 * handler for each supported drv. */
1624 error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
1625 drv->format_name, reopen_state->bs->device_name,
1626 "reopening of file");
1627 ret = -1;
1628 goto error;
1629 }
1630
1631 ret = 0;
1632
1633 error:
1634 return ret;
1635 }
1636
1637 /*
1638 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
1639 * makes them final by swapping the staging BlockDriverState contents into
1640 * the active BlockDriverState contents.
1641 */
1642 void bdrv_reopen_commit(BDRVReopenState *reopen_state)
1643 {
1644 BlockDriver *drv;
1645
1646 assert(reopen_state != NULL);
1647 drv = reopen_state->bs->drv;
1648 assert(drv != NULL);
1649
1650 /* If there are any driver level actions to take */
1651 if (drv->bdrv_reopen_commit) {
1652 drv->bdrv_reopen_commit(reopen_state);
1653 }
1654
1655 /* set BDS specific flags now */
1656 reopen_state->bs->open_flags = reopen_state->flags;
1657 reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
1658 BDRV_O_CACHE_WB);
1659 reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
1660
1661 bdrv_refresh_limits(reopen_state->bs);
1662 }
1663
1664 /*
1665 * Abort the reopen, and delete and free the staged changes in
1666 * reopen_state
1667 */
1668 void bdrv_reopen_abort(BDRVReopenState *reopen_state)
1669 {
1670 BlockDriver *drv;
1671
1672 assert(reopen_state != NULL);
1673 drv = reopen_state->bs->drv;
1674 assert(drv != NULL);
1675
1676 if (drv->bdrv_reopen_abort) {
1677 drv->bdrv_reopen_abort(reopen_state);
1678 }
1679 }
1680
1681
1682 void bdrv_close(BlockDriverState *bs)
1683 {
1684 if (bs->job) {
1685 block_job_cancel_sync(bs->job);
1686 }
1687 bdrv_drain_all(); /* complete I/O */
1688 bdrv_flush(bs);
1689 bdrv_drain_all(); /* in case flush left pending I/O */
1690 notifier_list_notify(&bs->close_notifiers, bs);
1691
1692 if (bs->drv) {
1693 if (bs->backing_hd) {
1694 bdrv_unref(bs->backing_hd);
1695 bs->backing_hd = NULL;
1696 }
1697 bs->drv->bdrv_close(bs);
1698 g_free(bs->opaque);
1699 #ifdef _WIN32
1700 if (bs->is_temporary) {
1701 unlink(bs->filename);
1702 }
1703 #endif
1704 bs->opaque = NULL;
1705 bs->drv = NULL;
1706 bs->copy_on_read = 0;
1707 bs->backing_file[0] = '\0';
1708 bs->backing_format[0] = '\0';
1709 bs->total_sectors = 0;
1710 bs->encrypted = 0;
1711 bs->valid_key = 0;
1712 bs->sg = 0;
1713 bs->growable = 0;
1714 bs->zero_beyond_eof = false;
1715 QDECREF(bs->options);
1716 bs->options = NULL;
1717
1718 if (bs->file != NULL) {
1719 bdrv_unref(bs->file);
1720 bs->file = NULL;
1721 }
1722 }
1723
1724 bdrv_dev_change_media_cb(bs, false);
1725
1726 /* throttling disk I/O limits */
1727 if (bs->io_limits_enabled) {
1728 bdrv_io_limits_disable(bs);
1729 }
1730 }
1731
1732 void bdrv_close_all(void)
1733 {
1734 BlockDriverState *bs;
1735
1736 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
1737 bdrv_close(bs);
1738 }
1739 }
1740
1741 /* Check if any requests are in-flight (including throttled requests) */
1742 static bool bdrv_requests_pending(BlockDriverState *bs)
1743 {
1744 if (!QLIST_EMPTY(&bs->tracked_requests)) {
1745 return true;
1746 }
1747 if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
1748 return true;
1749 }
1750 if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
1751 return true;
1752 }
1753 if (bs->file && bdrv_requests_pending(bs->file)) {
1754 return true;
1755 }
1756 if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
1757 return true;
1758 }
1759 return false;
1760 }
1761
1762 static bool bdrv_requests_pending_all(void)
1763 {
1764 BlockDriverState *bs;
1765 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
1766 if (bdrv_requests_pending(bs)) {
1767 return true;
1768 }
1769 }
1770 return false;
1771 }
1772
1773 /*
1774 * Wait for pending requests to complete across all BlockDriverStates
1775 *
1776 * This function does not flush data to disk, use bdrv_flush_all() for that
1777 * after calling this function.
1778 *
1779 * Note that completion of an asynchronous I/O operation can trigger any
1780 * number of other I/O operations on other devices---for example a coroutine
1781 * can be arbitrarily complex and a constant flow of I/O can come until the
1782 * coroutine is complete. Because of this, it is not possible to have a
1783 * function to drain a single device's I/O queue.
1784 */
1785 void bdrv_drain_all(void)
1786 {
1787 /* Always run first iteration so any pending completion BHs run */
1788 bool busy = true;
1789 BlockDriverState *bs;
1790
1791 while (busy) {
1792 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
1793 bdrv_start_throttled_reqs(bs);
1794 }
1795
1796 busy = bdrv_requests_pending_all();
1797 busy |= aio_poll(qemu_get_aio_context(), busy);
1798 }
1799 }
1800
1801 /* make a BlockDriverState anonymous by removing it from the bdrv_states and
1802 * graph_bdrv_states lists.
1803 * Also, NUL-terminate the device_name to prevent double removal */
1804 void bdrv_make_anon(BlockDriverState *bs)
1805 {
1806 if (bs->device_name[0] != '\0') {
1807 QTAILQ_REMOVE(&bdrv_states, bs, device_list);
1808 }
1809 bs->device_name[0] = '\0';
1810 if (bs->node_name[0] != '\0') {
1811 QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
1812 }
1813 bs->node_name[0] = '\0';
1814 }
1815
1816 static void bdrv_rebind(BlockDriverState *bs)
1817 {
1818 if (bs->drv && bs->drv->bdrv_rebind) {
1819 bs->drv->bdrv_rebind(bs);
1820 }
1821 }
1822
1823 static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
1824 BlockDriverState *bs_src)
1825 {
1826 /* move some fields that need to stay attached to the device */
1827 bs_dest->open_flags = bs_src->open_flags;
1828
1829 /* dev info */
1830 bs_dest->dev_ops = bs_src->dev_ops;
1831 bs_dest->dev_opaque = bs_src->dev_opaque;
1832 bs_dest->dev = bs_src->dev;
1833 bs_dest->guest_block_size = bs_src->guest_block_size;
1834 bs_dest->copy_on_read = bs_src->copy_on_read;
1835
1836 bs_dest->enable_write_cache = bs_src->enable_write_cache;
1837
1838 /* i/o throttled req */
1839 memcpy(&bs_dest->throttle_state,
1840 &bs_src->throttle_state,
1841 sizeof(ThrottleState));
1842 bs_dest->throttled_reqs[0] = bs_src->throttled_reqs[0];
1843 bs_dest->throttled_reqs[1] = bs_src->throttled_reqs[1];
1844 bs_dest->io_limits_enabled = bs_src->io_limits_enabled;
1845
1846 /* r/w error */
1847 bs_dest->on_read_error = bs_src->on_read_error;
1848 bs_dest->on_write_error = bs_src->on_write_error;
1849
1850 /* i/o status */
1851 bs_dest->iostatus_enabled = bs_src->iostatus_enabled;
1852 bs_dest->iostatus = bs_src->iostatus;
1853
1854 /* dirty bitmap */
1855 bs_dest->dirty_bitmaps = bs_src->dirty_bitmaps;
1856
1857 /* reference count */
1858 bs_dest->refcnt = bs_src->refcnt;
1859
1860 /* job */
1861 bs_dest->in_use = bs_src->in_use;
1862 bs_dest->job = bs_src->job;
1863
1864 /* keep the same entry in bdrv_states */
1865 pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
1866 bs_src->device_name);
1867 bs_dest->device_list = bs_src->device_list;
1868
1869 /* keep the same entry in graph_bdrv_states
1870 * We do want to swap the node name, but we don't want to swap the linked-list entries
1871 */
1872 bs_dest->node_list = bs_src->node_list;
1873 }
1874
1875 /*
1876 * Swap bs contents for two image chains while they are live,
1877 * while keeping required fields on the BlockDriverState that is
1878 * actually attached to a device.
1879 *
1880 * This will modify the BlockDriverState fields, and swap contents
1881 * between bs_new and bs_old. Both bs_new and bs_old are modified.
1882 *
1883 * bs_new is required to be anonymous.
1884 *
1885 * This function does not create any image files.
1886 */
1887 void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
1888 {
1889 BlockDriverState tmp;
1890
1891 /* bs_new must be anonymous and shouldn't have anything fancy enabled */
1892 assert(bs_new->device_name[0] == '\0');
1893 assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
1894 assert(bs_new->job == NULL);
1895 assert(bs_new->dev == NULL);
1896 assert(bs_new->in_use == 0);
1897 assert(bs_new->io_limits_enabled == false);
1898 assert(!throttle_have_timer(&bs_new->throttle_state));
1899
1900 tmp = *bs_new;
1901 *bs_new = *bs_old;
1902 *bs_old = tmp;
1903
1904 /* there are some fields that should not be swapped, move them back */
1905 bdrv_move_feature_fields(&tmp, bs_old);
1906 bdrv_move_feature_fields(bs_old, bs_new);
1907 bdrv_move_feature_fields(bs_new, &tmp);
1908
1909 /* bs_new shouldn't be in bdrv_states even after the swap! */
1910 assert(bs_new->device_name[0] == '\0');
1911
1912 /* Check a few fields that should remain attached to the device */
1913 assert(bs_new->dev == NULL);
1914 assert(bs_new->job == NULL);
1915 assert(bs_new->in_use == 0);
1916 assert(bs_new->io_limits_enabled == false);
1917 assert(!throttle_have_timer(&bs_new->throttle_state));
1918
1919 bdrv_rebind(bs_new);
1920 bdrv_rebind(bs_old);
1921 }
1922
1923 /*
1924 * Add new bs contents at the top of an image chain while the chain is
1925 * live, while keeping required fields on the top layer.
1926 *
1927 * This will modify the BlockDriverState fields, and swap contents
1928 * between bs_new and bs_top. Both bs_new and bs_top are modified.
1929 *
1930 * bs_new is required to be anonymous.
1931 *
1932 * This function does not create any image files.
1933 */
1934 void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
1935 {
1936 bdrv_swap(bs_new, bs_top);
1937
1938 /* After the swap, bs_top presents the contents that came in as bs_new,
1939 * while bs_new holds the old top image, so hook it in as the backing file. */
1940 bs_top->backing_hd = bs_new;
1941 bs_top->open_flags &= ~BDRV_O_NO_BACKING;
1942 pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file),
1943 bs_new->filename);
1944 pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format),
1945 bs_new->drv ? bs_new->drv->format_name : "");
1946 }
1947
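/* Sketch of the live-snapshot use of bdrv_append(): bs_new is a freshly
 * opened, anonymous overlay whose backing file names the current image.
 * After
 *
 *     bdrv_append(bs_new, bs_top);
 *
 * the guest-visible BlockDriverState (bs_top) presents the overlay, and the
 * old top image lives on underneath it as bs_top->backing_hd (== bs_new).
 */
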
1948 static void bdrv_delete(BlockDriverState *bs)
1949 {
1950 assert(!bs->dev);
1951 assert(!bs->job);
1952 assert(!bs->in_use);
1953 assert(!bs->refcnt);
1954 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
1955
1956 bdrv_close(bs);
1957
1958 /* remove from list, if necessary */
1959 bdrv_make_anon(bs);
1960
1961 g_free(bs);
1962 }
1963
1964 int bdrv_attach_dev(BlockDriverState *bs, void *dev)
1965 /* TODO change to DeviceState *dev when all users are qdevified */
1966 {
1967 if (bs->dev) {
1968 return -EBUSY;
1969 }
1970 bs->dev = dev;
1971 bdrv_iostatus_reset(bs);
1972 return 0;
1973 }
1974
1975 /* TODO qdevified devices don't use this, remove when devices are qdevified */
1976 void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
1977 {
1978 if (bdrv_attach_dev(bs, dev) < 0) {
1979 abort();
1980 }
1981 }
1982
1983 void bdrv_detach_dev(BlockDriverState *bs, void *dev)
1984 /* TODO change to DeviceState *dev when all users are qdevified */
1985 {
1986 assert(bs->dev == dev);
1987 bs->dev = NULL;
1988 bs->dev_ops = NULL;
1989 bs->dev_opaque = NULL;
1990 bs->guest_block_size = 512;
1991 }
1992
1993 /* TODO change to return DeviceState * when all users are qdevified */
1994 void *bdrv_get_attached_dev(BlockDriverState *bs)
1995 {
1996 return bs->dev;
1997 }
1998
1999 void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
2000 void *opaque)
2001 {
2002 bs->dev_ops = ops;
2003 bs->dev_opaque = opaque;
2004 }
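/*
 * Sketch of a device model registering callbacks via bdrv_set_dev_ops().
 * Only fields exercised by the helpers below are shown; 'MyDev',
 * 'my_change_media_cb' and 'my_is_tray_open' are illustrative names, not
 * part of the tree.
 *
 *     static void my_change_media_cb(void *opaque, bool load) { ... }
 *     static bool my_is_tray_open(void *opaque) { ... }
 *
 *     static const BlockDevOps my_block_ops = {
 *         .change_media_cb = my_change_media_cb,
 *         .is_tray_open    = my_is_tray_open,
 *     };
 *
 *     // during device realize, with 'mydev' as the opaque pointer:
 *     bdrv_set_dev_ops(bs, &my_block_ops, mydev);
 */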
2005
2006 void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
2007 enum MonitorEvent ev,
2008 BlockErrorAction action, bool is_read)
2009 {
2010 QObject *data;
2011 const char *action_str;
2012
2013 switch (action) {
2014 case BDRV_ACTION_REPORT:
2015 action_str = "report";
2016 break;
2017 case BDRV_ACTION_IGNORE:
2018 action_str = "ignore";
2019 break;
2020 case BDRV_ACTION_STOP:
2021 action_str = "stop";
2022 break;
2023 default:
2024 abort();
2025 }
2026
2027 data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
2028 bdrv->device_name,
2029 action_str,
2030 is_read ? "read" : "write");
2031 monitor_protocol_event(ev, data);
2032
2033 qobject_decref(data);
2034 }
2035
2036 static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
2037 {
2038 QObject *data;
2039
2040 data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
2041 bdrv_get_device_name(bs), ejected);
2042 monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);
2043
2044 qobject_decref(data);
2045 }
2046
2047 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
2048 {
2049 if (bs->dev_ops && bs->dev_ops->change_media_cb) {
2050 bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
2051 bs->dev_ops->change_media_cb(bs->dev_opaque, load);
2052 if (tray_was_closed) {
2053 /* tray open */
2054 bdrv_emit_qmp_eject_event(bs, true);
2055 }
2056 if (load) {
2057 /* tray close */
2058 bdrv_emit_qmp_eject_event(bs, false);
2059 }
2060 }
2061 }
2062
2063 bool bdrv_dev_has_removable_media(BlockDriverState *bs)
2064 {
2065 return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
2066 }
2067
2068 void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
2069 {
2070 if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
2071 bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
2072 }
2073 }
2074
2075 bool bdrv_dev_is_tray_open(BlockDriverState *bs)
2076 {
2077 if (bs->dev_ops && bs->dev_ops->is_tray_open) {
2078 return bs->dev_ops->is_tray_open(bs->dev_opaque);
2079 }
2080 return false;
2081 }
2082
2083 static void bdrv_dev_resize_cb(BlockDriverState *bs)
2084 {
2085 if (bs->dev_ops && bs->dev_ops->resize_cb) {
2086 bs->dev_ops->resize_cb(bs->dev_opaque);
2087 }
2088 }
2089
2090 bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
2091 {
2092 if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
2093 return bs->dev_ops->is_medium_locked(bs->dev_opaque);
2094 }
2095 return false;
2096 }
2097
2098 /*
2099 * Run consistency checks on an image
2100 *
2101 * Returns 0 if the check could be completed (it doesn't mean that the image is
2102 * free of errors) or -errno when an internal error occurred. The results of the
2103 * check are stored in res.
2104 */
2105 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
2106 {
2107 if (bs->drv->bdrv_check == NULL) {
2108 return -ENOTSUP;
2109 }
2110
2111 memset(res, 0, sizeof(*res));
2112 return bs->drv->bdrv_check(bs, res, fix);
2113 }
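/*
 * Usage sketch: run a report-only consistency check. The result field
 * names follow BdrvCheckResult as used by qemu-img; treat them as an
 * assumption if your tree differs.
 *
 *     BdrvCheckResult res;
 *     int ret = bdrv_check(bs, &res, 0);   // fix == 0: report only
 *     if (ret < 0) {
 *         // -ENOTSUP if the driver has no checker, other -errno on failure
 *     } else if (res.corruptions || res.leaks) {
 *         // image is damaged and/or leaks clusters
 *     }
 */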
2114
2115 #define COMMIT_BUF_SECTORS 2048
2116
2117 /* commit COW file into the raw image */
2118 int bdrv_commit(BlockDriverState *bs)
2119 {
2120 BlockDriver *drv = bs->drv;
2121 int64_t sector, total_sectors, length, backing_length;
2122 int n, ro, open_flags;
2123 int ret = 0;
2124 uint8_t *buf = NULL;
2125 char filename[PATH_MAX];
2126
2127 if (!drv)
2128 return -ENOMEDIUM;
2129
2130 if (!bs->backing_hd) {
2131 return -ENOTSUP;
2132 }
2133
2134 if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
2135 return -EBUSY;
2136 }
2137
2138 ro = bs->backing_hd->read_only;
2139 /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
2140 pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
2141 open_flags = bs->backing_hd->open_flags;
2142
2143 if (ro) {
2144 if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
2145 return -EACCES;
2146 }
2147 }
2148
2149 length = bdrv_getlength(bs);
2150 if (length < 0) {
2151 ret = length;
2152 goto ro_cleanup;
2153 }
2154
2155 backing_length = bdrv_getlength(bs->backing_hd);
2156 if (backing_length < 0) {
2157 ret = backing_length;
2158 goto ro_cleanup;
2159 }
2160
2161 /* If our top snapshot is larger than the backing file image,
2162 * grow the backing file image if possible. If not possible,
2163 * we must return an error */
2164 if (length > backing_length) {
2165 ret = bdrv_truncate(bs->backing_hd, length);
2166 if (ret < 0) {
2167 goto ro_cleanup;
2168 }
2169 }
2170
2171 total_sectors = length >> BDRV_SECTOR_BITS;
2172 buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
2173
2174 for (sector = 0; sector < total_sectors; sector += n) {
2175 ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
2176 if (ret < 0) {
2177 goto ro_cleanup;
2178 }
2179 if (ret) {
2180 ret = bdrv_read(bs, sector, buf, n);
2181 if (ret < 0) {
2182 goto ro_cleanup;
2183 }
2184
2185 ret = bdrv_write(bs->backing_hd, sector, buf, n);
2186 if (ret < 0) {
2187 goto ro_cleanup;
2188 }
2189 }
2190 }
2191
2192 if (drv->bdrv_make_empty) {
2193 ret = drv->bdrv_make_empty(bs);
2194 if (ret < 0) {
2195 goto ro_cleanup;
2196 }
2197 bdrv_flush(bs);
2198 }
2199
2200 /*
2201 * Make sure all data we wrote to the backing device is actually
2202 * stable on disk.
2203 */
2204 if (bs->backing_hd) {
2205 bdrv_flush(bs->backing_hd);
2206 }
2207
2208 ret = 0;
2209 ro_cleanup:
2210 g_free(buf);
2211
2212 if (ro) {
2213 /* ignoring error return here */
2214 bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
2215 }
2216
2217 return ret;
2218 }
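/*
 * Usage sketch: merge a COW overlay into its backing file and empty the
 * overlay afterwards. Error handling is reduced to the return code.
 *
 *     int ret = bdrv_commit(bs);
 *     if (ret == -ENOTSUP) {
 *         // no backing file to commit into, or driver lacks support
 *     } else if (ret == -EBUSY) {
 *         // bs or its backing file is in use (e.g. by a block job)
 *     }
 */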
2219
2220 int bdrv_commit_all(void)
2221 {
2222 BlockDriverState *bs;
2223
2224 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
2225 if (bs->drv && bs->backing_hd) {
2226 int ret = bdrv_commit(bs);
2227 if (ret < 0) {
2228 return ret;
2229 }
2230 }
2231 }
2232 return 0;
2233 }
2234
2235 /**
2236 * Remove an active request from the tracked requests list
2237 *
2238 * This function should be called when a tracked request is completing.
2239 */
2240 static void tracked_request_end(BdrvTrackedRequest *req)
2241 {
2242 if (req->serialising) {
2243 req->bs->serialising_in_flight--;
2244 }
2245
2246 QLIST_REMOVE(req, list);
2247 qemu_co_queue_restart_all(&req->wait_queue);
2248 }
2249
2250 /**
2251 * Add an active request to the tracked requests list
2252 */
2253 static void tracked_request_begin(BdrvTrackedRequest *req,
2254 BlockDriverState *bs,
2255 int64_t offset,
2256 unsigned int bytes, bool is_write)
2257 {
2258 *req = (BdrvTrackedRequest){
2259 .bs = bs,
2260 .offset = offset,
2261 .bytes = bytes,
2262 .is_write = is_write,
2263 .co = qemu_coroutine_self(),
2264 .serialising = false,
2265 .overlap_offset = offset,
2266 .overlap_bytes = bytes,
2267 };
2268
2269 qemu_co_queue_init(&req->wait_queue);
2270
2271 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
2272 }
2273
2274 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
2275 {
2276 int64_t overlap_offset = req->offset & ~(align - 1);
2277 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
2278 - overlap_offset;
2279
2280 if (!req->serialising) {
2281 req->bs->serialising_in_flight++;
2282 req->serialising = true;
2283 }
2284
2285 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
2286 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
2287 }
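/*
 * Worked example for the rounding above: with req->offset = 1536,
 * req->bytes = 1024 and align = 4096,
 *
 *     overlap_offset = 1536 & ~4095         = 0
 *     overlap_bytes  = ROUND_UP(2560, 4096) = 4096
 *
 * so the serialising window grows to the whole [0, 4096) block even
 * though the request itself only covers [1536, 2560).
 */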
2288
2289 /**
2290 * Round a region to cluster boundaries
2291 */
2292 void bdrv_round_to_clusters(BlockDriverState *bs,
2293 int64_t sector_num, int nb_sectors,
2294 int64_t *cluster_sector_num,
2295 int *cluster_nb_sectors)
2296 {
2297 BlockDriverInfo bdi;
2298
2299 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
2300 *cluster_sector_num = sector_num;
2301 *cluster_nb_sectors = nb_sectors;
2302 } else {
2303 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
2304 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
2305 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
2306 nb_sectors, c);
2307 }
2308 }
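/*
 * Worked example: for a 64 KiB cluster size, c = 65536 / 512 = 128
 * sectors. A request for sectors [200, 300) is widened to
 *
 *     *cluster_sector_num = QEMU_ALIGN_DOWN(200, 128)           = 128
 *     *cluster_nb_sectors = QEMU_ALIGN_UP(200 - 128 + 100, 128) = 256
 *
 * i.e. the cluster-aligned region [128, 384) fully covering the request.
 */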
2309
2310 static int bdrv_get_cluster_size(BlockDriverState *bs)
2311 {
2312 BlockDriverInfo bdi;
2313 int ret;
2314
2315 ret = bdrv_get_info(bs, &bdi);
2316 if (ret < 0 || bdi.cluster_size == 0) {
2317 return bs->request_alignment;
2318 } else {
2319 return bdi.cluster_size;
2320 }
2321 }
2322
2323 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
2324 int64_t offset, unsigned int bytes)
2325 {
2326 /* aaaa bbbb */
2327 if (offset >= req->overlap_offset + req->overlap_bytes) {
2328 return false;
2329 }
2330 /* bbbb aaaa */
2331 if (req->overlap_offset >= offset + bytes) {
2332 return false;
2333 }
2334 return true;
2335 }
2336
2337 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
2338 {
2339 BlockDriverState *bs = self->bs;
2340 BdrvTrackedRequest *req;
2341 bool retry;
2342 bool waited = false;
2343
2344 if (!bs->serialising_in_flight) {
2345 return false;
2346 }
2347
2348 do {
2349 retry = false;
2350 QLIST_FOREACH(req, &bs->tracked_requests, list) {
2351 if (req == self || (!req->serialising && !self->serialising)) {
2352 continue;
2353 }
2354 if (tracked_request_overlaps(req, self->overlap_offset,
2355 self->overlap_bytes))
2356 {
2357 /* Hitting this means there was a reentrant request, for
2358 * example, a block driver issuing nested requests. This must
2359 * never happen since it means deadlock.
2360 */
2361 assert(qemu_coroutine_self() != req->co);
2362
2363 /* If the request is already (indirectly) waiting for us, or
2364 * will wait for us as soon as it wakes up, then just go on
2365 * (instead of producing a deadlock in the former case). */
2366 if (!req->waiting_for) {
2367 self->waiting_for = req;
2368 qemu_co_queue_wait(&req->wait_queue);
2369 self->waiting_for = NULL;
2370 retry = true;
2371 waited = true;
2372 break;
2373 }
2374 }
2375 }
2376 } while (retry);
2377
2378 return waited;
2379 }
2380
2381 /*
2382 * Return values:
2383 * 0 - success
2384 * -EINVAL - backing format specified, but no file
2385 * -ENOSPC - can't update the backing file because no space is left in the
2386 * image file header
2387 * -ENOTSUP - format driver doesn't support changing the backing file
2388 */
2389 int bdrv_change_backing_file(BlockDriverState *bs,
2390 const char *backing_file, const char *backing_fmt)
2391 {
2392 BlockDriver *drv = bs->drv;
2393 int ret;
2394
2395 /* Backing file format doesn't make sense without a backing file */
2396 if (backing_fmt && !backing_file) {
2397 return -EINVAL;
2398 }
2399
2400 if (drv->bdrv_change_backing_file != NULL) {
2401 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
2402 } else {
2403 ret = -ENOTSUP;
2404 }
2405
2406 if (ret == 0) {
2407 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
2408 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
2409 }
2410 return ret;
2411 }
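/*
 * Usage sketch, matching the return codes documented above. The file and
 * format names are placeholders.
 *
 *     int ret = bdrv_change_backing_file(bs, "base.qcow2", "qcow2");
 *     if (ret == -ENOTSUP) {
 *         // format driver cannot rewrite its backing file reference
 *     } else if (ret == -ENOSPC) {
 *         // the new strings do not fit into the image file header
 *     }
 */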
2412
2413 /*
2414 * Finds the image layer in the chain that has 'bs' as its backing file.
2415 *
2416 * active is the current topmost image.
2417 *
2418 * Returns NULL if bs is not found in active's image chain,
2419 * or if active == bs.
2420 */
2421 BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
2422 BlockDriverState *bs)
2423 {
2424 BlockDriverState *overlay = NULL;
2425 BlockDriverState *intermediate;
2426
2427 assert(active != NULL);
2428 assert(bs != NULL);
2429
2430 /* if bs is the same as active, then by definition it has no overlay
2431 */
2432 if (active == bs) {
2433 return NULL;
2434 }
2435
2436 intermediate = active;
2437 while (intermediate->backing_hd) {
2438 if (intermediate->backing_hd == bs) {
2439 overlay = intermediate;
2440 break;
2441 }
2442 intermediate = intermediate->backing_hd;
2443 }
2444
2445 return overlay;
2446 }
2447
2448 typedef struct BlkIntermediateStates {
2449 BlockDriverState *bs;
2450 QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
2451 } BlkIntermediateStates;
2452
2453
2454 /*
2455 * Drops images above 'base' up to and including 'top', and sets the image
2456 * above 'top' to have base as its backing file.
2457 *
2458 * Requires that the overlay to 'top' is opened r/w, so that the backing file
2459 * information in the overlay can be properly updated.
2460 *
2461 * E.g., this will convert the following chain:
2462 * bottom <- base <- intermediate <- top <- active
2463 *
2464 * to
2465 *
2466 * bottom <- base <- active
2467 *
2468 * It is allowed for bottom==base, in which case it converts:
2469 *
2470 * base <- intermediate <- top <- active
2471 *
2472 * to
2473 *
2474 * base <- active
2475 *
2476 * Error conditions:
2477 * if active == top, that is considered an error
2478 *
2479 */
2480 int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
2481 BlockDriverState *base)
2482 {
2483 BlockDriverState *intermediate;
2484 BlockDriverState *base_bs = NULL;
2485 BlockDriverState *new_top_bs = NULL;
2486 BlkIntermediateStates *intermediate_state, *next;
2487 int ret = -EIO;
2488
2489 QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
2490 QSIMPLEQ_INIT(&states_to_delete);
2491
2492 if (!top->drv || !base->drv) {
2493 goto exit;
2494 }
2495
2496 new_top_bs = bdrv_find_overlay(active, top);
2497
2498 if (new_top_bs == NULL) {
2499 /* we could not find the image above 'top', this is an error */
2500 goto exit;
2501 }
2502
2503 /* special case of new_top_bs->backing_hd already pointing to base - nothing
2504 * to do, no intermediate images */
2505 if (new_top_bs->backing_hd == base) {
2506 ret = 0;
2507 goto exit;
2508 }
2509
2510 intermediate = top;
2511
2512 /* now we will go down through the list, and add each BDS we find
2513 * into our deletion queue, until we hit the 'base'
2514 */
2515 while (intermediate) {
2516 intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
2517 intermediate_state->bs = intermediate;
2518 QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);
2519
2520 if (intermediate->backing_hd == base) {
2521 base_bs = intermediate->backing_hd;
2522 break;
2523 }
2524 intermediate = intermediate->backing_hd;
2525 }
2526 if (base_bs == NULL) {
2527 /* Something went wrong: we did not end at the base. Safely
2528 * unravel everything and exit with an error. */
2529 goto exit;
2530 }
2531
2532 /* success - we can delete the intermediate states, and link top->base */
2533 ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
2534 base_bs->drv ? base_bs->drv->format_name : "");
2535 if (ret) {
2536 goto exit;
2537 }
2538 new_top_bs->backing_hd = base_bs;
2539
2540 bdrv_refresh_limits(new_top_bs);
2541
2542 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2543 /* so that bdrv_close() does not recursively close the chain */
2544 intermediate_state->bs->backing_hd = NULL;
2545 bdrv_unref(intermediate_state->bs);
2546 }
2547 ret = 0;
2548
2549 exit:
2550 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2551 g_free(intermediate_state);
2552 }
2553 return ret;
2554 }
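/*
 * Usage sketch for the chain shown in the comment above: with
 * bottom <- base <- intermediate <- top <- active, dropping everything
 * from above 'base' up to and including 'top' is a single call, where
 * 'active', 'top' and 'base' are the respective BlockDriverState pointers.
 *
 *     int ret = bdrv_drop_intermediate(active, top, base);
 *     if (ret < 0) {
 *         // e.g. top == active, or base was not found below top
 *     }
 */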
2555
2556
2557 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
2558 size_t size)
2559 {
2560 int64_t len;
2561
2562 if (!bdrv_is_inserted(bs))
2563 return -ENOMEDIUM;
2564
2565 if (bs->growable)
2566 return 0;
2567
2568 len = bdrv_getlength(bs);
2569
2570 if (offset < 0)
2571 return -EIO;
2572
2573 if ((offset > len) || (len - offset < size))
2574 return -EIO;
2575
2576 return 0;
2577 }
2578
2579 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
2580 int nb_sectors)
2581 {
2582 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
2583 nb_sectors * BDRV_SECTOR_SIZE);
2584 }
2585
2586 typedef struct RwCo {
2587 BlockDriverState *bs;
2588 int64_t offset;
2589 QEMUIOVector *qiov;
2590 bool is_write;
2591 int ret;
2592 BdrvRequestFlags flags;
2593 } RwCo;
2594
2595 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
2596 {
2597 RwCo *rwco = opaque;
2598
2599 if (!rwco->is_write) {
2600 rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
2601 rwco->qiov->size, rwco->qiov,
2602 rwco->flags);
2603 } else {
2604 rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
2605 rwco->qiov->size, rwco->qiov,
2606 rwco->flags);
2607 }
2608 }
2609
2610 /*
2611 * Process a vectored synchronous request using coroutines
2612 */
2613 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
2614 QEMUIOVector *qiov, bool is_write,
2615 BdrvRequestFlags flags)
2616 {
2617 Coroutine *co;
2618 RwCo rwco = {
2619 .bs = bs,
2620 .offset = offset,
2621 .qiov = qiov,
2622 .is_write = is_write,
2623 .ret = NOT_DONE,
2624 .flags = flags,
2625 };
2626
2627 /**
2628 * In a synchronous call context the vcpu is blocked, so this throttling
2629 * timer will never fire; therefore I/O throttling has to be disabled here
2630 * if it has been enabled.
2631 */
2632 if (bs->io_limits_enabled) {
2633 fprintf(stderr, "Disabling I/O throttling on '%s' due "
2634 "to synchronous I/O.\n", bdrv_get_device_name(bs));
2635 bdrv_io_limits_disable(bs);
2636 }
2637
2638 if (qemu_in_coroutine()) {
2639 /* Fast-path if already in coroutine context */
2640 bdrv_rw_co_entry(&rwco);
2641 } else {
2642 co = qemu_coroutine_create(bdrv_rw_co_entry);
2643 qemu_coroutine_enter(co, &rwco);
2644 while (rwco.ret == NOT_DONE) {
2645 qemu_aio_wait();
2646 }
2647 }
2648 return rwco.ret;
2649 }
2650
2651 /*
2652 * Process a synchronous request using coroutines
2653 */
2654 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
2655 int nb_sectors, bool is_write, BdrvRequestFlags flags)
2656 {
2657 QEMUIOVector qiov;
2658 struct iovec iov = {
2659 .iov_base = (void *)buf,
2660 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
2661 };
2662
2663 qemu_iovec_init_external(&qiov, &iov, 1);
2664 return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
2665 &qiov, is_write, flags);
2666 }
2667
2668 /* return < 0 if error. See bdrv_write() for the return codes */
2669 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
2670 uint8_t *buf, int nb_sectors)
2671 {
2672 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
2673 }
2674
2675 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */
2676 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
2677 uint8_t *buf, int nb_sectors)
2678 {
2679 bool enabled;
2680 int ret;
2681
2682 enabled = bs->io_limits_enabled;
2683 bs->io_limits_enabled = false;
2684 ret = bdrv_read(bs, sector_num, buf, nb_sectors);
2685 bs->io_limits_enabled = enabled;
2686 return ret;
2687 }
2688
2689 /* Return < 0 if error. Important errors are:
2690 -EIO generic I/O error (may happen for all errors)
2691 -ENOMEDIUM No media inserted.
2692 -EINVAL Invalid sector number or nb_sectors
2693 -EACCES Trying to write a read-only device
2694 */
2695 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
2696 const uint8_t *buf, int nb_sectors)
2697 {
2698 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
2699 }
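/*
 * Sketch of the sector-based synchronous API: read one sector, patch it,
 * write it back. The byte-based bdrv_pread()/bdrv_pwrite() below avoid
 * the sector conversion when the caller thinks in byte offsets.
 *
 *     uint8_t sector[BDRV_SECTOR_SIZE];
 *     int ret = bdrv_read(bs, 0, sector, 1);
 *     if (ret == 0) {
 *         sector[0] ^= 0xff;                  // illustrative modification
 *         ret = bdrv_write(bs, 0, sector, 1);
 *     }
 */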
2700
2701 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
2702 int nb_sectors, BdrvRequestFlags flags)
2703 {
2704 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
2705 BDRV_REQ_ZERO_WRITE | flags);
2706 }
2707
2708 /*
2709 * Completely zero out a block device with the help of bdrv_write_zeroes.
2710 * The operation is sped up by checking the block status and writing
2711 * zeroes only to sectors that do not already read back as zeroes. Optional
2712 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
2713 *
2714 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
2715 */
2716 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
2717 {
2718 int64_t target_size = bdrv_getlength(bs) / BDRV_SECTOR_SIZE;
2719 int64_t ret, nb_sectors, sector_num = 0;
2720 int n;
2721
2722 for (;;) {
2723 nb_sectors = target_size - sector_num;
2724 if (nb_sectors <= 0) {
2725 return 0;
2726 }
2727 if (nb_sectors > INT_MAX) {
2728 nb_sectors = INT_MAX;
2729 }
2730 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
2731 if (ret < 0) {
2732 error_report("error getting block status at sector %" PRId64 ": %s",
2733 sector_num, strerror(-ret));
2734 return ret;
2735 }
2736 if (ret & BDRV_BLOCK_ZERO) {
2737 sector_num += n;
2738 continue;
2739 }
2740 ret = bdrv_write_zeroes(bs, sector_num, n, flags);
2741 if (ret < 0) {
2742 error_report("error writing zeroes at sector %" PRId64 ": %s",
2743 sector_num, strerror(-ret));
2744 return ret;
2745 }
2746 sector_num += n;
2747 }
2748 }
2749
2750 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
2751 {
2752 QEMUIOVector qiov;
2753 struct iovec iov = {
2754 .iov_base = (void *)buf,
2755 .iov_len = bytes,
2756 };
2757 int ret;
2758
2759 if (bytes < 0) {
2760 return -EINVAL;
2761 }
2762
2763 qemu_iovec_init_external(&qiov, &iov, 1);
2764 ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
2765 if (ret < 0) {
2766 return ret;
2767 }
2768
2769 return bytes;
2770 }
2771
2772 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
2773 {
2774 int ret;
2775
2776 ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
2777 if (ret < 0) {
2778 return ret;
2779 }
2780
2781 return qiov->size;
2782 }
2783
2784 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
2785 const void *buf, int bytes)
2786 {
2787 QEMUIOVector qiov;
2788 struct iovec iov = {
2789 .iov_base = (void *) buf,
2790 .iov_len = bytes,
2791 };
2792
2793 if (bytes < 0) {
2794 return -EINVAL;
2795 }
2796
2797 qemu_iovec_init_external(&qiov, &iov, 1);
2798 return bdrv_pwritev(bs, offset, &qiov);
2799 }
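/*
 * Sketch of a byte-granularity read-modify-write using the helpers above;
 * 'Header' is a hypothetical on-disk structure stored at offset 0. Note
 * that both helpers return the byte count on success, -errno on failure.
 *
 *     Header hdr;
 *     int ret = bdrv_pread(bs, 0, &hdr, sizeof(hdr));
 *     if (ret >= 0) {
 *         hdr.dirty = 1;
 *         ret = bdrv_pwrite(bs, 0, &hdr, sizeof(hdr));
 *     }
 */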
2800
2801 /*
2802 * Writes to the file and ensures that no writes are reordered across this
2803 * request (acts as a barrier)
2804 *
2805 * Returns 0 on success, -errno in error cases.
2806 */
2807 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
2808 const void *buf, int count)
2809 {
2810 int ret;
2811
2812 ret = bdrv_pwrite(bs, offset, buf, count);
2813 if (ret < 0) {
2814 return ret;
2815 }
2816
2817 /* No flush needed for cache modes that already do it */
2818 if (bs->enable_write_cache) {
2819 bdrv_flush(bs);
2820 }
2821
2822 return 0;
2823 }
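/*
 * Sketch of the barrier semantics above, as a format driver might use
 * them: update a hypothetical 'hdr' structure on the protocol layer
 * before any dependent data writes are issued.
 *
 *     ret = bdrv_pwrite_sync(bs->file, 0, &hdr, sizeof(hdr));
 *     if (ret < 0) {
 *         return ret;    // header not stable, do not touch the data yet
 *     }
 *     // data writes issued after this point cannot overtake the header
 */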
2824
2825 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
2826 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
2827 {
2828 /* Perform I/O through a temporary buffer so that users who scribble over
2829 * their read buffer while the operation is in progress do not end up
2830 * modifying the image file. This is critical for zero-copy guest I/O
2831 * where anything might happen inside guest memory.
2832 */
2833 void *bounce_buffer;
2834
2835 BlockDriver *drv = bs->drv;
2836 struct iovec iov;
2837 QEMUIOVector bounce_qiov;
2838 int64_t cluster_sector_num;
2839 int cluster_nb_sectors;
2840 size_t skip_bytes;
2841 int ret;
2842
2843 /* Cover entire cluster so no additional backing file I/O is required when
2844 * allocating cluster in the image file.
2845 */
2846 bdrv_round_to_clusters(bs, sector_num, nb_sectors,
2847 &cluster_sector_num, &cluster_nb_sectors);
2848
2849 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
2850 cluster_sector_num, cluster_nb_sectors);
2851
2852 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
2853 iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
2854 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
2855
2856 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
2857 &bounce_qiov);
2858 if (ret < 0) {
2859 goto err;
2860 }
2861
2862 if (drv->bdrv_co_write_zeroes &&
2863 buffer_is_zero(bounce_buffer, iov.iov_len)) {
2864 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
2865 cluster_nb_sectors, 0);
2866 } else {
2867 /* This does not change the data on the disk, so it is not necessary
2868 * to flush even in cache=writethrough mode.
2869 */
2870 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
2871 &bounce_qiov);
2872 }
2873
2874 if (ret < 0) {
2875 /* It might be okay to ignore write errors for guest requests. If this
2876 * is a deliberate copy-on-read then we don't want to ignore the error.
2877 * Simply report it in all cases.
2878 */
2879 goto err;
2880 }
2881
2882 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
2883 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
2884 nb_sectors * BDRV_SECTOR_SIZE);
2885
2886 err:
2887 qemu_vfree(bounce_buffer);
2888 return ret;
2889 }
2890
2891 /*
2892 * Forwards an already correctly aligned request to the BlockDriver. This
2893 * handles copy on read and zeroing after EOF; any other features must be
2894 * implemented by the caller.
2895 */
2896 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
2897 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
2898 int64_t align, QEMUIOVector *qiov, int flags)
2899 {
2900 BlockDriver *drv = bs->drv;
2901 int ret;
2902
2903 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
2904 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
2905
2906 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
2907 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
2908
2909 /* Handle Copy on Read and associated serialisation */
2910 if (flags & BDRV_REQ_COPY_ON_READ) {
2911 /* If we touch the same cluster it counts as an overlap. This
2912 * guarantees that allocating writes will be serialized and not race
2913 * with each other for the same cluster. For example, in copy-on-read
2914 * it ensures that the CoR read and write operations are atomic and
2915 * guest writes cannot interleave between them. */
2916 mark_request_serialising(req, bdrv_get_cluster_size(bs));
2917 }
2918
2919 wait_serialising_requests(req);
2920
2921 if (flags & BDRV_REQ_COPY_ON_READ) {
2922 int pnum;
2923
2924 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
2925 if (ret < 0) {
2926 goto out;
2927 }
2928
2929 if (!ret || pnum != nb_sectors) {
2930 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
2931 goto out;
2932 }
2933 }
2934
2935 /* Forward the request to the BlockDriver */
2936 if (!(bs->zero_beyond_eof && bs->growable)) {
2937 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
2938 } else {
2939 /* Read zeros after EOF of growable BDSes */
2940 int64_t len, total_sectors, max_nb_sectors;
2941
2942 len = bdrv_getlength(bs);
2943 if (len < 0) {
2944 ret = len;
2945 goto out;
2946 }
2947
2948 total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE);
2949 max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
2950 align >> BDRV_SECTOR_BITS);
2951 if (max_nb_sectors > 0) {
2952 ret = drv->bdrv_co_readv(bs, sector_num,
2953 MIN(nb_sectors, max_nb_sectors), qiov);
2954 } else {
2955 ret = 0;
2956 }
2957
2958 /* Reading beyond end of file is supposed to produce zeroes */
2959 if (ret == 0 && total_sectors < sector_num + nb_sectors) {
2960 uint64_t offset = MAX(0, total_sectors - sector_num);
2961 uint64_t bytes = (sector_num + nb_sectors - offset) *
2962 BDRV_SECTOR_SIZE;
2963 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
2964 }
2965 }
2966
2967 out:
2968 return ret;
2969 }
2970
2971 /*
2972 * Handle a read request in coroutine context
2973 */
2974 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
2975 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
2976 BdrvRequestFlags flags)
2977 {
2978 BlockDriver *drv = bs->drv;
2979 BdrvTrackedRequest req;
2980
2981 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
2982 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
2983 uint8_t *head_buf = NULL;
2984 uint8_t *tail_buf = NULL;
2985 QEMUIOVector local_qiov;
2986 bool use_local_qiov = false;
2987 int ret;
2988
2989 if (!drv) {
2990 return -ENOMEDIUM;
2991 }
2992 if (bdrv_check_byte_request(bs, offset, bytes)) {
2993 return -EIO;
2994 }
2995
2996 if (bs->copy_on_read) {
2997 flags |= BDRV_REQ_COPY_ON_READ;
2998 }
2999
3000 /* throttling disk I/O */
3001 if (bs->io_limits_enabled) {
3002 bdrv_io_limits_intercept(bs, bytes, false);
3003 }
3004
3005 /* Align read if necessary by padding qiov */
3006 if (offset & (align - 1)) {
3007 head_buf = qemu_blockalign(bs, align);
3008 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3009 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3010 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3011 use_local_qiov = true;
3012
3013 bytes += offset & (align - 1);
3014 offset = offset & ~(align - 1);
3015 }
3016
3017 if ((offset + bytes) & (align - 1)) {
3018 if (!use_local_qiov) {
3019 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3020 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3021 use_local_qiov = true;
3022 }
3023 tail_buf = qemu_blockalign(bs, align);
3024 qemu_iovec_add(&local_qiov, tail_buf,
3025 align - ((offset + bytes) & (align - 1)));
3026
3027 bytes = ROUND_UP(bytes, align);
3028 }
3029
3030 tracked_request_begin(&req, bs, offset, bytes, false);
3031 ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
3032 use_local_qiov ? &local_qiov : qiov,
3033 flags);
3034 tracked_request_end(&req);
3035
3036 if (use_local_qiov) {
3037 qemu_iovec_destroy(&local_qiov);
3038 qemu_vfree(head_buf);
3039 qemu_vfree(tail_buf);
3040 }
3041
3042 return ret;
3043 }
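/*
 * Worked example of the padding above, for align = 512: a request with
 * offset = 1000 and bytes = 2000 becomes
 *
 *     head pad : 1000 & 511         = 488 bytes -> offset = 512
 *     bytes    : 2000 + 488         = 2488
 *     tail pad : 512 - (3000 & 511) = 72 bytes  -> bytes  = 2560
 *
 * so the driver sees one aligned request for [512, 3072) while the guest
 * buffer still only receives its original [1000, 3000) slice.
 */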
3044
3045 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
3046 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3047 BdrvRequestFlags flags)
3048 {
3049 if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
3050 return -EINVAL;
3051 }
3052
3053 return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
3054 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3055 }
3056
3057 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
3058 int nb_sectors, QEMUIOVector *qiov)
3059 {
3060 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
3061
3062 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
3063 }
3064
3065 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
3066 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
3067 {
3068 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
3069
3070 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
3071 BDRV_REQ_COPY_ON_READ);
3072 }
3073
3074 /* if no limit is specified in the BlockLimits use a default
3075 * of 32768 512-byte sectors (16 MiB) per request.
3076 */
3077 #define MAX_WRITE_ZEROES_DEFAULT 32768
3078
3079 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
3080 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
3081 {
3082 BlockDriver *drv = bs->drv;
3083 QEMUIOVector qiov;
3084 struct iovec iov = {0};
3085 int ret = 0;
3086
3087 int max_write_zeroes = bs->bl.max_write_zeroes ?
3088 bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;
3089
3090 while (nb_sectors > 0 && !ret) {
3091 int num = nb_sectors;
3092
3093 /* Align request. Block drivers can expect the "bulk" of the request
3094 * to be aligned.
3095 */
3096 if (bs->bl.write_zeroes_alignment
3097 && num > bs->bl.write_zeroes_alignment) {
3098 if (sector_num % bs->bl.write_zeroes_alignment != 0) {
3099 /* Make a small request up to the first aligned sector. */
3100 num = bs->bl.write_zeroes_alignment;
3101 num -= sector_num % bs->bl.write_zeroes_alignment;
3102 } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
3103 /* Shorten the request to the last aligned sector. num cannot
3104 * underflow because num > bs->bl.write_zeroes_alignment.
3105 */
3106 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
3107 }
3108 }
3109
3110 /* limit request size */
3111 if (num > max_write_zeroes) {
3112 num = max_write_zeroes;
3113 }
3114
3115 ret = -ENOTSUP;
3116 /* First try the efficient write zeroes operation */
3117 if (drv->bdrv_co_write_zeroes) {
3118 ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
3119 }
3120
3121 if (ret == -ENOTSUP) {
3122 /* Fall back to bounce buffer if write zeroes is unsupported */
3123 iov.iov_len = num * BDRV_SECTOR_SIZE;
3124 if (iov.iov_base == NULL) {
3125 iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE);
3126 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
3127 }
3128 qemu_iovec_init_external(&qiov, &iov, 1);
3129
3130 ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
3131
3132 /* Keep bounce buffer around if it is big enough for all
3133 * future requests.
3134 */
3135 if (num < max_write_zeroes) {
3136 qemu_vfree(iov.iov_base);
3137 iov.iov_base = NULL;
3138 }
3139 }
3140
3141 sector_num += num;
3142 nb_sectors -= num;
3143 }
3144
3145 qemu_vfree(iov.iov_base);
3146 return ret;
3147 }
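/*
 * Worked example of the alignment splitting above: with
 * write_zeroes_alignment = 8 sectors, a request for sectors [5, 25) is
 * issued as three driver calls:
 *
 *     [5, 8)    head fragment up to the first aligned sector
 *     [8, 24)   aligned bulk
 *     [24, 25)  unaligned tail
 *
 * Each call still tries the driver's efficient zeroing path first and
 * only falls back to the bounce buffer on -ENOTSUP.
 */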
3148
3149 /*
3150 * Forwards an already correctly aligned write request to the BlockDriver.
3151 */
3152 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
3153 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3154 QEMUIOVector *qiov, int flags)
3155 {
3156 BlockDriver *drv = bs->drv;
3157 bool waited;
3158 int ret;
3159
3160 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3161 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3162
3163 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3164 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3165
3166 waited = wait_serialising_requests(req);
3167 assert(!waited || !req->serialising);
3168 assert(req->overlap_offset <= offset);
3169 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
3170
3171 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
3172
3173 if (ret < 0) {
3174 /* Do nothing, write notifier decided to fail this request */
3175 } else if (flags & BDRV_REQ_ZERO_WRITE) {
3176 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
3177 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
3178 } else {
3179 BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
3180 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
3181 }
3182 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);
3183
3184 if (ret == 0 && !bs->enable_write_cache) {
3185 ret = bdrv_co_flush(bs);
3186 }
3187
3188 bdrv_set_dirty(bs, sector_num, nb_sectors);
3189
3190 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
3191 bs->wr_highest_sector = sector_num + nb_sectors - 1;
3192 }
3193 if (bs->growable && ret >= 0) {
3194 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
3195 }
3196
3197 return ret;
3198 }
3199
3200 /*
3201 * Handle a write request in coroutine context
3202 */
3203 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
3204 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3205 BdrvRequestFlags flags)
3206 {
3207 BdrvTrackedRequest req;
3208 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3209 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3210 uint8_t *head_buf = NULL;
3211 uint8_t *tail_buf = NULL;
3212 QEMUIOVector local_qiov;
3213 bool use_local_qiov = false;
3214 int ret;
3215
3216 if (!bs->drv) {
3217 return -ENOMEDIUM;
3218 }
3219 if (bs->read_only) {
3220 return -EACCES;
3221 }
3222 if (bdrv_check_byte_request(bs, offset, bytes)) {
3223 return -EIO;
3224 }
3225
3226 /* throttling disk I/O */
3227 if (bs->io_limits_enabled) {
3228 bdrv_io_limits_intercept(bs, bytes, true);
3229 }
3230
3231 /*
3232 * Align write if necessary by performing a read-modify-write cycle.
3233 * Pad qiov with the read parts and be sure to have a tracked request not
3234 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
3235 */
3236 tracked_request_begin(&req, bs, offset, bytes, true);
3237
3238 if (offset & (align - 1)) {
3239 QEMUIOVector head_qiov;
3240 struct iovec head_iov;
3241
3242 mark_request_serialising(&req, align);
3243 wait_serialising_requests(&req);
3244
3245 head_buf = qemu_blockalign(bs, align);
3246 head_iov = (struct iovec) {
3247 .iov_base = head_buf,
3248 .iov_len = align,
3249 };
3250 qemu_iovec_init_external(&head_qiov, &head_iov, 1);
3251
3252 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
3253 ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
3254 align, &head_qiov, 0);
3255 if (ret < 0) {
3256 goto fail;
3257 }
3258 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
3259
3260 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3261 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3262 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3263 use_local_qiov = true;
3264
3265 bytes += offset & (align - 1);
3266 offset = offset & ~(align - 1);
3267 }
3268
3269 if ((offset + bytes) & (align - 1)) {
3270 QEMUIOVector tail_qiov;
3271 struct iovec tail_iov;
3272 size_t tail_bytes;
3273 bool waited;
3274
3275 mark_request_serialising(&req, align);
3276 waited = wait_serialising_requests(&req);
3277 assert(!waited || !use_local_qiov);
3278
3279 tail_buf = qemu_blockalign(bs, align);
3280 tail_iov = (struct iovec) {
3281 .iov_base = tail_buf,
3282 .iov_len = align,
3283 };
3284 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
3285
3286 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
3287 ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
3288 align, &tail_qiov, 0);
3289 if (ret < 0) {
3290 goto fail;
3291 }
3292 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
3293
3294 if (!use_local_qiov) {
3295 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3296 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3297 use_local_qiov = true;
3298 }
3299
3300 tail_bytes = (offset + bytes) & (align - 1);
3301 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
3302
3303 bytes = ROUND_UP(bytes, align);
3304 }
3305
3306 ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
3307 use_local_qiov ? &local_qiov : qiov,
3308 flags);
3309
3310 fail:
3311 tracked_request_end(&req);
3312
3313 if (use_local_qiov) {
3314 qemu_iovec_destroy(&local_qiov);
3315 }
3316 qemu_vfree(head_buf);
3317 qemu_vfree(tail_buf);
3318
3319 return ret;
3320 }
3321
3322 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
3323 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3324 BdrvRequestFlags flags)
3325 {
3326 if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
3327 return -EINVAL;
3328 }
3329
3330 return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
3331 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3332 }
3333
3334 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
3335 int nb_sectors, QEMUIOVector *qiov)
3336 {
3337 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
3338
3339 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
3340 }
3341
3342 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
3343 int64_t sector_num, int nb_sectors,
3344 BdrvRequestFlags flags)
3345 {
3346 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
3347
3348 if (!(bs->open_flags & BDRV_O_UNMAP)) {
3349 flags &= ~BDRV_REQ_MAY_UNMAP;
3350 }
3351
3352 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
3353 BDRV_REQ_ZERO_WRITE | flags);
3354 }
3355
3356 /**
3357 * Truncate file to 'offset' bytes (needed only for file protocols)
3358 */
3359 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
3360 {
3361 BlockDriver *drv = bs->drv;
3362 int ret;
3363 if (!drv)
3364 return -ENOMEDIUM;
3365 if (!drv->bdrv_truncate)
3366 return -ENOTSUP;
3367 if (bs->read_only)
3368 return -EACCES;
3369 if (bdrv_in_use(bs))
3370 return -EBUSY;
3371 ret = drv->bdrv_truncate(bs, offset);
3372 if (ret == 0) {
3373 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3374 bdrv_dev_resize_cb(bs);
3375 }
3376 return ret;
3377 }
3378
3379 /**
3380 * Length of an allocated file in bytes. Sparse files are counted by actual
3381 * allocated space. Return < 0 if error or unknown.
3382 */
3383 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
3384 {
3385 BlockDriver *drv = bs->drv;
3386 if (!drv) {
3387 return -ENOMEDIUM;
3388 }
3389 if (drv->bdrv_get_allocated_file_size) {
3390 return drv->bdrv_get_allocated_file_size(bs);
3391 }
3392 if (bs->file) {
3393 return bdrv_get_allocated_file_size(bs->file);
3394 }
3395 return -ENOTSUP;
3396 }
3397
3398 /**
3399 * Length of a file in bytes. Return < 0 if error or unknown.
3400 */
3401 int64_t bdrv_getlength(BlockDriverState *bs)
3402 {
3403 BlockDriver *drv = bs->drv;
3404 if (!drv)
3405 return -ENOMEDIUM;
3406
3407 if (drv->has_variable_length) {
3408 int ret = refresh_total_sectors(bs, bs->total_sectors);
3409 if (ret < 0) {
3410 return ret;
3411 }
3412 }
3413 return bs->total_sectors * BDRV_SECTOR_SIZE;
3414 }
3415
3416 /* return 0 as number of sectors if no device present or error */
3417 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
3418 {
3419 int64_t length;
3420 length = bdrv_getlength(bs);
3421 if (length < 0)
3422 length = 0;
3423 else
3424 length = length >> BDRV_SECTOR_BITS;
3425 *nb_sectors_ptr = length;
3426 }
3427
3428 void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
3429 BlockdevOnError on_write_error)
3430 {
3431 bs->on_read_error = on_read_error;
3432 bs->on_write_error = on_write_error;
3433 }
3434
3435 BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
3436 {
3437 return is_read ? bs->on_read_error : bs->on_write_error;
3438 }
3439
3440 BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
3441 {
3442 BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;
3443
3444 switch (on_err) {
3445 case BLOCKDEV_ON_ERROR_ENOSPC:
3446 return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT;
3447 case BLOCKDEV_ON_ERROR_STOP:
3448 return BDRV_ACTION_STOP;
3449 case BLOCKDEV_ON_ERROR_REPORT:
3450 return BDRV_ACTION_REPORT;
3451 case BLOCKDEV_ON_ERROR_IGNORE:
3452 return BDRV_ACTION_IGNORE;
3453 default:
3454 abort();
3455 }
3456 }
3457
3458 /* This is done by device models because, while the block layer knows
3459 * about the error, it does not know whether an operation comes from
3460 * the device or the block layer (from a job, for example).
3461 */
3462 void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
3463 bool is_read, int error)
3464 {
3465 assert(error >= 0);
3466 bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
3467 if (action == BDRV_ACTION_STOP) {
3468 vm_stop(RUN_STATE_IO_ERROR);
3469 bdrv_iostatus_set_err(bs, error);
3470 }
3471 }
3472
3473 int bdrv_is_read_only(BlockDriverState *bs)
3474 {
3475 return bs->read_only;
3476 }
3477
3478 int bdrv_is_sg(BlockDriverState *bs)
3479 {
3480 return bs->sg;
3481 }
3482
3483 int bdrv_enable_write_cache(BlockDriverState *bs)
3484 {
3485 return bs->enable_write_cache;
3486 }
3487
3488 void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
3489 {
3490 bs->enable_write_cache = wce;
3491
3492 /* so a reopen() will preserve wce */
3493 if (wce) {
3494 bs->open_flags |= BDRV_O_CACHE_WB;
3495 } else {
3496 bs->open_flags &= ~BDRV_O_CACHE_WB;
3497 }
3498 }
3499
3500 int bdrv_is_encrypted(BlockDriverState *bs)
3501 {
3502 if (bs->backing_hd && bs->backing_hd->encrypted)
3503 return 1;
3504 return bs->encrypted;
3505 }
3506
3507 int bdrv_key_required(BlockDriverState *bs)
3508 {
3509 BlockDriverState *backing_hd = bs->backing_hd;
3510
3511 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
3512 return 1;
3513 return (bs->encrypted && !bs->valid_key);
3514 }
3515
3516 int bdrv_set_key(BlockDriverState *bs, const char *key)
3517 {
3518 int ret;
3519 if (bs->backing_hd && bs->backing_hd->encrypted) {
3520 ret = bdrv_set_key(bs->backing_hd, key);
3521 if (ret < 0)
3522 return ret;
3523 if (!bs->encrypted)
3524 return 0;
3525 }
3526 if (!bs->encrypted) {
3527 return -EINVAL;
3528 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
3529 return -ENOMEDIUM;
3530 }
3531 ret = bs->drv->bdrv_set_key(bs, key);
3532 if (ret < 0) {
3533 bs->valid_key = 0;
3534 } else if (!bs->valid_key) {
3535 bs->valid_key = 1;
3536 /* call the change callback now, we skipped it on open */
3537 bdrv_dev_change_media_cb(bs, true);
3538 }
3539 return ret;
3540 }
3541
3542 const char *bdrv_get_format_name(BlockDriverState *bs)
3543 {
3544 return bs->drv ? bs->drv->format_name : NULL;
3545 }
3546
3547 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
3548 void *opaque)
3549 {
3550 BlockDriver *drv;
3551
3552 QLIST_FOREACH(drv, &bdrv_drivers, list) {
3553 it(opaque, drv->format_name);
3554 }
3555 }
3556
3557 /* Find a block backend BlockDriverState by its device name */
3558 BlockDriverState *bdrv_find(const char *name)
3559 {
3560 BlockDriverState *bs;
3561
3562 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3563 if (!strcmp(name, bs->device_name)) {
3564 return bs;
3565 }
3566 }
3567 return NULL;
3568 }
3569
3570 /* Find a named node in the graph of BlockDriverStates */
3571 BlockDriverState *bdrv_find_node(const char *node_name)
3572 {
3573 BlockDriverState *bs;
3574
3575 assert(node_name);
3576
3577 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3578 if (!strcmp(node_name, bs->node_name)) {
3579 return bs;
3580 }
3581 }
3582 return NULL;
3583 }
3584
3585 /* Put this QMP function here so it can access the static graph_bdrv_states. */
3586 BlockDeviceInfoList *bdrv_named_nodes_list(void)
3587 {
3588 BlockDeviceInfoList *list, *entry;
3589 BlockDriverState *bs;
3590
3591 list = NULL;
3592 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3593 entry = g_malloc0(sizeof(*entry));
3594 entry->value = bdrv_block_device_info(bs);
3595 entry->next = list;
3596 list = entry;
3597 }
3598
3599 return list;
3600 }
3601
3602 BlockDriverState *bdrv_lookup_bs(const char *device,
3603 const char *node_name,
3604 Error **errp)
3605 {
3606 BlockDriverState *bs = NULL;
3607
3608 if (device) {
3609 bs = bdrv_find(device);
3610
3611 if (bs) {
3612 return bs;
3613 }
3614 }
3615
3616 if (node_name) {
3617 bs = bdrv_find_node(node_name);
3618
3619 if (bs) {
3620 return bs;
3621 }
3622 }
3623
3624 error_setg(errp, "Cannot find device=%s nor node_name=%s",
3625 device ? device : "",
3626 node_name ? node_name : "");
3627 return NULL;
3628 }
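/*
 * Usage sketch, as a QMP handler might call it; "virtio0" is a
 * placeholder device name and 'errp' the handler's Error pointer.
 *
 *     Error *local_err = NULL;
 *     BlockDriverState *bs = bdrv_lookup_bs("virtio0", NULL, &local_err);
 *     if (!bs) {
 *         error_propagate(errp, local_err);
 *         return;
 *     }
 */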
3629
3630 BlockDriverState *bdrv_next(BlockDriverState *bs)
3631 {
3632 if (!bs) {
3633 return QTAILQ_FIRST(&bdrv_states);
3634 }
3635 return QTAILQ_NEXT(bs, device_list);
3636 }
3637
3638 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
3639 {
3640 BlockDriverState *bs;
3641
3642 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3643 it(opaque, bs);
3644 }
3645 }
3646
3647 const char *bdrv_get_device_name(BlockDriverState *bs)
3648 {
3649 return bs->device_name;
3650 }
3651
3652 int bdrv_get_flags(BlockDriverState *bs)
3653 {
3654 return bs->open_flags;
3655 }
3656
3657 int bdrv_flush_all(void)
3658 {
3659 BlockDriverState *bs;
3660 int result = 0;
3661
3662 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3663 int ret = bdrv_flush(bs);
3664 if (ret < 0 && !result) {
3665 result = ret;
3666 }
3667 }
3668
3669 return result;
3670 }
3671
3672 int bdrv_has_zero_init_1(BlockDriverState *bs)
3673 {
3674 return 1;
3675 }
3676
3677 int bdrv_has_zero_init(BlockDriverState *bs)
3678 {
3679 assert(bs->drv);
3680
3681 /* If BS is a copy on write image, it is initialized to
3682 the contents of the base image, which may not be zeroes. */
3683 if (bs->backing_hd) {
3684 return 0;
3685 }
3686 if (bs->drv->bdrv_has_zero_init) {
3687 return bs->drv->bdrv_has_zero_init(bs);
3688 }
3689
3690 /* safe default */
3691 return 0;
3692 }
3693
3694 bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
3695 {
3696 BlockDriverInfo bdi;
3697
3698 if (bs->backing_hd) {
3699 return false;
3700 }
3701
3702 if (bdrv_get_info(bs, &bdi) == 0) {
3703 return bdi.unallocated_blocks_are_zero;
3704 }
3705
3706 return false;
3707 }
3708
3709 bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
3710 {
3711 BlockDriverInfo bdi;
3712
3713 if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
3714 return false;
3715 }
3716
3717 if (bdrv_get_info(bs, &bdi) == 0) {
3718 return bdi.can_write_zeroes_with_unmap;
3719 }
3720
3721 return false;
3722 }
3723
3724 typedef struct BdrvCoGetBlockStatusData {
3725 BlockDriverState *bs;
3726 BlockDriverState *base;
3727 int64_t sector_num;
3728 int nb_sectors;
3729 int *pnum;
3730 int64_t ret;
3731 bool done;
3732 } BdrvCoGetBlockStatusData;
3733
3734 /*
3735 * Returns the allocation status (BDRV_BLOCK_* flags) of the specified
3736 * sectors. Drivers not implementing the functionality are assumed to not
3737 * support backing files, hence all their sectors are reported as allocated.
3738 *
3739 * If 'sector_num' is beyond the end of the disk image the return value is 0
3740 * and 'pnum' is set to 0.
3741 *
3742 * 'pnum' is set to the number of sectors (including and immediately following
3743 * the specified sector) that are known to be in the same
3744 * allocated/unallocated state.
3745 *
3746 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
3747 * beyond the end of the disk image it will be clamped.
3748 */
3749 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
3750 int64_t sector_num,
3751 int nb_sectors, int *pnum)
3752 {
3753 int64_t length;
3754 int64_t n;
3755 int64_t ret, ret2;
3756
3757 length = bdrv_getlength(bs);
3758 if (length < 0) {
3759 return length;
3760 }
3761
3762 if (sector_num >= (length >> BDRV_SECTOR_BITS)) {
3763 *pnum = 0;
3764 return 0;
3765 }
3766
3767 n = bs->total_sectors - sector_num;
3768 if (n < nb_sectors) {
3769 nb_sectors = n;
3770 }
3771
3772 if (!bs->drv->bdrv_co_get_block_status) {
3773 *pnum = nb_sectors;
3774 ret = BDRV_BLOCK_DATA;
3775 if (bs->drv->protocol_name) {
3776 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
3777 }
3778 return ret;
3779 }
3780
3781 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
3782 if (ret < 0) {
3783 *pnum = 0;
3784 return ret;
3785 }
3786
3787 if (ret & BDRV_BLOCK_RAW) {
3788 assert(ret & BDRV_BLOCK_OFFSET_VALID);
3789 return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3790 *pnum, pnum);
3791 }
3792
3793 if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
3794 if (bdrv_unallocated_blocks_are_zero(bs)) {
3795 ret |= BDRV_BLOCK_ZERO;
3796 } else if (bs->backing_hd) {
3797 BlockDriverState *bs2 = bs->backing_hd;
3798 int64_t length2 = bdrv_getlength(bs2);
3799 if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) {
3800 ret |= BDRV_BLOCK_ZERO;
3801 }
3802 }
3803 }
3804
3805 if (bs->file &&
3806 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
3807 (ret & BDRV_BLOCK_OFFSET_VALID)) {
3808 ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3809 *pnum, pnum);
3810 if (ret2 >= 0) {
3811 /* Ignore errors. This is just providing extra information, it
3812 * is useful but not necessary.
3813 */
3814 ret |= (ret2 & BDRV_BLOCK_ZERO);
3815 }
3816 }
3817
3818 return ret;
3819 }
3820
3821 /* Coroutine wrapper for bdrv_get_block_status() */
3822 static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
3823 {
3824 BdrvCoGetBlockStatusData *data = opaque;
3825 BlockDriverState *bs = data->bs;
3826
3827 data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
3828 data->pnum);
3829 data->done = true;
3830 }
3831
3832 /*
3833 * Synchronous wrapper around bdrv_co_get_block_status().
3834 *
3835 * See bdrv_co_get_block_status() for details.
3836 */
3837 int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
3838 int nb_sectors, int *pnum)
3839 {
3840 Coroutine *co;
3841 BdrvCoGetBlockStatusData data = {
3842 .bs = bs,
3843 .sector_num = sector_num,
3844 .nb_sectors = nb_sectors,
3845 .pnum = pnum,
3846 .done = false,
3847 };
3848
3849 if (qemu_in_coroutine()) {
3850 /* Fast-path if already in coroutine context */
3851 bdrv_get_block_status_co_entry(&data);
3852 } else {
3853 co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
3854 qemu_coroutine_enter(co, &data);
3855 while (!data.done) {
3856 qemu_aio_wait();
3857 }
3858 }
3859 return data.ret;
3860 }
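/*
 * Sketch of walking an image's allocation map with the synchronous
 * wrapper, similar in spirit to what "qemu-img map" does. Error handling
 * for bdrv_getlength() is elided.
 *
 *     int64_t total_sectors = bdrv_getlength(bs) / BDRV_SECTOR_SIZE;
 *     int64_t sector_num = 0;
 *     while (sector_num < total_sectors) {
 *         int n;
 *         int nb = MIN(total_sectors - sector_num, INT_MAX);
 *         int64_t ret = bdrv_get_block_status(bs, sector_num, nb, &n);
 *         if (ret < 0) {
 *             break;
 *         }
 *         // inspect ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO) for the
 *         // range [sector_num, sector_num + n)
 *         sector_num += n;
 *     }
 */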
3861
3862 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
3863 int nb_sectors, int *pnum)
3864 {
3865 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
3866 if (ret < 0) {
3867 return ret;
3868 }
3869 return
3870 (ret & BDRV_BLOCK_DATA) ||
3871 ((ret & BDRV_BLOCK_ZERO) && !bdrv_has_zero_init(bs));
3872 }
3873
3874 /*
3875 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
3876 *
3877 * Return true if the given sector is allocated in any image between
3878 * BASE (exclusive) and TOP (inclusive). BASE can be NULL to check if the
3879 * given sector is allocated in any image of the chain. Return false otherwise.
3880 *
3881 * 'pnum' is set to the number of sectors (including and immediately following
3882 * the specified sector) that are known to be in the same
3883 * allocated/unallocated state.
3884 *
3885 */
3886 int bdrv_is_allocated_above(BlockDriverState *top,
3887 BlockDriverState *base,
3888 int64_t sector_num,
3889 int nb_sectors, int *pnum)
3890 {
3891 BlockDriverState *intermediate;
3892 int ret, n = nb_sectors;
3893
3894 intermediate = top;
3895 while (intermediate && intermediate != base) {
3896 int pnum_inter;
3897 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
3898 &pnum_inter);
3899 if (ret < 0) {
3900 return ret;
3901 } else if (ret) {
3902 *pnum = pnum_inter;
3903 return 1;
3904 }
3905
3906 /*
3907 * [sector_num, nb_sectors] is unallocated on top but intermediate
3908 * might have
3909 *
3910 * [sector_num+x, nb_sectors] allocated.
3911 */
3912 if (n > pnum_inter &&
3913 (intermediate == top ||
3914 sector_num + pnum_inter < intermediate->total_sectors)) {
3915 n = pnum_inter;
3916 }
3917
3918 intermediate = intermediate->backing_hd;
3919 }
3920
3921 *pnum = n;
3922 return 0;
3923 }
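/*
 * Usage sketch for the chain in the comment above: check whether a single
 * sector is provided by anything between 'base' (exclusive) and 'top'
 * (inclusive).
 *
 *     int pnum;
 *     int ret = bdrv_is_allocated_above(top, base, sector_num, 1, &pnum);
 *     // ret == 1: some image in (base, top] allocates the sector
 *     // ret == 0: the sector comes from base, or is unallocated everywhere
 */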
3924
3925 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
3926 {
3927 if (bs->backing_hd && bs->backing_hd->encrypted)
3928 return bs->backing_file;
3929 else if (bs->encrypted)
3930 return bs->filename;
3931 else
3932 return NULL;
3933 }
3934
3935 void bdrv_get_backing_filename(BlockDriverState *bs,
3936 char *filename, int filename_size)
3937 {
3938 pstrcpy(filename, filename_size, bs->backing_file);
3939 }
3940
3941 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
3942 const uint8_t *buf, int nb_sectors)
3943 {
3944 BlockDriver *drv = bs->drv;
3945 if (!drv)
3946 return -ENOMEDIUM;
3947 if (!drv->bdrv_write_compressed)
3948 return -ENOTSUP;
3949 if (bdrv_check_request(bs, sector_num, nb_sectors))
3950 return -EIO;
3951
3952 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
3953
3954 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
3955 }
3956
3957 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
3958 {
3959 BlockDriver *drv = bs->drv;
3960 if (!drv)
3961 return -ENOMEDIUM;
3962 if (!drv->bdrv_get_info)
3963 return -ENOTSUP;
3964 memset(bdi, 0, sizeof(*bdi));
3965 return drv->bdrv_get_info(bs, bdi);
3966 }
3967
3968 ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
3969 {
3970 BlockDriver *drv = bs->drv;
3971 if (drv && drv->bdrv_get_specific_info) {
3972 return drv->bdrv_get_specific_info(bs);
3973 }
3974 return NULL;
3975 }
3976
3977 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
3978 int64_t pos, int size)
3979 {
3980 QEMUIOVector qiov;
3981 struct iovec iov = {
3982 .iov_base = (void *) buf,
3983 .iov_len = size,
3984 };
3985
3986 qemu_iovec_init_external(&qiov, &iov, 1);
3987 return bdrv_writev_vmstate(bs, &qiov, pos);
3988 }
3989
3990 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
3991 {
3992 BlockDriver *drv = bs->drv;
3993
3994 if (!drv) {
3995 return -ENOMEDIUM;
3996 } else if (drv->bdrv_save_vmstate) {
3997 return drv->bdrv_save_vmstate(bs, qiov, pos);
3998 } else if (bs->file) {
3999 return bdrv_writev_vmstate(bs->file, qiov, pos);
4000 }
4001
4002 return -ENOTSUP;
4003 }
4004
4005 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
4006 int64_t pos, int size)
4007 {
4008 BlockDriver *drv = bs->drv;
4009 if (!drv)
4010 return -ENOMEDIUM;
4011 if (drv->bdrv_load_vmstate)
4012 return drv->bdrv_load_vmstate(bs, buf, pos, size);
4013 if (bs->file)
4014 return bdrv_load_vmstate(bs->file, buf, pos, size);
4015 return -ENOTSUP;
4016 }
4017
4018 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
4019 {
4020 if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
4021 return;
4022 }
4023
4024 bs->drv->bdrv_debug_event(bs, event);
4025 }
4026
4027 int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
4028 const char *tag)
4029 {
4030 while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
4031 bs = bs->file;
4032 }
4033
4034 if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
4035 return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
4036 }
4037
4038 return -ENOTSUP;
4039 }
4040
4041 int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
4042 {
4043 while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
4044 bs = bs->file;
4045 }
4046
4047 if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
4048 return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
4049 }
4050
4051 return -ENOTSUP;
4052 }
4053
4054 int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
4055 {
4056 while (bs && bs->drv && !bs->drv->bdrv_debug_resume) {
4057 bs = bs->file;
4058 }
4059
4060 if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
4061 return bs->drv->bdrv_debug_resume(bs, tag);
4062 }
4063
4064 return -ENOTSUP;
4065 }
4066
4067 bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
4068 {
4069 while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
4070 bs = bs->file;
4071 }
4072
4073 if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
4074 return bs->drv->bdrv_debug_is_suspended(bs, tag);
4075 }
4076
4077 return false;
4078 }
4079
4080 int bdrv_is_snapshot(BlockDriverState *bs)
4081 {
4082 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
4083 }
4084
4085 /* backing_file can either be relative, or absolute, or a protocol. If it is
4086 * relative, it must be relative to the chain. So, passing in bs->filename
4087 * from a BDS as backing_file should not be done, as that may be relative to
4088 * the CWD rather than the chain. */
4089 BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
4090 const char *backing_file)
4091 {
4092 char *filename_full = NULL;
4093 char *backing_file_full = NULL;
4094 char *filename_tmp = NULL;
4095 int is_protocol = 0;
4096 BlockDriverState *curr_bs = NULL;
4097 BlockDriverState *retval = NULL;
4098
4099 if (!bs || !bs->drv || !backing_file) {
4100 return NULL;
4101 }
4102
4103 filename_full = g_malloc(PATH_MAX);
4104 backing_file_full = g_malloc(PATH_MAX);
4105 filename_tmp = g_malloc(PATH_MAX);
4106
4107 is_protocol = path_has_protocol(backing_file);
4108
4109 for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {
4110
4111 /* If either of the filename paths is actually a protocol, then
4112 * compare unmodified paths; otherwise make paths relative */
4113 if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
4114 if (strcmp(backing_file, curr_bs->backing_file) == 0) {
4115 retval = curr_bs->backing_hd;
4116 break;
4117 }
4118 } else {
4119 /* If not an absolute filename path, make it relative to the current
4120 * image's filename path */
4121 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4122 backing_file);
4123
4124 /* We are going to compare absolute pathnames */
4125 if (!realpath(filename_tmp, filename_full)) {
4126 continue;
4127 }
4128
4129 /* We need to make sure the backing filename we are comparing against
4130 * is relative to the current image filename (or absolute) */
4131 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4132 curr_bs->backing_file);
4133
4134 if (!realpath(filename_tmp, backing_file_full)) {
4135 continue;
4136 }
4137
4138 if (strcmp(backing_file_full, filename_full) == 0) {
4139 retval = curr_bs->backing_hd;
4140 break;
4141 }
4142 }
4143 }
4144
4145 g_free(filename_full);
4146 g_free(backing_file_full);
4147 g_free(filename_tmp);
4148 return retval;
4149 }
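/* Illustrative sketch, not part of the original file: look up a backing
 * image by the name recorded in the overlay's header. Per the warning
 * above, we pass overlay->backing_file rather than overlay->filename so
 * relative names resolve against the chain; "overlay" is a hypothetical
 * caller-provided BDS. */
#if 0
static BlockDriverState *example_lookup_backing(BlockDriverState *overlay)
{
    /* backing_file comes from the image header, so relative paths are
     * interpreted relative to the chain rather than the CWD */
    return bdrv_find_backing_image(overlay, overlay->backing_file);
}
#endif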
4150
4151 int bdrv_get_backing_file_depth(BlockDriverState *bs)
4152 {
4153 if (!bs->drv) {
4154 return 0;
4155 }
4156
4157 if (!bs->backing_hd) {
4158 return 0;
4159 }
4160
4161 return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
4162 }
4163
4164 BlockDriverState *bdrv_find_base(BlockDriverState *bs)
4165 {
4166 BlockDriverState *curr_bs = NULL;
4167
4168 if (!bs) {
4169 return NULL;
4170 }
4171
4172 curr_bs = bs;
4173
4174 while (curr_bs->backing_hd) {
4175 curr_bs = curr_bs->backing_hd;
4176 }
4177 return curr_bs;
4178 }
4179
4180 /**************************************************************/
4181 /* async I/Os */
4182
4183 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
4184 QEMUIOVector *qiov, int nb_sectors,
4185 BlockDriverCompletionFunc *cb, void *opaque)
4186 {
4187 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
4188
4189 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
4190 cb, opaque, false);
4191 }
4192
4193 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
4194 QEMUIOVector *qiov, int nb_sectors,
4195 BlockDriverCompletionFunc *cb, void *opaque)
4196 {
4197 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
4198
4199 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
4200 cb, opaque, true);
4201 }
4202
4203 BlockDriverAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
4204 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
4205 BlockDriverCompletionFunc *cb, void *opaque)
4206 {
4207 trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
4208
4209 return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
4210 BDRV_REQ_ZERO_WRITE | flags,
4211 cb, opaque, true);
4212 }
4213
4214
4215 typedef struct MultiwriteCB {
4216 int error;
4217 int num_requests;
4218 int num_callbacks;
4219 struct {
4220 BlockDriverCompletionFunc *cb;
4221 void *opaque;
4222 QEMUIOVector *free_qiov;
4223 } callbacks[];
4224 } MultiwriteCB;
4225
4226 static void multiwrite_user_cb(MultiwriteCB *mcb)
4227 {
4228 int i;
4229
4230 for (i = 0; i < mcb->num_callbacks; i++) {
4231 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
4232 if (mcb->callbacks[i].free_qiov) {
4233 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
4234 }
4235 g_free(mcb->callbacks[i].free_qiov);
4236 }
4237 }
4238
4239 static void multiwrite_cb(void *opaque, int ret)
4240 {
4241 MultiwriteCB *mcb = opaque;
4242
4243 trace_multiwrite_cb(mcb, ret);
4244
4245 if (ret < 0 && !mcb->error) {
4246 mcb->error = ret;
4247 }
4248
4249 mcb->num_requests--;
4250 if (mcb->num_requests == 0) {
4251 multiwrite_user_cb(mcb);
4252 g_free(mcb);
4253 }
4254 }
4255
4256 static int multiwrite_req_compare(const void *a, const void *b)
4257 {
4258 const BlockRequest *req1 = a, *req2 = b;
4259
4260 /*
4261 * Note that we can't simply return req1->sector - req2->sector here:
4262 * truncating the int64_t difference to the int return type could flip the sign.
4263 */
4264 if (req1->sector > req2->sector) {
4265 return 1;
4266 } else if (req1->sector < req2->sector) {
4267 return -1;
4268 } else {
4269 return 0;
4270 }
4271 }
4272
4273 /*
4274 * Takes a bunch of requests and tries to merge them. Returns the number of
4275 * requests that remain after merging.
4276 */
4277 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
4278 int num_reqs, MultiwriteCB *mcb)
4279 {
4280 int i, outidx;
4281
4282 // Sort requests by start sector
4283 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
4284
4285 // Check whether adjacent requests are exactly sequential or overlapping.
4286 // If so, combine them; requests with a gap in between are never merged.
4287 outidx = 0;
4288 for (i = 1; i < num_reqs; i++) {
4289 int merge = 0;
4290 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
4291
4292 // Handle exactly sequential writes and overlapping writes.
4293 if (reqs[i].sector <= oldreq_last) {
4294 merge = 1;
4295 }
4296
4297 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
4298 merge = 0;
4299 }
4300
4301 if (merge) {
4302 size_t size;
4303 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
4304 qemu_iovec_init(qiov,
4305 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
4306
4307 // Add the first request to the merged one. If the requests are
4308 // overlapping, drop the last sectors of the first request.
4309 size = (reqs[i].sector - reqs[outidx].sector) << 9;
4310 qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
4311
4312 // We shouldn't need to add any zeros between the two requests
4313 assert(reqs[i].sector <= oldreq_last);
4314
4315 // Add the second request
4316 qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
4317
4318 reqs[outidx].nb_sectors = qiov->size >> 9;
4319 reqs[outidx].qiov = qiov;
4320
4321 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
4322 } else {
4323 outidx++;
4324 reqs[outidx].sector = reqs[i].sector;
4325 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
4326 reqs[outidx].qiov = reqs[i].qiov;
4327 }
4328 }
4329
4330 return outidx + 1;
4331 }
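/* Worked example (illustrative): three 4-sector writes starting at sectors
 * 8, 0 and 4 are first sorted to 0, 4, 8; each request then begins exactly
 * where the previous one ends, so all three collapse into a single request
 * covering sectors [0, 12), unless the combined vector would exceed IOV_MAX,
 * in which case the merge is skipped. */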
4332
4333 /*
4334 * Submit multiple AIO write requests at once.
4335 *
4336 * On success, the function returns 0 and all requests in the reqs array have
4337 * been submitted. In the error case, this function returns -1, and any of
4338 * the requests may or may not have been submitted yet. In particular, this
4339 * means that the callback will be called for some requests and not for others. The
4340 * caller must check the error field of the BlockRequest to wait for the right
4341 * callbacks (if error != 0, no callback will be called).
4342 *
4343 * The implementation may modify the contents of the reqs array, e.g. to merge
4344 * requests. However, the fields opaque and error are left unmodified as they
4345 * are used to signal failure for a single request to the caller.
4346 */
4347 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
4348 {
4349 MultiwriteCB *mcb;
4350 int i;
4351
4352 /* don't submit writes if we don't have a medium */
4353 if (bs->drv == NULL) {
4354 for (i = 0; i < num_reqs; i++) {
4355 reqs[i].error = -ENOMEDIUM;
4356 }
4357 return -1;
4358 }
4359
4360 if (num_reqs == 0) {
4361 return 0;
4362 }
4363
4364 // Create MultiwriteCB structure
4365 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
4366 mcb->num_requests = 0;
4367 mcb->num_callbacks = num_reqs;
4368
4369 for (i = 0; i < num_reqs; i++) {
4370 mcb->callbacks[i].cb = reqs[i].cb;
4371 mcb->callbacks[i].opaque = reqs[i].opaque;
4372 }
4373
4374 // Check for mergeable requests
4375 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
4376
4377 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
4378
4379 /* Run the aio requests. */
4380 mcb->num_requests = num_reqs;
4381 for (i = 0; i < num_reqs; i++) {
4382 bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
4383 reqs[i].nb_sectors, reqs[i].flags,
4384 multiwrite_cb, mcb,
4385 true);
4386 }
4387
4388 return 0;
4389 }
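/* Minimal usage sketch (illustrative, not from the original source): batch
 * two writes and honour the contract above, i.e. on failure only requests
 * whose error field is still 0 will see their callback. my_write_cb and the
 * qiov1/qiov2 vectors are hypothetical. */
#if 0
static void example_multiwrite(BlockDriverState *bs,
                               QEMUIOVector *qiov1, QEMUIOVector *qiov2)
{
    BlockRequest reqs[2] = {
        { .sector = 0,   .nb_sectors = qiov1->size >> 9, .qiov = qiov1,
          .cb = my_write_cb, .opaque = NULL },
        { .sector = 128, .nb_sectors = qiov2->size >> 9, .qiov = qiov2,
          .cb = my_write_cb, .opaque = NULL },
    };

    if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
        int i;
        for (i = 0; i < 2; i++) {
            if (reqs[i].error) {
                /* this request failed and will get no callback */
            }
        }
    }
}
#endif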
4390
4391 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
4392 {
4393 acb->aiocb_info->cancel(acb);
4394 }
4395
4396 /**************************************************************/
4397 /* async block device emulation */
4398
4399 typedef struct BlockDriverAIOCBSync {
4400 BlockDriverAIOCB common;
4401 QEMUBH *bh;
4402 int ret;
4403 /* vector translation state */
4404 QEMUIOVector *qiov;
4405 uint8_t *bounce;
4406 int is_write;
4407 } BlockDriverAIOCBSync;
4408
4409 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
4410 {
4411 BlockDriverAIOCBSync *acb =
4412 container_of(blockacb, BlockDriverAIOCBSync, common);
4413 qemu_bh_delete(acb->bh);
4414 acb->bh = NULL;
4415 qemu_aio_release(acb);
4416 }
4417
4418 static const AIOCBInfo bdrv_em_aiocb_info = {
4419 .aiocb_size = sizeof(BlockDriverAIOCBSync),
4420 .cancel = bdrv_aio_cancel_em,
4421 };
4422
4423 static void bdrv_aio_bh_cb(void *opaque)
4424 {
4425 BlockDriverAIOCBSync *acb = opaque;
4426
4427 if (!acb->is_write)
4428 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
4429 qemu_vfree(acb->bounce);
4430 acb->common.cb(acb->common.opaque, acb->ret);
4431 qemu_bh_delete(acb->bh);
4432 acb->bh = NULL;
4433 qemu_aio_release(acb);
4434 }
4435
4436 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
4437 int64_t sector_num,
4438 QEMUIOVector *qiov,
4439 int nb_sectors,
4440 BlockDriverCompletionFunc *cb,
4441 void *opaque,
4442 int is_write)
4444 {
4445 BlockDriverAIOCBSync *acb;
4446
4447 acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
4448 acb->is_write = is_write;
4449 acb->qiov = qiov;
4450 acb->bounce = qemu_blockalign(bs, qiov->size);
4451 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
4452
4453 if (is_write) {
4454 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
4455 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
4456 } else {
4457 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
4458 }
4459
4460 qemu_bh_schedule(acb->bh);
4461
4462 return &acb->common;
4463 }
4464
4465 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
4466 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4467 BlockDriverCompletionFunc *cb, void *opaque)
4468 {
4469 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
4470 }
4471
4472 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
4473 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4474 BlockDriverCompletionFunc *cb, void *opaque)
4475 {
4476 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
4477 }
4478
4479
4480 typedef struct BlockDriverAIOCBCoroutine {
4481 BlockDriverAIOCB common;
4482 BlockRequest req;
4483 bool is_write;
4484 bool *done;
4485 QEMUBH* bh;
4486 } BlockDriverAIOCBCoroutine;
4487
4488 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
4489 {
4490 BlockDriverAIOCBCoroutine *acb =
4491 container_of(blockacb, BlockDriverAIOCBCoroutine, common);
4492 bool done = false;
4493
4494 acb->done = &done;
4495 while (!done) {
4496 qemu_aio_wait();
4497 }
4498 }
4499
4500 static const AIOCBInfo bdrv_em_co_aiocb_info = {
4501 .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
4502 .cancel = bdrv_aio_co_cancel_em,
4503 };
4504
4505 static void bdrv_co_em_bh(void *opaque)
4506 {
4507 BlockDriverAIOCBCoroutine *acb = opaque;
4508
4509 acb->common.cb(acb->common.opaque, acb->req.error);
4510
4511 if (acb->done) {
4512 *acb->done = true;
4513 }
4514
4515 qemu_bh_delete(acb->bh);
4516 qemu_aio_release(acb);
4517 }
4518
4519 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
4520 static void coroutine_fn bdrv_co_do_rw(void *opaque)
4521 {
4522 BlockDriverAIOCBCoroutine *acb = opaque;
4523 BlockDriverState *bs = acb->common.bs;
4524
4525 if (!acb->is_write) {
4526 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
4527 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
4528 } else {
4529 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
4530 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
4531 }
4532
4533 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
4534 qemu_bh_schedule(acb->bh);
4535 }
4536
4537 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
4538 int64_t sector_num,
4539 QEMUIOVector *qiov,
4540 int nb_sectors,
4541 BdrvRequestFlags flags,
4542 BlockDriverCompletionFunc *cb,
4543 void *opaque,
4544 bool is_write)
4545 {
4546 Coroutine *co;
4547 BlockDriverAIOCBCoroutine *acb;
4548
4549 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4550 acb->req.sector = sector_num;
4551 acb->req.nb_sectors = nb_sectors;
4552 acb->req.qiov = qiov;
4553 acb->req.flags = flags;
4554 acb->is_write = is_write;
4555 acb->done = NULL;
4556
4557 co = qemu_coroutine_create(bdrv_co_do_rw);
4558 qemu_coroutine_enter(co, acb);
4559
4560 return &acb->common;
4561 }
4562
4563 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
4564 {
4565 BlockDriverAIOCBCoroutine *acb = opaque;
4566 BlockDriverState *bs = acb->common.bs;
4567
4568 acb->req.error = bdrv_co_flush(bs);
4569 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
4570 qemu_bh_schedule(acb->bh);
4571 }
4572
4573 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
4574 BlockDriverCompletionFunc *cb, void *opaque)
4575 {
4576 trace_bdrv_aio_flush(bs, opaque);
4577
4578 Coroutine *co;
4579 BlockDriverAIOCBCoroutine *acb;
4580
4581 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4582 acb->done = NULL;
4583
4584 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
4585 qemu_coroutine_enter(co, acb);
4586
4587 return &acb->common;
4588 }
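/* Illustrative sketch (not in the original): issue an asynchronous flush and
 * pick up the result in a completion callback. flush_done is hypothetical. */
#if 0
static void flush_done(void *opaque, int ret)
{
    if (ret < 0) {
        fprintf(stderr, "flush failed: %s\n", strerror(-ret));
    }
}

static void example_aio_flush(BlockDriverState *bs)
{
    bdrv_aio_flush(bs, flush_done, NULL);
    /* flush_done runs from a bottom half once bdrv_co_flush() finishes */
}
#endif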
4589
4590 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
4591 {
4592 BlockDriverAIOCBCoroutine *acb = opaque;
4593 BlockDriverState *bs = acb->common.bs;
4594
4595 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
4596 acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
4597 qemu_bh_schedule(acb->bh);
4598 }
4599
4600 BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
4601 int64_t sector_num, int nb_sectors,
4602 BlockDriverCompletionFunc *cb, void *opaque)
4603 {
4604 Coroutine *co;
4605 BlockDriverAIOCBCoroutine *acb;
4606
4607 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
4608
4609 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4610 acb->req.sector = sector_num;
4611 acb->req.nb_sectors = nb_sectors;
4612 acb->done = NULL;
4613 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
4614 qemu_coroutine_enter(co, acb);
4615
4616 return &acb->common;
4617 }
4618
4619 void bdrv_init(void)
4620 {
4621 module_call_init(MODULE_INIT_BLOCK);
4622 }
4623
4624 void bdrv_init_with_whitelist(void)
4625 {
4626 use_bdrv_whitelist = 1;
4627 bdrv_init();
4628 }
4629
4630 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
4631 BlockDriverCompletionFunc *cb, void *opaque)
4632 {
4633 BlockDriverAIOCB *acb;
4634
4635 acb = g_slice_alloc(aiocb_info->aiocb_size);
4636 acb->aiocb_info = aiocb_info;
4637 acb->bs = bs;
4638 acb->cb = cb;
4639 acb->opaque = opaque;
4640 return acb;
4641 }
4642
4643 void qemu_aio_release(void *p)
4644 {
4645 BlockDriverAIOCB *acb = p;
4646 g_slice_free1(acb->aiocb_info->aiocb_size, acb);
4647 }
4648
4649 /**************************************************************/
4650 /* Coroutine block device emulation */
4651
4652 typedef struct CoroutineIOCompletion {
4653 Coroutine *coroutine;
4654 int ret;
4655 } CoroutineIOCompletion;
4656
4657 static void bdrv_co_io_em_complete(void *opaque, int ret)
4658 {
4659 CoroutineIOCompletion *co = opaque;
4660
4661 co->ret = ret;
4662 qemu_coroutine_enter(co->coroutine, NULL);
4663 }
4664
4665 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
4666 int nb_sectors, QEMUIOVector *iov,
4667 bool is_write)
4668 {
4669 CoroutineIOCompletion co = {
4670 .coroutine = qemu_coroutine_self(),
4671 };
4672 BlockDriverAIOCB *acb;
4673
4674 if (is_write) {
4675 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
4676 bdrv_co_io_em_complete, &co);
4677 } else {
4678 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
4679 bdrv_co_io_em_complete, &co);
4680 }
4681
4682 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
4683 if (!acb) {
4684 return -EIO;
4685 }
4686 qemu_coroutine_yield();
4687
4688 return co.ret;
4689 }
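/* The helper above bridges callback-style AIO into coroutine context: the
 * coroutine submits the request, yields, and bdrv_co_io_em_complete()
 * re-enters it with the result once the AIOCB completes. The same
 * CoroutineIOCompletion idiom is reused by bdrv_co_flush() and
 * bdrv_co_discard() below. */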
4690
4691 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
4692 int64_t sector_num, int nb_sectors,
4693 QEMUIOVector *iov)
4694 {
4695 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
4696 }
4697
4698 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
4699 int64_t sector_num, int nb_sectors,
4700 QEMUIOVector *iov)
4701 {
4702 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
4703 }
4704
4705 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
4706 {
4707 RwCo *rwco = opaque;
4708
4709 rwco->ret = bdrv_co_flush(rwco->bs);
4710 }
4711
4712 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
4713 {
4714 int ret;
4715
4716 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
4717 return 0;
4718 }
4719
4720 /* Write back cached data to the OS even with cache=unsafe */
4721 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
4722 if (bs->drv->bdrv_co_flush_to_os) {
4723 ret = bs->drv->bdrv_co_flush_to_os(bs);
4724 if (ret < 0) {
4725 return ret;
4726 }
4727 }
4728
4729 /* But don't actually force it to the disk with cache=unsafe */
4730 if (bs->open_flags & BDRV_O_NO_FLUSH) {
4731 goto flush_parent;
4732 }
4733
4734 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
4735 if (bs->drv->bdrv_co_flush_to_disk) {
4736 ret = bs->drv->bdrv_co_flush_to_disk(bs);
4737 } else if (bs->drv->bdrv_aio_flush) {
4738 BlockDriverAIOCB *acb;
4739 CoroutineIOCompletion co = {
4740 .coroutine = qemu_coroutine_self(),
4741 };
4742
4743 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
4744 if (acb == NULL) {
4745 ret = -EIO;
4746 } else {
4747 qemu_coroutine_yield();
4748 ret = co.ret;
4749 }
4750 } else {
4751 /*
4752 * Some block drivers always operate in either writethrough or unsafe
4753 * mode, and therefore don't support bdrv_flush. Usually qemu doesn't
4754 * know how the server works (because the behaviour is hardcoded or
4755 * depends on server-side configuration), so we can't ensure that
4756 * everything is safe on disk. Returning an error doesn't work because
4757 * that would break guests even if the server operates in writethrough
4758 * mode.
4759 *
4760 * Let's hope the user knows what he's doing.
4761 */
4762 ret = 0;
4763 }
4764 if (ret < 0) {
4765 return ret;
4766 }
4767
4768 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
4769 * in the case of cache=unsafe, so there are no useless flushes.
4770 */
4771 flush_parent:
4772 return bdrv_co_flush(bs->file);
4773 }
4774
4775 void bdrv_invalidate_cache(BlockDriverState *bs)
4776 {
4777 if (bs->drv && bs->drv->bdrv_invalidate_cache) {
4778 bs->drv->bdrv_invalidate_cache(bs);
4779 }
4780 }
4781
4782 void bdrv_invalidate_cache_all(void)
4783 {
4784 BlockDriverState *bs;
4785
4786 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
4787 bdrv_invalidate_cache(bs);
4788 }
4789 }
4790
4791 void bdrv_clear_incoming_migration_all(void)
4792 {
4793 BlockDriverState *bs;
4794
4795 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
4796 bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
4797 }
4798 }
4799
4800 int bdrv_flush(BlockDriverState *bs)
4801 {
4802 Coroutine *co;
4803 RwCo rwco = {
4804 .bs = bs,
4805 .ret = NOT_DONE,
4806 };
4807
4808 if (qemu_in_coroutine()) {
4809 /* Fast-path if already in coroutine context */
4810 bdrv_flush_co_entry(&rwco);
4811 } else {
4812 co = qemu_coroutine_create(bdrv_flush_co_entry);
4813 qemu_coroutine_enter(co, &rwco);
4814 while (rwco.ret == NOT_DONE) {
4815 qemu_aio_wait();
4816 }
4817 }
4818
4819 return rwco.ret;
4820 }
4821
4822 typedef struct DiscardCo {
4823 BlockDriverState *bs;
4824 int64_t sector_num;
4825 int nb_sectors;
4826 int ret;
4827 } DiscardCo;
4828 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
4829 {
4830 DiscardCo *rwco = opaque;
4831
4832 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
4833 }
4834
4835 /* If no limit is specified in the BlockLimits, use a default
4836 * of 32768 512-byte sectors (16 MiB) per request.
4837 */
4838 #define MAX_DISCARD_DEFAULT 32768
4839
4840 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
4841 int nb_sectors)
4842 {
4843 int max_discard;
4844
4845 if (!bs->drv) {
4846 return -ENOMEDIUM;
4847 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
4848 return -EIO;
4849 } else if (bs->read_only) {
4850 return -EROFS;
4851 }
4852
4853 bdrv_reset_dirty(bs, sector_num, nb_sectors);
4854
4855 /* Do nothing if disabled. */
4856 if (!(bs->open_flags & BDRV_O_UNMAP)) {
4857 return 0;
4858 }
4859
4860 if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
4861 return 0;
4862 }
4863
4864 max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
4865 while (nb_sectors > 0) {
4866 int ret;
4867 int num = nb_sectors;
4868
4869 /* align request */
4870 if (bs->bl.discard_alignment &&
4871 num >= bs->bl.discard_alignment &&
4872 sector_num % bs->bl.discard_alignment) {
4873 if (num > bs->bl.discard_alignment) {
4874 num = bs->bl.discard_alignment;
4875 }
4876 num -= sector_num % bs->bl.discard_alignment;
4877 }
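/* e.g. with discard_alignment 8 and sector_num 5, the first chunk is
 * trimmed to 3 sectors so that subsequent chunks start aligned */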
4878
4879 /* limit request size */
4880 if (num > max_discard) {
4881 num = max_discard;
4882 }
4883
4884 if (bs->drv->bdrv_co_discard) {
4885 ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
4886 } else {
4887 BlockDriverAIOCB *acb;
4888 CoroutineIOCompletion co = {
4889 .coroutine = qemu_coroutine_self(),
4890 };
4891
4892 acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
4893 bdrv_co_io_em_complete, &co);
4894 if (acb == NULL) {
4895 return -EIO;
4896 } else {
4897 qemu_coroutine_yield();
4898 ret = co.ret;
4899 }
4900 }
4901 if (ret && ret != -ENOTSUP) {
4902 return ret;
4903 }
4904
4905 sector_num += num;
4906 nb_sectors -= num;
4907 }
4908 return 0;
4909 }
4910
4911 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
4912 {
4913 Coroutine *co;
4914 DiscardCo rwco = {
4915 .bs = bs,
4916 .sector_num = sector_num,
4917 .nb_sectors = nb_sectors,
4918 .ret = NOT_DONE,
4919 };
4920
4921 if (qemu_in_coroutine()) {
4922 /* Fast-path if already in coroutine context */
4923 bdrv_discard_co_entry(&rwco);
4924 } else {
4925 co = qemu_coroutine_create(bdrv_discard_co_entry);
4926 qemu_coroutine_enter(co, &rwco);
4927 while (rwco.ret == NOT_DONE) {
4928 qemu_aio_wait();
4929 }
4930 }
4931
4932 return rwco.ret;
4933 }
4934
4935 /**************************************************************/
4936 /* removable device support */
4937
4938 /**
4939 * Return TRUE if the media is present
4940 */
4941 int bdrv_is_inserted(BlockDriverState *bs)
4942 {
4943 BlockDriver *drv = bs->drv;
4944
4945 if (!drv)
4946 return 0;
4947 if (!drv->bdrv_is_inserted)
4948 return 1;
4949 return drv->bdrv_is_inserted(bs);
4950 }
4951
4952 /**
4953 * Return whether the media changed since the last call to this
4954 * function, or -ENOTSUP if we don't know. Most drivers don't know.
4955 */
4956 int bdrv_media_changed(BlockDriverState *bs)
4957 {
4958 BlockDriver *drv = bs->drv;
4959
4960 if (drv && drv->bdrv_media_changed) {
4961 return drv->bdrv_media_changed(bs);
4962 }
4963 return -ENOTSUP;
4964 }
4965
4966 /**
4967 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
4968 */
4969 void bdrv_eject(BlockDriverState *bs, bool eject_flag)
4970 {
4971 BlockDriver *drv = bs->drv;
4972
4973 if (drv && drv->bdrv_eject) {
4974 drv->bdrv_eject(bs, eject_flag);
4975 }
4976
4977 if (bs->device_name[0] != '\0') {
4978 bdrv_emit_qmp_eject_event(bs, eject_flag);
4979 }
4980 }
4981
4982 /**
4983 * Lock or unlock the media (if it is locked, the user won't be able
4984 * to eject it manually).
4985 */
4986 void bdrv_lock_medium(BlockDriverState *bs, bool locked)
4987 {
4988 BlockDriver *drv = bs->drv;
4989
4990 trace_bdrv_lock_medium(bs, locked);
4991
4992 if (drv && drv->bdrv_lock_medium) {
4993 drv->bdrv_lock_medium(bs, locked);
4994 }
4995 }
4996
4997 /* needed for generic scsi interface */
4998
4999 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
5000 {
5001 BlockDriver *drv = bs->drv;
5002
5003 if (drv && drv->bdrv_ioctl)
5004 return drv->bdrv_ioctl(bs, req, buf);
5005 return -ENOTSUP;
5006 }
5007
5008 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
5009 unsigned long int req, void *buf,
5010 BlockDriverCompletionFunc *cb, void *opaque)
5011 {
5012 BlockDriver *drv = bs->drv;
5013
5014 if (drv && drv->bdrv_aio_ioctl)
5015 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
5016 return NULL;
5017 }
5018
5019 void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
5020 {
5021 bs->guest_block_size = align;
5022 }
5023
5024 void *qemu_blockalign(BlockDriverState *bs, size_t size)
5025 {
5026 return qemu_memalign(bdrv_opt_mem_align(bs), size);
5027 }
5028
5029 /*
5030 * Check if all memory in this vector is aligned to bdrv_opt_mem_align(bs).
5031 */
5032 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
5033 {
5034 int i;
5035 size_t alignment = bdrv_opt_mem_align(bs);
5036
5037 for (i = 0; i < qiov->niov; i++) {
5038 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
5039 return false;
5040 }
5041 if (qiov->iov[i].iov_len % alignment) {
5042 return false;
5043 }
5044 }
5045
5046 return true;
5047 }
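/* Illustrative sketch (not in the original): a caller can use the check
 * above to decide whether a bounce buffer is required, allocating it with
 * qemu_blockalign() so it satisfies the same alignment constraint. */
#if 0
static void *example_bounce_if_needed(BlockDriverState *bs, QEMUIOVector *qiov)
{
    if (bdrv_qiov_is_aligned(bs, qiov)) {
        return NULL; /* vector can be used directly */
    }
    return qemu_blockalign(bs, qiov->size);
}
#endif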
5048
5049 BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity)
5050 {
5051 int64_t bitmap_size;
5052 BdrvDirtyBitmap *bitmap;
5053
5054 assert((granularity & (granularity - 1)) == 0);
5055
5056 granularity >>= BDRV_SECTOR_BITS;
5057 assert(granularity);
5058 bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS);
5059 bitmap = g_malloc0(sizeof(BdrvDirtyBitmap));
5060 bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
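/* e.g. a granularity of 65536 bytes becomes 128 sectors after the shift
 * above, and ffs(128) - 1 == 7 makes each HBitmap bit cover 2^7 sectors */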
5061 QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
5062 return bitmap;
5063 }
5064
5065 void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5066 {
5067 BdrvDirtyBitmap *bm, *next;
5068 QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
5069 if (bm == bitmap) {
5070 QLIST_REMOVE(bitmap, list);
5071 hbitmap_free(bitmap->bitmap);
5072 g_free(bitmap);
5073 return;
5074 }
5075 }
5076 }
5077
5078 BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
5079 {
5080 BdrvDirtyBitmap *bm;
5081 BlockDirtyInfoList *list = NULL;
5082 BlockDirtyInfoList **plist = &list;
5083
5084 QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
5085 BlockDirtyInfo *info = g_malloc0(sizeof(BlockDirtyInfo));
5086 BlockDirtyInfoList *entry = g_malloc0(sizeof(BlockDirtyInfoList));
5087 info->count = bdrv_get_dirty_count(bs, bm);
5088 info->granularity =
5089 ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
5090 entry->value = info;
5091 *plist = entry;
5092 plist = &entry->next;
5093 }
5094
5095 return list;
5096 }
5097
5098 int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
5099 {
5100 if (bitmap) {
5101 return hbitmap_get(bitmap->bitmap, sector);
5102 } else {
5103 return 0;
5104 }
5105 }
5106
5107 void bdrv_dirty_iter_init(BlockDriverState *bs,
5108 BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
5109 {
5110 hbitmap_iter_init(hbi, bitmap->bitmap, 0);
5111 }
5112
5113 void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
5114 int nr_sectors)
5115 {
5116 BdrvDirtyBitmap *bitmap;
5117 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5118 hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
5119 }
5120 }
5121
5122 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
5123 {
5124 BdrvDirtyBitmap *bitmap;
5125 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5126 hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
5127 }
5128 }
5129
5130 int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5131 {
5132 return hbitmap_count(bitmap->bitmap);
5133 }
5134
5135 /* Get a reference to bs */
5136 void bdrv_ref(BlockDriverState *bs)
5137 {
5138 bs->refcnt++;
5139 }
5140
5141 /* Release a previously grabbed reference to bs.
5142 * If after releasing, reference count is zero, the BlockDriverState is
5143 * deleted. */
5144 void bdrv_unref(BlockDriverState *bs)
5145 {
5146 assert(bs->refcnt > 0);
5147 if (--bs->refcnt == 0) {
5148 bdrv_delete(bs);
5149 }
5150 }
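/* Illustrative sketch: a caller that might trigger deletion of bs through a
 * nested operation keeps it alive with a ref/unref pair (the hypothetical
 * operate() stands in for any such operation). */
#if 0
static void example_ref_pattern(BlockDriverState *bs)
{
    bdrv_ref(bs);
    operate(bs);    /* may drop what would otherwise be the last reference */
    bdrv_unref(bs); /* only now may the BDS actually be deleted */
}
#endif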
5151
5152 void bdrv_set_in_use(BlockDriverState *bs, int in_use)
5153 {
5154 assert(bs->in_use != in_use);
5155 bs->in_use = in_use;
5156 }
5157
5158 int bdrv_in_use(BlockDriverState *bs)
5159 {
5160 return bs->in_use;
5161 }
5162
5163 void bdrv_iostatus_enable(BlockDriverState *bs)
5164 {
5165 bs->iostatus_enabled = true;
5166 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
5167 }
5168
5169 /* The I/O status is only enabled if the drive explicitly
5170 * enables it _and_ the VM is configured to stop on errors */
5171 bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
5172 {
5173 return (bs->iostatus_enabled &&
5174 (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
5175 bs->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
5176 bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
5177 }
5178
5179 void bdrv_iostatus_disable(BlockDriverState *bs)
5180 {
5181 bs->iostatus_enabled = false;
5182 }
5183
5184 void bdrv_iostatus_reset(BlockDriverState *bs)
5185 {
5186 if (bdrv_iostatus_is_enabled(bs)) {
5187 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
5188 if (bs->job) {
5189 block_job_iostatus_reset(bs->job);
5190 }
5191 }
5192 }
5193
5194 void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
5195 {
5196 assert(bdrv_iostatus_is_enabled(bs));
5197 if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
5198 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
5199 BLOCK_DEVICE_IO_STATUS_FAILED;
5200 }
5201 }
5202
5203 void
5204 bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
5205 enum BlockAcctType type)
5206 {
5207 assert(type < BDRV_MAX_IOTYPE);
5208
5209 cookie->bytes = bytes;
5210 cookie->start_time_ns = get_clock();
5211 cookie->type = type;
5212 }
5213
5214 void
5215 bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
5216 {
5217 assert(cookie->type < BDRV_MAX_IOTYPE);
5218
5219 bs->nr_bytes[cookie->type] += cookie->bytes;
5220 bs->nr_ops[cookie->type]++;
5221 bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
5222 }
5223
5224 void bdrv_img_create(const char *filename, const char *fmt,
5225 const char *base_filename, const char *base_fmt,
5226 char *options, uint64_t img_size, int flags,
5227 Error **errp, bool quiet)
5228 {
5229 QEMUOptionParameter *param = NULL, *create_options = NULL;
5230 QEMUOptionParameter *backing_fmt, *backing_file, *size;
5231 BlockDriver *drv, *proto_drv;
5232 BlockDriver *backing_drv = NULL;
5233 Error *local_err = NULL;
5234 int ret = 0;
5235
5236 /* Find driver and parse its options */
5237 drv = bdrv_find_format(fmt);
5238 if (!drv) {
5239 error_setg(errp, "Unknown file format '%s'", fmt);
5240 return;
5241 }
5242
5243 proto_drv = bdrv_find_protocol(filename, true);
5244 if (!proto_drv) {
5245 error_setg(errp, "Unknown protocol '%s'", filename);
5246 return;
5247 }
5248
5249 create_options = append_option_parameters(create_options,
5250 drv->create_options);
5251 create_options = append_option_parameters(create_options,
5252 proto_drv->create_options);
5253
5254 /* Create parameter list with default values */
5255 param = parse_option_parameters("", create_options, param);
5256
5257 set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);
5258
5259 /* Parse -o options */
5260 if (options) {
5261 param = parse_option_parameters(options, create_options, param);
5262 if (param == NULL) {
5263 error_setg(errp, "Invalid options for file format '%s'.", fmt);
5264 goto out;
5265 }
5266 }
5267
5268 if (base_filename) {
5269 if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
5270 base_filename)) {
5271 error_setg(errp, "Backing file not supported for file format '%s'",
5272 fmt);
5273 goto out;
5274 }
5275 }
5276
5277 if (base_fmt) {
5278 if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
5279 error_setg(errp, "Backing file format not supported for file "
5280 "format '%s'", fmt);
5281 goto out;
5282 }
5283 }
5284
5285 backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
5286 if (backing_file && backing_file->value.s) {
5287 if (!strcmp(filename, backing_file->value.s)) {
5288 error_setg(errp, "Trying to create an image with the "
5289 "same filename as the backing file");
5290 goto out;
5291 }
5292 }
5293
5294 backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
5295 if (backing_fmt && backing_fmt->value.s) {
5296 backing_drv = bdrv_find_format(backing_fmt->value.s);
5297 if (!backing_drv) {
5298 error_setg(errp, "Unknown backing file format '%s'",
5299 backing_fmt->value.s);
5300 goto out;
5301 }
5302 }
5303
5304 // The size for the image must always be specified, with one exception:
5305 // If we are using a backing file, we can obtain the size from there
5306 size = get_option_parameter(param, BLOCK_OPT_SIZE);
5307 if (size && size->value.n == -1) {
5308 if (backing_file && backing_file->value.s) {
5309 BlockDriverState *bs;
5310 uint64_t size;
5311 char buf[32];
5312 int back_flags;
5313
5314 /* backing files always opened read-only */
5315 back_flags =
5316 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
5317
5318 bs = NULL;
5319 ret = bdrv_open(&bs, backing_file->value.s, NULL, NULL, back_flags,
5320 backing_drv, &local_err);
5321 if (ret < 0) {
5322 error_setg_errno(errp, -ret, "Could not open '%s': %s",
5323 backing_file->value.s,
5324 error_get_pretty(local_err));
5325 error_free(local_err);
5326 local_err = NULL;
5327 goto out;
5328 }
5329 bdrv_get_geometry(bs, &size);
5330 size *= 512;
5331
5332 snprintf(buf, sizeof(buf), "%" PRId64, size);
5333 set_option_parameter(param, BLOCK_OPT_SIZE, buf);
5334
5335 bdrv_unref(bs);
5336 } else {
5337 error_setg(errp, "Image creation needs a size parameter");
5338 goto out;
5339 }
5340 }
5341
5342 if (!quiet) {
5343 printf("Formatting '%s', fmt=%s ", filename, fmt);
5344 print_option_parameters(param);
5345 puts("");
5346 }
5347 ret = bdrv_create(drv, filename, param, &local_err);
5348 if (ret == -EFBIG) {
5349 /* This is generally a better message than whatever the driver would
5350 * deliver (especially because of the cluster_size_hint), since that
5351 * is most probably not much different from "image too large". */
5352 const char *cluster_size_hint = "";
5353 if (get_option_parameter(create_options, BLOCK_OPT_CLUSTER_SIZE)) {
5354 cluster_size_hint = " (try using a larger cluster size)";
5355 }
5356 error_setg(errp, "The image size is too large for file format '%s'"
5357 "%s", fmt, cluster_size_hint);
5358 error_free(local_err);
5359 local_err = NULL;
5360 }
5361
5362 out:
5363 free_option_parameters(create_options);
5364 free_option_parameters(param);
5365
5366 if (local_err) {
5367 error_propagate(errp, local_err);
5368 }
5369 }
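/* Minimal usage sketch (illustrative): create a 1 GiB qcow2 image roughly
 * the way qemu-img create would. The filename is hypothetical. */
#if 0
static void example_img_create(void)
{
    Error *err = NULL;

    /* quiet=true suppresses the "Formatting ..." progress line */
    bdrv_img_create("test.qcow2", "qcow2", NULL, NULL, NULL,
                    (uint64_t)1 << 30, 0, &err, true);
    if (err) {
        fprintf(stderr, "%s\n", error_get_pretty(err));
        error_free(err);
    }
}
#endif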
5370
5371 AioContext *bdrv_get_aio_context(BlockDriverState *bs)
5372 {
5373 /* Currently BlockDriverState always uses the main loop AioContext */
5374 return qemu_get_aio_context();
5375 }
5376
5377 void bdrv_add_before_write_notifier(BlockDriverState *bs,
5378 NotifierWithReturn *notifier)
5379 {
5380 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
5381 }
5382
5383 int bdrv_amend_options(BlockDriverState *bs, QEMUOptionParameter *options)
5384 {
5385 if (!bs->drv || bs->drv->bdrv_amend_options == NULL) {
5386 return -ENOTSUP;
5387 }
5388 return bs->drv->bdrv_amend_options(bs, options);
5389 }
5390
5391 /* Used to recurse on single-child block filters.
5392 * Single-child block filters store their child in bs->file.
5393 */
5394 bool bdrv_generic_is_first_non_filter(BlockDriverState *bs,
5395 BlockDriverState *candidate)
5396 {
5397 if (!bs->drv) {
5398 return false;
5399 }
5400
5401 if (!bs->drv->authorizations[BS_IS_A_FILTER]) {
5402 if (bs == candidate) {
5403 return true;
5404 } else {
5405 return false;
5406 }
5407 }
5408
5409 if (!bs->drv->authorizations[BS_FILTER_PASS_DOWN]) {
5410 return false;
5411 }
5412
5413 if (!bs->file) {
5414 return false;
5415 }
5416
5417 return bdrv_recurse_is_first_non_filter(bs->file, candidate);
5418 }
5419
5420 bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
5421 BlockDriverState *candidate)
5422 {
5423 if (bs->drv && bs->drv->bdrv_recurse_is_first_non_filter) {
5424 return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
5425 }
5426
5427 return bdrv_generic_is_first_non_filter(bs, candidate);
5428 }
5429
5430 /* This function checks if the candidate is the first non-filter bs down its
5431 * bs chain. Since we don't have pointers to parents it explores all bs chains
5432 * from the top. Some filters can choose not to pass down the recursion.
5433 */
5434 bool bdrv_is_first_non_filter(BlockDriverState *candidate)
5435 {
5436 BlockDriverState *bs;
5437
5438 /* walk down the bs forest recursively */
5439 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
5440 bool perm;
5441
5442 perm = bdrv_recurse_is_first_non_filter(bs, candidate);
5443
5444 /* candidate is the first non filter */
5445 if (perm) {
5446 return true;
5447 }
5448 }
5449
5450 return false;
5451 }