1 /*
2 * QEMU System Emulator block driver
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24 #include "config-host.h"
25 #include "qemu-common.h"
26 #include "trace.h"
27 #include "block/block_int.h"
28 #include "block/blockjob.h"
29 #include "qemu/module.h"
30 #include "qapi/qmp/qjson.h"
31 #include "sysemu/sysemu.h"
32 #include "qemu/notify.h"
33 #include "block/coroutine.h"
34 #include "block/qapi.h"
35 #include "qmp-commands.h"
36 #include "qemu/timer.h"
37 #include "qapi-event.h"
38
39 #ifdef CONFIG_BSD
40 #include <sys/types.h>
41 #include <sys/stat.h>
42 #include <sys/ioctl.h>
43 #include <sys/queue.h>
44 #ifndef __DragonFly__
45 #include <sys/disk.h>
46 #endif
47 #endif
48
49 #ifdef _WIN32
50 #include <windows.h>
51 #endif
52
53 struct BdrvDirtyBitmap {
54 HBitmap *bitmap;
55 QLIST_ENTRY(BdrvDirtyBitmap) list;
56 };
57
58 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
59
60 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
61 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
62 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
63 BlockDriverCompletionFunc *cb, void *opaque);
64 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
65 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
66 BlockDriverCompletionFunc *cb, void *opaque);
67 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
68 int64_t sector_num, int nb_sectors,
69 QEMUIOVector *iov);
70 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
71 int64_t sector_num, int nb_sectors,
72 QEMUIOVector *iov);
73 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
74 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
75 BdrvRequestFlags flags);
76 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
77 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
78 BdrvRequestFlags flags);
79 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
80 int64_t sector_num,
81 QEMUIOVector *qiov,
82 int nb_sectors,
83 BdrvRequestFlags flags,
84 BlockDriverCompletionFunc *cb,
85 void *opaque,
86 bool is_write);
87 static void coroutine_fn bdrv_co_do_rw(void *opaque);
88 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
89 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
90
91 static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
92 QTAILQ_HEAD_INITIALIZER(bdrv_states);
93
94 static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
95 QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);
96
97 static QLIST_HEAD(, BlockDriver) bdrv_drivers =
98 QLIST_HEAD_INITIALIZER(bdrv_drivers);
99
100 /* If non-zero, use only whitelisted block drivers */
101 static int use_bdrv_whitelist;
102
103 #ifdef _WIN32
104 static int is_windows_drive_prefix(const char *filename)
105 {
106 return (((filename[0] >= 'a' && filename[0] <= 'z') ||
107 (filename[0] >= 'A' && filename[0] <= 'Z')) &&
108 filename[1] == ':');
109 }
110
111 int is_windows_drive(const char *filename)
112 {
113 if (is_windows_drive_prefix(filename) &&
114 filename[2] == '\0')
115 return 1;
116 if (strstart(filename, "\\\\.\\", NULL) ||
117 strstart(filename, "//./", NULL))
118 return 1;
119 return 0;
120 }
121 #endif
122
123 /* throttling disk I/O limits */
124 void bdrv_set_io_limits(BlockDriverState *bs,
125 ThrottleConfig *cfg)
126 {
127 int i;
128
129 throttle_config(&bs->throttle_state, cfg);
130
131 for (i = 0; i < 2; i++) {
132 qemu_co_enter_next(&bs->throttled_reqs[i]);
133 }
134 }
135
136 /* this function drains all the throttled IOs */
137 static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
138 {
139 bool drained = false;
140 bool enabled = bs->io_limits_enabled;
141 int i;
142
143 bs->io_limits_enabled = false;
144
145 for (i = 0; i < 2; i++) {
146 while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
147 drained = true;
148 }
149 }
150
151 bs->io_limits_enabled = enabled;
152
153 return drained;
154 }
155
156 void bdrv_io_limits_disable(BlockDriverState *bs)
157 {
158 bs->io_limits_enabled = false;
159
160 bdrv_start_throttled_reqs(bs);
161
162 throttle_destroy(&bs->throttle_state);
163 }
164
165 static void bdrv_throttle_read_timer_cb(void *opaque)
166 {
167 BlockDriverState *bs = opaque;
168 qemu_co_enter_next(&bs->throttled_reqs[0]);
169 }
170
171 static void bdrv_throttle_write_timer_cb(void *opaque)
172 {
173 BlockDriverState *bs = opaque;
174 qemu_co_enter_next(&bs->throttled_reqs[1]);
175 }
176
177 /* should be called before bdrv_set_io_limits if a limit is set */
178 void bdrv_io_limits_enable(BlockDriverState *bs)
179 {
180 assert(!bs->io_limits_enabled);
181 throttle_init(&bs->throttle_state,
182 bdrv_get_aio_context(bs),
183 QEMU_CLOCK_VIRTUAL,
184 bdrv_throttle_read_timer_cb,
185 bdrv_throttle_write_timer_cb,
186 bs);
187 bs->io_limits_enabled = true;
188 }
189
190 /* This function makes an IO wait if needed
191 *
192  * @bytes:    the number of bytes of the IO
193 * @is_write: is the IO a write
194 */
195 static void bdrv_io_limits_intercept(BlockDriverState *bs,
196 unsigned int bytes,
197 bool is_write)
198 {
199 /* must this IO wait? */
200 bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);
201
202 /* if it must wait, or any request of this type is already queued, queue this IO */
203 if (must_wait ||
204 !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
205 qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
206 }
207
208 /* the IO will be executed, do the accounting */
209 throttle_account(&bs->throttle_state, is_write, bytes);
210
211
212 /* if the next request must wait -> do nothing */
213 if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
214 return;
215 }
216
217 /* else queue next request for execution */
218 qemu_co_queue_next(&bs->throttled_reqs[is_write]);
219 }
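/* A minimal caller sketch (illustrative, not part of the original file):
 * this mirrors what the bdrv_co_do_preadv()/bdrv_co_do_pwritev() request
 * paths do before issuing a throttled request. example_throttled_write()
 * is a hypothetical name. */
static int coroutine_fn example_throttled_write(BlockDriverState *bs,
                                                int64_t offset,
                                                unsigned int bytes,
                                                QEMUIOVector *qiov)
{
    if (bs->io_limits_enabled) {
        /* may yield on bs->throttled_reqs[1] until a throttle timer fires */
        bdrv_io_limits_intercept(bs, bytes, true);
    }
    return bdrv_co_do_pwritev(bs, offset, bytes, qiov, 0);
}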
220
221 size_t bdrv_opt_mem_align(BlockDriverState *bs)
222 {
223 if (!bs || !bs->drv) {
224 /* 4k should be on the safe side */
225 return 4096;
226 }
227
228 return bs->bl.opt_mem_alignment;
229 }
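/* Usage sketch (illustrative): allocate an I/O buffer that honours the
 * optimal alignment reported above; qemu_memalign() is QEMU's standard
 * helper for aligned allocation. example_alloc_io_buffer() is a
 * hypothetical name. */
static void *example_alloc_io_buffer(BlockDriverState *bs, size_t len)
{
    return qemu_memalign(bdrv_opt_mem_align(bs), len);
}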
230
231 /* check if the path starts with "<protocol>:" */
232 static int path_has_protocol(const char *path)
233 {
234 const char *p;
235
236 #ifdef _WIN32
237 if (is_windows_drive(path) ||
238 is_windows_drive_prefix(path)) {
239 return 0;
240 }
241 p = path + strcspn(path, ":/\\");
242 #else
243 p = path + strcspn(path, ":/");
244 #endif
245
246 return *p == ':';
247 }
248
249 int path_is_absolute(const char *path)
250 {
251 #ifdef _WIN32
252 /* specific case for names like: "\\.\d:" */
253 if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
254 return 1;
255 }
256 return (*path == '/' || *path == '\\');
257 #else
258 return (*path == '/');
259 #endif
260 }
261
262 /* If filename is absolute, just copy it to dest. Otherwise, build a
263    path to it by considering it relative to base_path. URLs are
264    supported. */
265 void path_combine(char *dest, int dest_size,
266 const char *base_path,
267 const char *filename)
268 {
269 const char *p, *p1;
270 int len;
271
272 if (dest_size <= 0)
273 return;
274 if (path_is_absolute(filename)) {
275 pstrcpy(dest, dest_size, filename);
276 } else {
277 p = strchr(base_path, ':');
278 if (p)
279 p++;
280 else
281 p = base_path;
282 p1 = strrchr(base_path, '/');
283 #ifdef _WIN32
284 {
285 const char *p2;
286 p2 = strrchr(base_path, '\\');
287 if (!p1 || p2 > p1)
288 p1 = p2;
289 }
290 #endif
291 if (p1)
292 p1++;
293 else
294 p1 = base_path;
295 if (p1 > p)
296 p = p1;
297 len = p - base_path;
298 if (len > dest_size - 1)
299 len = dest_size - 1;
300 memcpy(dest, base_path, len);
301 dest[len] = '\0';
302 pstrcat(dest, dest_size, filename);
303 }
304 }
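/* Worked examples (derived from the code above):
 *   path_combine(d, sz, "/img/base.qcow2", "child.qcow2")
 *       -> "/img/child.qcow2"
 *   path_combine(d, sz, "http://host/dir/base.img", "child.img")
 *       -> "http://host/dir/child.img"
 *   path_combine(d, sz, "/img/base.qcow2", "/abs/child.qcow2")
 *       -> "/abs/child.qcow2"   (absolute filename is copied as-is)
 */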
305
306 void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
307 {
308 if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
309 pstrcpy(dest, sz, bs->backing_file);
310 } else {
311 path_combine(dest, sz, bs->filename, bs->backing_file);
312 }
313 }
314
315 void bdrv_register(BlockDriver *bdrv)
316 {
317 /* Block drivers without coroutine functions need emulation */
318 if (!bdrv->bdrv_co_readv) {
319 bdrv->bdrv_co_readv = bdrv_co_readv_em;
320 bdrv->bdrv_co_writev = bdrv_co_writev_em;
321
322         /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
323 * the block driver lacks aio we need to emulate that too.
324 */
325 if (!bdrv->bdrv_aio_readv) {
326 /* add AIO emulation layer */
327 bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
328 bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
329 }
330 }
331
332 QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
333 }
334
335 /* create a new block device (by default it is empty) */
336 BlockDriverState *bdrv_new(const char *device_name, Error **errp)
337 {
338 BlockDriverState *bs;
339 int i;
340
341 if (bdrv_find(device_name)) {
342 error_setg(errp, "Device with id '%s' already exists",
343 device_name);
344 return NULL;
345 }
346 if (bdrv_find_node(device_name)) {
347 error_setg(errp, "Device with node-name '%s' already exists",
348 device_name);
349 return NULL;
350 }
351
352 bs = g_malloc0(sizeof(BlockDriverState));
353 QLIST_INIT(&bs->dirty_bitmaps);
354 pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
355 if (device_name[0] != '\0') {
356 QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
357 }
358 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
359 QLIST_INIT(&bs->op_blockers[i]);
360 }
361 bdrv_iostatus_disable(bs);
362 notifier_list_init(&bs->close_notifiers);
363 notifier_with_return_list_init(&bs->before_write_notifiers);
364 qemu_co_queue_init(&bs->throttled_reqs[0]);
365 qemu_co_queue_init(&bs->throttled_reqs[1]);
366 bs->refcnt = 1;
367 bs->aio_context = qemu_get_aio_context();
368
369 return bs;
370 }
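/* Minimal lifecycle sketch (illustrative): an anonymous BDS is created with
 * an empty device name and released once its single initial reference is
 * dropped. example_bds_lifecycle() is a hypothetical name. */
static void example_bds_lifecycle(void)
{
    BlockDriverState *bs = bdrv_new("", &error_abort);
    /* ... use bs, e.g. pass it to bdrv_open() ... */
    bdrv_unref(bs); /* refcnt 1 -> 0 frees the BDS */
}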
371
372 void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
373 {
374 notifier_list_add(&bs->close_notifiers, notify);
375 }
376
377 BlockDriver *bdrv_find_format(const char *format_name)
378 {
379 BlockDriver *drv1;
380 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
381 if (!strcmp(drv1->format_name, format_name)) {
382 return drv1;
383 }
384 }
385 return NULL;
386 }
387
388 static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
389 {
390 static const char *whitelist_rw[] = {
391 CONFIG_BDRV_RW_WHITELIST
392 };
393 static const char *whitelist_ro[] = {
394 CONFIG_BDRV_RO_WHITELIST
395 };
396 const char **p;
397
398 if (!whitelist_rw[0] && !whitelist_ro[0]) {
399 return 1; /* no whitelist, anything goes */
400 }
401
402 for (p = whitelist_rw; *p; p++) {
403 if (!strcmp(drv->format_name, *p)) {
404 return 1;
405 }
406 }
407 if (read_only) {
408 for (p = whitelist_ro; *p; p++) {
409 if (!strcmp(drv->format_name, *p)) {
410 return 1;
411 }
412 }
413 }
414 return 0;
415 }
416
417 BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
418 bool read_only)
419 {
420 BlockDriver *drv = bdrv_find_format(format_name);
421 return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
422 }
423
424 typedef struct CreateCo {
425 BlockDriver *drv;
426 char *filename;
427 QemuOpts *opts;
428 int ret;
429 Error *err;
430 } CreateCo;
431
432 static void coroutine_fn bdrv_create_co_entry(void *opaque)
433 {
434 Error *local_err = NULL;
435 int ret;
436
437 CreateCo *cco = opaque;
438 assert(cco->drv);
439
440 ret = cco->drv->bdrv_create(cco->filename, cco->opts, &local_err);
441 if (local_err) {
442 error_propagate(&cco->err, local_err);
443 }
444 cco->ret = ret;
445 }
446
447 int bdrv_create(BlockDriver *drv, const char* filename,
448 QemuOpts *opts, Error **errp)
449 {
450 int ret;
451
452 Coroutine *co;
453 CreateCo cco = {
454 .drv = drv,
455 .filename = g_strdup(filename),
456 .opts = opts,
457 .ret = NOT_DONE,
458 .err = NULL,
459 };
460
461 if (!drv->bdrv_create) {
462 error_setg(errp, "Driver '%s' does not support image creation", drv->format_name);
463 ret = -ENOTSUP;
464 goto out;
465 }
466
467 if (qemu_in_coroutine()) {
468 /* Fast-path if already in coroutine context */
469 bdrv_create_co_entry(&cco);
470 } else {
471 co = qemu_coroutine_create(bdrv_create_co_entry);
472 qemu_coroutine_enter(co, &cco);
473 while (cco.ret == NOT_DONE) {
474 qemu_aio_wait();
475 }
476 }
477
478 ret = cco.ret;
479 if (ret < 0) {
480 if (cco.err) {
481 error_propagate(errp, cco.err);
482 } else {
483 error_setg_errno(errp, -ret, "Could not create image");
484 }
485 }
486
487 out:
488 g_free(cco.filename);
489 return ret;
490 }
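/* Usage sketch (illustrative): creating a qcow2 image, following the same
 * pattern bdrv_append_temp_snapshot() uses further down in this file.
 * example_create_qcow2() is a hypothetical helper. */
static int example_create_qcow2(const char *filename, int64_t size,
                                Error **errp)
{
    BlockDriver *drv = bdrv_find_format("qcow2");
    QemuOpts *opts;
    int ret;

    if (!drv) {
        error_setg(errp, "qcow2 driver not available");
        return -ENOENT;
    }
    opts = qemu_opts_create(drv->create_opts, NULL, 0, &error_abort);
    qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size);
    ret = bdrv_create(drv, filename, opts, errp);
    qemu_opts_del(opts);
    return ret;
}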
491
492 int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp)
493 {
494 BlockDriver *drv;
495 Error *local_err = NULL;
496 int ret;
497
498 drv = bdrv_find_protocol(filename, true);
499 if (drv == NULL) {
500 error_setg(errp, "Could not find protocol for file '%s'", filename);
501 return -ENOENT;
502 }
503
504 ret = bdrv_create(drv, filename, opts, &local_err);
505 if (local_err) {
506 error_propagate(errp, local_err);
507 }
508 return ret;
509 }
510
511 int bdrv_refresh_limits(BlockDriverState *bs)
512 {
513 BlockDriver *drv = bs->drv;
514
515 memset(&bs->bl, 0, sizeof(bs->bl));
516
517 if (!drv) {
518 return 0;
519 }
520
521 /* Take some limits from the children as a default */
522 if (bs->file) {
523 bdrv_refresh_limits(bs->file);
524 bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
525 bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
526 } else {
527 bs->bl.opt_mem_alignment = 512;
528 }
529
530 if (bs->backing_hd) {
531 bdrv_refresh_limits(bs->backing_hd);
532 bs->bl.opt_transfer_length =
533 MAX(bs->bl.opt_transfer_length,
534 bs->backing_hd->bl.opt_transfer_length);
535 bs->bl.opt_mem_alignment =
536 MAX(bs->bl.opt_mem_alignment,
537 bs->backing_hd->bl.opt_mem_alignment);
538 }
539
540 /* Then let the driver override it */
541 if (drv->bdrv_refresh_limits) {
542 return drv->bdrv_refresh_limits(bs);
543 }
544
545 return 0;
546 }
547
548 /*
549 * Create a uniquely-named empty temporary file.
550 * Return 0 upon success, otherwise a negative errno value.
551 */
552 int get_tmp_filename(char *filename, int size)
553 {
554 #ifdef _WIN32
555 char temp_dir[MAX_PATH];
556 /* GetTempFileName requires that its output buffer (4th param)
557 have length MAX_PATH or greater. */
558 assert(size >= MAX_PATH);
559 return (GetTempPath(MAX_PATH, temp_dir)
560 && GetTempFileName(temp_dir, "qem", 0, filename)
561 ? 0 : -GetLastError());
562 #else
563 int fd;
564 const char *tmpdir;
565 tmpdir = getenv("TMPDIR");
566 if (!tmpdir) {
567 tmpdir = "/var/tmp";
568 }
569 if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
570 return -EOVERFLOW;
571 }
572 fd = mkstemp(filename);
573 if (fd < 0) {
574 return -errno;
575 }
576 if (close(fd) != 0) {
577 unlink(filename);
578 return -errno;
579 }
580 return 0;
581 #endif
582 }
583
584 /*
585 * Detect host devices. By convention, /dev/cdrom[N] is always
586 * recognized as a host CDROM.
587 */
588 static BlockDriver *find_hdev_driver(const char *filename)
589 {
590 int score_max = 0, score;
591 BlockDriver *drv = NULL, *d;
592
593 QLIST_FOREACH(d, &bdrv_drivers, list) {
594 if (d->bdrv_probe_device) {
595 score = d->bdrv_probe_device(filename);
596 if (score > score_max) {
597 score_max = score;
598 drv = d;
599 }
600 }
601 }
602
603 return drv;
604 }
605
606 BlockDriver *bdrv_find_protocol(const char *filename,
607 bool allow_protocol_prefix)
608 {
609 BlockDriver *drv1;
610 char protocol[128];
611 int len;
612 const char *p;
613
614 /* TODO Drivers without bdrv_file_open must be specified explicitly */
615
616 /*
617 * XXX(hch): we really should not let host device detection
618 * override an explicit protocol specification, but moving this
619 * later breaks access to device names with colons in them.
620 * Thanks to the brain-dead persistent naming schemes on udev-
621 * based Linux systems those actually are quite common.
622 */
623 drv1 = find_hdev_driver(filename);
624 if (drv1) {
625 return drv1;
626 }
627
628 if (!path_has_protocol(filename) || !allow_protocol_prefix) {
629 return bdrv_find_format("file");
630 }
631
632 p = strchr(filename, ':');
633 assert(p != NULL);
634 len = p - filename;
635 if (len > sizeof(protocol) - 1)
636 len = sizeof(protocol) - 1;
637 memcpy(protocol, filename, len);
638 protocol[len] = '\0';
639 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
640 if (drv1->protocol_name &&
641 !strcmp(drv1->protocol_name, protocol)) {
642 return drv1;
643 }
644 }
645 return NULL;
646 }
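/* Illustrative results (assuming the respective drivers are compiled in):
 *   bdrv_find_protocol("nbd:localhost:10809", true) -> the "nbd" driver
 *   bdrv_find_protocol("img.qcow2", true)           -> the "file" driver
 *                       (no "protocol:" prefix, so treated as a plain file)
 *   bdrv_find_protocol("/dev/cdrom", true)          -> a host device driver,
 *                       if one of the bdrv_probe_device() hooks claims it
 */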
647
648 static int find_image_format(BlockDriverState *bs, const char *filename,
649 BlockDriver **pdrv, Error **errp)
650 {
651 int score, score_max;
652 BlockDriver *drv1, *drv;
653 uint8_t buf[2048];
654 int ret = 0;
655
656 /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
657 if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
658 drv = bdrv_find_format("raw");
659 if (!drv) {
660 error_setg(errp, "Could not find raw image format");
661 ret = -ENOENT;
662 }
663 *pdrv = drv;
664 return ret;
665 }
666
667 ret = bdrv_pread(bs, 0, buf, sizeof(buf));
668 if (ret < 0) {
669 error_setg_errno(errp, -ret, "Could not read image for determining its "
670 "format");
671 *pdrv = NULL;
672 return ret;
673 }
674
675 score_max = 0;
676 drv = NULL;
677 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
678 if (drv1->bdrv_probe) {
679 score = drv1->bdrv_probe(buf, ret, filename);
680 if (score > score_max) {
681 score_max = score;
682 drv = drv1;
683 }
684 }
685 }
686 if (!drv) {
687 error_setg(errp, "Could not determine image format: No compatible "
688 "driver found");
689 ret = -ENOENT;
690 }
691 *pdrv = drv;
692 return ret;
693 }
694
695 /**
696 * Set the current 'total_sectors' value
697 */
698 static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
699 {
700 BlockDriver *drv = bs->drv;
701
702 /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
703 if (bs->sg)
704 return 0;
705
706 /* query actual device if possible, otherwise just trust the hint */
707 if (drv->bdrv_getlength) {
708 int64_t length = drv->bdrv_getlength(bs);
709 if (length < 0) {
710 return length;
711 }
712 hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
713 }
714
715 bs->total_sectors = hint;
716 return 0;
717 }
718
719 /**
720 * Set open flags for a given discard mode
721 *
722 * Return 0 on success, -1 if the discard mode was invalid.
723 */
724 int bdrv_parse_discard_flags(const char *mode, int *flags)
725 {
726 *flags &= ~BDRV_O_UNMAP;
727
728 if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
729 /* do nothing */
730 } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
731 *flags |= BDRV_O_UNMAP;
732 } else {
733 return -1;
734 }
735
736 return 0;
737 }
738
739 /**
740 * Set open flags for a given cache mode
741 *
742 * Return 0 on success, -1 if the cache mode was invalid.
743 */
744 int bdrv_parse_cache_flags(const char *mode, int *flags)
745 {
746 *flags &= ~BDRV_O_CACHE_MASK;
747
748 if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
749 *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
750 } else if (!strcmp(mode, "directsync")) {
751 *flags |= BDRV_O_NOCACHE;
752 } else if (!strcmp(mode, "writeback")) {
753 *flags |= BDRV_O_CACHE_WB;
754 } else if (!strcmp(mode, "unsafe")) {
755 *flags |= BDRV_O_CACHE_WB;
756 *flags |= BDRV_O_NO_FLUSH;
757 } else if (!strcmp(mode, "writethrough")) {
758 /* this is the default */
759 } else {
760 return -1;
761 }
762
763 return 0;
764 }
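/* Summary of the mapping implemented above:
 *   off/none     -> BDRV_O_NOCACHE | BDRV_O_CACHE_WB
 *   directsync   -> BDRV_O_NOCACHE
 *   writeback    -> BDRV_O_CACHE_WB
 *   unsafe       -> BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH
 *   writethrough -> (no flags set; this is the default)
 */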
765
766 /**
767 * The copy-on-read flag is actually a reference count so multiple users may
768 * use the feature without worrying about clobbering its previous state.
769 * Copy-on-read stays enabled until all users have called to disable it.
770 */
771 void bdrv_enable_copy_on_read(BlockDriverState *bs)
772 {
773 bs->copy_on_read++;
774 }
775
776 void bdrv_disable_copy_on_read(BlockDriverState *bs)
777 {
778 assert(bs->copy_on_read > 0);
779 bs->copy_on_read--;
780 }
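/* Usage sketch (illustrative): because copy-on-read is reference counted,
 * independent users simply pair enable with disable and need not worry
 * about each other. example_copy_on_read_user() is a hypothetical name. */
static void example_copy_on_read_user(BlockDriverState *bs)
{
    bdrv_enable_copy_on_read(bs);
    /* ... reads served from the backing file now populate bs ... */
    bdrv_disable_copy_on_read(bs);
}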
781
782 /*
783 * Returns the flags that a temporary snapshot should get, based on the
784 * originally requested flags (the originally requested image will have flags
785 * like a backing file)
786 */
787 static int bdrv_temp_snapshot_flags(int flags)
788 {
789 return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY;
790 }
791
792 /*
793 * Returns the flags that bs->file should get, based on the given flags for
794 * the parent BDS
795 */
796 static int bdrv_inherited_flags(int flags)
797 {
798 /* Enable protocol handling, disable format probing for bs->file */
799 flags |= BDRV_O_PROTOCOL;
800
801 /* Our block drivers take care to send flushes and respect unmap policy,
802 * so we can enable both unconditionally on lower layers. */
803 flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;
804
805 /* Clear flags that only apply to the top layer */
806 flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);
807
808 return flags;
809 }
810
811 /*
812 * Returns the flags that bs->backing_hd should get, based on the given flags
813 * for the parent BDS
814 */
815 static int bdrv_backing_flags(int flags)
816 {
817 /* backing files always opened read-only */
818 flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ);
819
820 /* snapshot=on is handled on the top layer */
821 flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY);
822
823 return flags;
824 }
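/* Summary (derived from the two helpers above) of how the flags F given for
 * a format BDS propagate to its children:
 *   bs->file       <- bdrv_inherited_flags(F):
 *                     + BDRV_O_PROTOCOL, BDRV_O_CACHE_WB, BDRV_O_UNMAP
 *                     - BDRV_O_SNAPSHOT, BDRV_O_NO_BACKING, BDRV_O_COPY_ON_READ
 *   bs->backing_hd <- bdrv_backing_flags(F):
 *                     - BDRV_O_RDWR, BDRV_O_COPY_ON_READ,
 *                       BDRV_O_SNAPSHOT, BDRV_O_TEMPORARY
 */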
825
826 static int bdrv_open_flags(BlockDriverState *bs, int flags)
827 {
828 int open_flags = flags | BDRV_O_CACHE_WB;
829
830 /*
831 * Clear flags that are internal to the block layer before opening the
832 * image.
833 */
834 open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
835
836 /*
837 * Snapshots should be writable.
838 */
839 if (flags & BDRV_O_TEMPORARY) {
840 open_flags |= BDRV_O_RDWR;
841 }
842
843 return open_flags;
844 }
845
846 static void bdrv_assign_node_name(BlockDriverState *bs,
847 const char *node_name,
848 Error **errp)
849 {
850 if (!node_name) {
851 return;
852 }
853
854 /* empty string node name is invalid */
855 if (node_name[0] == '\0') {
856 error_setg(errp, "Empty node name");
857 return;
858 }
859
860     /* takes care of avoiding namespace collisions */
861 if (bdrv_find(node_name)) {
862 error_setg(errp, "node-name=%s is conflicting with a device id",
863 node_name);
864 return;
865 }
866
867     /* takes care of avoiding duplicate node names */
868 if (bdrv_find_node(node_name)) {
869 error_setg(errp, "Duplicate node name");
870 return;
871 }
872
873 /* copy node name into the bs and insert it into the graph list */
874 pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
875 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
876 }
877
878 /*
879 * Common part for opening disk images and files
880 *
881 * Removes all processed options from *options.
882 */
883 static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
884 QDict *options, int flags, BlockDriver *drv, Error **errp)
885 {
886 int ret, open_flags;
887 const char *filename;
888 const char *node_name = NULL;
889 Error *local_err = NULL;
890
891 assert(drv != NULL);
892 assert(bs->file == NULL);
893 assert(options != NULL && bs->options != options);
894
895 if (file != NULL) {
896 filename = file->filename;
897 } else {
898 filename = qdict_get_try_str(options, "filename");
899 }
900
901 if (drv->bdrv_needs_filename && !filename) {
902 error_setg(errp, "The '%s' block driver requires a file name",
903 drv->format_name);
904 return -EINVAL;
905 }
906
907 trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);
908
909 node_name = qdict_get_try_str(options, "node-name");
910 bdrv_assign_node_name(bs, node_name, &local_err);
911 if (local_err) {
912 error_propagate(errp, local_err);
913 return -EINVAL;
914 }
915 qdict_del(options, "node-name");
916
917     /* bdrv_open() was called directly with a protocol driver as drv. This
918      * layer is already opened, so assign it to bs (while file becomes a
919      * closed BlockDriverState) and return immediately. */
920 if (file != NULL && drv->bdrv_file_open) {
921 bdrv_swap(file, bs);
922 return 0;
923 }
924
925 bs->open_flags = flags;
926 bs->guest_block_size = 512;
927 bs->request_alignment = 512;
928 bs->zero_beyond_eof = true;
929 open_flags = bdrv_open_flags(bs, flags);
930 bs->read_only = !(open_flags & BDRV_O_RDWR);
931
932 if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
933 error_setg(errp,
934 !bs->read_only && bdrv_is_whitelisted(drv, true)
935 ? "Driver '%s' can only be used for read-only devices"
936 : "Driver '%s' is not whitelisted",
937 drv->format_name);
938 return -ENOTSUP;
939 }
940
941 assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
942 if (flags & BDRV_O_COPY_ON_READ) {
943 if (!bs->read_only) {
944 bdrv_enable_copy_on_read(bs);
945 } else {
946 error_setg(errp, "Can't use copy-on-read on read-only device");
947 return -EINVAL;
948 }
949 }
950
951 if (filename != NULL) {
952 pstrcpy(bs->filename, sizeof(bs->filename), filename);
953 } else {
954 bs->filename[0] = '\0';
955 }
956
957 bs->drv = drv;
958 bs->opaque = g_malloc0(drv->instance_size);
959
960 bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);
961
962 /* Open the image, either directly or using a protocol */
963 if (drv->bdrv_file_open) {
964 assert(file == NULL);
965 assert(!drv->bdrv_needs_filename || filename != NULL);
966 ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
967 } else {
968 if (file == NULL) {
969 error_setg(errp, "Can't use '%s' as a block driver for the "
970 "protocol level", drv->format_name);
971 ret = -EINVAL;
972 goto free_and_fail;
973 }
974 bs->file = file;
975 ret = drv->bdrv_open(bs, options, open_flags, &local_err);
976 }
977
978 if (ret < 0) {
979 if (local_err) {
980 error_propagate(errp, local_err);
981 } else if (bs->filename[0]) {
982 error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
983 } else {
984 error_setg_errno(errp, -ret, "Could not open image");
985 }
986 goto free_and_fail;
987 }
988
989 ret = refresh_total_sectors(bs, bs->total_sectors);
990 if (ret < 0) {
991 error_setg_errno(errp, -ret, "Could not refresh total sector count");
992 goto free_and_fail;
993 }
994
995 bdrv_refresh_limits(bs);
996 assert(bdrv_opt_mem_align(bs) != 0);
997 assert((bs->request_alignment != 0) || bs->sg);
998 return 0;
999
1000 free_and_fail:
1001 bs->file = NULL;
1002 g_free(bs->opaque);
1003 bs->opaque = NULL;
1004 bs->drv = NULL;
1005 return ret;
1006 }
1007
1008 static QDict *parse_json_filename(const char *filename, Error **errp)
1009 {
1010 QObject *options_obj;
1011 QDict *options;
1012 int ret;
1013
1014 ret = strstart(filename, "json:", &filename);
1015 assert(ret);
1016
1017 options_obj = qobject_from_json(filename);
1018 if (!options_obj) {
1019 error_setg(errp, "Could not parse the JSON options");
1020 return NULL;
1021 }
1022
1023 if (qobject_type(options_obj) != QTYPE_QDICT) {
1024 qobject_decref(options_obj);
1025 error_setg(errp, "Invalid JSON object given");
1026 return NULL;
1027 }
1028
1029 options = qobject_to_qdict(options_obj);
1030 qdict_flatten(options);
1031
1032 return options;
1033 }
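/* Example (illustrative): the pseudo-protocol filename
 *     json:{"driver": "qcow2", "file": {"filename": "disk.img"}}
 * parses into a nested QDict which qdict_flatten() turns into
 *     {"driver": "qcow2", "file.filename": "disk.img"}
 * so it can be merged with options given directly by the caller. */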
1034
1035 /*
1036 * Fills in default options for opening images and converts the legacy
1037 * filename/flags pair to option QDict entries.
1038 */
1039 static int bdrv_fill_options(QDict **options, const char **pfilename, int flags,
1040 BlockDriver *drv, Error **errp)
1041 {
1042 const char *filename = *pfilename;
1043 const char *drvname;
1044 bool protocol = flags & BDRV_O_PROTOCOL;
1045 bool parse_filename = false;
1046 Error *local_err = NULL;
1047
1048 /* Parse json: pseudo-protocol */
1049 if (filename && g_str_has_prefix(filename, "json:")) {
1050 QDict *json_options = parse_json_filename(filename, &local_err);
1051 if (local_err) {
1052 error_propagate(errp, local_err);
1053 return -EINVAL;
1054 }
1055
1056 /* Options given in the filename have lower priority than options
1057 * specified directly */
1058 qdict_join(*options, json_options, false);
1059 QDECREF(json_options);
1060 *pfilename = filename = NULL;
1061 }
1062
1063 /* Fetch the file name from the options QDict if necessary */
1064 if (protocol && filename) {
1065 if (!qdict_haskey(*options, "filename")) {
1066 qdict_put(*options, "filename", qstring_from_str(filename));
1067 parse_filename = true;
1068 } else {
1069 error_setg(errp, "Can't specify 'file' and 'filename' options at "
1070 "the same time");
1071 return -EINVAL;
1072 }
1073 }
1074
1075 /* Find the right block driver */
1076 filename = qdict_get_try_str(*options, "filename");
1077 drvname = qdict_get_try_str(*options, "driver");
1078
1079 if (drv) {
1080 if (drvname) {
1081 error_setg(errp, "Driver specified twice");
1082 return -EINVAL;
1083 }
1084 drvname = drv->format_name;
1085 qdict_put(*options, "driver", qstring_from_str(drvname));
1086 } else {
1087 if (!drvname && protocol) {
1088 if (filename) {
1089 drv = bdrv_find_protocol(filename, parse_filename);
1090 if (!drv) {
1091 error_setg(errp, "Unknown protocol");
1092 return -EINVAL;
1093 }
1094
1095 drvname = drv->format_name;
1096 qdict_put(*options, "driver", qstring_from_str(drvname));
1097 } else {
1098 error_setg(errp, "Must specify either driver or file");
1099 return -EINVAL;
1100 }
1101 } else if (drvname) {
1102 drv = bdrv_find_format(drvname);
1103 if (!drv) {
1104 error_setg(errp, "Unknown driver '%s'", drvname);
1105 return -ENOENT;
1106 }
1107 }
1108 }
1109
1110 assert(drv || !protocol);
1111
1112 /* Driver-specific filename parsing */
1113 if (drv && drv->bdrv_parse_filename && parse_filename) {
1114 drv->bdrv_parse_filename(filename, *options, &local_err);
1115 if (local_err) {
1116 error_propagate(errp, local_err);
1117 return -EINVAL;
1118 }
1119
1120 if (!drv->bdrv_needs_filename) {
1121 qdict_del(*options, "filename");
1122 }
1123 }
1124
1125 return 0;
1126 }
1127
1128 void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd)
1129 {
1130
1131 if (bs->backing_hd) {
1132 assert(bs->backing_blocker);
1133 bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker);
1134 } else if (backing_hd) {
1135 error_setg(&bs->backing_blocker,
1136 "device is used as backing hd of '%s'",
1137 bs->device_name);
1138 }
1139
1140 bs->backing_hd = backing_hd;
1141 if (!backing_hd) {
1142 error_free(bs->backing_blocker);
1143 bs->backing_blocker = NULL;
1144 goto out;
1145 }
1146 bs->open_flags &= ~BDRV_O_NO_BACKING;
1147 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_hd->filename);
1148 pstrcpy(bs->backing_format, sizeof(bs->backing_format),
1149 backing_hd->drv ? backing_hd->drv->format_name : "");
1150
1151 bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
1152     /* Otherwise we won't be able to commit due to the check in bdrv_commit */
1153 bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT,
1154 bs->backing_blocker);
1155 out:
1156 bdrv_refresh_limits(bs);
1157 }
1158
1159 /*
1160 * Opens the backing file for a BlockDriverState if not yet open
1161 *
1162 * options is a QDict of options to pass to the block drivers, or NULL for an
1163 * empty set of options. The reference to the QDict is transferred to this
1164 * function (even on failure), so if the caller intends to reuse the dictionary,
1165  * it needs to use QINCREF() before calling bdrv_open_backing_file.
1166 */
1167 int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
1168 {
1169 char *backing_filename = g_malloc0(PATH_MAX);
1170 int ret = 0;
1171 BlockDriver *back_drv = NULL;
1172 BlockDriverState *backing_hd;
1173 Error *local_err = NULL;
1174
1175 if (bs->backing_hd != NULL) {
1176 QDECREF(options);
1177 goto free_exit;
1178 }
1179
1180 /* NULL means an empty set of options */
1181 if (options == NULL) {
1182 options = qdict_new();
1183 }
1184
1185 bs->open_flags &= ~BDRV_O_NO_BACKING;
1186 if (qdict_haskey(options, "file.filename")) {
1187 backing_filename[0] = '\0';
1188 } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
1189 QDECREF(options);
1190 goto free_exit;
1191 } else {
1192 bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX);
1193 }
1194
1195 backing_hd = bdrv_new("", errp);
1196
1197 if (bs->backing_format[0] != '\0') {
1198 back_drv = bdrv_find_format(bs->backing_format);
1199 }
1200
1201 assert(bs->backing_hd == NULL);
1202 ret = bdrv_open(&backing_hd,
1203 *backing_filename ? backing_filename : NULL, NULL, options,
1204 bdrv_backing_flags(bs->open_flags), back_drv, &local_err);
1205 if (ret < 0) {
1206 bdrv_unref(backing_hd);
1207 backing_hd = NULL;
1208 bs->open_flags |= BDRV_O_NO_BACKING;
1209 error_setg(errp, "Could not open backing file: %s",
1210 error_get_pretty(local_err));
1211 error_free(local_err);
1212 goto free_exit;
1213 }
1214 bdrv_set_backing_hd(bs, backing_hd);
1215
1216 free_exit:
1217 g_free(backing_filename);
1218 return ret;
1219 }
1220
1221 /*
1222 * Opens a disk image whose options are given as BlockdevRef in another block
1223 * device's options.
1224 *
1225  * If allow_none is true, no image will be opened if filename is NULL and no
1226 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
1227 *
1228  * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
1229 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
1230 * itself, all options starting with "${bdref_key}." are considered part of the
1231 * BlockdevRef.
1232 *
1233 * The BlockdevRef will be removed from the options QDict.
1234 *
1235 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
1236 */
1237 int bdrv_open_image(BlockDriverState **pbs, const char *filename,
1238 QDict *options, const char *bdref_key, int flags,
1239 bool allow_none, Error **errp)
1240 {
1241 QDict *image_options;
1242 int ret;
1243 char *bdref_key_dot;
1244 const char *reference;
1245
1246 assert(pbs);
1247 assert(*pbs == NULL);
1248
1249 bdref_key_dot = g_strdup_printf("%s.", bdref_key);
1250 qdict_extract_subqdict(options, &image_options, bdref_key_dot);
1251 g_free(bdref_key_dot);
1252
1253 reference = qdict_get_try_str(options, bdref_key);
1254 if (!filename && !reference && !qdict_size(image_options)) {
1255 if (allow_none) {
1256 ret = 0;
1257 } else {
1258 error_setg(errp, "A block device must be specified for \"%s\"",
1259 bdref_key);
1260 ret = -EINVAL;
1261 }
1262 QDECREF(image_options);
1263 goto done;
1264 }
1265
1266 ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);
1267
1268 done:
1269 qdict_del(options, bdref_key);
1270 return ret;
1271 }
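/* Example (illustrative): with options containing
 *     {"backing.driver": "qcow2", "backing.file.filename": "base.img"}
 * a hypothetical caller could do
 *     bdrv_open_image(&backing_bs, NULL, options, "backing", flags,
 *                     true, errp);
 * and the whole "backing." subtree would be split off into its own QDict
 * and opened as one block device. */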
1272
1273 void bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)
1274 {
1275 /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
1276 char *tmp_filename = g_malloc0(PATH_MAX + 1);
1277 int64_t total_size;
1278 BlockDriver *bdrv_qcow2;
1279 QemuOpts *opts = NULL;
1280 QDict *snapshot_options;
1281 BlockDriverState *bs_snapshot;
1282     Error *local_err = NULL;
1283 int ret;
1284
1285 /* if snapshot, we create a temporary backing file and open it
1286 instead of opening 'filename' directly */
1287
1288 /* Get the required size from the image */
1289 total_size = bdrv_getlength(bs);
1290 if (total_size < 0) {
1291 error_setg_errno(errp, -total_size, "Could not get image size");
1292 goto out;
1293 }
1294 total_size &= BDRV_SECTOR_MASK;
1295
1296 /* Create the temporary image */
1297 ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
1298 if (ret < 0) {
1299 error_setg_errno(errp, -ret, "Could not get temporary filename");
1300 goto out;
1301 }
1302
1303 bdrv_qcow2 = bdrv_find_format("qcow2");
1304 opts = qemu_opts_create(bdrv_qcow2->create_opts, NULL, 0,
1305 &error_abort);
1306 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_size);
1307 ret = bdrv_create(bdrv_qcow2, tmp_filename, opts, &local_err);
1308 qemu_opts_del(opts);
1309 if (ret < 0) {
1310 error_setg_errno(errp, -ret, "Could not create temporary overlay "
1311 "'%s': %s", tmp_filename,
1312 error_get_pretty(local_err));
1313 error_free(local_err);
1314 goto out;
1315 }
1316
1317 /* Prepare a new options QDict for the temporary file */
1318 snapshot_options = qdict_new();
1319 qdict_put(snapshot_options, "file.driver",
1320 qstring_from_str("file"));
1321 qdict_put(snapshot_options, "file.filename",
1322 qstring_from_str(tmp_filename));
1323
1324 bs_snapshot = bdrv_new("", &error_abort);
1325
1326 ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
1327 flags, bdrv_qcow2, &local_err);
1328 if (ret < 0) {
1329 error_propagate(errp, local_err);
1330 goto out;
1331 }
1332
1333 bdrv_append(bs_snapshot, bs);
1334
1335 out:
1336 g_free(tmp_filename);
1337 }
1338
1339 /*
1340 * Opens a disk image (raw, qcow2, vmdk, ...)
1341 *
1342 * options is a QDict of options to pass to the block drivers, or NULL for an
1343 * empty set of options. The reference to the QDict belongs to the block layer
1344 * after the call (even on failure), so if the caller intends to reuse the
1345 * dictionary, it needs to use QINCREF() before calling bdrv_open.
1346 *
1347 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there.
1348 * If it is not NULL, the referenced BDS will be reused.
1349 *
1350 * The reference parameter may be used to specify an existing block device which
1351 * should be opened. If specified, neither options nor a filename may be given,
1352 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
1353 */
1354 int bdrv_open(BlockDriverState **pbs, const char *filename,
1355 const char *reference, QDict *options, int flags,
1356 BlockDriver *drv, Error **errp)
1357 {
1358 int ret;
1359 BlockDriverState *file = NULL, *bs;
1360 const char *drvname;
1361 Error *local_err = NULL;
1362 int snapshot_flags = 0;
1363
1364 assert(pbs);
1365
1366 if (reference) {
1367 bool options_non_empty = options ? qdict_size(options) : false;
1368 QDECREF(options);
1369
1370 if (*pbs) {
1371 error_setg(errp, "Cannot reuse an existing BDS when referencing "
1372 "another block device");
1373 return -EINVAL;
1374 }
1375
1376 if (filename || options_non_empty) {
1377 error_setg(errp, "Cannot reference an existing block device with "
1378 "additional options or a new filename");
1379 return -EINVAL;
1380 }
1381
1382 bs = bdrv_lookup_bs(reference, reference, errp);
1383 if (!bs) {
1384 return -ENODEV;
1385 }
1386 bdrv_ref(bs);
1387 *pbs = bs;
1388 return 0;
1389 }
1390
1391 if (*pbs) {
1392 bs = *pbs;
1393 } else {
1394 bs = bdrv_new("", &error_abort);
1395 }
1396
1397 /* NULL means an empty set of options */
1398 if (options == NULL) {
1399 options = qdict_new();
1400 }
1401
1402 ret = bdrv_fill_options(&options, &filename, flags, drv, &local_err);
1403 if (local_err) {
1404 goto fail;
1405 }
1406
1407 bs->options = options;
1408 options = qdict_clone_shallow(options);
1409
1410 /* Open image file without format layer */
1411 if ((flags & BDRV_O_PROTOCOL) == 0) {
1412 if (flags & BDRV_O_RDWR) {
1413 flags |= BDRV_O_ALLOW_RDWR;
1414 }
1415 if (flags & BDRV_O_SNAPSHOT) {
1416 snapshot_flags = bdrv_temp_snapshot_flags(flags);
1417 flags = bdrv_backing_flags(flags);
1418 }
1419
1420 assert(file == NULL);
1421 ret = bdrv_open_image(&file, filename, options, "file",
1422 bdrv_inherited_flags(flags),
1423 true, &local_err);
1424 if (ret < 0) {
1425 goto fail;
1426 }
1427 }
1428
1429 /* Find the right image format driver */
1430 drv = NULL;
1431 drvname = qdict_get_try_str(options, "driver");
1432 assert(drvname || !(flags & BDRV_O_PROTOCOL));
1433
1434 if (drvname) {
1435 drv = bdrv_find_format(drvname);
1436 qdict_del(options, "driver");
1437 if (!drv) {
1438 error_setg(errp, "Unknown driver: '%s'", drvname);
1439 ret = -EINVAL;
1440 goto fail;
1441 }
1442 } else if (file) {
1443 ret = find_image_format(file, filename, &drv, &local_err);
1444 if (ret < 0) {
1445 goto fail;
1446 }
1447 } else {
1448 error_setg(errp, "Must specify either driver or file");
1449 ret = -EINVAL;
1450 goto fail;
1451 }
1452
1453 /* Open the image */
1454 if (flags & BDRV_O_PROTOCOL) {
1455 if (!drv->bdrv_file_open) {
1456 const char *filename;
1457 filename = qdict_get_try_str(options, "filename");
1458 ret = bdrv_open(&bs, filename, NULL, options,
1459 flags & ~BDRV_O_PROTOCOL, drv, &local_err);
1460 options = NULL;
1461 } else {
1462 ret = bdrv_open_common(bs, NULL, options,
1463 flags & ~BDRV_O_PROTOCOL, drv, &local_err);
1464 }
1465 if (!ret) {
1466 bs->growable = 1;
1467 goto done;
1468 } else if (bs->drv) {
1469 goto close_and_fail;
1470 } else {
1471 goto fail;
1472 }
1473 }
1474
1475 ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
1476 if (ret < 0) {
1477 goto fail;
1478 }
1479
1480 if (file && (bs->file != file)) {
1481 bdrv_unref(file);
1482 file = NULL;
1483 }
1484
1485 /* If there is a backing file, use it */
1486 if ((flags & BDRV_O_NO_BACKING) == 0) {
1487 QDict *backing_options;
1488
1489 qdict_extract_subqdict(options, &backing_options, "backing.");
1490 ret = bdrv_open_backing_file(bs, backing_options, &local_err);
1491 if (ret < 0) {
1492 goto close_and_fail;
1493 }
1494 }
1495
1496 /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
1497 * temporary snapshot afterwards. */
1498 if (snapshot_flags) {
1499 bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err);
1500 if (local_err) {
1501 error_propagate(errp, local_err);
1502 goto close_and_fail;
1503 }
1504 }
1505
1506
1507 done:
1508 /* Check if any unknown options were used */
1509 if (options && (qdict_size(options) != 0)) {
1510 const QDictEntry *entry = qdict_first(options);
1511 if (flags & BDRV_O_PROTOCOL) {
1512 error_setg(errp, "Block protocol '%s' doesn't support the option "
1513 "'%s'", drv->format_name, entry->key);
1514 } else {
1515 error_setg(errp, "Block format '%s' used by device '%s' doesn't "
1516 "support the option '%s'", drv->format_name,
1517 bs->device_name, entry->key);
1518 }
1519
1520 ret = -EINVAL;
1521 goto close_and_fail;
1522 }
1523
1524 if (!bdrv_key_required(bs)) {
1525 bdrv_dev_change_media_cb(bs, true);
1526 } else if (!runstate_check(RUN_STATE_PRELAUNCH)
1527 && !runstate_check(RUN_STATE_INMIGRATE)
1528 && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
1529 error_setg(errp,
1530 "Guest must be stopped for opening of encrypted image");
1531 ret = -EBUSY;
1532 goto close_and_fail;
1533 }
1534
1535 QDECREF(options);
1536 *pbs = bs;
1537 return 0;
1538
1539 fail:
1540 if (file != NULL) {
1541 bdrv_unref(file);
1542 }
1543 QDECREF(bs->options);
1544 QDECREF(options);
1545 bs->options = NULL;
1546 if (!*pbs) {
1547 /* If *pbs is NULL, a new BDS has been created in this function and
1548 needs to be freed now. Otherwise, it does not need to be closed,
1549 since it has not really been opened yet. */
1550 bdrv_unref(bs);
1551 }
1552 if (local_err) {
1553 error_propagate(errp, local_err);
1554 }
1555 return ret;
1556
1557 close_and_fail:
1558 /* See fail path, but now the BDS has to be always closed */
1559 if (*pbs) {
1560 bdrv_close(bs);
1561 } else {
1562 bdrv_unref(bs);
1563 }
1564 QDECREF(options);
1565 if (local_err) {
1566 error_propagate(errp, local_err);
1567 }
1568 return ret;
1569 }
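/* Usage sketch (illustrative): opening an image read/write with an explicit
 * format driver via the options QDict. example_open_qcow2() is a
 * hypothetical helper. */
static BlockDriverState *example_open_qcow2(const char *filename, Error **errp)
{
    BlockDriverState *bs = NULL;
    QDict *options = qdict_new();

    qdict_put(options, "driver", qstring_from_str("qcow2"));
    if (bdrv_open(&bs, filename, NULL, options, BDRV_O_RDWR, NULL, errp) < 0) {
        /* on failure, bdrv_open() has already released the new BDS and
         * consumed the options reference */
        return NULL;
    }
    return bs;
}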
1570
1571 typedef struct BlockReopenQueueEntry {
1572 bool prepared;
1573 BDRVReopenState state;
1574 QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
1575 } BlockReopenQueueEntry;
1576
1577 /*
1578 * Adds a BlockDriverState to a simple queue for an atomic, transactional
1579 * reopen of multiple devices.
1580 *
1581  * bs_queue can either be an existing BlockReopenQueue that has had QSIMPLEQ_INIT
1582  * already performed, or may be NULL, in which case a new BlockReopenQueue
1583  * will be created and initialized. This newly created BlockReopenQueue should be
1584 * passed back in for subsequent calls that are intended to be of the same
1585 * atomic 'set'.
1586 *
1587 * bs is the BlockDriverState to add to the reopen queue.
1588 *
1589 * flags contains the open flags for the associated bs
1590 *
1591 * returns a pointer to bs_queue, which is either the newly allocated
1592 * bs_queue, or the existing bs_queue being used.
1593 *
1594 */
1595 BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
1596 BlockDriverState *bs, int flags)
1597 {
1598 assert(bs != NULL);
1599
1600 BlockReopenQueueEntry *bs_entry;
1601 if (bs_queue == NULL) {
1602 bs_queue = g_new0(BlockReopenQueue, 1);
1603 QSIMPLEQ_INIT(bs_queue);
1604 }
1605
1606 /* bdrv_open() masks this flag out */
1607 flags &= ~BDRV_O_PROTOCOL;
1608
1609 if (bs->file) {
1610 bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags));
1611 }
1612
1613 bs_entry = g_new0(BlockReopenQueueEntry, 1);
1614 QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);
1615
1616 bs_entry->state.bs = bs;
1617 bs_entry->state.flags = flags;
1618
1619 return bs_queue;
1620 }
1621
1622 /*
1623 * Reopen multiple BlockDriverStates atomically & transactionally.
1624 *
1625  * The queue passed in (bs_queue) must have been built up previously
1626 * via bdrv_reopen_queue().
1627 *
1628 * Reopens all BDS specified in the queue, with the appropriate
1629 * flags. All devices are prepared for reopen, and failure of any
1630  * device will cause all device changes to be abandoned, and intermediate
1631 * data cleaned up.
1632 *
1633 * If all devices prepare successfully, then the changes are committed
1634 * to all devices.
1635 *
1636 */
1637 int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
1638 {
1639 int ret = -1;
1640 BlockReopenQueueEntry *bs_entry, *next;
1641 Error *local_err = NULL;
1642
1643 assert(bs_queue != NULL);
1644
1645 bdrv_drain_all();
1646
1647 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1648 if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
1649 error_propagate(errp, local_err);
1650 goto cleanup;
1651 }
1652 bs_entry->prepared = true;
1653 }
1654
1655 /* If we reach this point, we have success and just need to apply the
1656 * changes
1657 */
1658 QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
1659 bdrv_reopen_commit(&bs_entry->state);
1660 }
1661
1662 ret = 0;
1663
1664 cleanup:
1665 QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
1666 if (ret && bs_entry->prepared) {
1667 bdrv_reopen_abort(&bs_entry->state);
1668 }
1669 g_free(bs_entry);
1670 }
1671 g_free(bs_queue);
1672 return ret;
1673 }
1674
1675
1676 /* Reopen a single BlockDriverState with the specified flags. */
1677 int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
1678 {
1679 int ret = -1;
1680 Error *local_err = NULL;
1681 BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);
1682
1683 ret = bdrv_reopen_multiple(queue, &local_err);
1684 if (local_err != NULL) {
1685 error_propagate(errp, local_err);
1686 }
1687 return ret;
1688 }
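/* Usage sketch (illustrative): reopening two devices read-only as one
 * atomic transaction, just as bdrv_reopen() above does for a single device.
 * example_reopen_two_read_only() is a hypothetical helper. */
static int example_reopen_two_read_only(BlockDriverState *a,
                                        BlockDriverState *b, Error **errp)
{
    BlockReopenQueue *queue = NULL;

    queue = bdrv_reopen_queue(queue, a, a->open_flags & ~BDRV_O_RDWR);
    queue = bdrv_reopen_queue(queue, b, b->open_flags & ~BDRV_O_RDWR);
    /* bdrv_reopen_multiple() frees the queue whether it succeeds or not */
    return bdrv_reopen_multiple(queue, errp);
}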
1689
1690
1691 /*
1692 * Prepares a BlockDriverState for reopen. All changes are staged in the
1693 * 'opaque' field of the BDRVReopenState, which is used and allocated by
1694  * the block driver layer's .bdrv_reopen_prepare()
1695 *
1696 * bs is the BlockDriverState to reopen
1697 * flags are the new open flags
1698 * queue is the reopen queue
1699 *
1700 * Returns 0 on success, non-zero on error. On error errp will be set
1701 * as well.
1702 *
1703 * On failure, bdrv_reopen_abort() will be called to clean up any data.
1704 * It is the responsibility of the caller to then call the abort() or
1705 * commit() for any other BDS that have been left in a prepare() state
1706 *
1707 */
1708 int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
1709 Error **errp)
1710 {
1711 int ret = -1;
1712 Error *local_err = NULL;
1713 BlockDriver *drv;
1714
1715 assert(reopen_state != NULL);
1716 assert(reopen_state->bs->drv != NULL);
1717 drv = reopen_state->bs->drv;
1718
1719 /* if we are to stay read-only, do not allow permission change
1720 * to r/w */
1721 if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
1722 reopen_state->flags & BDRV_O_RDWR) {
1723 error_set(errp, QERR_DEVICE_IS_READ_ONLY,
1724 reopen_state->bs->device_name);
1725 goto error;
1726 }
1727
1728
1729 ret = bdrv_flush(reopen_state->bs);
1730 if (ret) {
1731 error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
1732 strerror(-ret));
1733 goto error;
1734 }
1735
1736 if (drv->bdrv_reopen_prepare) {
1737 ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
1738 if (ret) {
1739 if (local_err != NULL) {
1740 error_propagate(errp, local_err);
1741 } else {
1742 error_setg(errp, "failed while preparing to reopen image '%s'",
1743 reopen_state->bs->filename);
1744 }
1745 goto error;
1746 }
1747 } else {
1748 /* It is currently mandatory to have a bdrv_reopen_prepare()
1749 * handler for each supported drv. */
1750 error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
1751 drv->format_name, reopen_state->bs->device_name,
1752 "reopening of file");
1753 ret = -1;
1754 goto error;
1755 }
1756
1757 ret = 0;
1758
1759 error:
1760 return ret;
1761 }
1762
1763 /*
1764 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
1765 * makes them final by swapping the staging BlockDriverState contents into
1766 * the active BlockDriverState contents.
1767 */
1768 void bdrv_reopen_commit(BDRVReopenState *reopen_state)
1769 {
1770 BlockDriver *drv;
1771
1772 assert(reopen_state != NULL);
1773 drv = reopen_state->bs->drv;
1774 assert(drv != NULL);
1775
1776 /* If there are any driver level actions to take */
1777 if (drv->bdrv_reopen_commit) {
1778 drv->bdrv_reopen_commit(reopen_state);
1779 }
1780
1781 /* set BDS specific flags now */
1782 reopen_state->bs->open_flags = reopen_state->flags;
1783 reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
1784 BDRV_O_CACHE_WB);
1785 reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
1786
1787 bdrv_refresh_limits(reopen_state->bs);
1788 }
1789
1790 /*
1791 * Abort the reopen, and delete and free the staged changes in
1792 * reopen_state
1793 */
1794 void bdrv_reopen_abort(BDRVReopenState *reopen_state)
1795 {
1796 BlockDriver *drv;
1797
1798 assert(reopen_state != NULL);
1799 drv = reopen_state->bs->drv;
1800 assert(drv != NULL);
1801
1802 if (drv->bdrv_reopen_abort) {
1803 drv->bdrv_reopen_abort(reopen_state);
1804 }
1805 }
1806
1807
1808 void bdrv_close(BlockDriverState *bs)
1809 {
1810 if (bs->job) {
1811 block_job_cancel_sync(bs->job);
1812 }
1813 bdrv_drain_all(); /* complete I/O */
1814 bdrv_flush(bs);
1815 bdrv_drain_all(); /* in case flush left pending I/O */
1816 notifier_list_notify(&bs->close_notifiers, bs);
1817
1818 if (bs->drv) {
1819 if (bs->backing_hd) {
1820 BlockDriverState *backing_hd = bs->backing_hd;
1821 bdrv_set_backing_hd(bs, NULL);
1822 bdrv_unref(backing_hd);
1823 }
1824 bs->drv->bdrv_close(bs);
1825 g_free(bs->opaque);
1826 bs->opaque = NULL;
1827 bs->drv = NULL;
1828 bs->copy_on_read = 0;
1829 bs->backing_file[0] = '\0';
1830 bs->backing_format[0] = '\0';
1831 bs->total_sectors = 0;
1832 bs->encrypted = 0;
1833 bs->valid_key = 0;
1834 bs->sg = 0;
1835 bs->growable = 0;
1836 bs->zero_beyond_eof = false;
1837 QDECREF(bs->options);
1838 bs->options = NULL;
1839
1840 if (bs->file != NULL) {
1841 bdrv_unref(bs->file);
1842 bs->file = NULL;
1843 }
1844 }
1845
1846 bdrv_dev_change_media_cb(bs, false);
1847
1848 /*throttling disk I/O limits*/
1849 if (bs->io_limits_enabled) {
1850 bdrv_io_limits_disable(bs);
1851 }
1852 }
1853
1854 void bdrv_close_all(void)
1855 {
1856 BlockDriverState *bs;
1857
1858 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
1859 AioContext *aio_context = bdrv_get_aio_context(bs);
1860
1861 aio_context_acquire(aio_context);
1862 bdrv_close(bs);
1863 aio_context_release(aio_context);
1864 }
1865 }
1866
1867 /* Check if any requests are in-flight (including throttled requests) */
1868 static bool bdrv_requests_pending(BlockDriverState *bs)
1869 {
1870 if (!QLIST_EMPTY(&bs->tracked_requests)) {
1871 return true;
1872 }
1873 if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
1874 return true;
1875 }
1876 if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
1877 return true;
1878 }
1879 if (bs->file && bdrv_requests_pending(bs->file)) {
1880 return true;
1881 }
1882 if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
1883 return true;
1884 }
1885 return false;
1886 }
1887
1888 /*
1889 * Wait for pending requests to complete across all BlockDriverStates
1890 *
1891 * This function does not flush data to disk, use bdrv_flush_all() for that
1892 * after calling this function.
1893 *
1894 * Note that completion of an asynchronous I/O operation can trigger any
1895 * number of other I/O operations on other devices---for example a coroutine
1896 * can be arbitrarily complex and a constant flow of I/O can come until the
1897 * coroutine is complete. Because of this, it is not possible to have a
1898 * function to drain a single device's I/O queue.
1899 */
1900 void bdrv_drain_all(void)
1901 {
1902 /* Always run first iteration so any pending completion BHs run */
1903 bool busy = true;
1904 BlockDriverState *bs;
1905
1906 while (busy) {
1907 busy = false;
1908
1909 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
1910 AioContext *aio_context = bdrv_get_aio_context(bs);
1911 bool bs_busy;
1912
1913 aio_context_acquire(aio_context);
1914 bdrv_start_throttled_reqs(bs);
1915 bs_busy = bdrv_requests_pending(bs);
1916 bs_busy |= aio_poll(aio_context, bs_busy);
1917 aio_context_release(aio_context);
1918
1919 busy |= bs_busy;
1920 }
1921 }
1922 }
1923
1924 /* make a BlockDriverState anonymous by removing it from the bdrv_states and
1925  * graph_bdrv_states lists.
1926  * Also, NUL-terminate the device_name to prevent a double remove. */
1927 void bdrv_make_anon(BlockDriverState *bs)
1928 {
1929 if (bs->device_name[0] != '\0') {
1930 QTAILQ_REMOVE(&bdrv_states, bs, device_list);
1931 }
1932 bs->device_name[0] = '\0';
1933 if (bs->node_name[0] != '\0') {
1934 QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
1935 }
1936 bs->node_name[0] = '\0';
1937 }
1938
1939 static void bdrv_rebind(BlockDriverState *bs)
1940 {
1941 if (bs->drv && bs->drv->bdrv_rebind) {
1942 bs->drv->bdrv_rebind(bs);
1943 }
1944 }
1945
1946 static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
1947 BlockDriverState *bs_src)
1948 {
1949 /* move some fields that need to stay attached to the device */
1950
1951 /* dev info */
1952 bs_dest->dev_ops = bs_src->dev_ops;
1953 bs_dest->dev_opaque = bs_src->dev_opaque;
1954 bs_dest->dev = bs_src->dev;
1955 bs_dest->guest_block_size = bs_src->guest_block_size;
1956 bs_dest->copy_on_read = bs_src->copy_on_read;
1957
1958 bs_dest->enable_write_cache = bs_src->enable_write_cache;
1959
1960 /* i/o throttled req */
1961 memcpy(&bs_dest->throttle_state,
1962 &bs_src->throttle_state,
1963 sizeof(ThrottleState));
1964 bs_dest->throttled_reqs[0] = bs_src->throttled_reqs[0];
1965 bs_dest->throttled_reqs[1] = bs_src->throttled_reqs[1];
1966 bs_dest->io_limits_enabled = bs_src->io_limits_enabled;
1967
1968 /* r/w error */
1969 bs_dest->on_read_error = bs_src->on_read_error;
1970 bs_dest->on_write_error = bs_src->on_write_error;
1971
1972 /* i/o status */
1973 bs_dest->iostatus_enabled = bs_src->iostatus_enabled;
1974 bs_dest->iostatus = bs_src->iostatus;
1975
1976 /* dirty bitmap */
1977 bs_dest->dirty_bitmaps = bs_src->dirty_bitmaps;
1978
1979 /* reference count */
1980 bs_dest->refcnt = bs_src->refcnt;
1981
1982 /* job */
1983 bs_dest->job = bs_src->job;
1984
1985 /* keep the same entry in bdrv_states */
1986 pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
1987 bs_src->device_name);
1988 bs_dest->device_list = bs_src->device_list;
1989 memcpy(bs_dest->op_blockers, bs_src->op_blockers,
1990 sizeof(bs_dest->op_blockers));
1991 }
1992
1993 /*
1994 * Swap bs contents for two image chains while they are live,
1995 * while keeping required fields on the BlockDriverState that is
1996 * actually attached to a device.
1997 *
1998 * This will modify the BlockDriverState fields, and swap contents
1999 * between bs_new and bs_old. Both bs_new and bs_old are modified.
2000 *
2001 * bs_new is required to be anonymous.
2002 *
2003 * This function does not create any image files.
2004 */
2005 void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
2006 {
2007 BlockDriverState tmp;
2008
2009 /* The code needs to swap the node_name, but simply swapping node_list
2010 * won't work; so first remove the nodes from the graph list, do the swap,
2011 * then insert them back if needed.
2012 */
2013 if (bs_new->node_name[0] != '\0') {
2014 QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list);
2015 }
2016 if (bs_old->node_name[0] != '\0') {
2017 QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list);
2018 }
2019
2020 /* bs_new must be anonymous and shouldn't have anything fancy enabled */
2021 assert(bs_new->device_name[0] == '\0');
2022 assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
2023 assert(bs_new->job == NULL);
2024 assert(bs_new->dev == NULL);
2025 assert(bs_new->io_limits_enabled == false);
2026 assert(!throttle_have_timer(&bs_new->throttle_state));
2027
2028 tmp = *bs_new;
2029 *bs_new = *bs_old;
2030 *bs_old = tmp;
2031
2032 /* there are some fields that should not be swapped, move them back */
2033 bdrv_move_feature_fields(&tmp, bs_old);
2034 bdrv_move_feature_fields(bs_old, bs_new);
2035 bdrv_move_feature_fields(bs_new, &tmp);
2036
2037 /* bs_new shouldn't be in bdrv_states even after the swap! */
2038 assert(bs_new->device_name[0] == '\0');
2039
2040 /* Check a few fields that should remain attached to the device */
2041 assert(bs_new->dev == NULL);
2042 assert(bs_new->job == NULL);
2043 assert(bs_new->io_limits_enabled == false);
2044 assert(!throttle_have_timer(&bs_new->throttle_state));
2045
2046 /* insert the nodes back into the graph node list if needed */
2047 if (bs_new->node_name[0] != '\0') {
2048 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list);
2049 }
2050 if (bs_old->node_name[0] != '\0') {
2051 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list);
2052 }
2053
2054 bdrv_rebind(bs_new);
2055 bdrv_rebind(bs_old);
2056 }
2057
2058 /*
2059 * Add new bs contents at the top of an image chain while the chain is
2060 * live, while keeping required fields on the top layer.
2061 *
2062 * This will modify the BlockDriverState fields, and swap contents
2063 * between bs_new and bs_top. Both bs_new and bs_top are modified.
2064 *
2065 * bs_new is required to be anonymous.
2066 *
2067 * This function does not create any image files.
2068 */
2069 void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
2070 {
2071 bdrv_swap(bs_new, bs_top);
2072
2073 /* After the swap, bs_new holds what used to be bs_top's contents,
2074 * so it becomes bs_top's backing file. */
2075 bdrv_set_backing_hd(bs_top, bs_new);
2076 }
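
/*
 * Illustrative sketch (editor's addition): the external-snapshot pattern
 * that bdrv_append() enables.  bs_new must be a freshly opened overlay
 * that is not attached to anything yet (and therefore still anonymous);
 * the function name is hypothetical.
 */
#if 0
static void example_take_external_snapshot(BlockDriverState *bs_top,
                                           BlockDriverState *bs_new)
{
    bdrv_append(bs_new, bs_top);
    /* The guest-visible BDS (bs_top) now directs writes to the overlay,
     * while its former contents live on as the backing file (bs_new). */
}
#endif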
2077
2078 static void bdrv_delete(BlockDriverState *bs)
2079 {
2080 assert(!bs->dev);
2081 assert(!bs->job);
2082 assert(bdrv_op_blocker_is_empty(bs));
2083 assert(!bs->refcnt);
2084 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
2085
2086 bdrv_close(bs);
2087
2088 /* remove from list, if necessary */
2089 bdrv_make_anon(bs);
2090
2091 g_free(bs);
2092 }
2093
2094 int bdrv_attach_dev(BlockDriverState *bs, void *dev)
2095 /* TODO change to DeviceState *dev when all users are qdevified */
2096 {
2097 if (bs->dev) {
2098 return -EBUSY;
2099 }
2100 bs->dev = dev;
2101 bdrv_iostatus_reset(bs);
2102 return 0;
2103 }
2104
2105 /* TODO qdevified devices don't use this, remove when devices are qdevified */
2106 void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
2107 {
2108 if (bdrv_attach_dev(bs, dev) < 0) {
2109 abort();
2110 }
2111 }
2112
2113 void bdrv_detach_dev(BlockDriverState *bs, void *dev)
2114 /* TODO change to DeviceState *dev when all users are qdevified */
2115 {
2116 assert(bs->dev == dev);
2117 bs->dev = NULL;
2118 bs->dev_ops = NULL;
2119 bs->dev_opaque = NULL;
2120 bs->guest_block_size = 512;
2121 }
2122
2123 /* TODO change to return DeviceState * when all users are qdevified */
2124 void *bdrv_get_attached_dev(BlockDriverState *bs)
2125 {
2126 return bs->dev;
2127 }
2128
2129 void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
2130 void *opaque)
2131 {
2132 bs->dev_ops = ops;
2133 bs->dev_opaque = opaque;
2134 }
2135
2136 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
2137 {
2138 if (bs->dev_ops && bs->dev_ops->change_media_cb) {
2139 bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
2140 bs->dev_ops->change_media_cb(bs->dev_opaque, load);
2141 if (tray_was_closed) {
2142 /* tray open */
2143 qapi_event_send_device_tray_moved(bdrv_get_device_name(bs),
2144 true, &error_abort);
2145 }
2146 if (load) {
2147 /* tray close */
2148 qapi_event_send_device_tray_moved(bdrv_get_device_name(bs),
2149 false, &error_abort);
2150 }
2151 }
2152 }
2153
2154 bool bdrv_dev_has_removable_media(BlockDriverState *bs)
2155 {
2156 return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
2157 }
2158
2159 void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
2160 {
2161 if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
2162 bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
2163 }
2164 }
2165
2166 bool bdrv_dev_is_tray_open(BlockDriverState *bs)
2167 {
2168 if (bs->dev_ops && bs->dev_ops->is_tray_open) {
2169 return bs->dev_ops->is_tray_open(bs->dev_opaque);
2170 }
2171 return false;
2172 }
2173
2174 static void bdrv_dev_resize_cb(BlockDriverState *bs)
2175 {
2176 if (bs->dev_ops && bs->dev_ops->resize_cb) {
2177 bs->dev_ops->resize_cb(bs->dev_opaque);
2178 }
2179 }
2180
2181 bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
2182 {
2183 if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
2184 return bs->dev_ops->is_medium_locked(bs->dev_opaque);
2185 }
2186 return false;
2187 }
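
/*
 * Illustrative sketch (editor's addition): what a removable-media device
 * model's BlockDevOps might look like.  The callbacks shown are exactly
 * the hooks consumed by the bdrv_dev_* helpers above; the device type
 * and its fields are hypothetical.
 */
#if 0
typedef struct ExampleCDRom {
    bool tray_open;
} ExampleCDRom;

static void example_change_media_cb(void *opaque, bool load)
{
    ExampleCDRom *s = opaque;
    s->tray_open = !load;    /* loading a medium closes the tray */
}

static bool example_is_tray_open(void *opaque)
{
    return ((ExampleCDRom *)opaque)->tray_open;
}

static const BlockDevOps example_cdrom_ops = {
    .change_media_cb = example_change_media_cb,
    .is_tray_open    = example_is_tray_open,
};

/* Registered with: bdrv_set_dev_ops(bs, &example_cdrom_ops, cdrom_state); */
#endif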
2188
2189 /*
2190 * Run consistency checks on an image
2191 *
2192 * Returns 0 if the check could be completed (it doesn't mean that the image is
2193 * free of errors) or -errno when an internal error occurred. The results of the
2194 * check are stored in res.
2195 */
2196 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
2197 {
2198 if (bs->drv->bdrv_check == NULL) {
2199 return -ENOTSUP;
2200 }
2201
2202 memset(res, 0, sizeof(*res));
2203 return bs->drv->bdrv_check(bs, res, fix);
2204 }
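
/*
 * Illustrative sketch (editor's addition): how qemu-img-style code would
 * call bdrv_check().  Note the two-level result: the return value only
 * says whether the check ran; the counters in res carry the verdict.
 */
#if 0
static int example_check_image(BlockDriverState *bs)
{
    BdrvCheckResult res;
    int ret = bdrv_check(bs, &res, BDRV_FIX_ERRORS);

    if (ret < 0) {
        return ret;                      /* -ENOTSUP or internal error */
    }
    return res.corruptions ? -EIO : 0;   /* check ran; inspect results */
}
#endif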
2205
2206 #define COMMIT_BUF_SECTORS 2048
2207
2208 /* commit the COW overlay into its backing file */
2209 int bdrv_commit(BlockDriverState *bs)
2210 {
2211 BlockDriver *drv = bs->drv;
2212 int64_t sector, total_sectors, length, backing_length;
2213 int n, ro, open_flags;
2214 int ret = 0;
2215 uint8_t *buf = NULL;
2216 char filename[PATH_MAX];
2217
2218 if (!drv)
2219 return -ENOMEDIUM;
2220
2221 if (!bs->backing_hd) {
2222 return -ENOTSUP;
2223 }
2224
2225 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT, NULL) ||
2226 bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT, NULL)) {
2227 return -EBUSY;
2228 }
2229
2230 ro = bs->backing_hd->read_only;
2231 /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
2232 pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
2233 open_flags = bs->backing_hd->open_flags;
2234
2235 if (ro) {
2236 if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
2237 return -EACCES;
2238 }
2239 }
2240
2241 length = bdrv_getlength(bs);
2242 if (length < 0) {
2243 ret = length;
2244 goto ro_cleanup;
2245 }
2246
2247 backing_length = bdrv_getlength(bs->backing_hd);
2248 if (backing_length < 0) {
2249 ret = backing_length;
2250 goto ro_cleanup;
2251 }
2252
2253 /* If our top snapshot is larger than the backing file image,
2254 * grow the backing file image if possible. If not possible,
2255 * we must return an error */
2256 if (length > backing_length) {
2257 ret = bdrv_truncate(bs->backing_hd, length);
2258 if (ret < 0) {
2259 goto ro_cleanup;
2260 }
2261 }
2262
2263 total_sectors = length >> BDRV_SECTOR_BITS;
2264 buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
2265
2266 for (sector = 0; sector < total_sectors; sector += n) {
2267 ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
2268 if (ret < 0) {
2269 goto ro_cleanup;
2270 }
2271 if (ret) {
2272 ret = bdrv_read(bs, sector, buf, n);
2273 if (ret < 0) {
2274 goto ro_cleanup;
2275 }
2276
2277 ret = bdrv_write(bs->backing_hd, sector, buf, n);
2278 if (ret < 0) {
2279 goto ro_cleanup;
2280 }
2281 }
2282 }
2283
2284 if (drv->bdrv_make_empty) {
2285 ret = drv->bdrv_make_empty(bs);
2286 if (ret < 0) {
2287 goto ro_cleanup;
2288 }
2289 bdrv_flush(bs);
2290 }
2291
2292 /*
2293 * Make sure all data we wrote to the backing device is actually
2294 * stable on disk.
2295 */
2296 if (bs->backing_hd) {
2297 bdrv_flush(bs->backing_hd);
2298 }
2299
2300 ret = 0;
2301 ro_cleanup:
2302 g_free(buf);
2303
2304 if (ro) {
2305 /* ignoring error return here */
2306 bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
2307 }
2308
2309 return ret;
2310 }
2311
2312 int bdrv_commit_all(void)
2313 {
2314 BlockDriverState *bs;
2315
2316 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
2317 AioContext *aio_context = bdrv_get_aio_context(bs);
2318
2319 aio_context_acquire(aio_context);
2320 if (bs->drv && bs->backing_hd) {
2321 int ret = bdrv_commit(bs);
2322 if (ret < 0) {
2323 aio_context_release(aio_context);
2324 return ret;
2325 }
2326 }
2327 aio_context_release(aio_context);
2328 }
2329 return 0;
2330 }
2331
2332 /**
2333 * Remove an active request from the tracked requests list
2334 *
2335 * This function should be called when a tracked request is completing.
2336 */
2337 static void tracked_request_end(BdrvTrackedRequest *req)
2338 {
2339 if (req->serialising) {
2340 req->bs->serialising_in_flight--;
2341 }
2342
2343 QLIST_REMOVE(req, list);
2344 qemu_co_queue_restart_all(&req->wait_queue);
2345 }
2346
2347 /**
2348 * Add an active request to the tracked requests list
2349 */
2350 static void tracked_request_begin(BdrvTrackedRequest *req,
2351 BlockDriverState *bs,
2352 int64_t offset,
2353 unsigned int bytes, bool is_write)
2354 {
2355 *req = (BdrvTrackedRequest){
2356 .bs = bs,
2357 .offset = offset,
2358 .bytes = bytes,
2359 .is_write = is_write,
2360 .co = qemu_coroutine_self(),
2361 .serialising = false,
2362 .overlap_offset = offset,
2363 .overlap_bytes = bytes,
2364 };
2365
2366 qemu_co_queue_init(&req->wait_queue);
2367
2368 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
2369 }
2370
2371 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
2372 {
2373 int64_t overlap_offset = req->offset & ~(align - 1);
2374 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
2375 - overlap_offset;
2376
2377 if (!req->serialising) {
2378 req->bs->serialising_in_flight++;
2379 req->serialising = true;
2380 }
2381
2382 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
2383 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
2384 }
2385
2386 /**
2387 * Round a region to cluster boundaries
2388 */
2389 void bdrv_round_to_clusters(BlockDriverState *bs,
2390 int64_t sector_num, int nb_sectors,
2391 int64_t *cluster_sector_num,
2392 int *cluster_nb_sectors)
2393 {
2394 BlockDriverInfo bdi;
2395
2396 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
2397 *cluster_sector_num = sector_num;
2398 *cluster_nb_sectors = nb_sectors;
2399 } else {
2400 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
2401 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
2402 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
2403 nb_sectors, c);
2404 }
2405 }
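
/*
 * Worked example (editor's addition), assuming bdrv_get_info() reports a
 * 64 KiB cluster size, i.e. c = 128 sectors: a request for sectors
 * [100, 160) is widened to the clusters that contain it.
 */
#if 0
static void example_round_to_clusters(BlockDriverState *bs)
{
    int64_t cluster_sector_num;
    int cluster_nb_sectors;

    bdrv_round_to_clusters(bs, 100, 60,
                           &cluster_sector_num, &cluster_nb_sectors);
    assert(cluster_sector_num == 0);    /* QEMU_ALIGN_DOWN(100, 128) */
    assert(cluster_nb_sectors == 256);  /* QEMU_ALIGN_UP(100 + 60, 128) */
}
#endif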
2406
2407 static int bdrv_get_cluster_size(BlockDriverState *bs)
2408 {
2409 BlockDriverInfo bdi;
2410 int ret;
2411
2412 ret = bdrv_get_info(bs, &bdi);
2413 if (ret < 0 || bdi.cluster_size == 0) {
2414 return bs->request_alignment;
2415 } else {
2416 return bdi.cluster_size;
2417 }
2418 }
2419
2420 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
2421 int64_t offset, unsigned int bytes)
2422 {
2423 /* aaaa bbbb -- the tracked request (a) ends before this one (b) begins */
2424 if (offset >= req->overlap_offset + req->overlap_bytes) {
2425 return false;
2426 }
2427 /* bbbb aaaa -- this request (b) ends before the tracked one (a) begins */
2428 if (req->overlap_offset >= offset + bytes) {
2429 return false;
2430 }
2431 return true;
2432 }
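
/*
 * Worked example (editor's addition): for a tracked request covering
 * bytes [4096, 8192), tracked_request_overlaps() yields:
 *   offset=8192, bytes=512  -> false (starts exactly where it ends)
 *   offset=0,    bytes=4096 -> false (ends exactly where it starts)
 *   offset=8191, bytes=2    -> true  (a single shared byte is enough)
 * The half-open interval arithmetic makes touching-but-disjoint ranges
 * count as non-overlapping.
 */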
2433
2434 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
2435 {
2436 BlockDriverState *bs = self->bs;
2437 BdrvTrackedRequest *req;
2438 bool retry;
2439 bool waited = false;
2440
2441 if (!bs->serialising_in_flight) {
2442 return false;
2443 }
2444
2445 do {
2446 retry = false;
2447 QLIST_FOREACH(req, &bs->tracked_requests, list) {
2448 if (req == self || (!req->serialising && !self->serialising)) {
2449 continue;
2450 }
2451 if (tracked_request_overlaps(req, self->overlap_offset,
2452 self->overlap_bytes))
2453 {
2454 /* Hitting this means there was a reentrant request, for
2455 * example, a block driver issuing nested requests. This must
2456 * never happen since it means deadlock.
2457 */
2458 assert(qemu_coroutine_self() != req->co);
2459
2460 /* If the request is already (indirectly) waiting for us, or
2461 * will wait for us as soon as it wakes up, then just go on
2462 * (instead of producing a deadlock in the former case). */
2463 if (!req->waiting_for) {
2464 self->waiting_for = req;
2465 qemu_co_queue_wait(&req->wait_queue);
2466 self->waiting_for = NULL;
2467 retry = true;
2468 waited = true;
2469 break;
2470 }
2471 }
2472 }
2473 } while (retry);
2474
2475 return waited;
2476 }
2477
2478 /*
2479 * Return values:
2480 * 0 - success
2481 * -EINVAL - backing format specified, but no file
2482 * -ENOSPC - can't update the backing file because no space is left in the
2483 * image file header
2484 * -ENOTSUP - format driver doesn't support changing the backing file
2485 */
2486 int bdrv_change_backing_file(BlockDriverState *bs,
2487 const char *backing_file, const char *backing_fmt)
2488 {
2489 BlockDriver *drv = bs->drv;
2490 int ret;
2491
2492 /* Backing file format doesn't make sense without a backing file */
2493 if (backing_fmt && !backing_file) {
2494 return -EINVAL;
2495 }
2496
2497 if (drv->bdrv_change_backing_file != NULL) {
2498 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
2499 } else {
2500 ret = -ENOTSUP;
2501 }
2502
2503 if (ret == 0) {
2504 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
2505 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
2506 }
2507 return ret;
2508 }
2509
2510 /*
2511 * Finds the image layer in the chain that has 'bs' as its backing file.
2512 *
2513 * active is the current topmost image.
2514 *
2515 * Returns NULL if bs is not found in active's image chain,
2516 * or if active == bs.
2517 */
2518 BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
2519 BlockDriverState *bs)
2520 {
2521 BlockDriverState *overlay = NULL;
2522 BlockDriverState *intermediate;
2523
2524 assert(active != NULL);
2525 assert(bs != NULL);
2526
2527 /* if bs is the same as active, then by definition it has no overlay
2528 */
2529 if (active == bs) {
2530 return NULL;
2531 }
2532
2533 intermediate = active;
2534 while (intermediate->backing_hd) {
2535 if (intermediate->backing_hd == bs) {
2536 overlay = intermediate;
2537 break;
2538 }
2539 intermediate = intermediate->backing_hd;
2540 }
2541
2542 return overlay;
2543 }
2544
2545 typedef struct BlkIntermediateStates {
2546 BlockDriverState *bs;
2547 QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
2548 } BlkIntermediateStates;
2549
2550
2551 /*
2552 * Drops images above 'base' up to and including 'top', and sets the image
2553 * above 'top' to have base as its backing file.
2554 *
2555 * Requires that the overlay to 'top' is opened r/w, so that the backing file
2556 * information in 'bs' can be properly updated.
2557 *
2558 * E.g., this will convert the following chain:
2559 * bottom <- base <- intermediate <- top <- active
2560 *
2561 * to
2562 *
2563 * bottom <- base <- active
2564 *
2565 * It is allowed for bottom==base, in which case it converts:
2566 *
2567 * base <- intermediate <- top <- active
2568 *
2569 * to
2570 *
2571 * base <- active
2572 *
2573 * Error conditions:
2574 * if active == top, that is considered an error
2575 *
2576 */
2577 int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
2578 BlockDriverState *base)
2579 {
2580 BlockDriverState *intermediate;
2581 BlockDriverState *base_bs = NULL;
2582 BlockDriverState *new_top_bs = NULL;
2583 BlkIntermediateStates *intermediate_state, *next;
2584 int ret = -EIO;
2585
2586 QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
2587 QSIMPLEQ_INIT(&states_to_delete);
2588
2589 if (!top->drv || !base->drv) {
2590 goto exit;
2591 }
2592
2593 new_top_bs = bdrv_find_overlay(active, top);
2594
2595 if (new_top_bs == NULL) {
2596 /* we could not find the image above 'top', this is an error */
2597 goto exit;
2598 }
2599
2600 /* special case of new_top_bs->backing_hd already pointing to base - nothing
2601 * to do, no intermediate images */
2602 if (new_top_bs->backing_hd == base) {
2603 ret = 0;
2604 goto exit;
2605 }
2606
2607 intermediate = top;
2608
2609 /* now we will go down through the list, and add each BDS we find
2610 * into our deletion queue, until we hit the 'base'
2611 */
2612 while (intermediate) {
2613 intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
2614 intermediate_state->bs = intermediate;
2615 QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);
2616
2617 if (intermediate->backing_hd == base) {
2618 base_bs = intermediate->backing_hd;
2619 break;
2620 }
2621 intermediate = intermediate->backing_hd;
2622 }
2623 if (base_bs == NULL) {
2624 /* Something went wrong: we did not end at the base. Safely
2625 * unravel everything and exit with an error. */
2626 goto exit;
2627 }
2628
2629 /* success - we can delete the intermediate states, and link top->base */
2630 ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
2631 base_bs->drv ? base_bs->drv->format_name : "");
2632 if (ret) {
2633 goto exit;
2634 }
2635 bdrv_set_backing_hd(new_top_bs, base_bs);
2636
2637 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2638 /* so that bdrv_close() does not recursively close the chain */
2639 bdrv_set_backing_hd(intermediate_state->bs, NULL);
2640 bdrv_unref(intermediate_state->bs);
2641 }
2642 ret = 0;
2643
2644 exit:
2645 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2646 g_free(intermediate_state);
2647 }
2648 return ret;
2649 }
2650
2651
2652 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
2653 size_t size)
2654 {
2655 int64_t len;
2656
2657 if (size > INT_MAX) {
2658 return -EIO;
2659 }
2660
2661 if (!bdrv_is_inserted(bs))
2662 return -ENOMEDIUM;
2663
2664 if (bs->growable)
2665 return 0;
2666
2667 len = bdrv_getlength(bs);
2668
2669 if (offset < 0)
2670 return -EIO;
2671
2672 if ((offset > len) || (len - offset < size))
2673 return -EIO;
2674
2675 return 0;
2676 }
2677
2678 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
2679 int nb_sectors)
2680 {
2681 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2682 return -EIO;
2683 }
2684
2685 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
2686 nb_sectors * BDRV_SECTOR_SIZE);
2687 }
2688
2689 typedef struct RwCo {
2690 BlockDriverState *bs;
2691 int64_t offset;
2692 QEMUIOVector *qiov;
2693 bool is_write;
2694 int ret;
2695 BdrvRequestFlags flags;
2696 } RwCo;
2697
2698 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
2699 {
2700 RwCo *rwco = opaque;
2701
2702 if (!rwco->is_write) {
2703 rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
2704 rwco->qiov->size, rwco->qiov,
2705 rwco->flags);
2706 } else {
2707 rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
2708 rwco->qiov->size, rwco->qiov,
2709 rwco->flags);
2710 }
2711 }
2712
2713 /*
2714 * Process a vectored synchronous request using coroutines
2715 */
2716 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
2717 QEMUIOVector *qiov, bool is_write,
2718 BdrvRequestFlags flags)
2719 {
2720 Coroutine *co;
2721 RwCo rwco = {
2722 .bs = bs,
2723 .offset = offset,
2724 .qiov = qiov,
2725 .is_write = is_write,
2726 .ret = NOT_DONE,
2727 .flags = flags,
2728 };
2729
2730 /**
2731 * In a synchronous call context the vcpu is blocked, so the throttling
2732 * timer will never fire; therefore I/O throttling has to be disabled
2733 * here if it has been enabled.
2734 */
2735 if (bs->io_limits_enabled) {
2736 fprintf(stderr, "Disabling I/O throttling on '%s' due "
2737 "to synchronous I/O.\n", bdrv_get_device_name(bs));
2738 bdrv_io_limits_disable(bs);
2739 }
2740
2741 if (qemu_in_coroutine()) {
2742 /* Fast-path if already in coroutine context */
2743 bdrv_rw_co_entry(&rwco);
2744 } else {
2745 AioContext *aio_context = bdrv_get_aio_context(bs);
2746
2747 co = qemu_coroutine_create(bdrv_rw_co_entry);
2748 qemu_coroutine_enter(co, &rwco);
2749 while (rwco.ret == NOT_DONE) {
2750 aio_poll(aio_context, true);
2751 }
2752 }
2753 return rwco.ret;
2754 }
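
/*
 * Illustrative sketch (editor's addition): the generic shape of the
 * "synchronous wrapper around a coroutine_fn" pattern used by
 * bdrv_prwv_co() above and by bdrv_get_block_status() further down.
 * The types and names are hypothetical; the completion flag is what
 * lets the caller spin in aio_poll() until the coroutine finishes.
 */
#if 0
typedef struct ExampleCoData {
    int ret;
    bool done;
} ExampleCoData;

static void coroutine_fn example_co_entry(void *opaque)
{
    ExampleCoData *data = opaque;
    /* ... do the real work with bdrv_co_* calls ... */
    data->ret = 0;
    data->done = true;
}

static int example_sync_wrapper(BlockDriverState *bs)
{
    ExampleCoData data = { .done = false };

    if (qemu_in_coroutine()) {
        example_co_entry(&data);                 /* fast path */
    } else {
        Coroutine *co = qemu_coroutine_create(example_co_entry);
        qemu_coroutine_enter(co, &data);
        while (!data.done) {                     /* wait for completion */
            aio_poll(bdrv_get_aio_context(bs), true);
        }
    }
    return data.ret;
}
#endif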
2755
2756 /*
2757 * Process a synchronous request using coroutines
2758 */
2759 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
2760 int nb_sectors, bool is_write, BdrvRequestFlags flags)
2761 {
2762 QEMUIOVector qiov;
2763 struct iovec iov = {
2764 .iov_base = (void *)buf,
2765 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
2766 };
2767
2768 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2769 return -EINVAL;
2770 }
2771
2772 qemu_iovec_init_external(&qiov, &iov, 1);
2773 return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
2774 &qiov, is_write, flags);
2775 }
2776
2777 /* return < 0 if error. See bdrv_write() for the return codes */
2778 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
2779 uint8_t *buf, int nb_sectors)
2780 {
2781 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
2782 }
2783
2784 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */
2785 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
2786 uint8_t *buf, int nb_sectors)
2787 {
2788 bool enabled;
2789 int ret;
2790
2791 enabled = bs->io_limits_enabled;
2792 bs->io_limits_enabled = false;
2793 ret = bdrv_read(bs, sector_num, buf, nb_sectors);
2794 bs->io_limits_enabled = enabled;
2795 return ret;
2796 }
2797
2798 /* Return < 0 if error. Important errors are:
2799 *  -EIO        generic I/O error (may happen for all errors)
2800 *  -ENOMEDIUM  no media inserted
2801 *  -EINVAL     invalid sector number or nb_sectors
2802 *  -EACCES     trying to write to a read-only device
2803 */
2804 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
2805 const uint8_t *buf, int nb_sectors)
2806 {
2807 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
2808 }
2809
2810 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
2811 int nb_sectors, BdrvRequestFlags flags)
2812 {
2813 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
2814 BDRV_REQ_ZERO_WRITE | flags);
2815 }
2816
2817 /*
2818 * Completely zero out a block device with the help of bdrv_write_zeroes.
2819 * The operation is sped up by checking the block status and only writing
2820 * zeroes to the device if they currently do not return zeroes. Optional
2821 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
2822 *
2823 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
2824 */
2825 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
2826 {
2827 int64_t target_size;
2828 int64_t ret, nb_sectors, sector_num = 0;
2829 int n;
2830
2831 target_size = bdrv_getlength(bs);
2832 if (target_size < 0) {
2833 return target_size;
2834 }
2835 target_size /= BDRV_SECTOR_SIZE;
2836
2837 for (;;) {
2838 nb_sectors = target_size - sector_num;
2839 if (nb_sectors <= 0) {
2840 return 0;
2841 }
2842 if (nb_sectors > INT_MAX) {
2843 nb_sectors = INT_MAX;
2844 }
2845 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
2846 if (ret < 0) {
2847 error_report("error getting block status at sector %" PRId64 ": %s",
2848 sector_num, strerror(-ret));
2849 return ret;
2850 }
2851 if (ret & BDRV_BLOCK_ZERO) {
2852 sector_num += n;
2853 continue;
2854 }
2855 ret = bdrv_write_zeroes(bs, sector_num, n, flags);
2856 if (ret < 0) {
2857 error_report("error writing zeroes at sector %" PRId64 ": %s",
2858 sector_num, strerror(-ret));
2859 return ret;
2860 }
2861 sector_num += n;
2862 }
2863 }
2864
2865 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
2866 {
2867 QEMUIOVector qiov;
2868 struct iovec iov = {
2869 .iov_base = (void *)buf,
2870 .iov_len = bytes,
2871 };
2872 int ret;
2873
2874 if (bytes < 0) {
2875 return -EINVAL;
2876 }
2877
2878 qemu_iovec_init_external(&qiov, &iov, 1);
2879 ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
2880 if (ret < 0) {
2881 return ret;
2882 }
2883
2884 return bytes;
2885 }
2886
2887 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
2888 {
2889 int ret;
2890
2891 ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
2892 if (ret < 0) {
2893 return ret;
2894 }
2895
2896 return qiov->size;
2897 }
2898
2899 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
2900 const void *buf, int bytes)
2901 {
2902 QEMUIOVector qiov;
2903 struct iovec iov = {
2904 .iov_base = (void *) buf,
2905 .iov_len = bytes,
2906 };
2907
2908 if (bytes < 0) {
2909 return -EINVAL;
2910 }
2911
2912 qemu_iovec_init_external(&qiov, &iov, 1);
2913 return bdrv_pwritev(bs, offset, &qiov);
2914 }
2915
2916 /*
2917 * Writes to the file and ensures that no writes are reordered across this
2918 * request (acts as a barrier)
2919 *
2920 * Returns 0 on success, -errno in error cases.
2921 */
2922 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
2923 const void *buf, int count)
2924 {
2925 int ret;
2926
2927 ret = bdrv_pwrite(bs, offset, buf, count);
2928 if (ret < 0) {
2929 return ret;
2930 }
2931
2932 /* No flush needed for cache modes that already do it */
2933 if (bs->enable_write_cache) {
2934 bdrv_flush(bs);
2935 }
2936
2937 return 0;
2938 }
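
/*
 * Illustrative sketch (editor's addition): the classic bdrv_pwrite_sync()
 * user is format-driver metadata that must be stable on disk before any
 * dependent write is issued, e.g. an image header update.  The offset
 * and helper name are hypothetical.
 */
#if 0
static int example_update_header(BlockDriverState *bs,
                                 const void *header, int len)
{
    /* Barrier semantics: no later write can be reordered before this. */
    return bdrv_pwrite_sync(bs, 0, header, len);
}
#endif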
2939
2940 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
2941 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
2942 {
2943 /* Perform I/O through a temporary buffer so that users who scribble over
2944 * their read buffer while the operation is in progress do not end up
2945 * modifying the image file. This is critical for zero-copy guest I/O
2946 * where anything might happen inside guest memory.
2947 */
2948 void *bounce_buffer;
2949
2950 BlockDriver *drv = bs->drv;
2951 struct iovec iov;
2952 QEMUIOVector bounce_qiov;
2953 int64_t cluster_sector_num;
2954 int cluster_nb_sectors;
2955 size_t skip_bytes;
2956 int ret;
2957
2958 /* Cover the entire cluster so that no additional backing file I/O is
2959 * required when allocating the cluster in the image file.
2960 */
2961 bdrv_round_to_clusters(bs, sector_num, nb_sectors,
2962 &cluster_sector_num, &cluster_nb_sectors);
2963
2964 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
2965 cluster_sector_num, cluster_nb_sectors);
2966
2967 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
2968 iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
2969 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
2970
2971 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
2972 &bounce_qiov);
2973 if (ret < 0) {
2974 goto err;
2975 }
2976
2977 if (drv->bdrv_co_write_zeroes &&
2978 buffer_is_zero(bounce_buffer, iov.iov_len)) {
2979 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
2980 cluster_nb_sectors, 0);
2981 } else {
2982 /* This does not change the data on the disk, it is not necessary
2983 * to flush even in cache=writethrough mode.
2984 */
2985 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
2986 &bounce_qiov);
2987 }
2988
2989 if (ret < 0) {
2990 /* It might be okay to ignore write errors for guest requests. If this
2991 * is a deliberate copy-on-read then we don't want to ignore the error.
2992 * Simply report it in all cases.
2993 */
2994 goto err;
2995 }
2996
2997 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
2998 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
2999 nb_sectors * BDRV_SECTOR_SIZE);
3000
3001 err:
3002 qemu_vfree(bounce_buffer);
3003 return ret;
3004 }
3005
3006 /*
3007 * Forwards an already correctly aligned request to the BlockDriver. This
3008 * handles copy on read and zeroing after EOF; any other features must be
3009 * implemented by the caller.
3010 */
3011 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
3012 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3013 int64_t align, QEMUIOVector *qiov, int flags)
3014 {
3015 BlockDriver *drv = bs->drv;
3016 int ret;
3017
3018 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3019 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3020
3021 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3022 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3023
3024 /* Handle Copy on Read and associated serialisation */
3025 if (flags & BDRV_REQ_COPY_ON_READ) {
3026 /* If we touch the same cluster it counts as an overlap. This
3027 * guarantees that allocating writes will be serialized and not race
3028 * with each other for the same cluster. For example, in copy-on-read
3029 * it ensures that the CoR read and write operations are atomic and
3030 * guest writes cannot interleave between them. */
3031 mark_request_serialising(req, bdrv_get_cluster_size(bs));
3032 }
3033
3034 wait_serialising_requests(req);
3035
3036 if (flags & BDRV_REQ_COPY_ON_READ) {
3037 int pnum;
3038
3039 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
3040 if (ret < 0) {
3041 goto out;
3042 }
3043
3044 if (!ret || pnum != nb_sectors) {
3045 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
3046 goto out;
3047 }
3048 }
3049
3050 /* Forward the request to the BlockDriver */
3051 if (!(bs->zero_beyond_eof && bs->growable)) {
3052 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
3053 } else {
3054 /* Read zeroes after EOF of growable BDSes */
3055 int64_t len, total_sectors, max_nb_sectors;
3056
3057 len = bdrv_getlength(bs);
3058 if (len < 0) {
3059 ret = len;
3060 goto out;
3061 }
3062
3063 total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE);
3064 max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
3065 align >> BDRV_SECTOR_BITS);
3066 if (max_nb_sectors > 0) {
3067 ret = drv->bdrv_co_readv(bs, sector_num,
3068 MIN(nb_sectors, max_nb_sectors), qiov);
3069 } else {
3070 ret = 0;
3071 }
3072
3073 /* Reading beyond end of file is supposed to produce zeroes */
3074 if (ret == 0 && total_sectors < sector_num + nb_sectors) {
3075 uint64_t offset = MAX(0, total_sectors - sector_num);
3076 uint64_t bytes = (sector_num + nb_sectors - offset) *
3077 BDRV_SECTOR_SIZE;
3078 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
3079 }
3080 }
3081
3082 out:
3083 return ret;
3084 }
3085
3086 /*
3087 * Handle a read request in coroutine context
3088 */
3089 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
3090 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3091 BdrvRequestFlags flags)
3092 {
3093 BlockDriver *drv = bs->drv;
3094 BdrvTrackedRequest req;
3095
3096 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3097 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3098 uint8_t *head_buf = NULL;
3099 uint8_t *tail_buf = NULL;
3100 QEMUIOVector local_qiov;
3101 bool use_local_qiov = false;
3102 int ret;
3103
3104 if (!drv) {
3105 return -ENOMEDIUM;
3106 }
3107 if (bdrv_check_byte_request(bs, offset, bytes)) {
3108 return -EIO;
3109 }
3110
3111 if (bs->copy_on_read) {
3112 flags |= BDRV_REQ_COPY_ON_READ;
3113 }
3114
3115 /* throttling disk I/O */
3116 if (bs->io_limits_enabled) {
3117 bdrv_io_limits_intercept(bs, bytes, false);
3118 }
3119
3120 /* Align read if necessary by padding qiov */
3121 if (offset & (align - 1)) {
3122 head_buf = qemu_blockalign(bs, align);
3123 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3124 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3125 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3126 use_local_qiov = true;
3127
3128 bytes += offset & (align - 1);
3129 offset = offset & ~(align - 1);
3130 }
3131
3132 if ((offset + bytes) & (align - 1)) {
3133 if (!use_local_qiov) {
3134 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3135 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3136 use_local_qiov = true;
3137 }
3138 tail_buf = qemu_blockalign(bs, align);
3139 qemu_iovec_add(&local_qiov, tail_buf,
3140 align - ((offset + bytes) & (align - 1)));
3141
3142 bytes = ROUND_UP(bytes, align);
3143 }
3144
3145 tracked_request_begin(&req, bs, offset, bytes, false);
3146 ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
3147 use_local_qiov ? &local_qiov : qiov,
3148 flags);
3149 tracked_request_end(&req);
3150
3151 if (use_local_qiov) {
3152 qemu_iovec_destroy(&local_qiov);
3153 qemu_vfree(head_buf);
3154 qemu_vfree(tail_buf);
3155 }
3156
3157 return ret;
3158 }
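
/*
 * Worked example (editor's addition): with a 4096-byte request_alignment,
 * a read of bytes [1536, 2560) is widened by the code above to the
 * aligned range [0, 4096).  1536 bytes of head padding and 1536 bytes of
 * tail padding are wrapped around the caller's buffers in local_qiov,
 * the aligned request is issued, and the padding is discarded when
 * head_buf and tail_buf are freed.
 */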
3159
3160 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
3161 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3162 BdrvRequestFlags flags)
3163 {
3164 if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
3165 return -EINVAL;
3166 }
3167
3168 return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
3169 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3170 }
3171
3172 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
3173 int nb_sectors, QEMUIOVector *qiov)
3174 {
3175 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
3176
3177 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
3178 }
3179
3180 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
3181 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
3182 {
3183 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
3184
3185 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
3186 BDRV_REQ_COPY_ON_READ);
3187 }
3188
3189 /* if no limit is specified in the BlockLimits use a default
3190 * of 32768 512-byte sectors (16 MiB) per request.
3191 */
3192 #define MAX_WRITE_ZEROES_DEFAULT 32768
3193
3194 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
3195 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
3196 {
3197 BlockDriver *drv = bs->drv;
3198 QEMUIOVector qiov;
3199 struct iovec iov = {0};
3200 int ret = 0;
3201
3202 int max_write_zeroes = bs->bl.max_write_zeroes ?
3203 bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;
3204
3205 while (nb_sectors > 0 && !ret) {
3206 int num = nb_sectors;
3207
3208 /* Align request. Block drivers can expect the "bulk" of the request
3209 * to be aligned.
3210 */
3211 if (bs->bl.write_zeroes_alignment
3212 && num > bs->bl.write_zeroes_alignment) {
3213 if (sector_num % bs->bl.write_zeroes_alignment != 0) {
3214 /* Make a small request up to the first aligned sector. */
3215 num = bs->bl.write_zeroes_alignment;
3216 num -= sector_num % bs->bl.write_zeroes_alignment;
3217 } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
3218 /* Shorten the request to the last aligned sector. num cannot
3219 * underflow because num > bs->bl.write_zeroes_alignment.
3220 */
3221 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
3222 }
3223 }
3224
3225 /* limit request size */
3226 if (num > max_write_zeroes) {
3227 num = max_write_zeroes;
3228 }
3229
3230 ret = -ENOTSUP;
3231 /* First try the efficient write zeroes operation */
3232 if (drv->bdrv_co_write_zeroes) {
3233 ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
3234 }
3235
3236 if (ret == -ENOTSUP) {
3237 /* Fall back to bounce buffer if write zeroes is unsupported */
3238 iov.iov_len = num * BDRV_SECTOR_SIZE;
3239 if (iov.iov_base == NULL) {
3240 iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE);
3241 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
3242 }
3243 qemu_iovec_init_external(&qiov, &iov, 1);
3244
3245 ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
3246
3247 /* Keep the bounce buffer around if it is big enough for
3248 * all future requests.
3249 */
3250 if (num < max_write_zeroes) {
3251 qemu_vfree(iov.iov_base);
3252 iov.iov_base = NULL;
3253 }
3254 }
3255
3256 sector_num += num;
3257 nb_sectors -= num;
3258 }
3259
3260 qemu_vfree(iov.iov_base);
3261 return ret;
3262 }
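
/*
 * Worked example (editor's addition): with write_zeroes_alignment = 8
 * sectors, a request for sectors [5, 30) is carved by the loop above
 * into three pieces so that the middle one is aligned on both ends:
 *   [5, 8)   head -- "small request up to the first aligned sector"
 *   [8, 24)  bulk -- aligned, eligible for the efficient zero operation
 *   [24, 30) tail -- final pass, shorter than the alignment
 * Head and tail may fall back to the bounce-buffer path.
 */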
3263
3264 /*
3265 * Forwards an already correctly aligned write request to the BlockDriver.
3266 */
3267 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
3268 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3269 QEMUIOVector *qiov, int flags)
3270 {
3271 BlockDriver *drv = bs->drv;
3272 bool waited;
3273 int ret;
3274
3275 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3276 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3277
3278 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3279 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3280
3281 waited = wait_serialising_requests(req);
3282 assert(!waited || !req->serialising);
3283 assert(req->overlap_offset <= offset);
3284 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
3285
3286 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
3287
3288 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
3289 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
3290 qemu_iovec_is_zero(qiov)) {
3291 flags |= BDRV_REQ_ZERO_WRITE;
3292 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
3293 flags |= BDRV_REQ_MAY_UNMAP;
3294 }
3295 }
3296
3297 if (ret < 0) {
3298 /* Do nothing, write notifier decided to fail this request */
3299 } else if (flags & BDRV_REQ_ZERO_WRITE) {
3300 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
3301 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
3302 } else {
3303 BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
3304 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
3305 }
3306 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);
3307
3308 if (ret == 0 && !bs->enable_write_cache) {
3309 ret = bdrv_co_flush(bs);
3310 }
3311
3312 bdrv_set_dirty(bs, sector_num, nb_sectors);
3313
3314 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
3315 bs->wr_highest_sector = sector_num + nb_sectors - 1;
3316 }
3317 if (bs->growable && ret >= 0) {
3318 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
3319 }
3320
3321 return ret;
3322 }
3323
3324 /*
3325 * Handle a write request in coroutine context
3326 */
3327 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
3328 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3329 BdrvRequestFlags flags)
3330 {
3331 BdrvTrackedRequest req;
3332 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3333 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3334 uint8_t *head_buf = NULL;
3335 uint8_t *tail_buf = NULL;
3336 QEMUIOVector local_qiov;
3337 bool use_local_qiov = false;
3338 int ret;
3339
3340 if (!bs->drv) {
3341 return -ENOMEDIUM;
3342 }
3343 if (bs->read_only) {
3344 return -EACCES;
3345 }
3346 if (bdrv_check_byte_request(bs, offset, bytes)) {
3347 return -EIO;
3348 }
3349
3350 /* throttling disk I/O */
3351 if (bs->io_limits_enabled) {
3352 bdrv_io_limits_intercept(bs, bytes, true);
3353 }
3354
3355 /*
3356 * Align write if necessary by performing a read-modify-write cycle.
3357 * Pad qiov with the read parts and be sure to have a tracked request not
3358 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
3359 */
3360 tracked_request_begin(&req, bs, offset, bytes, true);
3361
3362 if (offset & (align - 1)) {
3363 QEMUIOVector head_qiov;
3364 struct iovec head_iov;
3365
3366 mark_request_serialising(&req, align);
3367 wait_serialising_requests(&req);
3368
3369 head_buf = qemu_blockalign(bs, align);
3370 head_iov = (struct iovec) {
3371 .iov_base = head_buf,
3372 .iov_len = align,
3373 };
3374 qemu_iovec_init_external(&head_qiov, &head_iov, 1);
3375
3376 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
3377 ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
3378 align, &head_qiov, 0);
3379 if (ret < 0) {
3380 goto fail;
3381 }
3382 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
3383
3384 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3385 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3386 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3387 use_local_qiov = true;
3388
3389 bytes += offset & (align - 1);
3390 offset = offset & ~(align - 1);
3391 }
3392
3393 if ((offset + bytes) & (align - 1)) {
3394 QEMUIOVector tail_qiov;
3395 struct iovec tail_iov;
3396 size_t tail_bytes;
3397 bool waited;
3398
3399 mark_request_serialising(&req, align);
3400 waited = wait_serialising_requests(&req);
3401 assert(!waited || !use_local_qiov);
3402
3403 tail_buf = qemu_blockalign(bs, align);
3404 tail_iov = (struct iovec) {
3405 .iov_base = tail_buf,
3406 .iov_len = align,
3407 };
3408 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
3409
3410 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
3411 ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
3412 align, &tail_qiov, 0);
3413 if (ret < 0) {
3414 goto fail;
3415 }
3416 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
3417
3418 if (!use_local_qiov) {
3419 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3420 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3421 use_local_qiov = true;
3422 }
3423
3424 tail_bytes = (offset + bytes) & (align - 1);
3425 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
3426
3427 bytes = ROUND_UP(bytes, align);
3428 }
3429
3430 ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
3431 use_local_qiov ? &local_qiov : qiov,
3432 flags);
3433
3434 fail:
3435 tracked_request_end(&req);
3436
3437 if (use_local_qiov) {
3438 qemu_iovec_destroy(&local_qiov);
3439 }
3440 qemu_vfree(head_buf);
3441 qemu_vfree(tail_buf);
3442
3443 return ret;
3444 }
3445
3446 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
3447 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3448 BdrvRequestFlags flags)
3449 {
3450 if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
3451 return -EINVAL;
3452 }
3453
3454 return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
3455 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3456 }
3457
3458 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
3459 int nb_sectors, QEMUIOVector *qiov)
3460 {
3461 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
3462
3463 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
3464 }
3465
3466 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
3467 int64_t sector_num, int nb_sectors,
3468 BdrvRequestFlags flags)
3469 {
3470 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
3471
3472 if (!(bs->open_flags & BDRV_O_UNMAP)) {
3473 flags &= ~BDRV_REQ_MAY_UNMAP;
3474 }
3475
3476 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
3477 BDRV_REQ_ZERO_WRITE | flags);
3478 }
3479
3480 /**
3481 * Truncate file to 'offset' bytes (needed only for file protocols)
3482 */
3483 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
3484 {
3485 BlockDriver *drv = bs->drv;
3486 int ret;
3487 if (!drv)
3488 return -ENOMEDIUM;
3489 if (!drv->bdrv_truncate)
3490 return -ENOTSUP;
3491 if (bs->read_only)
3492 return -EACCES;
3493 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) {
3494 return -EBUSY;
3495 }
3496 ret = drv->bdrv_truncate(bs, offset);
3497 if (ret == 0) {
3498 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3499 bdrv_dev_resize_cb(bs);
3500 }
3501 return ret;
3502 }
3503
3504 /**
3505 * Length of an allocated file in bytes. Sparse files are counted by actual
3506 * allocated space. Return < 0 if error or unknown.
3507 */
3508 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
3509 {
3510 BlockDriver *drv = bs->drv;
3511 if (!drv) {
3512 return -ENOMEDIUM;
3513 }
3514 if (drv->bdrv_get_allocated_file_size) {
3515 return drv->bdrv_get_allocated_file_size(bs);
3516 }
3517 if (bs->file) {
3518 return bdrv_get_allocated_file_size(bs->file);
3519 }
3520 return -ENOTSUP;
3521 }
3522
3523 /**
3524 * Length of a file in bytes. Return < 0 if error or unknown.
3525 */
3526 int64_t bdrv_getlength(BlockDriverState *bs)
3527 {
3528 BlockDriver *drv = bs->drv;
3529 if (!drv)
3530 return -ENOMEDIUM;
3531
3532 if (drv->has_variable_length) {
3533 int ret = refresh_total_sectors(bs, bs->total_sectors);
3534 if (ret < 0) {
3535 return ret;
3536 }
3537 }
3538 return bs->total_sectors * BDRV_SECTOR_SIZE;
3539 }
3540
3541 /* return 0 as number of sectors if no device present or error */
3542 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
3543 {
3544 int64_t length;
3545 length = bdrv_getlength(bs);
3546 if (length < 0)
3547 length = 0;
3548 else
3549 length = length >> BDRV_SECTOR_BITS;
3550 *nb_sectors_ptr = length;
3551 }
3552
3553 void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
3554 BlockdevOnError on_write_error)
3555 {
3556 bs->on_read_error = on_read_error;
3557 bs->on_write_error = on_write_error;
3558 }
3559
3560 BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
3561 {
3562 return is_read ? bs->on_read_error : bs->on_write_error;
3563 }
3564
3565 BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
3566 {
3567 BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;
3568
3569 switch (on_err) {
3570 case BLOCKDEV_ON_ERROR_ENOSPC:
3571 return (error == ENOSPC) ?
3572 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
3573 case BLOCKDEV_ON_ERROR_STOP:
3574 return BLOCK_ERROR_ACTION_STOP;
3575 case BLOCKDEV_ON_ERROR_REPORT:
3576 return BLOCK_ERROR_ACTION_REPORT;
3577 case BLOCKDEV_ON_ERROR_IGNORE:
3578 return BLOCK_ERROR_ACTION_IGNORE;
3579 default:
3580 abort();
3581 }
3582 }
3583
3584 /* This is done by device models because, while the block layer knows
3585 * about the error, it does not know whether an operation comes from
3586 * the device or the block layer (from a job, for example).
3587 */
3588 void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
3589 bool is_read, int error)
3590 {
3591 assert(error >= 0);
3592
3593 if (action == BLOCK_ERROR_ACTION_STOP) {
3594 /* First set the iostatus, so that "info block" returns an iostatus
3595 * that matches the events raised so far (an additional error iostatus
3596 * is fine, but not a lost one).
3597 */
3598 bdrv_iostatus_set_err(bs, error);
3599
3600 /* Then raise the request to stop the VM and the event.
3601 * qemu_system_vmstop_request_prepare has two effects. First,
3602 * it ensures that the STOP event always comes after the
3603 * BLOCK_IO_ERROR event. Second, it ensures that even if management
3604 * can observe the STOP event and do a "cont" before the STOP
3605 * event is issued, the VM will not stop. In this case, vm_start()
3606 * also ensures that the STOP/RESUME pair of events is emitted.
3607 */
3608 qemu_system_vmstop_request_prepare();
3609 qapi_event_send_block_io_error(bdrv_get_device_name(bs),
3610 is_read ? IO_OPERATION_TYPE_READ :
3611 IO_OPERATION_TYPE_WRITE,
3612 action, &error_abort);
3613 qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
3614 } else {
3615 qapi_event_send_block_io_error(bdrv_get_device_name(bs),
3616 is_read ? IO_OPERATION_TYPE_READ :
3617 IO_OPERATION_TYPE_WRITE,
3618 action, &error_abort);
3619 }
3620 }
3621
3622 int bdrv_is_read_only(BlockDriverState *bs)
3623 {
3624 return bs->read_only;
3625 }
3626
3627 int bdrv_is_sg(BlockDriverState *bs)
3628 {
3629 return bs->sg;
3630 }
3631
3632 int bdrv_enable_write_cache(BlockDriverState *bs)
3633 {
3634 return bs->enable_write_cache;
3635 }
3636
3637 void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
3638 {
3639 bs->enable_write_cache = wce;
3640
3641 /* so a reopen() will preserve wce */
3642 if (wce) {
3643 bs->open_flags |= BDRV_O_CACHE_WB;
3644 } else {
3645 bs->open_flags &= ~BDRV_O_CACHE_WB;
3646 }
3647 }
3648
3649 int bdrv_is_encrypted(BlockDriverState *bs)
3650 {
3651 if (bs->backing_hd && bs->backing_hd->encrypted)
3652 return 1;
3653 return bs->encrypted;
3654 }
3655
3656 int bdrv_key_required(BlockDriverState *bs)
3657 {
3658 BlockDriverState *backing_hd = bs->backing_hd;
3659
3660 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
3661 return 1;
3662 return (bs->encrypted && !bs->valid_key);
3663 }
3664
3665 int bdrv_set_key(BlockDriverState *bs, const char *key)
3666 {
3667 int ret;
3668 if (bs->backing_hd && bs->backing_hd->encrypted) {
3669 ret = bdrv_set_key(bs->backing_hd, key);
3670 if (ret < 0)
3671 return ret;
3672 if (!bs->encrypted)
3673 return 0;
3674 }
3675 if (!bs->encrypted) {
3676 return -EINVAL;
3677 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
3678 return -ENOMEDIUM;
3679 }
3680 ret = bs->drv->bdrv_set_key(bs, key);
3681 if (ret < 0) {
3682 bs->valid_key = 0;
3683 } else if (!bs->valid_key) {
3684 bs->valid_key = 1;
3685 /* call the change callback now, we skipped it on open */
3686 bdrv_dev_change_media_cb(bs, true);
3687 }
3688 return ret;
3689 }
3690
3691 const char *bdrv_get_format_name(BlockDriverState *bs)
3692 {
3693 return bs->drv ? bs->drv->format_name : NULL;
3694 }
3695
3696 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
3697 void *opaque)
3698 {
3699 BlockDriver *drv;
3700 int count = 0;
3701 const char **formats = NULL;
3702
3703 QLIST_FOREACH(drv, &bdrv_drivers, list) {
3704 if (drv->format_name) {
3705 bool found = false;
3706 int i = count;
3707 while (formats && i && !found) {
3708 found = !strcmp(formats[--i], drv->format_name);
3709 }
3710
3711 if (!found) {
3712 formats = g_realloc(formats, (count + 1) * sizeof(char *));
3713 formats[count++] = drv->format_name;
3714 it(opaque, drv->format_name);
3715 }
3716 }
3717 }
3718 g_free(formats);
3719 }
3720
3721 /* Find a block backend by its device name */
3722 BlockDriverState *bdrv_find(const char *name)
3723 {
3724 BlockDriverState *bs;
3725
3726 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3727 if (!strcmp(name, bs->device_name)) {
3728 return bs;
3729 }
3730 }
3731 return NULL;
3732 }
3733
3734 /* Find a node in the BDS graph by its node name */
3735 BlockDriverState *bdrv_find_node(const char *node_name)
3736 {
3737 BlockDriverState *bs;
3738
3739 assert(node_name);
3740
3741 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3742 if (!strcmp(node_name, bs->node_name)) {
3743 return bs;
3744 }
3745 }
3746 return NULL;
3747 }
3748
3749 /* Put this QMP function here so it can access the static graph_bdrv_states. */
3750 BlockDeviceInfoList *bdrv_named_nodes_list(void)
3751 {
3752 BlockDeviceInfoList *list, *entry;
3753 BlockDriverState *bs;
3754
3755 list = NULL;
3756 QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
3757 entry = g_malloc0(sizeof(*entry));
3758 entry->value = bdrv_block_device_info(bs);
3759 entry->next = list;
3760 list = entry;
3761 }
3762
3763 return list;
3764 }
3765
3766 BlockDriverState *bdrv_lookup_bs(const char *device,
3767 const char *node_name,
3768 Error **errp)
3769 {
3770 BlockDriverState *bs = NULL;
3771
3772 if (device) {
3773 bs = bdrv_find(device);
3774
3775 if (bs) {
3776 return bs;
3777 }
3778 }
3779
3780 if (node_name) {
3781 bs = bdrv_find_node(node_name);
3782
3783 if (bs) {
3784 return bs;
3785 }
3786 }
3787
3788 error_setg(errp, "Cannot find device=%s nor node_name=%s",
3789 device ? device : "",
3790 node_name ? node_name : "");
3791 return NULL;
3792 }
3793
3794 BlockDriverState *bdrv_next(BlockDriverState *bs)
3795 {
3796 if (!bs) {
3797 return QTAILQ_FIRST(&bdrv_states);
3798 }
3799 return QTAILQ_NEXT(bs, device_list);
3800 }
3801
3802 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
3803 {
3804 BlockDriverState *bs;
3805
3806 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3807 it(opaque, bs);
3808 }
3809 }
3810
3811 const char *bdrv_get_device_name(BlockDriverState *bs)
3812 {
3813 return bs->device_name;
3814 }
3815
3816 int bdrv_get_flags(BlockDriverState *bs)
3817 {
3818 return bs->open_flags;
3819 }
3820
3821 int bdrv_flush_all(void)
3822 {
3823 BlockDriverState *bs;
3824 int result = 0;
3825
3826 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
3827 AioContext *aio_context = bdrv_get_aio_context(bs);
3828 int ret;
3829
3830 aio_context_acquire(aio_context);
3831 ret = bdrv_flush(bs);
3832 if (ret < 0 && !result) {
3833 result = ret;
3834 }
3835 aio_context_release(aio_context);
3836 }
3837
3838 return result;
3839 }
3840
3841 int bdrv_has_zero_init_1(BlockDriverState *bs)
3842 {
3843 return 1;
3844 }
3845
3846 int bdrv_has_zero_init(BlockDriverState *bs)
3847 {
3848 assert(bs->drv);
3849
3850 /* If BS is a copy-on-write image, it is initialized to
3851 * the contents of the base image, which may not be zeroes. */
3852 if (bs->backing_hd) {
3853 return 0;
3854 }
3855 if (bs->drv->bdrv_has_zero_init) {
3856 return bs->drv->bdrv_has_zero_init(bs);
3857 }
3858
3859 /* safe default */
3860 return 0;
3861 }
3862
3863 bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs)
3864 {
3865 BlockDriverInfo bdi;
3866
3867 if (bs->backing_hd) {
3868 return false;
3869 }
3870
3871 if (bdrv_get_info(bs, &bdi) == 0) {
3872 return bdi.unallocated_blocks_are_zero;
3873 }
3874
3875 return false;
3876 }
3877
3878 bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
3879 {
3880 BlockDriverInfo bdi;
3881
3882 if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
3883 return false;
3884 }
3885
3886 if (bdrv_get_info(bs, &bdi) == 0) {
3887 return bdi.can_write_zeroes_with_unmap;
3888 }
3889
3890 return false;
3891 }
3892
3893 typedef struct BdrvCoGetBlockStatusData {
3894 BlockDriverState *bs;
3895 BlockDriverState *base;
3896 int64_t sector_num;
3897 int nb_sectors;
3898 int *pnum;
3899 int64_t ret;
3900 bool done;
3901 } BdrvCoGetBlockStatusData;
3902
3903 /*
3904 * Returns the allocation status of the specified sector. Drivers
3905 * not implementing the functionality are assumed to not support backing files,
3906 * hence all their sectors are reported as allocated.
3907 *
3908 * If 'sector_num' is beyond the end of the disk image the return value is 0
3909 * and 'pnum' is set to 0.
3910 *
3911 * 'pnum' is set to the number of sectors (including and immediately following
3912 * the specified sector) that are known to be in the same
3913 * allocated/unallocated state.
3914 *
3915 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
3916 * beyond the end of the disk image it will be clamped.
3917 */
3918 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
3919 int64_t sector_num,
3920 int nb_sectors, int *pnum)
3921 {
3922 int64_t length;
3923 int64_t n;
3924 int64_t ret, ret2;
3925
3926 length = bdrv_getlength(bs);
3927 if (length < 0) {
3928 return length;
3929 }
3930
3931 if (sector_num >= (length >> BDRV_SECTOR_BITS)) {
3932 *pnum = 0;
3933 return 0;
3934 }
3935
3936 n = bs->total_sectors - sector_num;
3937 if (n < nb_sectors) {
3938 nb_sectors = n;
3939 }
3940
3941 if (!bs->drv->bdrv_co_get_block_status) {
3942 *pnum = nb_sectors;
3943 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
3944 if (bs->drv->protocol_name) {
3945 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
3946 }
3947 return ret;
3948 }
3949
3950 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
3951 if (ret < 0) {
3952 *pnum = 0;
3953 return ret;
3954 }
3955
3956 if (ret & BDRV_BLOCK_RAW) {
3957 assert(ret & BDRV_BLOCK_OFFSET_VALID);
3958 return bdrv_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3959 *pnum, pnum);
3960 }
3961
3962 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
3963 ret |= BDRV_BLOCK_ALLOCATED;
3964 }
3965
3966 if (!(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO)) {
3967 if (bdrv_unallocated_blocks_are_zero(bs)) {
3968 ret |= BDRV_BLOCK_ZERO;
3969 } else if (bs->backing_hd) {
3970 BlockDriverState *bs2 = bs->backing_hd;
3971 int64_t length2 = bdrv_getlength(bs2);
3972 if (length2 >= 0 && sector_num >= (length2 >> BDRV_SECTOR_BITS)) {
3973 ret |= BDRV_BLOCK_ZERO;
3974 }
3975 }
3976 }
3977
3978 if (bs->file &&
3979 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
3980 (ret & BDRV_BLOCK_OFFSET_VALID)) {
3981 ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
3982 *pnum, pnum);
3983 if (ret2 >= 0) {
3984 /* Ignore errors. This is just providing extra information; it
3985 * is useful but not necessary.
3986 */
3987 ret |= (ret2 & BDRV_BLOCK_ZERO);
3988 }
3989 }
3990
3991 return ret;
3992 }
3993
3994 /* Coroutine wrapper for bdrv_get_block_status() */
3995 static void coroutine_fn bdrv_get_block_status_co_entry(void *opaque)
3996 {
3997 BdrvCoGetBlockStatusData *data = opaque;
3998 BlockDriverState *bs = data->bs;
3999
4000 data->ret = bdrv_co_get_block_status(bs, data->sector_num, data->nb_sectors,
4001 data->pnum);
4002 data->done = true;
4003 }
4004
4005 /*
4006 * Synchronous wrapper around bdrv_co_get_block_status().
4007 *
4008 * See bdrv_co_get_block_status() for details.
4009 */
4010 int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
4011 int nb_sectors, int *pnum)
4012 {
4013 Coroutine *co;
4014 BdrvCoGetBlockStatusData data = {
4015 .bs = bs,
4016 .sector_num = sector_num,
4017 .nb_sectors = nb_sectors,
4018 .pnum = pnum,
4019 .done = false,
4020 };
4021
4022 if (qemu_in_coroutine()) {
4023 /* Fast-path if already in coroutine context */
4024 bdrv_get_block_status_co_entry(&data);
4025 } else {
4026 AioContext *aio_context = bdrv_get_aio_context(bs);
4027
4028 co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
4029 qemu_coroutine_enter(co, &data);
4030 while (!data.done) {
4031 aio_poll(aio_context, true);
4032 }
4033 }
4034 return data.ret;
4035 }
4036
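/*
 * Example (hypothetical caller, for illustration only): walk an image and
 * count the sectors that carry guest-visible data, advancing by the run
 * length that bdrv_get_block_status() reports in 'pnum':
 *
 *     int64_t sector = 0, data_sectors = 0;
 *     int64_t total = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
 *     while (sector < total) {
 *         int pnum;
 *         int64_t ret = bdrv_get_block_status(bs, sector,
 *                                             MIN(total - sector, 65536),
 *                                             &pnum);
 *         if (ret < 0 || pnum == 0) {
 *             break;
 *         }
 *         if (ret & BDRV_BLOCK_DATA) {
 *             data_sectors += pnum;
 *         }
 *         sector += pnum;
 *     }
 */
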
4037 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
4038 int nb_sectors, int *pnum)
4039 {
4040 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
4041 if (ret < 0) {
4042 return ret;
4043 }
4044 return (ret & BDRV_BLOCK_ALLOCATED);
4045 }
4046
4047 /*
4048 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
4049 *
4050 * Return true if the given sector is allocated in any image between
4051 * BASE (excluded) and TOP (included); BASE can be NULL to check the whole
4052 * chain. Return false otherwise.
4053 *
4054 * 'pnum' is set to the number of sectors (including and immediately following
4055 * the specified sector) that are known to be in the same
4056 * allocated/unallocated state.
4057 *
4058 */
4059 int bdrv_is_allocated_above(BlockDriverState *top,
4060 BlockDriverState *base,
4061 int64_t sector_num,
4062 int nb_sectors, int *pnum)
4063 {
4064 BlockDriverState *intermediate;
4065 int ret, n = nb_sectors;
4066
4067 intermediate = top;
4068 while (intermediate && intermediate != base) {
4069 int pnum_inter;
4070 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
4071 &pnum_inter);
4072 if (ret < 0) {
4073 return ret;
4074 } else if (ret) {
4075 *pnum = pnum_inter;
4076 return 1;
4077 }
4078
4079 /*
4080 * [sector_num, nb_sectors] is unallocated on top but intermediate
4081 * might have
4082 *
4083 * [sector_num+x, nb_sectors] allocated.
4084 */
4085 if (n > pnum_inter &&
4086 (intermediate == top ||
4087 sector_num + pnum_inter < intermediate->total_sectors)) {
4088 n = pnum_inter;
4089 }
4090
4091 intermediate = intermediate->backing_hd;
4092 }
4093
4094 *pnum = n;
4095 return 0;
4096 }
4097
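/*
 * Illustrative sketch: given a chain base <- mid <- top, ask whether any of
 * the first 16 sectors is allocated somewhere above 'base' (i.e. in 'mid'
 * or 'top'); 'base' itself is not consulted:
 *
 *     int pnum;
 *     int ret = bdrv_is_allocated_above(top, base, 0, 16, &pnum);
 *     if (ret > 0) {
 *         ... at least sectors [0, pnum) must be copied when merging
 *         top and mid down into base ...
 *     }
 */
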
4098 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
4099 {
4100 if (bs->backing_hd && bs->backing_hd->encrypted)
4101 return bs->backing_file;
4102 else if (bs->encrypted)
4103 return bs->filename;
4104 else
4105 return NULL;
4106 }
4107
4108 void bdrv_get_backing_filename(BlockDriverState *bs,
4109 char *filename, int filename_size)
4110 {
4111 pstrcpy(filename, filename_size, bs->backing_file);
4112 }
4113
4114 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
4115 const uint8_t *buf, int nb_sectors)
4116 {
4117 BlockDriver *drv = bs->drv;
4118 if (!drv)
4119 return -ENOMEDIUM;
4120 if (!drv->bdrv_write_compressed)
4121 return -ENOTSUP;
4122 if (bdrv_check_request(bs, sector_num, nb_sectors))
4123 return -EIO;
4124
4125 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
4126
4127 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
4128 }
4129
4130 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
4131 {
4132 BlockDriver *drv = bs->drv;
4133 if (!drv)
4134 return -ENOMEDIUM;
4135 if (!drv->bdrv_get_info)
4136 return -ENOTSUP;
4137 memset(bdi, 0, sizeof(*bdi));
4138 return drv->bdrv_get_info(bs, bdi);
4139 }
4140
4141 ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs)
4142 {
4143 BlockDriver *drv = bs->drv;
4144 if (drv && drv->bdrv_get_specific_info) {
4145 return drv->bdrv_get_specific_info(bs);
4146 }
4147 return NULL;
4148 }
4149
4150 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
4151 int64_t pos, int size)
4152 {
4153 QEMUIOVector qiov;
4154 struct iovec iov = {
4155 .iov_base = (void *) buf,
4156 .iov_len = size,
4157 };
4158
4159 qemu_iovec_init_external(&qiov, &iov, 1);
4160 return bdrv_writev_vmstate(bs, &qiov, pos);
4161 }
4162
4163 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
4164 {
4165 BlockDriver *drv = bs->drv;
4166
4167 if (!drv) {
4168 return -ENOMEDIUM;
4169 } else if (drv->bdrv_save_vmstate) {
4170 return drv->bdrv_save_vmstate(bs, qiov, pos);
4171 } else if (bs->file) {
4172 return bdrv_writev_vmstate(bs->file, qiov, pos);
4173 }
4174
4175 return -ENOTSUP;
4176 }
4177
4178 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
4179 int64_t pos, int size)
4180 {
4181 BlockDriver *drv = bs->drv;
4182 if (!drv)
4183 return -ENOMEDIUM;
4184 if (drv->bdrv_load_vmstate)
4185 return drv->bdrv_load_vmstate(bs, buf, pos, size);
4186 if (bs->file)
4187 return bdrv_load_vmstate(bs->file, buf, pos, size);
4188 return -ENOTSUP;
4189 }
4190
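/*
 * Illustrative round trip (only meaningful for formats that implement
 * vmstate storage; as the fallbacks above show, everything else returns
 * -ENOTSUP):
 *
 *     uint8_t blob[512];
 *     ... fill blob with device state ...
 *     bdrv_save_vmstate(bs, blob, 0, sizeof(blob));
 *     ...
 *     bdrv_load_vmstate(bs, blob, 0, sizeof(blob));
 */
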
4191 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
4192 {
4193 if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
4194 return;
4195 }
4196
4197 bs->drv->bdrv_debug_event(bs, event);
4198 }
4199
4200 int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
4201 const char *tag)
4202 {
4203 while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
4204 bs = bs->file;
4205 }
4206
4207 if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
4208 return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
4209 }
4210
4211 return -ENOTSUP;
4212 }
4213
4214 int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
4215 {
4216 while (bs && bs->drv && !bs->drv->bdrv_debug_remove_breakpoint) {
4217 bs = bs->file;
4218 }
4219
4220 if (bs && bs->drv && bs->drv->bdrv_debug_remove_breakpoint) {
4221 return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
4222 }
4223
4224 return -ENOTSUP;
4225 }
4226
4227 int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
4228 {
4229 while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) {
4230 bs = bs->file;
4231 }
4232
4233 if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
4234 return bs->drv->bdrv_debug_resume(bs, tag);
4235 }
4236
4237 return -ENOTSUP;
4238 }
4239
4240 bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
4241 {
4242 while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
4243 bs = bs->file;
4244 }
4245
4246 if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
4247 return bs->drv->bdrv_debug_is_suspended(bs, tag);
4248 }
4249
4250 return false;
4251 }
4252
4253 int bdrv_is_snapshot(BlockDriverState *bs)
4254 {
4255 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
4256 }
4257
4258 /* backing_file can either be relative, or absolute, or a protocol. If it is
4259 * relative, it must be relative to the chain. So, passing in bs->filename
4260 * from a BDS as backing_file should not be done, as that may be relative to
4261 * the CWD rather than the chain. */
4262 BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
4263 const char *backing_file)
4264 {
4265 char *filename_full = NULL;
4266 char *backing_file_full = NULL;
4267 char *filename_tmp = NULL;
4268 int is_protocol = 0;
4269 BlockDriverState *curr_bs = NULL;
4270 BlockDriverState *retval = NULL;
4271
4272 if (!bs || !bs->drv || !backing_file) {
4273 return NULL;
4274 }
4275
4276 filename_full = g_malloc(PATH_MAX);
4277 backing_file_full = g_malloc(PATH_MAX);
4278 filename_tmp = g_malloc(PATH_MAX);
4279
4280 is_protocol = path_has_protocol(backing_file);
4281
4282 for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {
4283
4284 /* If either of the filename paths is actually a protocol, then
4285 * compare unmodified paths; otherwise make paths relative */
4286 if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
4287 if (strcmp(backing_file, curr_bs->backing_file) == 0) {
4288 retval = curr_bs->backing_hd;
4289 break;
4290 }
4291 } else {
4292 /* If not an absolute filename path, make it relative to the current
4293 * image's filename path */
4294 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4295 backing_file);
4296
4297 /* We are going to compare absolute pathnames */
4298 if (!realpath(filename_tmp, filename_full)) {
4299 continue;
4300 }
4301
4302 /* We need to make sure the backing filename we are comparing against
4303 * is relative to the current image filename (or absolute) */
4304 path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
4305 curr_bs->backing_file);
4306
4307 if (!realpath(filename_tmp, backing_file_full)) {
4308 continue;
4309 }
4310
4311 if (strcmp(backing_file_full, filename_full) == 0) {
4312 retval = curr_bs->backing_hd;
4313 break;
4314 }
4315 }
4316 }
4317
4318 g_free(filename_full);
4319 g_free(backing_file_full);
4320 g_free(filename_tmp);
4321 return retval;
4322 }
4323
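/*
 * Illustrative example: for a chain opened from images base.img <- sn1.img
 * <- sn2.img (plain files in one directory), a hypothetical caller holding
 * the top-level BDS can locate the middle node by its backing file name,
 * with relative paths resolved against each image in turn:
 *
 *     BlockDriverState *mid = bdrv_find_backing_image(bs_top, "sn1.img");
 *     ... mid is NULL if no image in the chain matches ...
 */
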
4324 int bdrv_get_backing_file_depth(BlockDriverState *bs)
4325 {
4326 if (!bs->drv) {
4327 return 0;
4328 }
4329
4330 if (!bs->backing_hd) {
4331 return 0;
4332 }
4333
4334 return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
4335 }
4336
4337 BlockDriverState *bdrv_find_base(BlockDriverState *bs)
4338 {
4339 BlockDriverState *curr_bs = NULL;
4340
4341 if (!bs) {
4342 return NULL;
4343 }
4344
4345 curr_bs = bs;
4346
4347 while (curr_bs->backing_hd) {
4348 curr_bs = curr_bs->backing_hd;
4349 }
4350 return curr_bs;
4351 }
4352
4353 /**************************************************************/
4354 /* async I/Os */
4355
4356 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
4357 QEMUIOVector *qiov, int nb_sectors,
4358 BlockDriverCompletionFunc *cb, void *opaque)
4359 {
4360 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
4361
4362 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
4363 cb, opaque, false);
4364 }
4365
4366 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
4367 QEMUIOVector *qiov, int nb_sectors,
4368 BlockDriverCompletionFunc *cb, void *opaque)
4369 {
4370 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
4371
4372 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
4373 cb, opaque, true);
4374 }
4375
4376 BlockDriverAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
4377 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
4378 BlockDriverCompletionFunc *cb, void *opaque)
4379 {
4380 trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
4381
4382 return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
4383 BDRV_REQ_ZERO_WRITE | flags,
4384 cb, opaque, true);
4385 }
4386
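/*
 * Example (hypothetical caller, for illustration only): submit an
 * asynchronous read of one sector and collect the result from the
 * completion callback, which runs from the event loop:
 *
 *     static void read_done(void *opaque, int ret)
 *     {
 *         *(int *)opaque = ret;
 *     }
 *
 *     uint8_t buf[BDRV_SECTOR_SIZE];
 *     struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *     QEMUIOVector qiov;
 *     int status = -EINPROGRESS;
 *
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     bdrv_aio_readv(bs, 0, &qiov, 1, read_done, &status);
 *     ... keep buf and qiov alive until read_done has run ...
 */
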
4387
4388 typedef struct MultiwriteCB {
4389 int error;
4390 int num_requests;
4391 int num_callbacks;
4392 struct {
4393 BlockDriverCompletionFunc *cb;
4394 void *opaque;
4395 QEMUIOVector *free_qiov;
4396 } callbacks[];
4397 } MultiwriteCB;
4398
4399 static void multiwrite_user_cb(MultiwriteCB *mcb)
4400 {
4401 int i;
4402
4403 for (i = 0; i < mcb->num_callbacks; i++) {
4404 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
4405 if (mcb->callbacks[i].free_qiov) {
4406 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
4407 }
4408 g_free(mcb->callbacks[i].free_qiov);
4409 }
4410 }
4411
4412 static void multiwrite_cb(void *opaque, int ret)
4413 {
4414 MultiwriteCB *mcb = opaque;
4415
4416 trace_multiwrite_cb(mcb, ret);
4417
4418 if (ret < 0 && !mcb->error) {
4419 mcb->error = ret;
4420 }
4421
4422 mcb->num_requests--;
4423 if (mcb->num_requests == 0) {
4424 multiwrite_user_cb(mcb);
4425 g_free(mcb);
4426 }
4427 }
4428
4429 static int multiwrite_req_compare(const void *a, const void *b)
4430 {
4431 const BlockRequest *req1 = a, *req2 = b;
4432
4433 /*
4434 * Note that we can't simply subtract req2->sector from req1->sector
4435 * here as that could overflow the return value.
4436 */
4437 if (req1->sector > req2->sector) {
4438 return 1;
4439 } else if (req1->sector < req2->sector) {
4440 return -1;
4441 } else {
4442 return 0;
4443 }
4444 }
4445
4446 /*
4447 * Takes a bunch of requests and tries to merge them. Returns the number of
4448 * requests that remain after merging.
4449 */
4450 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
4451 int num_reqs, MultiwriteCB *mcb)
4452 {
4453 int i, outidx;
4454
4455 // Sort requests by start sector
4456 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
4457
4458 // Merge adjacent requests: only requests that are exactly sequential or
4459 // overlapping are combined (any overlap is dropped from the first one).
4460 outidx = 0;
4461 for (i = 1; i < num_reqs; i++) {
4462 int merge = 0;
4463 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
4464
4465 // Handle exactly sequential writes and overlapping writes.
4466 if (reqs[i].sector <= oldreq_last) {
4467 merge = 1;
4468 }
4469
4470 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
4471 merge = 0;
4472 }
4473
4474 if (merge) {
4475 size_t size;
4476 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
4477 qemu_iovec_init(qiov,
4478 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
4479
4480 // Add the first request to the merged one. If the requests are
4481 // overlapping, drop the last sectors of the first request.
4482 size = (reqs[i].sector - reqs[outidx].sector) << 9;
4483 qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
4484
4485 // We shouldn't need to add any zeros between the two requests
4486 assert(reqs[i].sector <= oldreq_last);
4487
4488 // Add the second request
4489 qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
4490
4491 reqs[outidx].nb_sectors = qiov->size >> 9;
4492 reqs[outidx].qiov = qiov;
4493
4494 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
4495 } else {
4496 outidx++;
4497 reqs[outidx].sector = reqs[i].sector;
4498 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
4499 reqs[outidx].qiov = reqs[i].qiov;
4500 }
4501 }
4502
4503 return outidx + 1;
4504 }
4505
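/*
 * Worked example: two 8-sector writes at sectors 0 and 8 are exactly
 * sequential (oldreq_last == 8 == reqs[1].sector), so they merge into a
 * single 16-sector request whose qiov concatenates both vectors. A write
 * at sector 4 overlapping the first would also merge, with the overlapping
 * tail of the first request dropped; a write at sector 9, which leaves a
 * gap, would not merge.
 */
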
4506 /*
4507 * Submit multiple AIO write requests at once.
4508 *
4509 * On success, the function returns 0 and all requests in the reqs array have
4510 * been submitted. On error, this function returns -1 and any of the
4511 * requests may or may not have been submitted yet. In particular, the
4512 * callback will be called for some of the requests but not for others. The
4513 * caller must check the error field of the BlockRequest to wait for the right
4514 * callbacks (if error != 0, no callback will be called).
4515 *
4516 * The implementation may modify the contents of the reqs array, e.g. to merge
4517 * requests. However, the fields opaque and error are left unmodified as they
4518 * are used to signal failure for a single request to the caller.
4519 */
4520 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
4521 {
4522 MultiwriteCB *mcb;
4523 int i;
4524
4525 /* don't submit writes if we don't have a medium */
4526 if (bs->drv == NULL) {
4527 for (i = 0; i < num_reqs; i++) {
4528 reqs[i].error = -ENOMEDIUM;
4529 }
4530 return -1;
4531 }
4532
4533 if (num_reqs == 0) {
4534 return 0;
4535 }
4536
4537 // Create MultiwriteCB structure
4538 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
4539 mcb->num_requests = 0;
4540 mcb->num_callbacks = num_reqs;
4541
4542 for (i = 0; i < num_reqs; i++) {
4543 mcb->callbacks[i].cb = reqs[i].cb;
4544 mcb->callbacks[i].opaque = reqs[i].opaque;
4545 }
4546
4547 // Check for mergeable requests
4548 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
4549
4550 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
4551
4552 /* Run the aio requests. */
4553 mcb->num_requests = num_reqs;
4554 for (i = 0; i < num_reqs; i++) {
4555 bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
4556 reqs[i].nb_sectors, reqs[i].flags,
4557 multiwrite_cb, mcb,
4558 true);
4559 }
4560
4561 return 0;
4562 }
4563
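/*
 * Illustrative sketch of a caller (names hypothetical): two prepared
 * requests are handed over in one call; their 'cb' callbacks fire once the
 * whole batch has completed, each with the first error seen (or 0):
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = write_done, .opaque = &st1 },
 *         { .sector = 8, .nb_sectors = 8, .qiov = &qiov2,
 *           .cb = write_done, .opaque = &st2 },
 *     };
 *
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         ... inspect reqs[i].error; callbacks only run for requests
 *         with error == 0 ...
 *     }
 */
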
4564 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
4565 {
4566 acb->aiocb_info->cancel(acb);
4567 }
4568
4569 /**************************************************************/
4570 /* async block device emulation */
4571
4572 typedef struct BlockDriverAIOCBSync {
4573 BlockDriverAIOCB common;
4574 QEMUBH *bh;
4575 int ret;
4576 /* vector translation state */
4577 QEMUIOVector *qiov;
4578 uint8_t *bounce;
4579 int is_write;
4580 } BlockDriverAIOCBSync;
4581
4582 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
4583 {
4584 BlockDriverAIOCBSync *acb =
4585 container_of(blockacb, BlockDriverAIOCBSync, common);
4586 qemu_bh_delete(acb->bh);
4587 acb->bh = NULL;
4588 qemu_aio_release(acb);
4589 }
4590
4591 static const AIOCBInfo bdrv_em_aiocb_info = {
4592 .aiocb_size = sizeof(BlockDriverAIOCBSync),
4593 .cancel = bdrv_aio_cancel_em,
4594 };
4595
4596 static void bdrv_aio_bh_cb(void *opaque)
4597 {
4598 BlockDriverAIOCBSync *acb = opaque;
4599
4600 if (!acb->is_write)
4601 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
4602 qemu_vfree(acb->bounce);
4603 acb->common.cb(acb->common.opaque, acb->ret);
4604 qemu_bh_delete(acb->bh);
4605 acb->bh = NULL;
4606 qemu_aio_release(acb);
4607 }
4608
4609 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
4610 int64_t sector_num,
4611 QEMUIOVector *qiov,
4612 int nb_sectors,
4613 BlockDriverCompletionFunc *cb,
4614 void *opaque,
4615 int is_write)
4616 {
4618 BlockDriverAIOCBSync *acb;
4619
4620 acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
4621 acb->is_write = is_write;
4622 acb->qiov = qiov;
4623 acb->bounce = qemu_blockalign(bs, qiov->size);
4624 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);
4625
4626 if (is_write) {
4627 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
4628 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
4629 } else {
4630 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
4631 }
4632
4633 qemu_bh_schedule(acb->bh);
4634
4635 return &acb->common;
4636 }
4637
4638 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
4639 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4640 BlockDriverCompletionFunc *cb, void *opaque)
4641 {
4642 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
4643 }
4644
4645 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
4646 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
4647 BlockDriverCompletionFunc *cb, void *opaque)
4648 {
4649 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
4650 }
4651
4652
4653 typedef struct BlockDriverAIOCBCoroutine {
4654 BlockDriverAIOCB common;
4655 BlockRequest req;
4656 bool is_write;
4657 bool *done;
4658 QEMUBH *bh;
4659 } BlockDriverAIOCBCoroutine;
4660
4661 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
4662 {
4663 AioContext *aio_context = bdrv_get_aio_context(blockacb->bs);
4664 BlockDriverAIOCBCoroutine *acb =
4665 container_of(blockacb, BlockDriverAIOCBCoroutine, common);
4666 bool done = false;
4667
4668 acb->done = &done;
4669 while (!done) {
4670 aio_poll(aio_context, true);
4671 }
4672 }
4673
4674 static const AIOCBInfo bdrv_em_co_aiocb_info = {
4675 .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
4676 .cancel = bdrv_aio_co_cancel_em,
4677 };
4678
4679 static void bdrv_co_em_bh(void *opaque)
4680 {
4681 BlockDriverAIOCBCoroutine *acb = opaque;
4682
4683 acb->common.cb(acb->common.opaque, acb->req.error);
4684
4685 if (acb->done) {
4686 *acb->done = true;
4687 }
4688
4689 qemu_bh_delete(acb->bh);
4690 qemu_aio_release(acb);
4691 }
4692
4693 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
4694 static void coroutine_fn bdrv_co_do_rw(void *opaque)
4695 {
4696 BlockDriverAIOCBCoroutine *acb = opaque;
4697 BlockDriverState *bs = acb->common.bs;
4698
4699 if (!acb->is_write) {
4700 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
4701 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
4702 } else {
4703 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
4704 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
4705 }
4706
4707 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4708 qemu_bh_schedule(acb->bh);
4709 }
4710
4711 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
4712 int64_t sector_num,
4713 QEMUIOVector *qiov,
4714 int nb_sectors,
4715 BdrvRequestFlags flags,
4716 BlockDriverCompletionFunc *cb,
4717 void *opaque,
4718 bool is_write)
4719 {
4720 Coroutine *co;
4721 BlockDriverAIOCBCoroutine *acb;
4722
4723 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4724 acb->req.sector = sector_num;
4725 acb->req.nb_sectors = nb_sectors;
4726 acb->req.qiov = qiov;
4727 acb->req.flags = flags;
4728 acb->is_write = is_write;
4729 acb->done = NULL;
4730
4731 co = qemu_coroutine_create(bdrv_co_do_rw);
4732 qemu_coroutine_enter(co, acb);
4733
4734 return &acb->common;
4735 }
4736
4737 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
4738 {
4739 BlockDriverAIOCBCoroutine *acb = opaque;
4740 BlockDriverState *bs = acb->common.bs;
4741
4742 acb->req.error = bdrv_co_flush(bs);
4743 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4744 qemu_bh_schedule(acb->bh);
4745 }
4746
4747 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
4748 BlockDriverCompletionFunc *cb, void *opaque)
4749 {
4750 trace_bdrv_aio_flush(bs, opaque);
4751
4752 Coroutine *co;
4753 BlockDriverAIOCBCoroutine *acb;
4754
4755 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4756 acb->done = NULL;
4757
4758 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
4759 qemu_coroutine_enter(co, acb);
4760
4761 return &acb->common;
4762 }
4763
4764 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
4765 {
4766 BlockDriverAIOCBCoroutine *acb = opaque;
4767 BlockDriverState *bs = acb->common.bs;
4768
4769 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
4770 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
4771 qemu_bh_schedule(acb->bh);
4772 }
4773
4774 BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
4775 int64_t sector_num, int nb_sectors,
4776 BlockDriverCompletionFunc *cb, void *opaque)
4777 {
4778 Coroutine *co;
4779 BlockDriverAIOCBCoroutine *acb;
4780
4781 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
4782
4783 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
4784 acb->req.sector = sector_num;
4785 acb->req.nb_sectors = nb_sectors;
4786 acb->done = NULL;
4787 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
4788 qemu_coroutine_enter(co, acb);
4789
4790 return &acb->common;
4791 }
4792
4793 void bdrv_init(void)
4794 {
4795 module_call_init(MODULE_INIT_BLOCK);
4796 }
4797
4798 void bdrv_init_with_whitelist(void)
4799 {
4800 use_bdrv_whitelist = 1;
4801 bdrv_init();
4802 }
4803
4804 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
4805 BlockDriverCompletionFunc *cb, void *opaque)
4806 {
4807 BlockDriverAIOCB *acb;
4808
4809 acb = g_slice_alloc(aiocb_info->aiocb_size);
4810 acb->aiocb_info = aiocb_info;
4811 acb->bs = bs;
4812 acb->cb = cb;
4813 acb->opaque = opaque;
4814 return acb;
4815 }
4816
4817 void qemu_aio_release(void *p)
4818 {
4819 BlockDriverAIOCB *acb = p;
4820 g_slice_free1(acb->aiocb_info->aiocb_size, acb);
4821 }
4822
4823 /**************************************************************/
4824 /* Coroutine block device emulation */
4825
4826 typedef struct CoroutineIOCompletion {
4827 Coroutine *coroutine;
4828 int ret;
4829 } CoroutineIOCompletion;
4830
4831 static void bdrv_co_io_em_complete(void *opaque, int ret)
4832 {
4833 CoroutineIOCompletion *co = opaque;
4834
4835 co->ret = ret;
4836 qemu_coroutine_enter(co->coroutine, NULL);
4837 }
4838
4839 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
4840 int nb_sectors, QEMUIOVector *iov,
4841 bool is_write)
4842 {
4843 CoroutineIOCompletion co = {
4844 .coroutine = qemu_coroutine_self(),
4845 };
4846 BlockDriverAIOCB *acb;
4847
4848 if (is_write) {
4849 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
4850 bdrv_co_io_em_complete, &co);
4851 } else {
4852 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
4853 bdrv_co_io_em_complete, &co);
4854 }
4855
4856 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
4857 if (!acb) {
4858 return -EIO;
4859 }
4860 qemu_coroutine_yield();
4861
4862 return co.ret;
4863 }
4864
4865 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
4866 int64_t sector_num, int nb_sectors,
4867 QEMUIOVector *iov)
4868 {
4869 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
4870 }
4871
4872 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
4873 int64_t sector_num, int nb_sectors,
4874 QEMUIOVector *iov)
4875 {
4876 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
4877 }
4878
4879 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
4880 {
4881 RwCo *rwco = opaque;
4882
4883 rwco->ret = bdrv_co_flush(rwco->bs);
4884 }
4885
4886 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
4887 {
4888 int ret;
4889
4890 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
4891 return 0;
4892 }
4893
4894 /* Write back cached data to the OS even with cache=unsafe */
4895 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
4896 if (bs->drv->bdrv_co_flush_to_os) {
4897 ret = bs->drv->bdrv_co_flush_to_os(bs);
4898 if (ret < 0) {
4899 return ret;
4900 }
4901 }
4902
4903 /* But don't actually force it to the disk with cache=unsafe */
4904 if (bs->open_flags & BDRV_O_NO_FLUSH) {
4905 goto flush_parent;
4906 }
4907
4908 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
4909 if (bs->drv->bdrv_co_flush_to_disk) {
4910 ret = bs->drv->bdrv_co_flush_to_disk(bs);
4911 } else if (bs->drv->bdrv_aio_flush) {
4912 BlockDriverAIOCB *acb;
4913 CoroutineIOCompletion co = {
4914 .coroutine = qemu_coroutine_self(),
4915 };
4916
4917 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
4918 if (acb == NULL) {
4919 ret = -EIO;
4920 } else {
4921 qemu_coroutine_yield();
4922 ret = co.ret;
4923 }
4924 } else {
4925 /*
4926 * Some block drivers always operate in either writethrough or unsafe
4927 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
4928 * know how the server works (because the behaviour is hardcoded or
4929 * depends on server-side configuration), so we can't ensure that
4930 * everything is safe on disk. Returning an error doesn't work because
4931 * that would break guests even if the server operates in writethrough
4932 * mode.
4933 *
4934 * Let's hope the user knows what he's doing.
4935 */
4936 ret = 0;
4937 }
4938 if (ret < 0) {
4939 return ret;
4940 }
4941
4942 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
4943 * in the case of cache=unsafe, so there are no useless flushes.
4944 */
4945 flush_parent:
4946 return bdrv_co_flush(bs->file);
4947 }
4948
4949 void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
4950 {
4951 Error *local_err = NULL;
4952 int ret;
4953
4954 if (!bs->drv) {
4955 return;
4956 }
4957
4958 if (bs->drv->bdrv_invalidate_cache) {
4959 bs->drv->bdrv_invalidate_cache(bs, &local_err);
4960 } else if (bs->file) {
4961 bdrv_invalidate_cache(bs->file, &local_err);
4962 }
4963 if (local_err) {
4964 error_propagate(errp, local_err);
4965 return;
4966 }
4967
4968 ret = refresh_total_sectors(bs, bs->total_sectors);
4969 if (ret < 0) {
4970 error_setg_errno(errp, -ret, "Could not refresh total sector count");
4971 return;
4972 }
4973 }
4974
4975 void bdrv_invalidate_cache_all(Error **errp)
4976 {
4977 BlockDriverState *bs;
4978 Error *local_err = NULL;
4979
4980 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
4981 AioContext *aio_context = bdrv_get_aio_context(bs);
4982
4983 aio_context_acquire(aio_context);
4984 bdrv_invalidate_cache(bs, &local_err);
4985 aio_context_release(aio_context);
4986 if (local_err) {
4987 error_propagate(errp, local_err);
4988 return;
4989 }
4990 }
4991 }
4992
4993 void bdrv_clear_incoming_migration_all(void)
4994 {
4995 BlockDriverState *bs;
4996
4997 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
4998 AioContext *aio_context = bdrv_get_aio_context(bs);
4999
5000 aio_context_acquire(aio_context);
5001 bs->open_flags &= ~BDRV_O_INCOMING;
5002 aio_context_release(aio_context);
5003 }
5004 }
5005
5006 int bdrv_flush(BlockDriverState *bs)
5007 {
5008 Coroutine *co;
5009 RwCo rwco = {
5010 .bs = bs,
5011 .ret = NOT_DONE,
5012 };
5013
5014 if (qemu_in_coroutine()) {
5015 /* Fast-path if already in coroutine context */
5016 bdrv_flush_co_entry(&rwco);
5017 } else {
5018 AioContext *aio_context = bdrv_get_aio_context(bs);
5019
5020 co = qemu_coroutine_create(bdrv_flush_co_entry);
5021 qemu_coroutine_enter(co, &rwco);
5022 while (rwco.ret == NOT_DONE) {
5023 aio_poll(aio_context, true);
5024 }
5025 }
5026
5027 return rwco.ret;
5028 }
5029
5030 typedef struct DiscardCo {
5031 BlockDriverState *bs;
5032 int64_t sector_num;
5033 int nb_sectors;
5034 int ret;
5035 } DiscardCo;
5036 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
5037 {
5038 DiscardCo *rwco = opaque;
5039
5040 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
5041 }
5042
5043 /* If no limit is specified in the BlockLimits, use a default
5044 * of 32768 512-byte sectors (16 MiB) per request.
5045 */
5046 #define MAX_DISCARD_DEFAULT 32768
5047
5048 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
5049 int nb_sectors)
5050 {
5051 int max_discard;
5052
5053 if (!bs->drv) {
5054 return -ENOMEDIUM;
5055 } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
5056 return -EIO;
5057 } else if (bs->read_only) {
5058 return -EROFS;
5059 }
5060
5061 bdrv_reset_dirty(bs, sector_num, nb_sectors);
5062
5063 /* Do nothing if disabled. */
5064 if (!(bs->open_flags & BDRV_O_UNMAP)) {
5065 return 0;
5066 }
5067
5068 if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
5069 return 0;
5070 }
5071
5072 max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
5073 while (nb_sectors > 0) {
5074 int ret;
5075 int num = nb_sectors;
5076
5077 /* align request */
5078 if (bs->bl.discard_alignment &&
5079 num >= bs->bl.discard_alignment &&
5080 sector_num % bs->bl.discard_alignment) {
5081 if (num > bs->bl.discard_alignment) {
5082 num = bs->bl.discard_alignment;
5083 }
5084 num -= sector_num % bs->bl.discard_alignment;
5085 }
5086
5087 /* limit request size */
5088 if (num > max_discard) {
5089 num = max_discard;
5090 }
5091
5092 if (bs->drv->bdrv_co_discard) {
5093 ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
5094 } else {
5095 BlockDriverAIOCB *acb;
5096 CoroutineIOCompletion co = {
5097 .coroutine = qemu_coroutine_self(),
5098 };
5099
5100 acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
5101 bdrv_co_io_em_complete, &co);
5102 if (acb == NULL) {
5103 return -EIO;
5104 } else {
5105 qemu_coroutine_yield();
5106 ret = co.ret;
5107 }
5108 }
5109 if (ret && ret != -ENOTSUP) {
5110 return ret;
5111 }
5112
5113 sector_num += num;
5114 nb_sectors -= num;
5115 }
5116 return 0;
5117 }
5118
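/*
 * Worked example of the splitting above: with bl.discard_alignment == 8
 * and a request for 20 sectors starting at sector 5, the first pass trims
 * 'num' to 3 (8 minus 5 % 8), so sectors 5..7 are discarded first to reach
 * the alignment boundary; the remaining 17 sectors starting at the
 * now-aligned sector 8 are submitted on the next iteration, subject to
 * max_discard.
 */
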
5119 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
5120 {
5121 Coroutine *co;
5122 DiscardCo rwco = {
5123 .bs = bs,
5124 .sector_num = sector_num,
5125 .nb_sectors = nb_sectors,
5126 .ret = NOT_DONE,
5127 };
5128
5129 if (qemu_in_coroutine()) {
5130 /* Fast-path if already in coroutine context */
5131 bdrv_discard_co_entry(&rwco);
5132 } else {
5133 AioContext *aio_context = bdrv_get_aio_context(bs);
5134
5135 co = qemu_coroutine_create(bdrv_discard_co_entry);
5136 qemu_coroutine_enter(co, &rwco);
5137 while (rwco.ret == NOT_DONE) {
5138 aio_poll(aio_context, true);
5139 }
5140 }
5141
5142 return rwco.ret;
5143 }
5144
5145 /**************************************************************/
5146 /* removable device support */
5147
5148 /**
5149 * Return TRUE if the media is present
5150 */
5151 int bdrv_is_inserted(BlockDriverState *bs)
5152 {
5153 BlockDriver *drv = bs->drv;
5154
5155 if (!drv)
5156 return 0;
5157 if (!drv->bdrv_is_inserted)
5158 return 1;
5159 return drv->bdrv_is_inserted(bs);
5160 }
5161
5162 /**
5163 * Return whether the media changed since the last call to this
5164 * function, or -ENOTSUP if we don't know. Most drivers don't know.
5165 */
5166 int bdrv_media_changed(BlockDriverState *bs)
5167 {
5168 BlockDriver *drv = bs->drv;
5169
5170 if (drv && drv->bdrv_media_changed) {
5171 return drv->bdrv_media_changed(bs);
5172 }
5173 return -ENOTSUP;
5174 }
5175
5176 /**
5177 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
5178 */
5179 void bdrv_eject(BlockDriverState *bs, bool eject_flag)
5180 {
5181 BlockDriver *drv = bs->drv;
5182
5183 if (drv && drv->bdrv_eject) {
5184 drv->bdrv_eject(bs, eject_flag);
5185 }
5186
5187 if (bs->device_name[0] != '\0') {
5188 qapi_event_send_device_tray_moved(bdrv_get_device_name(bs),
5189 eject_flag, &error_abort);
5190 }
5191 }
5192
5193 /**
5194 * Lock or unlock the media (if it is locked, the user won't be able
5195 * to eject it manually).
5196 */
5197 void bdrv_lock_medium(BlockDriverState *bs, bool locked)
5198 {
5199 BlockDriver *drv = bs->drv;
5200
5201 trace_bdrv_lock_medium(bs, locked);
5202
5203 if (drv && drv->bdrv_lock_medium) {
5204 drv->bdrv_lock_medium(bs, locked);
5205 }
5206 }
5207
5208 /* needed for generic scsi interface */
5209
5210 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
5211 {
5212 BlockDriver *drv = bs->drv;
5213
5214 if (drv && drv->bdrv_ioctl)
5215 return drv->bdrv_ioctl(bs, req, buf);
5216 return -ENOTSUP;
5217 }
5218
5219 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
5220 unsigned long int req, void *buf,
5221 BlockDriverCompletionFunc *cb, void *opaque)
5222 {
5223 BlockDriver *drv = bs->drv;
5224
5225 if (drv && drv->bdrv_aio_ioctl)
5226 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
5227 return NULL;
5228 }
5229
5230 void bdrv_set_guest_block_size(BlockDriverState *bs, int align)
5231 {
5232 bs->guest_block_size = align;
5233 }
5234
5235 void *qemu_blockalign(BlockDriverState *bs, size_t size)
5236 {
5237 return qemu_memalign(bdrv_opt_mem_align(bs), size);
5238 }
5239
5240 /*
5241 * Check if all memory in this vector is sector aligned.
5242 */
5243 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
5244 {
5245 int i;
5246 size_t alignment = bdrv_opt_mem_align(bs);
5247
5248 for (i = 0; i < qiov->niov; i++) {
5249 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
5250 return false;
5251 }
5252 if (qiov->iov[i].iov_len % alignment) {
5253 return false;
5254 }
5255 }
5256
5257 return true;
5258 }
5259
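/*
 * Illustrative sketch: allocate a buffer with the required alignment and
 * verify that a vector built from it passes the check above:
 *
 *     void *buf = qemu_blockalign(bs, 4096);
 *     struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *     QEMUIOVector qiov;
 *
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     assert(bdrv_qiov_is_aligned(bs, &qiov));
 *     ...
 *     qemu_vfree(buf);
 */
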
5260 BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity,
5261 Error **errp)
5262 {
5263 int64_t bitmap_size;
5264 BdrvDirtyBitmap *bitmap;
5265
5266 assert((granularity & (granularity - 1)) == 0);
5267
5268 granularity >>= BDRV_SECTOR_BITS;
5269 assert(granularity);
5270 bitmap_size = bdrv_getlength(bs);
5271 if (bitmap_size < 0) {
5272 error_setg_errno(errp, -bitmap_size, "could not get length of device");
5273 errno = -bitmap_size;
5274 return NULL;
5275 }
5276 bitmap_size >>= BDRV_SECTOR_BITS;
5277 bitmap = g_malloc0(sizeof(BdrvDirtyBitmap));
5278 bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
5279 QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
5280 return bitmap;
5281 }
5282
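/*
 * Illustrative lifecycle (granularity is in bytes and must be a power of
 * two; 65536 gives one bitmap bit per 64 KiB of guest data):
 *
 *     Error *local_err = NULL;
 *     BdrvDirtyBitmap *bm = bdrv_create_dirty_bitmap(bs, 65536, &local_err);
 *     if (!bm) {
 *         ... report local_err ...
 *     }
 *     ... writes mark sectors dirty via bdrv_set_dirty() ...
 *     int64_t dirty = bdrv_get_dirty_count(bs, bm);
 *     bdrv_release_dirty_bitmap(bs, bm);
 */
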
5283 void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5284 {
5285 BdrvDirtyBitmap *bm, *next;
5286 QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) {
5287 if (bm == bitmap) {
5288 QLIST_REMOVE(bitmap, list);
5289 hbitmap_free(bitmap->bitmap);
5290 g_free(bitmap);
5291 return;
5292 }
5293 }
5294 }
5295
5296 BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
5297 {
5298 BdrvDirtyBitmap *bm;
5299 BlockDirtyInfoList *list = NULL;
5300 BlockDirtyInfoList **plist = &list;
5301
5302 QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
5303 BlockDirtyInfo *info = g_malloc0(sizeof(BlockDirtyInfo));
5304 BlockDirtyInfoList *entry = g_malloc0(sizeof(BlockDirtyInfoList));
5305 info->count = bdrv_get_dirty_count(bs, bm);
5306 info->granularity =
5307 ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bm->bitmap));
5308 entry->value = info;
5309 *plist = entry;
5310 plist = &entry->next;
5311 }
5312
5313 return list;
5314 }
5315
5316 int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector)
5317 {
5318 if (bitmap) {
5319 return hbitmap_get(bitmap->bitmap, sector);
5320 } else {
5321 return 0;
5322 }
5323 }
5324
5325 void bdrv_dirty_iter_init(BlockDriverState *bs,
5326 BdrvDirtyBitmap *bitmap, HBitmapIter *hbi)
5327 {
5328 hbitmap_iter_init(hbi, bitmap->bitmap, 0);
5329 }
5330
5331 void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
5332 int nr_sectors)
5333 {
5334 BdrvDirtyBitmap *bitmap;
5335 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5336 hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
5337 }
5338 }
5339
5340 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
5341 {
5342 BdrvDirtyBitmap *bitmap;
5343 QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
5344 hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
5345 }
5346 }
5347
5348 int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap)
5349 {
5350 return hbitmap_count(bitmap->bitmap);
5351 }
5352
5353 /* Get a reference to bs */
5354 void bdrv_ref(BlockDriverState *bs)
5355 {
5356 bs->refcnt++;
5357 }
5358
5359 /* Release a previously grabbed reference to bs.
5360 * If, after releasing, the reference count drops to zero, the
5361 * BlockDriverState is deleted. */
5362 void bdrv_unref(BlockDriverState *bs)
5363 {
5364 assert(bs->refcnt > 0);
5365 if (--bs->refcnt == 0) {
5366 bdrv_delete(bs);
5367 }
5368 }
5369
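/*
 * Typical pattern (sketch): take a reference before an operation that may
 * drop the last other reference, and release it afterwards:
 *
 *     bdrv_ref(bs);
 *     ... operate on bs; it cannot be deleted underneath us ...
 *     bdrv_unref(bs);    <-- may delete bs if this was the last reference
 */
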
5370 struct BdrvOpBlocker {
5371 Error *reason;
5372 QLIST_ENTRY(BdrvOpBlocker) list;
5373 };
5374
5375 bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp)
5376 {
5377 BdrvOpBlocker *blocker;
5378 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5379 if (!QLIST_EMPTY(&bs->op_blockers[op])) {
5380 blocker = QLIST_FIRST(&bs->op_blockers[op]);
5381 if (errp) {
5382 error_setg(errp, "Device '%s' is busy: %s",
5383 bs->device_name, error_get_pretty(blocker->reason));
5384 }
5385 return true;
5386 }
5387 return false;
5388 }
5389
5390 void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason)
5391 {
5392 BdrvOpBlocker *blocker;
5393 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5394
5395 blocker = g_malloc0(sizeof(BdrvOpBlocker));
5396 blocker->reason = reason;
5397 QLIST_INSERT_HEAD(&bs->op_blockers[op], blocker, list);
5398 }
5399
5400 void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason)
5401 {
5402 BdrvOpBlocker *blocker, *next;
5403 assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
5404 QLIST_FOREACH_SAFE(blocker, &bs->op_blockers[op], list, next) {
5405 if (blocker->reason == reason) {
5406 QLIST_REMOVE(blocker, list);
5407 g_free(blocker);
5408 }
5409 }
5410 }
5411
5412 void bdrv_op_block_all(BlockDriverState *bs, Error *reason)
5413 {
5414 int i;
5415 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5416 bdrv_op_block(bs, i, reason);
5417 }
5418 }
5419
5420 void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason)
5421 {
5422 int i;
5423 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5424 bdrv_op_unblock(bs, i, reason);
5425 }
5426 }
5427
5428 bool bdrv_op_blocker_is_empty(BlockDriverState *bs)
5429 {
5430 int i;
5431
5432 for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
5433 if (!QLIST_EMPTY(&bs->op_blockers[i])) {
5434 return false;
5435 }
5436 }
5437 return true;
5438 }
5439
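/*
 * Illustrative sketch: block all operations on a device for the duration
 * of a background job. Note that unblocking matches on the 'reason'
 * pointer itself, so the same Error object must be passed to both calls:
 *
 *     Error *blocker = NULL;
 *     error_setg(&blocker, "device is in use by a background job");
 *     bdrv_op_block_all(bs, blocker);
 *     ... run the job; bdrv_op_is_blocked() now fails with this reason ...
 *     bdrv_op_unblock_all(bs, blocker);
 *     error_free(blocker);
 */
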
5440 void bdrv_iostatus_enable(BlockDriverState *bs)
5441 {
5442 bs->iostatus_enabled = true;
5443 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
5444 }
5445
5446 /* The I/O status is only enabled if the drive explicitly
5447 * enables it _and_ the VM is configured to stop on errors */
5448 bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
5449 {
5450 return (bs->iostatus_enabled &&
5451 (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
5452 bs->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
5453 bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
5454 }
5455
5456 void bdrv_iostatus_disable(BlockDriverState *bs)
5457 {
5458 bs->iostatus_enabled = false;
5459 }
5460
5461 void bdrv_iostatus_reset(BlockDriverState *bs)
5462 {
5463 if (bdrv_iostatus_is_enabled(bs)) {
5464 bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
5465 if (bs->job) {
5466 block_job_iostatus_reset(bs->job);
5467 }
5468 }
5469 }
5470
5471 void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
5472 {
5473 assert(bdrv_iostatus_is_enabled(bs));
5474 if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
5475 bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
5476 BLOCK_DEVICE_IO_STATUS_FAILED;
5477 }
5478 }
5479
5480 void
5481 bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
5482 enum BlockAcctType type)
5483 {
5484 assert(type < BDRV_MAX_IOTYPE);
5485
5486 cookie->bytes = bytes;
5487 cookie->start_time_ns = get_clock();
5488 cookie->type = type;
5489 }
5490
5491 void
5492 bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
5493 {
5494 assert(cookie->type < BDRV_MAX_IOTYPE);
5495
5496 bs->nr_bytes[cookie->type] += cookie->bytes;
5497 bs->nr_ops[cookie->type]++;
5498 bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
5499 }
5500
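/*
 * Illustrative sketch: account for a 4 KiB read. The cookie lives on the
 * caller's stack and carries the byte count and start time between the two
 * calls (BDRV_ACCT_READ here stands for the read member of the
 * BlockAcctType enum):
 *
 *     BlockAcctCookie cookie;
 *
 *     bdrv_acct_start(bs, &cookie, 4096, BDRV_ACCT_READ);
 *     ... perform the read ...
 *     bdrv_acct_done(bs, &cookie);
 */
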
5501 void bdrv_img_create(const char *filename, const char *fmt,
5502 const char *base_filename, const char *base_fmt,
5503 char *options, uint64_t img_size, int flags,
5504 Error **errp, bool quiet)
5505 {
5506 QemuOptsList *create_opts = NULL;
5507 QemuOpts *opts = NULL;
5508 const char *backing_fmt, *backing_file;
5509 int64_t size;
5510 BlockDriver *drv, *proto_drv;
5511 BlockDriver *backing_drv = NULL;
5512 Error *local_err = NULL;
5513 int ret = 0;
5514
5515 /* Find driver and parse its options */
5516 drv = bdrv_find_format(fmt);
5517 if (!drv) {
5518 error_setg(errp, "Unknown file format '%s'", fmt);
5519 return;
5520 }
5521
5522 proto_drv = bdrv_find_protocol(filename, true);
5523 if (!proto_drv) {
5524 error_setg(errp, "Unknown protocol '%s'", filename);
5525 return;
5526 }
5527
5528 create_opts = qemu_opts_append(create_opts, drv->create_opts);
5529 create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);
5530
5531 /* Create parameter list with default values */
5532 opts = qemu_opts_create(create_opts, NULL, 0, &error_abort);
5533 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, img_size);
5534
5535 /* Parse -o options */
5536 if (options) {
5537 if (qemu_opts_do_parse(opts, options, NULL) != 0) {
5538 error_setg(errp, "Invalid options for file format '%s'", fmt);
5539 goto out;
5540 }
5541 }
5542
5543 if (base_filename) {
5544 if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, base_filename)) {
5545 error_setg(errp, "Backing file not supported for file format '%s'",
5546 fmt);
5547 goto out;
5548 }
5549 }
5550
5551 if (base_fmt) {
5552 if (qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt)) {
5553 error_setg(errp, "Backing file format not supported for file "
5554 "format '%s'", fmt);
5555 goto out;
5556 }
5557 }
5558
5559 backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
5560 if (backing_file) {
5561 if (!strcmp(filename, backing_file)) {
5562 error_setg(errp, "Trying to create an image with the "
5563 "same filename as the backing file");
5564 goto out;
5565 }
5566 }
5567
5568 backing_fmt = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
5569 if (backing_fmt) {
5570 backing_drv = bdrv_find_format(backing_fmt);
5571 if (!backing_drv) {
5572 error_setg(errp, "Unknown backing file format '%s'",
5573 backing_fmt);
5574 goto out;
5575 }
5576 }
5577
5578 // The size for the image must always be specified, with one exception:
5579 // If we are using a backing file, we can obtain the size from there
5580 size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
5581 if (size == -1) {
5582 if (backing_file) {
5583 BlockDriverState *bs;
5584 uint64_t size;
5585 int back_flags;
5586
5587 /* backing files always opened read-only */
5588 back_flags =
5589 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
5590
5591 bs = NULL;
5592 ret = bdrv_open(&bs, backing_file, NULL, NULL, back_flags,
5593 backing_drv, &local_err);
5594 if (ret < 0) {
5595 error_setg_errno(errp, -ret, "Could not open '%s': %s",
5596 backing_file,
5597 error_get_pretty(local_err));
5598 error_free(local_err);
5599 local_err = NULL;
5600 goto out;
5601 }
5602 bdrv_get_geometry(bs, &size);
5603 size *= 512;
5604
5605 qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size);
5606
5607 bdrv_unref(bs);
5608 } else {
5609 error_setg(errp, "Image creation needs a size parameter");
5610 goto out;
5611 }
5612 }
5613
5614 if (!quiet) {
5615 printf("Formatting '%s', fmt=%s ", filename, fmt);
5616 qemu_opts_print(opts);
5617 puts("");
5618 }
5619
5620 ret = bdrv_create(drv, filename, opts, &local_err);
5621
5622 if (ret == -EFBIG) {
5623 /* This is generally a better message than whatever the driver would
5624 * deliver (especially because of the cluster_size_hint), since that
5625 * is most probably not much different from "image too large". */
5626 const char *cluster_size_hint = "";
5627 if (qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 0)) {
5628 cluster_size_hint = " (try using a larger cluster size)";
5629 }
5630 error_setg(errp, "The image size is too large for file format '%s'"
5631 "%s", fmt, cluster_size_hint);
5632 error_free(local_err);
5633 local_err = NULL;
5634 }
5635
5636 out:
5637 qemu_opts_del(opts);
5638 qemu_opts_free(create_opts);
5639 if (local_err) {
5640 error_propagate(errp, local_err);
5641 }
5642 }
5643
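/*
 * Illustrative call (filename hypothetical), roughly what 'qemu-img create'
 * does for a 1 GiB qcow2 image with no backing file and default options:
 *
 *     Error *local_err = NULL;
 *
 *     bdrv_img_create("disk.qcow2", "qcow2", NULL, NULL, NULL,
 *                     (uint64_t)1024 * 1024 * 1024, 0, &local_err, true);
 *     if (local_err) {
 *         ... report error_get_pretty(local_err), then error_free() ...
 *     }
 */
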
5644 AioContext *bdrv_get_aio_context(BlockDriverState *bs)
5645 {
5646 return bs->aio_context;
5647 }
5648
5649 void bdrv_detach_aio_context(BlockDriverState *bs)
5650 {
5651 if (!bs->drv) {
5652 return;
5653 }
5654
5655 if (bs->io_limits_enabled) {
5656 throttle_detach_aio_context(&bs->throttle_state);
5657 }
5658 if (bs->drv->bdrv_detach_aio_context) {
5659 bs->drv->bdrv_detach_aio_context(bs);
5660 }
5661 if (bs->file) {
5662 bdrv_detach_aio_context(bs->file);
5663 }
5664 if (bs->backing_hd) {
5665 bdrv_detach_aio_context(bs->backing_hd);
5666 }
5667
5668 bs->aio_context = NULL;
5669 }
5670
5671 void bdrv_attach_aio_context(BlockDriverState *bs,
5672 AioContext *new_context)
5673 {
5674 if (!bs->drv) {
5675 return;
5676 }
5677
5678 bs->aio_context = new_context;
5679
5680 if (bs->backing_hd) {
5681 bdrv_attach_aio_context(bs->backing_hd, new_context);
5682 }
5683 if (bs->file) {
5684 bdrv_attach_aio_context(bs->file, new_context);
5685 }
5686 if (bs->drv->bdrv_attach_aio_context) {
5687 bs->drv->bdrv_attach_aio_context(bs, new_context);
5688 }
5689 if (bs->io_limits_enabled) {
5690 throttle_attach_aio_context(&bs->throttle_state, new_context);
5691 }
5692 }
5693
5694 void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
5695 {
5696 bdrv_drain_all(); /* ensure there are no in-flight requests */
5697
5698 bdrv_detach_aio_context(bs);
5699
5700 /* This function executes in the old AioContext so acquire the new one in
5701 * case it runs in a different thread.
5702 */
5703 aio_context_acquire(new_context);
5704 bdrv_attach_aio_context(bs, new_context);
5705 aio_context_release(new_context);
5706 }
5707
5708 void bdrv_add_before_write_notifier(BlockDriverState *bs,
5709 NotifierWithReturn *notifier)
5710 {
5711 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
5712 }
5713
5714 int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts)
5715 {
5716 if (!bs->drv->bdrv_amend_options) {
5717 return -ENOTSUP;
5718 }
5719 return bs->drv->bdrv_amend_options(bs, opts);
5720 }
5721
5722 /* This function will be called by the bdrv_recurse_is_first_non_filter method
5723 * of block filters and by bdrv_is_first_non_filter.
5724 * It is used to test whether the given bs is the candidate or to recurse
5725 * further into the node graph.
5726 */
5727 bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
5728 BlockDriverState *candidate)
5729 {
5730 /* return false if basic checks fail */
5731 if (!bs || !bs->drv) {
5732 return false;
5733 }
5734
5735 /* the code reached a non-filter driver -> check whether bs is the same
5736 * as the candidate. This is the recursion's termination condition.
5737 */
5738 if (!bs->drv->is_filter) {
5739 return bs == candidate;
5740 }
5741 /* Down this path the driver is a block filter driver */
5742
5743 /* If the block filter recursion method is defined use it to recurse down
5744 * the node graph.
5745 */
5746 if (bs->drv->bdrv_recurse_is_first_non_filter) {
5747 return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
5748 }
5749
5750 /* the driver is a block filter but does not allow recursion -> return
5751 * false */
5752 return false;
5753 }
5754
5755 /* This function checks whether the candidate is the first non-filter bs down
5756 * its bs chain. Since we don't have pointers to parents, it explores all bs
5757 * chains from the top. Some filters can choose not to pass down the recursion.
5758 */
5759 bool bdrv_is_first_non_filter(BlockDriverState *candidate)
5760 {
5761 BlockDriverState *bs;
5762
5763 /* walk down the bs forest recursively */
5764 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
5765 bool perm;
5766
5767 /* try to recurse in this top level bs */
5768 perm = bdrv_recurse_is_first_non_filter(bs, candidate);
5769
5770 /* candidate is the first non filter */
5771 if (perm) {
5772 return true;
5773 }
5774 }
5775
5776 return false;
5777 }